hexsha
stringlengths 40
40
| size
int64 140
1.03M
| ext
stringclasses 94
values | lang
stringclasses 21
values | max_stars_repo_path
stringlengths 3
663
| max_stars_repo_name
stringlengths 4
120
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
663
| max_issues_repo_name
stringlengths 4
120
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
663
| max_forks_repo_name
stringlengths 4
135
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 140
1.03M
| avg_line_length
float64 2.32
23.1k
| max_line_length
int64 11
938k
| alphanum_fraction
float64 0.01
1
| score
float32 3
4.25
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb38fe28f078cf020523f313e091214c323885ba
| 11,622 |
go
|
Go
|
workflow.go
|
snipem/awgo
|
09f767b094816cd865fa3b396d09023baeaa8ff5
|
[
"MIT"
] | 736 |
2017-09-08T20:39:13.000Z
|
2022-03-31T15:04:35.000Z
|
workflow.go
|
snipem/awgo
|
09f767b094816cd865fa3b396d09023baeaa8ff5
|
[
"MIT"
] | 76 |
2018-01-17T02:29:39.000Z
|
2022-03-19T10:57:01.000Z
|
workflow.go
|
snipem/awgo
|
09f767b094816cd865fa3b396d09023baeaa8ff5
|
[
"MIT"
] | 39 |
2017-09-15T06:49:24.000Z
|
2022-01-25T10:39:50.000Z
|
// Copyright (c) 2018 Dean Jackson <[email protected]>
// MIT Licence - http://opensource.org/licenses/MIT
package aw
import (
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"runtime/debug"
"sync"
"time"
"go.deanishe.net/fuzzy"
"github.com/deanishe/awgo/keychain"
"github.com/deanishe/awgo/util"
)
// AwGoVersion is the semantic version number of this library.
const AwGoVersion = "0.27.1"
// Default Workflow settings. Can be changed with the corresponding Options.
//
// See the Options and Workflow documentation for more information.
const (
DefaultLogPrefix = "\U0001F37A" // Beer mug
DefaultMaxLogSize = 1048576 // 1 MiB
DefaultMaxResults = 0 // No limit, i.e. send all results to Alfred
DefaultSessionName = "AW_SESSION_ID" // Workflow variable session ID is stored in
DefaultMagicPrefix = "workflow:" // Prefix to call "magic" actions
)
var (
startTime time.Time // Time execution started
// The workflow object operated on by top-level functions.
// wf *Workflow
// Flag, as we only want to set up logging once
// TODO: Better, more pluggable logging
logInitialized bool
)
// init records the process start time; finishLog later uses it to report
// the workflow's total run duration.
func init() {
	startTime = time.Now()
}
// commandRunner is a mockable function type for running external commands.
type commandRunner func(name string, arg ...string) error

// runCommand is the default commandRunner: it runs the named command via
// exec.Command and waits for it to complete.
func runCommand(name string, arg ...string) error {
	return exec.Command(name, arg...).Run()
}
// Mockable exit function
var exitFunc = os.Exit
// Workflow provides a consolidated API for building Script Filters.
//
// As a rule, you should create a Workflow in init or main and call your main
// entry-point via Workflow.Run(), which catches panics, and logs & shows the
// error in Alfred.
//
// Script Filter
//
// To generate feedback for a Script Filter, use Workflow.NewItem() to create
// new Items and Workflow.SendFeedback() to send the results to Alfred.
//
// Run Script
//
// Use the TextErrors option, so any rescued panics are printed as text,
// not as JSON.
//
// Use ArgVars to set workflow variables, not Workflow/Feedback.
//
// See the _examples/ subdirectory for some full examples of workflows.
type Workflow struct {
sync.WaitGroup
// Interface to workflow's settings.
// Reads workflow variables by type and saves new values to info.plist.
Config *Config
// Call Alfred's AppleScript functions.
Alfred *Alfred
// Cache is a Cache pointing to the workflow's cache directory.
Cache *Cache
// Data is a Cache pointing to the workflow's data directory.
Data *Cache
// Session is a cache that stores session-scoped data. These data
// persist until the user closes Alfred or runs a different workflow.
Session *Session
// Access macOS Keychain. Passwords are saved using the workflow's
// bundle ID as the service name. Passwords are synced between
// devices if you have iCloud Keychain turned on.
Keychain *keychain.Keychain
// The response that will be sent to Alfred. Workflow provides
// convenience wrapper methods, so you don't normally have to
// interact with this directly.
Feedback *Feedback
// Updater fetches updates for the workflow.
Updater Updater
// magicActions contains the magic actions registered for this workflow.
// Several built-in actions are registered by default. See the docs for
// MagicAction for details.
magicActions *magicActions
logPrefix string // Written to debugger to force a newline
maxLogSize int // Maximum size of log file in bytes
magicPrefix string // Overrides DefaultMagicPrefix for magic actions.
maxResults int // max. results to send to Alfred. 0 means send all.
sortOptions []fuzzy.Option // Options for fuzzy filtering
textErrors bool // Show errors as plaintext, not Alfred JSON
helpURL string // URL to help page (shown if there's an error)
dir string // Directory workflow is in
cacheDir string // Workflow's cache directory
dataDir string // Workflow's data directory
sessionName string // Name of the variable sessionID is stored in
sessionID string // Random session ID
execFunc commandRunner // Run external commands
}
// New creates and initialises a new Workflow, passing any Options to
// Workflow.Configure().
//
// For available options, see the documentation for the Option type and the
// following functions.
//
// IMPORTANT: In order to be able to initialise the Workflow correctly,
// New must be run within a valid Alfred environment; specifically
// *at least* the following environment variables must be set:
//
// alfred_workflow_bundleid
// alfred_workflow_cache
// alfred_workflow_data
//
// If you aren't running from Alfred, or would like to specify a
// custom environment, use NewFromEnv().
func New(opts ...Option) *Workflow { return NewFromEnv(nil, opts...) }
// NewFromEnv creates a new Workflow from the specified Env.
// If env is nil, the system environment is used.
// Panics if the environment fails validateEnv (missing Alfred variables).
func NewFromEnv(env Env, opts ...Option) *Workflow {
	if env == nil {
		env = sysEnv{}
	}
	// Fail fast if the required Alfred environment variables are absent.
	if err := validateEnv(env); err != nil {
		panic(err)
	}
	wf := &Workflow{
		Config:      NewConfig(env),
		Alfred:      NewAlfred(env),
		Feedback:    &Feedback{},
		logPrefix:   DefaultLogPrefix,
		maxLogSize:  DefaultMaxLogSize,
		maxResults:  DefaultMaxResults,
		sessionName: DefaultSessionName,
		sortOptions: []fuzzy.Option{},
		execFunc:    runCommand,
	}
	wf.magicActions = &magicActions{
		actions: map[string]MagicAction{},
		wf:      wf,
	}
	// Register the built-in magic actions before applying caller options.
	wf.Configure(AddMagic(
		logMA{wf},
		cacheMA{wf},
		clearCacheMA{wf},
		dataMA{wf},
		clearDataMA{wf},
		resetMA{wf},
	))
	wf.Configure(opts...)
	// Initialise caches, session and keychain after options have been
	// applied, then route the global logger to the workflow's log file.
	wf.Cache = NewCache(wf.CacheDir())
	wf.Data = NewCache(wf.DataDir())
	wf.Session = NewSession(wf.CacheDir(), wf.SessionID())
	wf.Keychain = keychain.New(wf.BundleID())
	wf.initializeLogging()
	return wf
}
// --------------------------------------------------------------------
// Initialisation methods
// Configure applies one or more Options to Workflow. The returned Option
// reverts all Options passed to Configure, restoring the previous settings.
func (wf *Workflow) Configure(opts ...Option) (previous Option) {
	prev := make(options, len(opts))
	for i, opt := range opts {
		// Applying an Option returns the Option that undoes it.
		prev[i] = opt(wf)
	}
	return prev.apply
}
// initializeLogging ensures future log messages are written to the
// workflow's log file. Only the first call has any effect, because all
// Workflows share Go's global logger.
func (wf *Workflow) initializeLogging() {
	if logInitialized { // All Workflows use the same global logger
		return
	}
	// Rotate log file if it has grown past maxLogSize; the previous log is
	// kept with a ".1" suffix. A failed Stat just means no file to rotate.
	fi, err := os.Stat(wf.LogFile())
	if err == nil {
		if fi.Size() >= int64(wf.maxLogSize) {
			newlog := wf.LogFile() + ".1"
			if err := os.Rename(wf.LogFile(), newlog); err != nil {
				fmt.Fprintf(os.Stderr, "Error rotating log: %v\n", err)
			}
			fmt.Fprintln(os.Stderr, "Rotated log")
		}
	}
	// Open (or create) the log file for appending.
	file, err := os.OpenFile(wf.LogFile(), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
	if err != nil {
		wf.Fatal(fmt.Sprintf("Couldn't open log file %s : %v",
			wf.LogFile(), err))
	}
	// Mirror log output to both the file and stderr.
	multi := io.MultiWriter(file, os.Stderr)
	log.SetOutput(multi)
	// Show filenames and line numbers if Alfred's debugger is open
	if wf.Debug() {
		log.SetFlags(log.Ltime | log.Lshortfile)
	} else {
		log.SetFlags(log.Ltime)
	}
	logInitialized = true
}
// --------------------------------------------------------------------
// API methods
// BundleID returns the workflow's bundle ID. This library will not
// work without a bundle ID, which is set in the workflow's main
// setup sheet in Alfred Preferences.
func (wf *Workflow) BundleID() string {
s := wf.Config.Get(EnvVarBundleID)
if s == "" {
wf.Fatal("No bundle ID set. You *must* set a bundle ID to use AwGo.")
}
return s
}
// Name returns the workflow's name as specified in the workflow's main
// setup sheet in Alfred Preferences.
func (wf *Workflow) Name() string { return wf.Config.Get(EnvVarName) }
// Version returns the workflow's version set in the workflow's configuration
// sheet in Alfred Preferences.
func (wf *Workflow) Version() string { return wf.Config.Get(EnvVarVersion) }
// SessionID returns the session ID for this run of the workflow.
// This is used internally for session-scoped caching.
//
// The session ID is persisted as a workflow variable. It and the session
// persist as long as the user is using the workflow in Alfred. That
// means that the session expires as soon as Alfred closes or the user
// runs a different workflow.
func (wf *Workflow) SessionID() string {
	if wf.sessionID != "" {
		return wf.sessionID
	}
	// Prefer the ID persisted in the environment; otherwise mint a new one.
	if ev := os.Getenv(wf.sessionName); ev != "" {
		wf.sessionID = ev
	} else {
		wf.sessionID = NewSessionID()
	}
	return wf.sessionID
}
// Debug returns true if Alfred's debugger is open.
func (wf *Workflow) Debug() bool { return wf.Config.GetBool(EnvVarDebug) }
// Args returns command-line arguments passed to the program.
// It intercepts "magic args" and runs the corresponding actions, terminating
// the workflow. See MagicAction for full documentation.
func (wf *Workflow) Args() []string {
	// Prefix defaults to DefaultMagicPrefix ("workflow:") unless an Option
	// has set a custom one.
	prefix := DefaultMagicPrefix
	if wf.magicPrefix != "" {
		prefix = wf.magicPrefix
	}
	return wf.magicActions.args(os.Args[1:], prefix)
}
// Run runs your workflow function, catching any errors.
// If the workflow panics, Run rescues it and displays an error message
// in Alfred before terminating the process.
func (wf *Workflow) Run(fn func()) {
	vstr := wf.Name()
	if wf.Version() != "" {
		vstr += "/" + wf.Version()
	}
	vstr = fmt.Sprintf(" %s (AwGo/%v) ", vstr, AwGoVersion)
	// Print right after Alfred's introductory blurb in the debugger.
	// Alfred strips whitespace.
	if wf.logPrefix != "" {
		fmt.Fprintln(os.Stderr, wf.logPrefix)
	}
	log.Println(util.Pad(vstr, "-", 50))
	// Clear expired session data in the background.
	wf.Add(1)
	go func() {
		defer wf.Done()
		if err := wf.Session.Clear(false); err != nil {
			log.Printf("[ERROR] clear session: %v", err)
		}
	}()
	// Catch any `panic` and display an error in Alfred.
	// outputErrorMsg terminates the process (via finishLog/exitFunc).
	defer func() {
		if r := recover(); r != nil {
			log.Println(util.Pad(" FATAL ERROR ", "-", 50))
			log.Printf("%s : %s", r, debug.Stack())
			log.Println(util.Pad(" END STACK TRACE ", "-", 50))
			if err, ok := r.(error); ok {
				wf.outputErrorMsg(err.Error())
				// Return so the same panic is not reported a second time
				// below. os.Exit normally prevents reaching the second
				// call, but exitFunc is mockable and may not terminate.
				return
			}
			wf.outputErrorMsg(fmt.Sprintf("%v", r))
		}
	}()
	// Call the workflow's main function.
	fn()
	wf.Wait()
	finishLog(false)
}
// --------------------------------------------------------------------
// Helper methods
// outputErrorMsg prints and logs error, then exits process
// via finishLog(true).
func (wf *Workflow) outputErrorMsg(msg string) {
	if wf.textErrors {
		// Plaintext output (TextErrors option), e.g. for Run Script actions.
		fmt.Print(msg)
	} else {
		// Replace any queued feedback with a single error Item.
		wf.Feedback.Clear()
		wf.NewItem(msg).Icon(IconError)
		wf.SendFeedback()
	}
	log.Printf("[ERROR] %s", msg)
	// Show help URL or website URL
	if wf.helpURL != "" {
		log.Printf("Get help at %s", wf.helpURL)
	}
	finishLog(true)
}
// awDataDir is the directory for AwGo's own data.
func (wf *Workflow) awDataDir() string {
return util.MustExist(filepath.Join(wf.DataDir(), "_aw"))
}
// awCacheDir is the directory for AwGo's own cache.
func (wf *Workflow) awCacheDir() string {
return util.MustExist(filepath.Join(wf.CacheDir(), "_aw"))
}
// --------------------------------------------------------------------
// Package-level only
// finishLog logs the workflow's run duration; if fatal is true, it then
// terminates the process with exit status 1 (via the mockable exitFunc).
func finishLog(fatal bool) {
	// The duration banner is logged either way; only the exit differs,
	// so the duplicated log.Println in each branch is hoisted out.
	log.Println(util.Pad(fmt.Sprintf(" %v ", time.Since(startTime)), "-", 50))
	if fatal {
		exitFunc(1)
	}
}
| 28.485294 | 82 | 0.677078 | 3.078125 |
64ee2dfa3e513e10fe5bd50d86f4148dd09a08fc
| 4,975 |
rs
|
Rust
|
src/app.rs
|
brunograssano/Conways-Game-of-Life
|
a4252e0f7abc37f41ab5c1127fc21680067f4380
|
[
"MIT"
] | null | null | null |
src/app.rs
|
brunograssano/Conways-Game-of-Life
|
a4252e0f7abc37f41ab5c1127fc21680067f4380
|
[
"MIT"
] | null | null | null |
src/app.rs
|
brunograssano/Conways-Game-of-Life
|
a4252e0f7abc37f41ab5c1127fc21680067f4380
|
[
"MIT"
] | null | null | null |
use crate::board::Board;
extern crate piston_window;
use piston::input::{RenderEvent, UpdateEvent};
use piston_window::*;
const MIN_SCALE_FACTOR : f64 = 6.0;
const MAX_SCALE_FACTOR : f64 = 60.0;
const WIDTH: u32 = 600;
const HEIGHT: u32 = 600;
const PADDING: f64 = 1.0;
const NO_PADDING: f64 = 0.0;
// Pan offset of the visible viewport over the 100x100 board, in cell units
// (draw() adds it to each cell's coordinates before scaling).
#[derive(Copy, Clone)]
struct Offset{
    x:i32,
    y:i32
}
// Top-level application state: the board, view transform (zoom + pan),
// run/pause flag, and the Piston window itself.
pub struct App{
    padding : f64,        // gap drawn between cells (PADDING or NO_PADDING)
    start : bool,         // true while the simulation is advancing
    board : Board,
    scale_factor : f64,   // pixels per cell, i.e. the zoom level
    window_offset : Offset,
    window : PistonWindow,
}
impl App {
    /// Builds the app with a 600x600 non-resizable window, optionally with a
    /// random starting board, colored cells, and padding between cells.
    pub fn new(random_start: bool, colorful_game: bool, padding: bool) -> App {
        App {
            padding: if padding { PADDING } else { NO_PADDING },
            start: false,
            board: Board::new(random_start, colorful_game),
            // Start fully zoomed out. Was a hard-coded 6.0, which silently
            // duplicated MIN_SCALE_FACTOR's value.
            scale_factor: MIN_SCALE_FACTOR,
            window_offset: Offset { x: 0, y: 0 },
            window: WindowSettings::new("Conway's Game of Life!", [WIDTH, HEIGHT])
                .exit_on_esc(true).resizable(false).build().unwrap(),
        }
    }

    /// Renders the current frame: clears to white, then draws every live
    /// cell as a scaled rectangle shifted by the current pan offset.
    fn draw(&mut self, event: Event) {
        // Copy the values the closure needs, so it doesn't borrow `self`
        // while `self.window` is mutably borrowed by draw_2d.
        let scale_factor = self.scale_factor;
        let window_offset = self.window_offset;
        let padding = self.padding;
        let background_color = [1.0; 4]; // opaque white
        let cells = self.board.get_cells();
        self.window.draw_2d(&event, |context, graphics, _device| {
            clear(background_color, graphics);
            for i in 0..100 {
                for j in 0..100 {
                    if cells[i][j].is_alive() {
                        let color: [f32; 4] = cells[i][j].get_color();
                        // [x, y, width, height]; padding shrinks the square
                        // to leave a visible gap between neighbours.
                        let position: [f64; 4] = [
                            scale_factor * (cells[i][j].get_x() + window_offset.x) as f64,
                            scale_factor * (cells[i][j].get_y() + window_offset.y) as f64,
                            scale_factor - padding,
                            scale_factor - padding,
                        ];
                        rectangle(color, position, context.transform, graphics);
                    }
                }
            }
        });
    }

    /// Main event loop: render frames, advance the board while running, and
    /// route mouse clicks, key presses, cursor moves and scroll-wheel zoom.
    pub fn game_loop(&mut self) {
        let mut mouse_pos = (0.0, 0.0);
        let mut events = Events::new(EventSettings::new());
        while let Some(e) = events.next(&mut self.window) {
            if let Some(_r) = e.render_args() {
                self.draw(e);
            } else if let Some(_u) = e.update_args() {
                if self.start {
                    self.board.update_board();
                }
            } else if let Some(button) = e.press_args() {
                self.manage_buttons(&mut mouse_pos, button);
            } else if let Some(cursor) = e.mouse_cursor_args() {
                mouse_pos = (cursor[0], cursor[1]);
            } else if let Some(s) = e.mouse_scroll_args() {
                self.manage_zoom(s);
            }
        }
    }

    /// Handles input: mouse click toggles the cell under the cursor; Enter
    /// starts, P pauses, R resets, and the arrow keys pan while zoomed in.
    fn manage_buttons(&mut self, mouse_pos: &mut (f64, f64), button: Button) {
        match button {
            Button::Mouse(_button) => {
                // Convert window pixels to board coordinates, undoing the
                // zoom (divide by scale) and pan (subtract the offset).
                let (x, y) = mouse_pos;
                self.board.toggle_cell(
                    ((*x / self.scale_factor) as i32 - self.window_offset.x) as usize,
                    ((*y / self.scale_factor) as i32 - self.window_offset.y) as usize,
                );
            }
            Button::Keyboard(button) => {
                if button == Key::P {
                    self.start = false;
                } else if button == Key::Return {
                    self.start = true;
                } else if button == Key::Left {
                    // Pan only while part of the board lies off-screen.
                    if self.window_offset.x < 0 {
                        self.window_offset.x += 1;
                    }
                } else if button == Key::Right {
                    if (WIDTH as i32 / self.scale_factor as i32) < 100
                        && -self.window_offset.x < 100 - (WIDTH as i32 / self.scale_factor as i32)
                    {
                        self.window_offset.x -= 1;
                    }
                } else if button == Key::Up {
                    if self.window_offset.y < 0 {
                        self.window_offset.y += 1;
                    }
                } else if button == Key::Down {
                    if (HEIGHT as i32 / self.scale_factor as i32) < 100
                        && -self.window_offset.y < 100 - (HEIGHT as i32 / self.scale_factor as i32)
                    {
                        self.window_offset.y -= 1;
                    }
                } else if button == Key::R {
                    self.start = false;
                    self.board.restart();
                }
            }
            _ => {}
        }
    }

    /// Adjusts zoom by the vertical scroll amount, clamped to
    /// [MIN_SCALE_FACTOR, MAX_SCALE_FACTOR]. Panning is reset at minimum
    /// zoom because the whole board fits on screen again.
    fn manage_zoom(&mut self, s: [f64; 2]) {
        self.scale_factor += s[1];
        if self.scale_factor < MIN_SCALE_FACTOR {
            self.scale_factor = MIN_SCALE_FACTOR;
            self.window_offset.x = 0;
            self.window_offset.y = 0;
        } else if self.scale_factor > MAX_SCALE_FACTOR {
            self.scale_factor = MAX_SCALE_FACTOR;
        }
    }
}
| 37.126866 | 149 | 0.483417 | 3.140625 |
862ab66606bc3540b7609d3e255fe601f165d12f
| 1,122 |
rs
|
Rust
|
struct-enum-touple/src/main.rs
|
ikuo-suyama/rust-learning-path
|
34b236c9fc2fdfad1ff05be787c04d3d9c63e48d
|
[
"CC0-1.0"
] | null | null | null |
struct-enum-touple/src/main.rs
|
ikuo-suyama/rust-learning-path
|
34b236c9fc2fdfad1ff05be787c04d3d9c63e48d
|
[
"CC0-1.0"
] | null | null | null |
struct-enum-touple/src/main.rs
|
ikuo-suyama/rust-learning-path
|
34b236c9fc2fdfad1ff05be787c04d3d9c63e48d
|
[
"CC0-1.0"
] | null | null | null |
use crate::Transmission::Manual;
// A key press: the key's name and the character it produced.
#[derive(Debug)]
struct KeyPress(String, char);
// A mouse click at pixel coordinates (x, y).
#[derive(Debug)]
struct MouseClick {
    x: i64,
    y: i64,
}
// Events a web page can emit, each variant carrying its own payload.
#[derive(Debug)]
enum WebEvent {
    WELoad(bool),
    WEClick(MouseClick),
    WEKeys(KeyPress),
}
// Declare Car struct to describe vehicle with four named fields
#[derive(Debug)]
struct Car {
    color: String,
    transmission: Transmission,
    convertible: bool,
    mileage: u32,
}
// Declare enum for Car transmission type
#[derive(PartialEq, Debug)]
enum Transmission {
    Manual,
    SemiAuto,
    Automatic,
}
/// Builds a Car with the given color, transmission and convertible flag.
/// Mileage always starts at 1.
fn car_factory(color: String, transmission: Transmission, convertible: bool) -> Car {
    // Field-init shorthand replaces the redundant `field: field` repetition,
    // and the struct literal is returned directly instead of via a binding.
    Car {
        color,
        transmission,
        convertible,
        mileage: 1,
    }
}
fn main() {
    // Build a manual-transmission convertible and pretty-print it.
    let car = car_factory("car".to_string(), Manual, true);
    print!("{:#?}", car);
    // Construct two WebEvent variants and print them (pretty, then compact).
    let we_load = WebEvent::WELoad(true);
    let click = MouseClick {x:100, y:250};
    let we_click = WebEvent::WEClick(click);
    println!("{:#?}\n{:#?}", we_load, we_click);
    println!("{:?}", we_load)
}
| 20.035714 | 85 | 0.627451 | 3.140625 |
43d47be972349329a477e453aee6a19ccd75e907
| 1,857 |
kt
|
Kotlin
|
ceria/15/src/main/kotlin/Solution.kt
|
VisionistInc/advent-of-code-2020
|
002734670384aa02ca122086035f45dfb2ea9949
|
[
"MIT"
] | null | null | null |
ceria/15/src/main/kotlin/Solution.kt
|
VisionistInc/advent-of-code-2020
|
002734670384aa02ca122086035f45dfb2ea9949
|
[
"MIT"
] | null | null | null |
ceria/15/src/main/kotlin/Solution.kt
|
VisionistInc/advent-of-code-2020
|
002734670384aa02ca122086035f45dfb2ea9949
|
[
"MIT"
] | null | null | null |
import java.io.File
import java.util.Collections
// Reads the puzzle input from the file named by the first CLI argument and
// prints the answers for both parts of the memory game (AoC 2020, day 15).
fun main(args : Array<String>) {
    val input = File(args.first()).readLines()
    println("Solution 1: ${solution1(input)}")
    println("Solution 2: ${solution2(input)}")
}
// Part 1: play the memory game for 2020 turns, keeping the full history of
// spoken numbers in a list, and return the 2020th number spoken.
// NOTE(review): Collections.frequency and lastIndexOf scan the whole list
// every turn (O(n^2) overall) — acceptable at 2020 turns, not at part 2 scale.
private fun solution1(input :List<String>) :Int {
    var nums = input.first().split(",").map{ it.toInt() }.toMutableList()
    while (nums.size < 2020) {
        var lastNumSpoken = nums.last()
        // Spoken exactly once so far -> it was new, so the next number is 0.
        if (Collections.frequency(nums, lastNumSpoken) == 1) {
            nums.add(0)
        } else {
            // Otherwise speak the gap between its two most recent mentions.
            var lastSpokenIndex = nums.dropLast(1).lastIndexOf(lastNumSpoken)
            nums.add((nums.size - 1) - lastSpokenIndex)
        }
    }
    return nums.last()
}
// Part 2: the same game for 30,000,000 turns. A full history would be far too
// slow, so only the last two turn indices of each number are kept in a map:
// num -> Pair(firstIndex, secondIndex), where second == -1 means "seen once".
private fun solution2(input :List<String>) :Int {
    var nums = input.first().split(",").map{ it.toInt() }
    var numsMap = mutableMapOf<Int, Pair<Int, Int>>() // num => Pair(firstIndex, secondIndex)
    // Seed the map with the starting numbers, each seen once.
    for (n in nums.indices) {
        numsMap.put(nums[n], Pair(n, -1))
    }
    var turn = nums.size
    var lastSpoken = nums.last()
    while (turn < 30000000) {
        // Decide what number is spoken this turn, based on the last one.
        if (numsMap.containsKey(lastSpoken)) {
            var indexPair = numsMap.get(lastSpoken)
            if (indexPair!!.second == -1 && indexPair.first == turn - 1) {
                // not spoken before
                lastSpoken = 0
            } else {
                // spoken before
                val lastSpokenPair = numsMap.get(lastSpoken)
                lastSpoken = lastSpokenPair!!.second - lastSpokenPair.first
            }
        } else {
            // not spoken before
            lastSpoken = 0
        }
        // Record this turn's number, shifting the previous occurrence into
        // the pair's first slot.
        if (numsMap.containsKey(lastSpoken)) {
            val lastSpokenPair = numsMap.get(lastSpoken)
            val newFirst = if (lastSpokenPair!!.second == -1) lastSpokenPair.first else lastSpokenPair.second
            var updatedPair = Pair(newFirst, turn)
            numsMap.put(lastSpoken, updatedPair)
        } else {
            numsMap.put(lastSpoken, Pair(turn, -1))
        }
        turn++
    }
    return lastSpoken
}
| 28.136364 | 103 | 0.627894 | 3.734375 |
c38bc943b9094e5d9c5728591fdad9f3c80704a6
| 3,382 |
go
|
Go
|
apis/anidb/anidbapi.go
|
chetbishop/golanganidb
|
ed12b991958a65da07a225639b055b5009fed115
|
[
"MIT"
] | null | null | null |
apis/anidb/anidbapi.go
|
chetbishop/golanganidb
|
ed12b991958a65da07a225639b055b5009fed115
|
[
"MIT"
] | null | null | null |
apis/anidb/anidbapi.go
|
chetbishop/golanganidb
|
ed12b991958a65da07a225639b055b5009fed115
|
[
"MIT"
] | null | null | null |
package anidbapi
import (
"encoding/xml"
"github.com/chetbishop/golanganidb/env"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"strings"
"time"
)
// AnimeTitlesCheck checks whether the anime-titles.xml file from AniDB has
// been downloaded in the last 24 hours. It downloads anime-titles.xml if the
// cached file is older than 24 hours or does not exist.
func AnimeTitlesCheck(RunningConfig *env.Config) {
	savelocation := RunningConfig.ProgramConfigPath + "/cache/anime-titles.xml"
	anititles, err := os.Stat(savelocation)
	if err != nil {
		// Stat failed: treat the cache file as missing and fetch it.
		log.Println("anime-titles.dat does not exist ... Downloading")
		AnimeTitlesGet(savelocation)
	} else {
		log.Println("checking to see if 24 hours has passed since last anime list download")
		daypassed := testTime24h(anititles.ModTime())
		if daypassed == true {
			log.Println("Downloading ")
			AnimeTitlesGet(savelocation)
		}
	}
}
// AnimeTitlesGet downloads the anime-titles.xml file from AniDB and saves it
// to savelocation, replacing any existing file. Failures are logged and the
// cached file is left untouched.
func AnimeTitlesGet(savelocation string) {
	log.Println("downloading anime titles")
	res, err := http.Get("http://anidb.net/api/anime-titles.xml.gz")
	if err != nil {
		// Must return here: on transport errors res is nil, so touching
		// res.Body below would panic with a nil-pointer dereference.
		log.Println(err)
		return
	}
	defer res.Body.Close()
	animelist, err := ioutil.ReadAll(res.Body)
	if err != nil {
		// Don't overwrite the existing cache with a partial download.
		log.Println(err)
		return
	}
	os.Remove(savelocation)
	if err := ioutil.WriteFile(savelocation, animelist, 0600); err != nil {
		log.Println(err)
	}
}
//testTime24h tests to see if 24 hours has passed between two times.
func testTime24h(modtime time.Time) bool {
timediff := time.Now().Sub(modtime).Hours()
var result bool
if timediff > 24 {
result = true
} else {
result = false
}
return result
}
// AnimeParse takes the path of an anime-titles.xml file and produces an
// AnimeTitles struct with all relevant information.
func AnimeParse(xmlFilestring string) AnimeTitles {
	xmlFile, err := os.Open(xmlFilestring)
	if err != nil {
		// NOTE(review): the error is only logged; the code continues with a
		// nil file and will return a zero-value AnimeTitles.
		log.Println("Error opening file:", err)
	}
	defer xmlFile.Close()
	log.Println("opened file")
	// Read/unmarshal errors are ignored; on failure the returned struct is
	// simply empty.
	b, _ := ioutil.ReadAll(xmlFile)
	var titles AnimeTitles
	xml.Unmarshal(b, &titles)
	return titles
}
// AnimeSearch will search an AnimeTitles struct for an anime name and language.
// It returns the aid number(s) and anime name(s) of every title that matches
// per AnimeTitleCompare (case-insensitive substring match plus exact language).
func AnimeSearch(animeTitlesStruct AnimeTitles, animename string, animelang string) []AnimeTitleSearchResults {
	var searchresults []AnimeTitleSearchResults
	for _, aid := range animeTitlesStruct.AnimeList {
		for x, title := range aid.Title {
			if AnimeTitleCompare(aid.Title[x], animename, animelang) == true {
				var result AnimeTitleSearchResults
				result.Name = title.Name
				result.Aid = strconv.Itoa(aid.Aid)
				searchresults = append(searchresults, result)
			}
		}
	}
	return searchresults
}
// AnimeTitleCompare reports whether animetitle matches the given name and
// language: the title's name must contain animename (case-insensitively)
// and its language must equal animelang (case-insensitively).
func AnimeTitleCompare(animetitle AnimeTitle, animename string, animelang string) bool {
	name := strings.ToLower(animetitle.Name)
	lang := strings.ToLower(animetitle.Lang)
	return strings.Contains(name, strings.ToLower(animename)) &&
		lang == strings.ToLower(animelang)
}
// AnimeSearchWrapper refreshes the cached anime-titles.xml if needed, parses
// it, and returns the matching search results for the given name and language.
func AnimeSearchWrapper(RunningConfig *env.Config, animename string, animelang string) []AnimeTitleSearchResults {
	AnimeTitlesCheck(RunningConfig)
	animexml := AnimeParse(RunningConfig.ProgramConfigPath + "/cache/anime-titles.xml")
	results := AnimeSearch(animexml, animename, animelang)
	return results
}
| 28.905983 | 114 | 0.74453 | 3.046875 |
b0e459a945defc98ffd1d1da406b299c12a2fd17
| 1,880 |
rs
|
Rust
|
src/util.rs
|
friedm/emote
|
a290863a59ec382093c3c3c580675cc93bc43c17
|
[
"MIT"
] | null | null | null |
src/util.rs
|
friedm/emote
|
a290863a59ec382093c3c3c580675cc93bc43c17
|
[
"MIT"
] | null | null | null |
src/util.rs
|
friedm/emote
|
a290863a59ec382093c3c3c580675cc93bc43c17
|
[
"MIT"
] | null | null | null |
use std::path::PathBuf;
use std::fs::File;
use std::io;
use std::io::prelude::*;
use blake2::{Blake2b, Digest};
// Wraps a file path, optionally paired with a second path at which a Blake2b
// hash of the file's contents is cached for staleness checks.
pub struct FileUtil {
    path: PathBuf,
    hash_path: Option<PathBuf> // None disables hash tracking entirely
}
impl FileUtil {
    /// Creates a FileUtil for `path` with no hash tracking.
    pub fn new(path: PathBuf) -> FileUtil {
        FileUtil {
            path,
            hash_path: None,
        }
    }

    /// Creates a FileUtil for `path` whose content hash is cached at `hash_path`.
    pub fn new_hashed(path: PathBuf, hash_path: PathBuf) -> FileUtil {
        FileUtil {
            path,
            hash_path: Some(hash_path),
        }
    }

    /// Writes `to_write` to the file, then refreshes the cached hash.
    pub fn write<'a>(&self, to_write: &'a str) -> io::Result<()> {
        write(&self.path, to_write)?;
        self.store_hash()
    }

    /// Reads the entire file into a String.
    pub fn read<'a>(&self) -> io::Result<String> {
        read(&self.path)
    }

    /// Stores the file's current hash at the hash path; no-op when hash
    /// tracking is disabled.
    pub fn store_hash(&self) -> io::Result<()> {
        // Borrow the path instead of the original clone().unwrap():
        // no allocation and no panic path.
        match &self.hash_path {
            None => Ok(()),
            Some(hash_path) => {
                let hash = self.get_hash()?;
                write(hash_path, &hash)
            }
        }
    }

    /// Returns true when there is no usable cached hash, or when the file's
    /// current hash differs from the cached one.
    pub fn is_stale(&self) -> io::Result<bool> {
        let hash_path = match &self.hash_path {
            Some(p) if p.is_file() => p,
            _ => return Ok(true),
        };
        let cached_hash = read(hash_path)?;
        Ok(self.get_hash()? != cached_hash)
    }

    /// Computes the Blake2b hash of the file's contents, hex-encoded.
    fn get_hash(&self) -> io::Result<String> {
        let mut f = File::open(&self.path)?;
        let hash = Blake2b::digest_reader(&mut f)?;
        Ok(format!("{:x}", hash))
    }

    /// Whether the target file exists (and is a regular file).
    pub fn exists(&self) -> bool {
        self.path.is_file()
    }
}
/// Writes `s` to the file at `path` (creating or truncating it) and syncs
/// the data to disk before returning.
fn write<'a>(path: &PathBuf, s: &'a str) -> io::Result<()> {
    let mut file = File::create(path)?;
    file.write_all(s.as_bytes())?;
    // sync_all already yields io::Result<()>, so return it directly.
    file.sync_all()
}
/// Reads the entire file at `path` into a String.
fn read(path: &PathBuf) -> io::Result<String> {
    // fs::read_to_string sizes the buffer from file metadata, replacing the
    // manual open + read_to_string dance with identical semantics.
    std::fs::read_to_string(path)
}
| 23.797468 | 70 | 0.529787 | 3.109375 |
7f7a4650098d3b4a7845fa2a4c792997f2bb73cf
| 1,290 |
go
|
Go
|
middleware.go
|
benpate/steranko
|
5a943f55e59dc1739b42bbcbe8e7166d4e7446ef
|
[
"Apache-2.0"
] | null | null | null |
middleware.go
|
benpate/steranko
|
5a943f55e59dc1739b42bbcbe8e7166d4e7446ef
|
[
"Apache-2.0"
] | 16 |
2022-01-21T08:31:19.000Z
|
2022-03-29T08:29:15.000Z
|
middleware.go
|
benpate/steranko
|
5a943f55e59dc1739b42bbcbe8e7166d4e7446ef
|
[
"Apache-2.0"
] | null | null | null |
package steranko
import (
"github.com/labstack/echo/v4"
)
// Middleware wraps the original echo context with the Steranko context.
// Requests that fail ApproveRequest are rejected before the next handler runs.
func (s *Steranko) Middleware(next echo.HandlerFunc) echo.HandlerFunc {
	return func(ctx echo.Context) error {
		// Verify that the request is valid
		if err := s.ApproveRequest(ctx); err != nil {
			return err
		}
		// Hand a Steranko-aware context to the next handler in the chain.
		return next(&Context{
			Context:  ctx,
			steranko: s,
		})
	}
}
// Middleware is a standalone middleware that works for multi-tenant
// environments, where you may need to use a factory to load the specific
// steranko settings depending on the domain being called.
func Middleware(factory Factory) echo.MiddlewareFunc {
	// this is the middleware function
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		// this handles the specific request
		return func(ctx echo.Context) error {
			// find the correct steranko instance for this request's domain
			s, err := factory.Steranko(ctx)
			// handle errors (if necessary)
			if err != nil {
				return err
			}
			// Verify that the request is valid
			if err := s.ApproveRequest(ctx); err != nil {
				return err
			}
			// call the next function in the chain, now
			// using a Steranko context instead of the original
			return next(&Context{
				Context:  ctx,
				steranko: s,
			})
		}
	}
}
| 22.631579 | 73 | 0.690698 | 3.015625 |
0cc52afa5bda9e011a3f67aa407ce29b267af421
| 1,409 |
py
|
Python
|
Unit 7 Objects/LessonQ33.1.py
|
ItsMrTurtle/PythonChris
|
4513dea336e68f48fabf480ad87bc538a323c2cd
|
[
"MIT"
] | null | null | null |
Unit 7 Objects/LessonQ33.1.py
|
ItsMrTurtle/PythonChris
|
4513dea336e68f48fabf480ad87bc538a323c2cd
|
[
"MIT"
] | null | null | null |
Unit 7 Objects/LessonQ33.1.py
|
ItsMrTurtle/PythonChris
|
4513dea336e68f48fabf480ad87bc538a323c2cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 27 18:48:24 2020
@author: Christopher Cheng
"""
class Stack(object):
    """A simple LIFO stack backed by a Python list (top = end of the list)."""

    def __init__(self):
        # Internal storage; index -1 is the top of the stack.
        self.stack = []

    def get_stack_elements(self):
        """Return a shallow copy of the stack contents, bottom-first."""
        return self.stack.copy()

    def add_one(self, item):
        """Push a single item onto the top of the stack."""
        self.stack.append(item)

    def add_many(self, item, n):  # item is still a single value, pushed n times
        """Push the same item onto the stack n times."""
        self.stack.extend(item for _ in range(n))

    def remove_one(self):
        """Pop the top item off the stack."""
        self.stack.pop()

    def remove_many(self, n):
        """Pop the top n items off the stack."""
        for _ in range(n):
            self.stack.pop()

    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.stack)

    def prettyprint(self):
        """Print the stack top-first, one boxed item per line."""
        for thing in reversed(self.stack):
            print("|_", thing, "_|")

    def add_list(self, L):
        """Push every element of L onto the stack, in list order."""
        self.stack.extend(L)

    def __str__(self):
        """Render the stack top-first, one '|_item_|' line per item."""
        return "".join("|_" + str(thing) + "_|\n" for thing in reversed(self.stack))
class Circle(object):
    """A circle that tracks a single mutable radius."""

    def __init__(self):
        # Radius defaults to 0 until set via change_radius().
        self.radius = 0

    def change_radius(self, radius):
        """Set the circle's radius."""
        self.radius = radius

    def get_radius(self):
        """Return the current radius."""
        return self.radius

    def __str__(self):
        # %s formats via str(), matching the original concatenation exactly.
        return "circle: %s" % self.radius
# Demo: push two circles (radius 1 and 2) onto a stack and print it
# top-first via Stack.__str__.
circles = Stack()
one_circle = Circle()
one_circle.change_radius(1)
circles.add_one(one_circle)
two_circle = Circle()
two_circle.change_radius(2)
circles.add_one(two_circle)
print(circles)
| 26.092593 | 71 | 0.581973 | 3.375 |
c6a3217cda54bca18c67d9ba32319378821a2c42
| 3,093 |
kt
|
Kotlin
|
app/src/main/java/org/rdtoolkit/ui/sessions/TestSessionsAdapter.kt
|
eambriza/rd-toolkit
|
333629b3dcecb9060b465074a7455c2e04da3e7a
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/java/org/rdtoolkit/ui/sessions/TestSessionsAdapter.kt
|
eambriza/rd-toolkit
|
333629b3dcecb9060b465074a7455c2e04da3e7a
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/java/org/rdtoolkit/ui/sessions/TestSessionsAdapter.kt
|
eambriza/rd-toolkit
|
333629b3dcecb9060b465074a7455c2e04da3e7a
|
[
"Apache-2.0"
] | null | null | null |
package org.rdtoolkit.ui.sessions
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.TextView
import androidx.recyclerview.widget.DiffUtil
import androidx.recyclerview.widget.ListAdapter
import androidx.recyclerview.widget.RecyclerView
import org.rdtoolkit.R
import org.rdtoolkit.model.session.STATUS
import org.rdtoolkit.model.session.TestReadableState
import org.rdtoolkit.model.session.TestSession
import org.rdtoolkit.ui.sessions.TestSessionsAdapter.TestSessionViewHolder
// RecyclerView adapter that renders each TestSession as a card, computing
// list updates via DiffUtil (DIFF_CALLBACK).
class TestSessionsAdapter(private val sessionsViewModel : SessionsViewModel
) : ListAdapter<TestSession, TestSessionViewHolder>(DIFF_CALLBACK) {
    // Provide a reference to the views for each data item
    // Complex data items may need more than one view per item, and
    // you provide access to all the views for a data item in a view holder.
    class TestSessionViewHolder(val view : View) : RecyclerView.ViewHolder(view)

    // Create new views (invoked by the layout manager)
    override fun onCreateViewHolder(parent: ViewGroup,
                                    viewType: Int): TestSessionViewHolder {
        // Inflate one session card; its fields are bound in onBindViewHolder.
        val frame = LayoutInflater.from(parent.context)
            .inflate(R.layout.card_view_session, parent, false) as View
        return TestSessionViewHolder(frame)
    }

    // Replace the contents of a view (invoked by the layout manager)
    override fun onBindViewHolder(holder: TestSessionViewHolder, position: Int) {
        val context = holder.view.context;
        var session = this.getItem(position)
        // Title: localized template filled with the test profile's readable name.
        holder.view.findViewById<TextView>(R.id.sessions_card_title).text =
            String.format(context.getString(R.string.sessions_card_title_text),
                sessionsViewModel.getDiagnosticsRepo().getTestProfile(session.testProfileId)
                    .readableName())
        holder.view.findViewById<TextView>(R.id.sessions_card_text_flavor_one).text =
            session.configuration.flavorText
        holder.view.findViewById<TextView>(R.id.sessions_card_text_flavor_two).text =
            session.configuration.flavorTextTwo
        // The tag carries the session id so a click handler can identify
        // which session the capture button belongs to.
        var captureButton = holder.view.findViewById<TextView>(R.id.sessions_card_button_capture)
        captureButton.tag = session.sessionId
        // NOTE(review): && binds tighter than ||, so this shows the button for
        // ANY session whose readable state is RESOLVING, even if it is not
        // RUNNING — confirm whether `state == RUNNING` was meant to apply to
        // both readable states.
        if (session.state == STATUS.RUNNING && session.getTestReadableState() == TestReadableState.READABLE || session.getTestReadableState() == TestReadableState.RESOLVING) {
            captureButton.visibility = View.VISIBLE
        } else {
            captureButton.visibility = View.GONE
        }
    }
}
/**
 * Diff rules for [TestSession] rows: identity is determined by sessionId,
 * content equality by the model's own equals().
 */
var DIFF_CALLBACK = object : DiffUtil.ItemCallback<TestSession>() {
    override fun areItemsTheSame(oldItem: TestSession, newItem: TestSession) =
            oldItem.sessionId == newItem.sessionId

    override fun areContentsTheSame(oldItem: TestSession, newItem: TestSession) =
            oldItem == newItem
}
| 42.369863 | 175 | 0.723569 | 3.125 |
0ca59997a346eb090f3898738011c007aac380e0
| 5,550 |
py
|
Python
|
tensorflow/emo_tflearn.py
|
lukewegryn/emo_net
|
5f8f0d047b41a978c2c96e6d0dcd8e8c05d89fe5
|
[
"MIT"
] | 4 |
2017-08-15T06:52:22.000Z
|
2020-02-13T18:18:13.000Z
|
tensorflow/emo_tflearn.py
|
luoda888/emo_net
|
5f8f0d047b41a978c2c96e6d0dcd8e8c05d89fe5
|
[
"MIT"
] | 1 |
2018-06-14T08:42:11.000Z
|
2018-06-14T08:42:11.000Z
|
tensorflow/emo_tflearn.py
|
luoda888/emo_net
|
5f8f0d047b41a978c2c96e6d0dcd8e8c05d89fe5
|
[
"MIT"
] | 6 |
2017-08-04T13:40:35.000Z
|
2021-08-07T11:37:44.000Z
|
# First check the Python version
import sys
if sys.version_info < (3,4):
print('You are running an older version of Python!\n\n' \
'You should consider updating to Python 3.4.0 or ' \
'higher as the libraries built for this course ' \
'have only been tested in Python 3.4 and higher.\n')
print('Try installing the Python 3.5 version of anaconda '
'and then restart `jupyter notebook`:\n' \
'https://www.continuum.io/downloads\n\n')
# Now get necessary libraries
try:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
import IPython.display as ipyd
import csv
import shlex
except ImportError:
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
import IPython.display as ipyd
print('Done!')
# Import Tensorflow
try:
import tensorflow as tf
except ImportError:
print("You do not have tensorflow installed!")
print("Follow the instructions on the following link")
print("to install tensorflow before continuing:")
print("")
print("https://github.com/pkmital/CADL#installation-preliminaries")
try:
from libs import utils, gif, datasets, dataset_utils, vae, dft
except ImportError:
print("Make sure you have started notebook in the same directory" +
" as the provided zip file which includes the 'libs' folder" +
" and the file 'utils.py' inside of it. You will NOT be able"
" to complete this assignment unless you restart jupyter"
" notebook inside the directory created by extracting"
" the zip file or cloning the github repo.")
# We'll tell matplotlib to inline any drawn figures like so:
plt.style.use('ggplot')
def import_csv(filename):
    """Read a FER2013-style CSV and return (labels, images).

    Only rows whose third column equals "Training" are kept. `labels` holds
    the first column and `images` the second (space-separated pixel string),
    in file order.
    """
    training_rows = []
    with open(filename, 'r') as handle:
        for record in csv.reader(handle, delimiter=','):
            if record[2] == "Training":
                training_rows.append((record[0], record[1]))
    labels = [label for label, _ in training_rows]
    images = [image for _, image in training_rows]
    return labels, images
######## Start actual code ##########
# Path to the FER2013 facial-expression dataset (hard-coded to the author's
# machine — adjust before running elsewhere).
data_file = "/Users/luke/ownCloud/deep_learning/course/final_project/fer2013.csv"
labels,images = import_csv(data_file)
assert(len(labels) == len(images))
#read in the images
# Each image arrives as a space-separated string of pixel values; decode each
# into a flat uint8 array (48*48 = 2304 pixels per face).
imgs = []
for image in images:
    imgs.append(np.fromstring(str(image), dtype=np.uint8,sep=' '))
Xs = imgs
ys = labels
Xs = np.array(imgs).astype(np.uint8)
ys = np.array(ys).astype(np.uint8)
#print(ys)
assert(len(Xs) == len(ys))
# Wrap in the course's Dataset helper: one-hot labels, 80/10/10 split.
ds = datasets.Dataset(Xs,ys,one_hot=True,split=[0.8, 0.1, 0.1])
# NOTE(review): this loop only evaluates .shape and discards the result — it
# has no effect and looks like a leftover notebook inspection.
for i in range(0, 10):
    ds.X[i].shape
from tensorflow.python.framework.ops import reset_default_graph
reset_default_graph()
# We'll have placeholders just like before which we'll fill in later.
n_input = 48*48
n_output = 7
# NOTE(review): 28709 is the hard-coded count of "Training" rows in
# fer2013.csv; this reshape breaks if the input file changes.
ds_X_reshape = np.reshape(ds.X,(28709, 48, 48, 1))
ds_valid_images_reshape = np.reshape(ds.valid.images,(ds.valid.images.shape[0],48,48,1))
#https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
#pip install tflearn
import tflearn
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
# Earlier, smaller architecture kept for reference (disabled).
"""
net = tflearn.input_data(shape=[None, 48, 48,1])
net = tflearn.conv_2d(net, 64, 5, activation = 'relu')
net = tflearn.max_pool_2d(net, 3, strides = 2)
net = tflearn.conv_2d(net, 64, 5, activation = 'relu')
net = tflearn.max_pool_2d(net, 3, strides = 2)
net = tflearn.conv_2d(net, 128, 4, activation = 'relu')
net = tflearn.dropout(net, 0.3)
net = tflearn.fully_connected(net, 3072, activation = 'tanh')
net = tflearn.fully_connected(net, 7, activation='softmax')
net = tflearn.regression(net, optimizer='momentum', loss='categorical_crossentropy')
"""
# AlexNet-style network (adapted from the tflearn examples) for 48x48x1
# grayscale input and 7 emotion classes.
network = tflearn.input_data(shape=[None, 48, 48,1])
network = tflearn.conv_2d(network, 96, 11, strides=4, activation='relu')
network = tflearn.max_pool_2d(network, 3, strides=2)
network = tflearn.local_response_normalization(network)
network = tflearn.conv_2d(network, 256, 5, activation='relu')
network = tflearn.max_pool_2d(network, 3, strides=2)
network = tflearn.local_response_normalization(network)
network = tflearn.conv_2d(network, 384, 3, activation='relu')
network = tflearn.conv_2d(network, 384, 3, activation='relu')
network = tflearn.conv_2d(network, 256, 3, activation='relu')
network = tflearn.max_pool_2d(network, 3, strides=2)
network = tflearn.local_response_normalization(network)
network = tflearn.fully_connected(network, 4096, activation='tanh')
network = tflearn.dropout(network, 0.5)
network = tflearn.fully_connected(network, 4096, activation='tanh')
network = tflearn.dropout(network, 0.5)
network = tflearn.fully_connected(network, 7, activation='softmax')
network = tflearn.regression(network, optimizer='momentum',
                             loss='categorical_crossentropy')
# Train with periodic snapshots; at most 3 checkpoint files are kept.
model = tflearn.DNN(network,checkpoint_path='./emo_net/checkpoint_emo_net',max_checkpoints=3)
model.fit(ds_X_reshape, ds.Y, n_epoch=1000, show_metric=True, shuffle=True, validation_set=0.01, batch_size=64, snapshot_step=200, snapshot_epoch=False, run_id='emo_net')
model.save('./emo_net/emotion_recog.tflearn')
# Predictions over the full training set (consumed by the tally below).
pred = model.predict(ds_X_reshape)
def onehot_to_dense(array):
    """Return the index of the largest entry, i.e. the dense class id for a
    one-hot (or probability) vector."""
    return np.argmax(array)
# Tally how many predictions fall into each of the 7 emotion classes.
distribution = {0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0}
for i in range(0,len(pred)):
    distribution[onehot_to_dense(pred[i])] += 1
print(distribution)
| 37.248322 | 170 | 0.718739 | 3.015625 |
8560c4f78f79c610f95d6129c603369ed2d812a5
| 2,981 |
js
|
JavaScript
|
spec/frontend/packages_and_registries/harbor_registry/components/list/harbor_list_row_spec.js
|
BearerPipelineTest/gitlabhq
|
55e4390933a4e16f8936604d763a6353f088c4c1
|
[
"MIT"
] | null | null | null |
spec/frontend/packages_and_registries/harbor_registry/components/list/harbor_list_row_spec.js
|
BearerPipelineTest/gitlabhq
|
55e4390933a4e16f8936604d763a6353f088c4c1
|
[
"MIT"
] | null | null | null |
spec/frontend/packages_and_registries/harbor_registry/components/list/harbor_list_row_spec.js
|
BearerPipelineTest/gitlabhq
|
55e4390933a4e16f8936604d763a6353f088c4c1
|
[
"MIT"
] | null | null | null |
import { shallowMount, RouterLinkStub as RouterLink } from '@vue/test-utils';
import { GlIcon, GlSprintf, GlSkeletonLoader } from '@gitlab/ui';
import HarborListRow from '~/packages_and_registries/harbor_registry/components/list/harbor_list_row.vue';
import ListItem from '~/vue_shared/components/registry/list_item.vue';
import ClipboardButton from '~/vue_shared/components/clipboard_button.vue';
import { harborListResponse } from '../../mock_data';
describe('Harbor List Row', () => {
  let wrapper;
  const [item] = harborListResponse.repositories;

  // Component lookups use findComponent consistently: wrapper.find(Component)
  // is deprecated in Vue Test Utils v1 and removed in v2.
  const findDetailsLink = () => wrapper.findComponent(RouterLink);
  const findClipboardButton = () => wrapper.findComponent(ClipboardButton);
  const findTagsCount = () => wrapper.find('[data-testid="tags-count"]');
  const findSkeletonLoader = () => wrapper.findComponent(GlSkeletonLoader);

  // Mounts the row for `item`, optionally overriding props (e.g. metadataLoading).
  const mountComponent = (props) => {
    wrapper = shallowMount(HarborListRow, {
      stubs: {
        RouterLink,
        GlSprintf,
        ListItem,
      },
      propsData: {
        item,
        ...props,
      },
    });
  };

  afterEach(() => {
    wrapper.destroy();
  });

  describe('image title and path', () => {
    it('contains a link to the details page', () => {
      mountComponent();

      const link = findDetailsLink();

      expect(link.text()).toBe(item.name);
      expect(findDetailsLink().props('to')).toMatchObject({
        name: 'details',
        params: {
          id: item.id,
        },
      });
    });

    it('contains a clipboard button', () => {
      mountComponent();
      const button = findClipboardButton();
      expect(button.exists()).toBe(true);
      expect(button.props('text')).toBe(item.location);
      expect(button.props('title')).toBe(item.location);
    });
  });

  describe('tags count', () => {
    it('exists', () => {
      mountComponent();
      expect(findTagsCount().exists()).toBe(true);
    });

    it('contains a tag icon', () => {
      mountComponent();
      const icon = findTagsCount().findComponent(GlIcon);
      expect(icon.exists()).toBe(true);
      expect(icon.props('name')).toBe('tag');
    });

    describe('loading state', () => {
      it('shows a loader when metadataLoading is true', () => {
        mountComponent({ metadataLoading: true });

        expect(findSkeletonLoader().exists()).toBe(true);
      });

      it('hides the tags count while loading', () => {
        mountComponent({ metadataLoading: true });

        expect(findTagsCount().exists()).toBe(false);
      });
    });

    describe('tags count text', () => {
      it('with one tag in the image', () => {
        mountComponent({ item: { ...item, artifactCount: 1 } });

        expect(findTagsCount().text()).toMatchInterpolatedText('1 Tag');
      });

      it('with more than one tag in the image', () => {
        mountComponent({ item: { ...item, artifactCount: 3 } });

        expect(findTagsCount().text()).toMatchInterpolatedText('3 Tags');
      });
    });
  });
});
| 29.81 | 106 | 0.598457 | 3.046875 |
0ba6ccc9869c36c54441983043be28e4255463c3
| 3,046 |
py
|
Python
|
models/ffn_ace.py
|
MilesQLi/Theano-Lights
|
59864f4a1b089c04ff0403a6036ee052078fcd7d
|
[
"MIT"
] | 313 |
2015-03-23T15:19:58.000Z
|
2021-05-17T15:40:09.000Z
|
models/ffn_ace.py
|
MilesQLi/Theano-Lights
|
59864f4a1b089c04ff0403a6036ee052078fcd7d
|
[
"MIT"
] | 2 |
2015-08-31T06:35:31.000Z
|
2016-04-04T11:55:43.000Z
|
models/ffn_ace.py
|
Ivaylo-Popov/Theano-Lights
|
3c9de807e42e3875b1e3f4c1e8d02ad1242ddc94
|
[
"MIT"
] | 68 |
2015-05-16T03:26:17.000Z
|
2018-08-19T08:40:18.000Z
|
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.shared_randomstreams import RandomStreams
import numpy as np
from toolbox import *
from modelbase import *
import itertools
class FFN_ace(ModelSLBase):
"""
Auto-classifier-encoder (Georgiev, 2015)
"""
def save(self):
if not os.path.exists('savedmodels\\'):
os.makedirs('savedmodels\\')
self.params.save(self.filename)
def __init__(self, data, hp):
super(FFN_ace, self).__init__(self.__class__.__name__, data, hp)
# batch_size: 10000; learning_rate = 0.0015; lr_halflife = 200, 500
self.epsilon = 0.0001
self.params = Parameters()
self.shared_vars = Parameters()
n_x = self.data['n_x']
n_y = self.data['n_y']
n_h1 = 1200
n_h2 = 1000
n_h3 = 800
n_h4 = 800
scale = hp.init_scale
if hp.load_model and os.path.isfile(self.filename):
self.params.load(self.filename)
else:
with self.params:
w_h = shared_normal((n_x, n_h1), scale=scale)
b_h = shared_zeros((n_h1,))
w_h2 = shared_normal((n_h1, n_h2), scale=scale)
b_h2 = shared_zeros((n_h2,))
w_h3 = shared_normal((n_h2, n_h3), scale=scale)
b_h3 = shared_zeros((n_h3,))
w_h4 = shared_normal((n_h3, n_h4), scale=scale)
b_h4 = shared_zeros((n_h4,))
w_o = shared_normal((n_h4, n_y), scale=scale)
def batch_norm(h):
m = T.mean(h, axis=0, keepdims=True)
std = T.sqrt(T.var(h, axis=0, keepdims=True) + self.epsilon)
h = (h - m) / std
return h
def model(X, params, p_drop_input, p_drop_hidden):
X_noise = X + gaussian(X.shape, p_drop_input)
h = batch_norm(dropout(rectify(T.dot(X_noise, params.w_h) + params.b_h), p_drop_hidden))
# Dual reconstruction error
phx = T.nnet.sigmoid(T.dot(h, T.dot(h.T, X_noise)) / self.hp.batch_size)
log_phx = T.nnet.binary_crossentropy(phx, X_noise).sum()
h2 = dropout(rectify(T.dot(h, params.w_h2) + params.b_h2), p_drop_hidden)
h3 = batch_norm(dropout(rectify(T.dot(h2, params.w_h3) + params.b_h3), p_drop_hidden))
h4 = dropout(rectify(T.dot(h3, params.w_h4) + params.b_h4), p_drop_hidden)
py_x = softmax(T.dot(h4, params.w_o))
return [py_x, log_phx]
noise_py_x, cost_recon = model(self.X, self.params, 0.2, 0.5)
cost_y2 = -T.sum(self.Y * T.log(noise_py_x))
cost = cost_y2 + cost_recon
pyx, _ = model(self.X, self.params, 0., 0.)
map_pyx = T.argmax(pyx, axis=1)
error_map_pyx = T.sum(T.neq(map_pyx, T.argmax(self.Y, axis=1)))
self.compile(cost, error_map_pyx)
| 34.613636 | 97 | 0.591924 | 3.03125 |
c62f2febf46c940ac5e29075f266c58a488e80db
| 984 |
rb
|
Ruby
|
lib/sigimera/version.rb
|
csae1152/sigimera-ruby-client
|
76a28739503ec94f16ce81417ee6e4b483accea0
|
[
"MIT"
] | 1 |
2015-11-07T11:22:01.000Z
|
2015-11-07T11:22:01.000Z
|
lib/sigimera/version.rb
|
csae1152/sigimera-ruby-client
|
76a28739503ec94f16ce81417ee6e4b483accea0
|
[
"MIT"
] | null | null | null |
lib/sigimera/version.rb
|
csae1152/sigimera-ruby-client
|
76a28739503ec94f16ce81417ee6e4b483accea0
|
[
"MIT"
] | null | null | null |
# encoding: UTF-8
module Sigimera
  # Encapsulates the gem's version number.
  module VERSION
    # Bumped only when backwards incompatibility is introduced.
    MAJOR = 0
    # Bumped when new, backwards-compatible features are added.
    MINOR = 1
    # Bumped for bug fixes.
    TINY = 1
    # Optional pre-release marker (beta, alpha, rcX, ...); nil for releases.
    EXTRA = nil

    # Dot-separated version string, with EXTRA appended when present.
    STRING = if EXTRA
               "#{MAJOR}.#{MINOR}.#{TINY}.#{EXTRA}"
             else
               "#{MAJOR}.#{MINOR}.#{TINY}"
             end

    ##
    # @return [String]
    def self.to_s
      STRING
    end

    ##
    # @return [String]
    def self.to_str
      STRING
    end

    ##
    # @return [Array(Integer, Integer, Integer)]
    def self.to_a
      [MAJOR, MINOR, TINY]
    end
  end
end
| 24.6 | 76 | 0.544715 | 3.109375 |
dd31c452122f2ae3be5d3fd09cc52f5d55f9bbc1
| 2,524 |
go
|
Go
|
exchange_api/tool/huobi.go
|
GitTsewell/exchange_data
|
5261a027a00c6bd54648486ecb73f806be30a910
|
[
"MIT"
] | 54 |
2019-11-21T10:11:08.000Z
|
2022-03-24T11:18:25.000Z
|
exchange_api/tool/huobi.go
|
hsyf/exchange_data
|
5261a027a00c6bd54648486ecb73f806be30a910
|
[
"MIT"
] | 5 |
2019-11-22T03:58:59.000Z
|
2022-03-02T06:22:52.000Z
|
exchange_api/tool/huobi.go
|
hsyf/exchange_data
|
5261a027a00c6bd54648486ecb73f806be30a910
|
[
"MIT"
] | 26 |
2019-12-17T02:29:53.000Z
|
2022-03-24T11:18:31.000Z
|
package tool
import (
"encoding/json"
"exchange_api/config"
"exchange_api/db"
"exchange_api/model"
"fmt"
"github.com/go-redis/redis"
"strconv"
"strings"
"sync"
"time"
)
// HuobiWs wraps a websocket connection to a Huobi endpoint and persists
// parsed order-book depth snapshots into Redis.
type HuobiWs struct {
	*Wsbuilder
	sync.Once                        // guards one-time connection setup in HuobiConnect
	wsConn        *WsConn            // live websocket connection, set by HuobiConnect
	redis         *redis.Client      // destination for depth snapshots
	depthData     *model.HuobiDepth  // scratch struct reused for each decoded frame
	depthCallback func([]byte)       // invoked with every decoded (gunzipped) frame
}
// NewHuobiWs builds a HuobiWs for the given websocket url with a Redis client
// and a 30-second liveness check interval. The connection itself is opened
// lazily by HuobiConnect.
func NewHuobiWs(url string) *HuobiWs {
	huobiWs := &HuobiWs{Wsbuilder:NewBuilder()}
	huobiWs.redis = db.InitRedis()
	huobiWs.Wsbuilder.SetUrl(url)                          // target endpoint
	huobiWs.Wsbuilder.SetCheckStatusTime(time.Second * 30) // liveness-check interval
	huobiWs.WsConfig.Handle = huobiWs.handle
	return huobiWs
}
// HuobiSetCallback registers the function that receives every decoded frame.
func (hbws *HuobiWs) HuobiSetCallback(f func([]byte)) {
	hbws.depthCallback = f
}
// HuobiConnect dials the websocket and starts receiving messages exactly once;
// subsequent calls are no-ops thanks to sync.Once.
func (hbws *HuobiWs) HuobiConnect() {
	hbws.Once.Do(func() {
		hbws.wsConn = hbws.Wsbuilder.Build()
		hbws.wsConn.ReceiveMessage()
	})
}
// HuobiSubscribeDepth connects (if not yet connected) and sends the given
// subscription message. Subscription errors are deliberately discarded.
func (hbws *HuobiWs) HuobiSubscribeDepth(msg string) {
	hbws.HuobiConnect()
	_ = hbws.wsConn.Subscribe(msg)
}
// handle is the raw websocket message callback: it gunzips the frame, answers
// Huobi's heartbeat pings, and forwards data frames to the registered depth
// callback.
//
// Fixes over the previous version: a frame that fails to decode is no longer
// forwarded (the callback used to receive a nil/garbage payload), and
// heartbeat ping frames — which carry no market data — are no longer passed
// to the depth callback after being answered.
func (hbws *HuobiWs) handle(msg []byte) {
	text, err := GzipDecodeHuobi(msg)
	if err != nil {
		// No usable payload; log and drop the frame.
		fmt.Println(err)
		return
	}
	if strings.Contains(string(text), "ping") {
		// Huobi expects {"pong": <unix-seconds>} in reply to its heartbeat.
		str := strconv.FormatInt(time.Now().Unix(), 10)
		pong := `{"pong": ` + str + `}`
		_ = hbws.wsConn.SendMessage([]byte(pong))
		hbws.wsConn.UpdateActiveTime()
		return
	}
	hbws.depthCallback(text)
}
// HuobiDepth parses a decoded frame into depthData and persists it to Redis,
// but only when the frame actually carries order-book levels (non-empty bids).
func (hbws *HuobiWs) HuobiDepth (msg []byte) {
	err := json.Unmarshal(msg,&hbws.depthData)
	if err == nil && len(hbws.depthData.Tick.Bids) > 0 {
		hbws.depthToDb()
	}
}
// depthToDb flattens the current depthData into a Redis hash keyed by market
// type (future vs spot) and symbol, with a 5-minute TTL.
func (hbws *HuobiWs) depthToDb () {
	// Raw order book, stored verbatim under "origin".
	origin := map[string]interface{}{
		"sell":hbws.depthData.Tick.Asks,
		"buy":hbws.depthData.Tick.Bids,
	}
	st ,_ := json.Marshal(origin)
	// NOTE(review): average_buy is the best ask, average_sell the best bid,
	// and average_price simply mirrors the best ask — confirm these field
	// semantics against the consumers of this hash.
	rst := map[string]interface{}{
		"average_buy"   : hbws.depthData.Tick.Asks[0][0],
		"average_sell"  : hbws.depthData.Tick.Bids[0][0],
		"average_price" : hbws.depthData.Tick.Asks[0][0],
		"microtime"     : time.Now().UnixNano() / 1e6, // milliseconds
		"origin"        : st,
	}
	var key string
	chs := strings.Split(hbws.depthData.Ch,".")
	// Symbols containing "_" (e.g. contract codes) are treated as futures;
	// everything else is spot.
	if strings.Contains(chs[1],"_") {
		key = fmt.Sprintf("huobi:depth:%s:%s",config.FUTURE,chs[1])
	}else {
		key = fmt.Sprintf("huobi:depth:%s:%s",config.SPOT,chs[1])
	}
	hbws.redis.HMSet(key,rst)
	hbws.redis.Expire(key,time.Minute * 5)
}
// HuobiDepthTmp records the symbol of every depth-bearing frame in the
// "tmp:depth:huobi" Redis set — presumably used to track which channels are
// actively delivering data (TODO confirm against the consumer).
func (hbws *HuobiWs) HuobiDepthTmp (msg []byte) {
	err := json.Unmarshal(msg,&hbws.depthData)
	if err == nil && len(hbws.depthData.Tick.Bids) > 0 {
		chs := strings.Split(hbws.depthData.Ch,".")
		if res,_ := hbws.redis.SIsMember("tmp:depth:huobi",chs[1]).Result(); !res {
			hbws.redis.SAdd("tmp:depth:huobi",chs[1])
		}
	}
}
| 22.945455 | 77 | 0.673138 | 3.0625 |
9ba75eb0c74e37bc2e8d2cb150dc8c71bb63409e
| 1,196 |
js
|
JavaScript
|
functions/mocks/firebase.mock.js
|
Danielos15/mdb_api
|
3c1b836f97951a367d9665b46b37f30f728beb08
|
[
"MIT"
] | null | null | null |
functions/mocks/firebase.mock.js
|
Danielos15/mdb_api
|
3c1b836f97951a367d9665b46b37f30f728beb08
|
[
"MIT"
] | null | null | null |
functions/mocks/firebase.mock.js
|
Danielos15/mdb_api
|
3c1b836f97951a367d9665b46b37f30f728beb08
|
[
"MIT"
] | null | null | null |
let admin = {
database: () => {
return databaseStub;
},
auth: () => {
return user;
}
};
let user = {
getUser: () => {
return new Promise((resolve, reject) => {
resolve({
id: 550,
displayName: "Test User"
})
})
}
};
let refStub = {
child: (id) => {
this.id = id;
return refStub;
},
once: (value, callback) => {
callback(snapshot);
},
on: (value, callback) => {
callback(snapshot);
},
orderByChild: () => {
return refStub;
},
equalTo: () => {
return refStub;
},
update: () => {
return refStub;
},
push: () => {
return refStub;
},
set: () => {
return setObject();
},
};
let databaseStub = {
path: "",
child: refStub,
ref: function(path) {
this.child.path = path;
this.path = path;
return this.child;
}
};
let setObject = () => {
return new Promise((resolve, reject) => {
resolve({
id: 550,
displayName: "Test User"
})
});
};
let snapshot = {
numChildren: () => {
return 1;
},
forEach: (callback) => {
callback(item);
},
val: () => {
return value;
}
};
let item = {
val: () => {
return value;
}
};
let value = {
rating: 1.2,
movieId: 550,
tvId: 1418,
uid: 1000291
};
module.exports = admin;
| 13.288889 | 43 | 0.533445 | 3.046875 |
7c32dedc3ab78f468bbecad2ceaeeae1e845f80c
| 1,443 |
rs
|
Rust
|
src/utils/input.rs
|
shimarulin/archi
|
e11e377ae12e03c1fc28109e67dac238c0c6fa4c
|
[
"MIT"
] | null | null | null |
src/utils/input.rs
|
shimarulin/archi
|
e11e377ae12e03c1fc28109e67dac238c0c6fa4c
|
[
"MIT"
] | 1 |
2021-08-17T15:26:23.000Z
|
2021-08-17T23:44:39.000Z
|
src/utils/input.rs
|
shimarulin/archi
|
e11e377ae12e03c1fc28109e67dac238c0c6fa4c
|
[
"MIT"
] | null | null | null |
use console::style;
use inquire::error::InquireResult;
use inquire::list_option::ListOption;
pub fn exit_by_user() {
let top_line = style("\n ╔══════════════════════════════════════════════════════════╗").cyan();
let bottom_line =
style("\n ╚══════════════════════════════════════════════════════════╝").cyan();
let cancel_line =
style("\n ║ Installation canceled by user ║").cyan();
println!("{}{}{}", top_line, cancel_line, bottom_line);
std::process::exit(0);
}
/// Unwrap a text-prompt answer, or abort the whole installer when the user
/// cancelled the prompt (cancellation surfaces as the `Err` variant).
///
/// Idiom fix: bind the value in the `Ok` pattern instead of matching on
/// `Ok(_)` and calling `.unwrap()` on the already-consumed result.
pub fn answer_string_handler(answer: InquireResult<String>) -> String {
    match answer {
        Ok(value) => value,
        Err(_) => {
            // exit_by_user() terminates the process; the value below is never
            // observed and exists only to satisfy the return type.
            exit_by_user();
            String::new()
        }
    }
}
/// Extract the selected option's value from a list prompt, or abort the
/// installer when the user cancelled the prompt.
///
/// Idiom fix: bind the `ListOption` in the pattern rather than matching on
/// `Ok(_)` and re-unwrapping; `option.value` is already a `String`, so the
/// previous `String::from(...)` round-trip is dropped too.
pub fn answer_option_handler(answer: InquireResult<ListOption<String>>) -> String {
    match answer {
        Ok(option) => option.value,
        Err(_) => {
            // exit_by_user() terminates the process; this value is never used.
            exit_by_user();
            String::new()
        }
    }
}
/// Extract the selected option's index from a list prompt, or abort the
/// installer when the user cancelled the prompt.
///
/// Idiom fix: bind the `ListOption` in the pattern instead of `Ok(_)` +
/// `.unwrap()`.
pub fn answer_option_index_handler(answer: InquireResult<ListOption<String>>) -> usize {
    match answer {
        Ok(option) => option.index,
        Err(_) => {
            // exit_by_user() terminates the process; this value is never used.
            exit_by_user();
            0
        }
    }
}
/// Unwrap a yes/no prompt answer, or abort the installer when the user
/// cancelled the prompt.
///
/// Idiom fix: bind the value in the `Ok` pattern instead of `Ok(_)` +
/// `.unwrap()`.
pub fn answer_boolean_handler(answer: InquireResult<bool>) -> bool {
    match answer {
        Ok(value) => value,
        Err(_) => {
            // exit_by_user() terminates the process; this value is never used.
            exit_by_user();
            false
        }
    }
}
| 26.236364 | 100 | 0.486486 | 3.140625 |
0c97d3a32db9b335bffe637b1d619f3774455b40
| 2,930 |
py
|
Python
|
createExeWindows.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
createExeWindows.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
createExeWindows.py
|
intel/RAAD
|
9cca9e72ff61658191e30756bb260173d5600102
|
[
"Intel",
"Apache-2.0"
] | null | null | null |
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: Daniel Garces, Joseph Tarango
# *****************************************************************************/
import os, datetime, traceback, optparse, shutil
import PyInstaller.__main__
def main():
##############################################
# Main function, Options
##############################################
parser = optparse.OptionParser()
parser.add_option("--installer", dest='installer', action='store_true',
default=False, help='Boolean to create installer executable. If false, GUI executable is created '
'instead')
(options, args) = parser.parse_args()
if options.installer is True:
print("Generating Installer...")
pwd = os.getcwd()
dirPath = os.path.join(pwd, 'data/installer')
if os.path.exists(dirPath) and os.path.isdir(dirPath):
print("Previous executable exists. Removing it before generating the new one")
shutil.rmtree(dirPath)
PyInstaller.__main__.run([
'src/installer.py',
'--onefile',
'--clean',
'--debug=all',
# '--windowed',
'--key=RAADEngineTesting123456',
'--workpath=data/installer/temp',
'--distpath=data/installer',
'--specpath=data/installer'
])
else:
print("Generating main GUI...")
pwd = os.getcwd()
dirPath = os.path.join(pwd, 'data/binary')
if os.path.exists(dirPath) and os.path.isdir(dirPath):
print("Previous executable exists. Removing it before generating the new one")
shutil.rmtree(dirPath)
logoLocation = '{0}/src/software/{1}'.format(os.getcwd(), 'Intel_IntelligentSystems.png')
newLocation = '{0}/data/binary/software'.format(os.getcwd())
PyInstaller.__main__.run([
'src/software/gui.py',
'--onefile',
'--clean',
'--debug=all',
# '--windowed',
'--add-data=' + logoLocation + os.pathsep + ".",
'--key=RAADEngineTesting123456',
'--workpath=data/binary/temp',
'--distpath=data/binary',
'--specpath=data/binary',
])
os.mkdir(newLocation)
shutil.copyfile(logoLocation, newLocation + '/Intel_IntelligentSystems.png')
if __name__ == '__main__':
"""Performs execution delta of the process."""
pStart = datetime.datetime.now()
try:
main()
except Exception as errorMain:
print("Fail End Process: {0}".format(errorMain))
traceback.print_exc()
qStop = datetime.datetime.now()
print("Execution time: " + str(qStop - pStart))
| 39.594595 | 121 | 0.509556 | 3.21875 |
1661c8ce2aee04964a72118d5e4f853629801d5b
| 1,728 |
c
|
C
|
badger/tests/crc_selfcheck.c
|
FelixVi/Bedrock
|
82072341902048e5b37022512909d209efb243d6
|
[
"RSA-MD"
] | 17 |
2019-09-29T14:52:18.000Z
|
2022-03-28T21:16:25.000Z
|
badger/tests/crc_selfcheck.c
|
FelixVi/Bedrock
|
82072341902048e5b37022512909d209efb243d6
|
[
"RSA-MD"
] | null | null | null |
badger/tests/crc_selfcheck.c
|
FelixVi/Bedrock
|
82072341902048e5b37022512909d209efb243d6
|
[
"RSA-MD"
] | 4 |
2019-12-04T17:30:38.000Z
|
2021-11-01T01:52:13.000Z
|
/* crc-selfcheck.c */
/* Larry Doolittle, LBNL */
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "crc32.h"
#define ETH_MAXLEN 1500 /* maximum line length */
/* Packet buffer with cursor/length bookkeeping.
 * NOTE(review): unused in this file — possibly kept for parity with other
 * Badger test harnesses; confirm before removing. */
struct pbuf {
	char buf[ETH_MAXLEN+12];
	int cur, len;
};
/* Validate an Ethernet frame: skip the 0x55 preamble, require the 0xd5 SFD,
 * verify the trailing CRC32 (via check_crc32 from crc32.h), then regenerate
 * the CRC with append_crc32 and compare against the one read from the file.
 * Returns 0 on PASS, 2 on any failure. */
static int ethernet_check(char *packet, unsigned len)
{
	char *p=packet;
	unsigned int nout, u;
	int mismatch=0;
	char given_crc[4];
	printf("scanning preamble");
	while (*p==0x55 && p<(packet+len)) { printf("."); p++; }
	printf("\n");
	if ((*p & 0xff) != 0xd5) {
		printf("missing SFD (%2.2x %2.2x)\n",
			packet[0], *p);
		return 2;
	}
	/* Payload length: everything after the SFD, minus the 4 CRC octets. */
	nout=packet+len-(p+5);
	if ((p+5)>(packet+len) || check_crc32(p+1, nout)==0) {
		printf("CRC check failed, packet length=%u\n",nout);
		return 2;
	}
	/* Overwrite CRC given in file */
	/* NOTE(review): the given CRC is read from packet[nout+u], but the frame
	 * CRC lives at (p+1)+nout — these only coincide when there is no
	 * preamble before the SFD. Confirm whether this offset is intended. */
	for (u=0; u<4; u++) given_crc[u]=packet[nout+u];
	append_crc32(p+1,nout);
	for (u=0; u<4; u++) mismatch |= given_crc[u]!=packet[nout+u];
	if (mismatch) {
		printf("generated CRC mismatch\n");
		return 2;
	}
	printf("PASS packet length=%u\n", nout);
	return 0;
}
/* Crude analog of Verilog's $readmemh: read whitespace-separated hex octets
 * from f into buff (at most avail of them) and return the count read. */
static unsigned int readmemh(FILE *f, char *buff, size_t avail)
{
	size_t count = 0;
	while (count < avail) {
		unsigned int value;
		if (fscanf(f, "%x", &value) != 1)
			break;
		buff[count++] = value;
	}
	return count;
}
/* Read up to ETH_MAXLEN hex octets from the file named in argv[1] (or stdin
 * when no argument is given) and run the Ethernet/CRC self-check on them.
 * Exit status: 0 on PASS, 1 on usage/open error, 2 on check failure. */
int main(int argc, char *argv[])
{
	FILE *f;
	unsigned int l;
	char buff[ETH_MAXLEN];
	const char *fname;
	if (argc > 2) {
		fprintf(stderr,"Usage\n");
		return 1;
	}
	if (argc == 2) {
		fname = argv[1];
		f = fopen(fname,"r");
		if (f==NULL) {
			perror(fname);
			return 1;
		}
	} else {
		f = stdin;
		fname = "(stdin)";
	}
	l = readmemh(f, buff, ETH_MAXLEN);
	printf("Read %u octets from file %s\n",l,fname);
	return ethernet_check(buff,l);
}
| 19.636364 | 63 | 0.605903 | 3.109375 |
bd566ddbb44a57719cb5cdea82dc1b763c388789
| 5,070 |
rs
|
Rust
|
rust/loqui_connection/src/framed_io.rs
|
NorthIsUp/loqui
|
8d394a7951fd3a82d109becc1aebbd9e7ccc894a
|
[
"MIT"
] | 147 |
2017-10-02T18:16:52.000Z
|
2020-03-16T03:26:40.000Z
|
rust/loqui_connection/src/framed_io.rs
|
NorthIsUp/loqui
|
8d394a7951fd3a82d109becc1aebbd9e7ccc894a
|
[
"MIT"
] | 14 |
2017-09-19T16:13:32.000Z
|
2019-06-25T21:18:47.000Z
|
rust/loqui_connection/src/framed_io.rs
|
NorthIsUp/loqui
|
8d394a7951fd3a82d109becc1aebbd9e7ccc894a
|
[
"MIT"
] | 25 |
2017-10-01T20:10:31.000Z
|
2020-03-19T14:00:20.000Z
|
use crate::error::{LoquiError, LoquiErrorCode};
use bytesize::ByteSize;
use failure::Error;
use futures::sink::SinkExt;
use futures::stream::StreamExt;
use futures::stream::{SplitSink, SplitStream};
use loqui_protocol::{
codec::Codec,
error::ProtocolError,
frames::{GoAway, LoquiFrame},
};
use std::net::Shutdown;
use tokio::net::TcpStream;
use tokio_util::codec::Framed;
/// Used to read frames off the tcp socket.
pub type Reader = SplitStream<Framed<TcpStream, Codec>>;
/// Used to write frames to the tcp socket.
pub struct Writer {
inner: SplitSink<Framed<TcpStream, Codec>, LoquiFrame>,
/// If true, send a go away when the socket is closed.
send_go_away: bool,
}
impl Writer {
/// Create a new `Writer` that can write frames to a tcp socket.
///
/// # Arguments
///
/// * `writer` - framed sink
/// * `send_go_away` - whether or not to send a go away when the connection closes
pub fn new(
writer: SplitSink<Framed<TcpStream, Codec>, LoquiFrame>,
send_go_away: bool,
) -> Self {
Self {
inner: writer,
send_go_away,
}
}
/// Tries to write a `LoquiFrame` to the socket. Returns an error if the socket has closed.
pub async fn write<F: Into<LoquiFrame>>(mut self, frame: F) -> Result<Self, LoquiError> {
match self.inner.send(frame.into()).await {
Ok(()) => Ok(self),
Err(_error) => Err(LoquiError::TcpStreamClosed),
}
}
/// Gracefully closes the socket. Optionally sends a `GoAway` frame before closing.
pub async fn close(mut self, error: Option<&Error>, reader: Option<Reader>) {
if !self.send_go_away {
debug!("Closing. Not sending GoAway. error={:?}", error);
return;
}
let go_away = GoAway {
flags: 0,
code: go_away_code(error) as u16,
payload: vec![],
};
debug!("Closing. Sending GoAway. go_away={:?}", go_away);
match self.inner.send(go_away.into()).await {
Ok(()) => {
if let Some(reader) = reader {
if let Ok(tcp_stream) =
self.inner.reunite(reader).map(|framed| framed.into_inner())
{
let _result = tcp_stream.shutdown(Shutdown::Both);
}
}
}
Err(_error) => {
error!("Error when writing close frame. error={:?}", error);
}
}
}
}
/// Determines the go away code that should be sent.
///
/// # Arguments
///
/// * `error` - optional error to determine the code from
fn go_away_code(error: Option<&Error>) -> LoquiErrorCode {
match error {
None => LoquiErrorCode::Normal,
Some(error) => {
if let Some(protocol_error) = error.downcast_ref::<ProtocolError>() {
let error_code = match protocol_error {
ProtocolError::InvalidOpcode { .. } => LoquiErrorCode::InvalidOpcode,
ProtocolError::PayloadTooLarge { .. }
| ProtocolError::InvalidPayload { .. } => LoquiErrorCode::InternalServerError,
};
return error_code;
}
if let Some(loqui_error) = error.downcast_ref::<LoquiError>() {
return loqui_error.code();
}
LoquiErrorCode::InternalServerError
}
}
}
pub struct ReaderWriter {
pub reader: Reader,
writer: Writer,
}
impl ReaderWriter {
    /// Create a new `ReaderWriter` that can read and write frames to a tcp socket.
    ///
    /// # Arguments
    ///
    /// * `tcp_stream` - raw tcp socket
    /// * `max_payload_size` - the maximum bytes a frame payload can be
    /// * `send_go_away` - whether or not to send a go away when the connection closes
    pub fn new(tcp_stream: TcpStream, max_payload_size: ByteSize, send_go_away: bool) -> Self {
        // Frame the raw socket with the loqui codec, then split it into its
        // read and write halves.
        let (sink, reader) = Framed::new(tcp_stream, Codec::new(max_payload_size)).split();
        Self {
            reader,
            writer: Writer::new(sink, send_go_away),
        }
    }
    /// Tries to write a `LoquiFrame` to the socket. Returns an error if the socket has closed.
    pub async fn write<F: Into<LoquiFrame>>(mut self, frame: F) -> Result<Self, LoquiError> {
        let write_result = self.writer.write(frame.into()).await;
        match write_result {
            Ok(writer) => {
                // Writing consumes the writer and hands back a new one on success.
                self.writer = writer;
                Ok(self)
            }
            Err(_) => Err(LoquiError::TcpStreamClosed),
        }
    }
    /// Split this `ReaderWriter`, returning the `Reader` and `Writer` parts.
    pub fn split(self) -> (Reader, Writer) {
        (self.reader, self.writer)
    }
    /// Gracefully closes the socket. Optionally sends a `GoAway` frame before closing.
    pub async fn close(self, error: Option<&Error>) {
        let ReaderWriter { reader, writer } = self;
        writer.close(error, Some(reader)).await
    }
}
| 34.256757 | 98 | 0.57929 | 3.046875 |
57a7711fd21db6344f3ff5016def527cca094279
| 4,832 |
lua
|
Lua
|
scene/ragdogLib.lua
|
sedrew/Medals-and-Ordens-USSR
|
73e2e97a9bd5e47f612b6789bd5e5cc521dc7c11
|
[
"MIT"
] | 1 |
2020-09-22T11:30:50.000Z
|
2020-09-22T11:30:50.000Z
|
scene/ragdogLib.lua
|
Aidar3456/Medals-and-Ordens-USSR
|
2293a8a8ebdef666503294f488705f3a87ab2c05
|
[
"MIT"
] | null | null | null |
scene/ragdogLib.lua
|
Aidar3456/Medals-and-Ordens-USSR
|
2293a8a8ebdef666503294f488705f3a87ab2c05
|
[
"MIT"
] | 1 |
2020-09-22T11:30:55.000Z
|
2020-09-22T11:30:55.000Z
|
------------------------------------------------------------------------
---This library contains a few functions that we're gonna use in several
---parts of this template.
---We use various functions throughout our games and apps to speed up
---the most common practices.
---Each template only contains a handful of these (the one useful to it)
---but we're planning on a release that will contain all our functions
---revised and polished up.
---Made by Ragdog Studios SRL in 2013 http://www.ragdogstudios.com
------------------------------------------------------------------------
local ragdogLib = {};
--Renders `polygon` as a white shape on a black rectangle, saves that image
--to the documents directory, and applies it to `object` as a bitmap mask.
--The mask is scaled so it looks identical across screen resolutions.
--Note: `polygon` is re-parented into a temporary group and destroyed here.
ragdogLib.applyMaskFromPolygon = function(object, polygon, maskName)
  --we use these to scale down the mask so that it looks exactly the same on any device
  local pixelWidth, pixelHeight;
  local contentWidth, contentHeight = display.contentWidth-(display.screenOriginX*2), display.contentHeight-(display.screenOriginY*2);
  --Swap the pixel dimensions in landscape so width/height stay consistent
  if contentWidth > contentHeight then
    pixelWidth = display.pixelHeight;
    pixelHeight = display.pixelWidth;
  else
    pixelWidth = display.pixelWidth;
    pixelHeight = display.pixelHeight;
  end
  local maskGroup = display.newGroup();
  --create a rect with width and height higher than polygon and rounded up to 2^)
  --(Corona bitmap masks require power-of-two dimensions)
  local rectWidth, rectHeight = 1, 1;
  while (rectWidth < polygon.contentWidth) do
    rectWidth = rectWidth*2;
  end
  while (rectHeight < polygon.contentHeight) do
    rectHeight = rectHeight*2;
  end
  local blackRect = display.newRect(maskGroup, 0, 0, rectWidth, rectHeight);
  blackRect:setFillColor(0, 0, 0);
  maskGroup:insert(polygon);
  polygon.x, polygon.y = 0, 0;
  --White areas of a mask are visible, black areas are hidden
  polygon:setFillColor(1, 1, 1, 1);
  maskGroup.x, maskGroup.y = display.contentCenterX, display.contentCenterY;
  display.save(maskGroup, maskName or "mask.jpg");
  local mask = graphics.newMask(maskName or "mask.jpg", system.DocumentsDirectory);
  object:setMask(mask);
  --here we scale down the mask to make it consistent across devices
  object.maskScaleX = contentWidth/pixelWidth;
  object.maskScaleY = object.maskScaleX;
  maskGroup:removeSelf();
end
--Builds a pie chart as a display group. `data.values` is a list of slices,
--each with a `percentage` (0-100) and a `color` {r,g,b}; `data.radius` is
--the chart radius. Returns the display group containing the chart.
ragdogLib.createPieChart = function(data)
  local group = display.newGroup();
  -- local brush = { type="image", filename="brush.png"};
  --NOTE(review): `brush` is commented out above, so the `stroke = brush`
  --assignments below assign nil; restore the brush table if slice
  --outlines are wanted.
  local values = data.values;
  local mSin, mCos = math.sin, math.cos;
  local toRad = math.pi/180;
  local currAngle = -90;
  --Remove zero-size slices; clamp a single 100% slice slightly below a
  --full circle so the polygon vertices stay distinct.
  for i = #values, 1, -1 do
    if values[i].percentage <= 0 then
      table.remove(values, i);
    elseif values[i].percentage == 100 then
      values[i].percentage = 99.9;
    end
  end
  for i = 1, #values do
    local newAngle = values[i].percentage*360*0.01;
    local midAngle1, midAngle2;
    --fixed: midAngle3/midAngle4 were accidental globals in the original
    local midAngle3, midAngle4;
    local shape;
    if newAngle > 180 then
      --Wide slice: four intermediate vertices so the polygon hugs the arc
      newAngle = currAngle+newAngle;
      midAngle1 = currAngle+(newAngle-180-currAngle)*.5;
      midAngle2 = midAngle1+(newAngle-90-midAngle1)*.5;
      midAngle3 = midAngle2+(newAngle-90-midAngle2)*.5;
      midAngle4 = midAngle3+(newAngle-midAngle3)*.5;
      shape = {0, 0, mCos(currAngle*toRad)*data.radius*2, mSin(currAngle*toRad)*data.radius*2, mCos(midAngle1*toRad)*data.radius*2, mSin(midAngle1*toRad)*data.radius*2, mCos(midAngle2*toRad)*data.radius*2, mSin(midAngle2*toRad)*data.radius*2,
      mCos(midAngle3*toRad)*data.radius*2, mSin(midAngle3*toRad)*data.radius*2, mCos(midAngle4*toRad)*data.radius*2, mSin(midAngle4*toRad)*data.radius*2, mCos(newAngle*toRad)*data.radius*2, mSin(newAngle*toRad)*data.radius*2};
    else
      --Narrow slice: a single midpoint vertex is enough
      newAngle = currAngle+newAngle;
      midAngle1 = currAngle+(newAngle-currAngle)*.5;
      shape = {0, 0, mCos(currAngle*toRad)*data.radius*2, mSin(currAngle*toRad)*data.radius*2, mCos(midAngle1*toRad)*data.radius*2, mSin(midAngle1*toRad)*data.radius*2, mCos(newAngle*toRad)*data.radius*2, mSin(newAngle*toRad)*data.radius*2};
    end
    currAngle = newAngle;
    local slice = display.newPolygon(group, 0, 0, shape);
    slice:setFillColor(unpack(values[i].color));
    slice.stroke = brush;
    slice.strokeWidth = 2;
    slice:setStrokeColor(unpack(values[i].color));
    --Position the polygon at the centre of its bounding box, computed from
    --the vertex extents (inner loop var renamed to avoid shadowing `i`).
    local lowerPointX, higherPointX, lowerPointY, higherPointY = 10000, -10000, 10000, -10000;
    for j = 1, #shape, 2 do
      if shape[j] < lowerPointX then
        lowerPointX = shape[j];
      end
      if shape[j] > higherPointX then
        higherPointX = shape[j];
      end
      if shape[j+1] < lowerPointY then
        lowerPointY = shape[j+1];
      end
      if shape[j+1] > higherPointY then
        higherPointY = shape[j+1];
      end
    end
    slice.x = lowerPointX+(higherPointX-lowerPointX)*.5;
    slice.y = lowerPointY+(higherPointY-lowerPointY)*.5;
  end
  --Circular outline plus the mask that clips the slices to a circle
  local circle = display.newCircle(0, 0, data.radius)
  circle.stroke = brush;
  circle.strokeWidth = 2;
  ragdogLib.applyMaskFromPolygon(group, circle);
  return group;
end
return ragdogLib;
| 37.169231 | 242 | 0.683568 | 3.09375 |
9c72f2fd16ae89337dae62697a243a9f11f56cf2
| 1,265 |
js
|
JavaScript
|
src/matches.actions.js
|
garretjames/react-flux-dating-interface
|
85e1555a77ba7f6de99bf156bd0684c3586fa5a0
|
[
"MIT"
] | null | null | null |
src/matches.actions.js
|
garretjames/react-flux-dating-interface
|
85e1555a77ba7f6de99bf156bd0684c3586fa5a0
|
[
"MIT"
] | null | null | null |
src/matches.actions.js
|
garretjames/react-flux-dating-interface
|
85e1555a77ba7f6de99bf156bd0684c3586fa5a0
|
[
"MIT"
] | null | null | null |
import { altUtils as alt } from "./alt.utils";
const BASE_URL = "https://randomuser.me/api/?";
const INIT_PARAMS =
"inc=id,gender,name,picture,phone,cell,email,dob&nat=US&results=50&seed=initMatches";
class MatchesActions {
  // Fetches the initial batch of matches from the randomuser API and
  // dispatches the result array; dispatches via matchesFailed on HTTP error.
  getInitMatches() {
    return dispatch => {
      const requestOptions = {
        method: "GET"
      };
      return fetch(`${BASE_URL}${INIT_PARAMS}`, requestOptions).then(res => {
        return res.text().then(text => {
          const data = text && JSON.parse(text);
          if (!res.ok) {
            const error = (data && data.message) || res.statusText;
            return this.matchesFailed(error);
          }
          dispatch(data.results);
        });
      });
    };
  }
  // Pass-through action payload for fetch failures.
  matchesFailed(errMsg) {
    return errMsg;
  }
  // Dispatches the subset of `res` whose age falls within
  // [opts.ageMin, opts.ageMax] and whose gender matches opts.gender
  // ("any" accepts every gender). Fixed: the original used Array.map purely
  // for side effects with an empty if-branch; filter expresses the intent.
  getFilteredMatches(res, opts) {
    return dispatch => {
      const filteredResults = res.filter(
        match =>
          match.dob.age >= opts.ageMin &&
          match.dob.age <= opts.ageMax &&
          (opts.gender === "any" || opts.gender === match.gender)
      );
      dispatch(filteredResults);
    };
  }
  // Dispatches with no payload so the store can clear its match list.
  resetMatches() {
    return dispatch => dispatch();
  }
}
export default alt.createActions(MatchesActions);
| 24.803922 | 87 | 0.56917 | 3.03125 |
8753e9a62f297f27c4bfcf9e3f75862bf0fc1ed6
| 11,626 |
rs
|
Rust
|
src/bezier/offset_scaling.rs
|
Logicalshift/flo_curves
|
fb0741dcb5c27a74e793df317d97971788a3079c
|
[
"Apache-2.0"
] | 53 |
2019-01-31T03:05:00.000Z
|
2022-03-15T05:50:37.000Z
|
src/bezier/offset_scaling.rs
|
Logicalshift/flo_curves
|
fb0741dcb5c27a74e793df317d97971788a3079c
|
[
"Apache-2.0"
] | 17 |
2020-09-12T20:45:12.000Z
|
2022-02-14T23:57:22.000Z
|
src/bezier/offset_scaling.rs
|
Logicalshift/flo_curves
|
fb0741dcb5c27a74e793df317d97971788a3079c
|
[
"Apache-2.0"
] | 5 |
2018-12-02T20:41:45.000Z
|
2022-02-07T12:49:07.000Z
|
use super::curve::*;
use super::normal::*;
use super::characteristics::*;
use crate::geo::*;
use crate::line::*;
use crate::bezier::{CurveSection};
use smallvec::*;
use itertools::*;
// This is loosely based on the algorithm described at: https://pomax.github.io/bezierinfo/#offsetting,
// with numerous changes to allow for variable-width offsets and consistent behaviour (in particular,
// a much more reliable method of subdividing the curve)
//
// This algorithm works by subdividing the original curve into arches. We use the characteristics of the
// curve to do this: by subdividing a curve at its inflection point, we turn it into a series of arches.
// Arches have a focal point that the normal vectors along the curve roughly converge to, so we can
// scale around this point to generate an approximate offset curve (every point of the curve will move
// away from the focal point along its normal axis).
//
// As the focal point is approximate, using the start and end points to compute its location ensures that
// the offset is exact at the start and end of the curve.
//
// Edge cases: curves with inflection points at the start or end, arches where the normal vectors at the
// start and end are in parallel.
//
// Not all arches have normal vectors that converge (close to) a focal point. We can spot these quickly
// because the focal point of any two points is in general not equidistant from those two points: this
// also results in uneven scaling of the start and end points.
//
// TODO: we currently assume that 't' maps to 'length' which is untrue, so this can produce 'lumpy' curves
// when varying the width.
//
// It might be possible to use the canonical curve to better identify how to subdivide curves for the
// best results.
///
/// Computes a series of curves that approximate an offset curve from the specified origin curve.
///
/// This uses a scaling algorithm to compute the offset curve, which is fast but which can produce
/// errors, especially if the initial and final offsets are very different from one another.
///
pub fn offset_scaling<Curve>(curve: &Curve, initial_offset: f64, final_offset: f64) -> Vec<Curve>
where Curve: BezierCurveFactory+NormalCurve,
    Curve::Point: Normalize+Coordinate2D {
    // Split at the location of any features the curve might have, so each
    // resulting section is a simple arch (see the comment block above: the
    // scaling approach needs normals that roughly converge on a focal point).
    // Feature t-values within 0.0001 of an endpoint are snapped onto it to
    // avoid producing degenerate zero-length sections.
    let sections: SmallVec<[_; 4]> = match features_for_curve(curve, 0.01) {
        CurveFeatures::DoubleInflectionPoint(t1, t2) => {
            let t1 = if t1 > 0.9999 { 1.0 } else if t1 < 0.0001 { 0.0 } else { t1 };
            let t2 = if t2 > 0.9999 { 1.0 } else if t2 < 0.0001 { 0.0 } else { t2 };
            // The two inflection points are not guaranteed to be ordered
            if t2 > t1 {
                smallvec![(0.0, t1), (t1, t2), (t2, 1.0)]
            } else {
                smallvec![(0.0, t2), (t2, t1), (t1, 1.0)]
            }
        }
        CurveFeatures::Loop(t1, t3) => {
            let t1 = if t1 > 0.9999 { 1.0 } else if t1 < 0.0001 { 0.0 } else { t1 };
            let t3 = if t3 > 0.9999 { 1.0 } else if t3 < 0.0001 { 0.0 } else { t3 };
            // Also split at the midpoint of the loop's self-intersection range
            let t2 = (t1+t3)/2.0;
            if t3 > t1 {
                smallvec![(0.0, t1), (t1, t2), (t2, t3), (t3, 1.0)]
            } else {
                smallvec![(0.0, t3), (t3, t2), (t2, t1), (t1, 1.0)]
            }
        }
        CurveFeatures::SingleInflectionPoint(t) => {
            if t > 0.0001 && t < 0.9999 {
                smallvec![(0.0, t), (t, 1.0)]
            } else {
                smallvec![(0.0, 1.0)]
            }
        }
        _ => { smallvec![(0.0, 1.0)] }
    };
    // Drop empty ranges and materialise each (t1, t2) pair as a curve section
    let sections = sections.into_iter()
        .filter(|(t1, t2)| t1 != t2)
        .map(|(t1, t2)| curve.section(t1, t2))
        .collect::<SmallVec<[_; 8]>>();
    // Offset the set of curves that we retrieved, interpolating the offset
    // linearly in t across the whole original curve
    let offset_distance = final_offset-initial_offset;
    sections.into_iter()
        .flat_map(|section| {
            // Compute the offsets for this section (TODO: use the curve length, not the t values)
            let (t1, t2) = section.original_curve_t_values();
            let (offset1, offset2) = (t1*offset_distance+initial_offset, t2*offset_distance+initial_offset);
            subdivide_offset(&section, offset1, offset2, 0)
        })
        .collect()
}
///
/// Attempts a simple offset of a curve, and subdivides it if the midpoint is too far away from the expected distance
///
fn subdivide_offset<'a, CurveIn, CurveOut>(curve: &CurveSection<'a, CurveIn>, initial_offset: f64, final_offset: f64, depth: usize) -> SmallVec<[CurveOut; 2]>
where CurveIn: NormalCurve+BezierCurve,
    CurveOut: BezierCurveFactory<Point=CurveIn::Point>,
    CurveIn::Point: Coordinate2D+Normalize {
    // Cap the recursion: beyond this depth we accept the approximation as-is
    const MAX_DEPTH: usize = 5;
    // Fetch the original points
    let start = curve.start_point();
    let end = curve.end_point();
    // The normals at the start and end of the curve define the direction we should move in
    let normal_start = curve.normal_at_pos(0.0);
    let normal_end = curve.normal_at_pos(1.0);
    let normal_start = normal_start.to_unit_vector();
    let normal_end = normal_end.to_unit_vector();
    // If we can we want to scale the control points around the intersection of the normals
    let intersect_point = ray_intersects_ray(&(start, start+normal_start), &(end, end+normal_end));
    if intersect_point.is_none() {
        // Parallel (collinear) normals: no focal point exists, so subdivide
        // at the midpoint unless the curve is linear or we're out of depth
        if characterize_curve(curve) != CurveCategory::Linear && depth < MAX_DEPTH {
            // Collinear normals
            let divide_point = 0.5;
            let mid_offset = initial_offset + (final_offset - initial_offset) * divide_point;
            let left_curve = curve.subsection(0.0, divide_point);
            let right_curve = curve.subsection(divide_point, 1.0);
            let left_offset = subdivide_offset(&left_curve, initial_offset, mid_offset, depth+1);
            let right_offset = subdivide_offset(&right_curve, mid_offset, final_offset, depth+1);
            return left_offset.into_iter()
                .chain(right_offset)
                .collect();
        }
    }
    if let Some(intersect_point) = intersect_point {
        // Subdivide again if the intersection point is too close to one or other of the normals
        let start_distance = intersect_point.distance_to(&start);
        let end_distance = intersect_point.distance_to(&end);
        // 1.0 means the focal point is equidistant from both ends (ideal arch)
        let distance_ratio = start_distance.min(end_distance) / start_distance.max(end_distance);
        // TODO: the closer to 1 this value is, the better the quality of the offset (0.99 produces good results)
        // but the number of subdivisions tends to be too high: we need to find either a way to generate a better offset
        // curve for an arch with a non-centered intersection point, or a better way to pick the subdivision point
        if distance_ratio < 0.995 && depth < MAX_DEPTH {
            // Try to subdivide at the curve's extremeties
            let mut extremeties = curve.find_extremities();
            extremeties.retain(|item| item > &0.01 && item < &0.99);
            // NOTE(review): the "|| true" below forces the midpoint path; the
            // extremity-based subdivision in the else-branch is currently dead
            // code -- confirm whether this was left in deliberately.
            if extremeties.len() == 0 || true {
                // No extremeties (or they're all too close to the edges)
                let divide_point = 0.5;
                let mid_offset = initial_offset + (final_offset - initial_offset) * divide_point;
                let left_curve = curve.subsection(0.0, divide_point);
                let right_curve = curve.subsection(divide_point, 1.0);
                let left_offset = subdivide_offset(&left_curve, initial_offset, mid_offset, depth+1);
                let right_offset = subdivide_offset(&right_curve, mid_offset, final_offset, depth+1);
                left_offset.into_iter()
                    .chain(right_offset)
                    .collect()
            } else {
                // Subdivide at every extremity, distributing the offsets by t
                let mut extremeties = extremeties;
                extremeties.insert(0, 0.0);
                extremeties.push(1.0);
                extremeties
                    .into_iter()
                    .tuple_windows()
                    .flat_map(|(t1, t2)| {
                        let subsection = curve.subsection(t1, t2);
                        let offset1 = initial_offset + (final_offset - initial_offset) * t1;
                        let offset2 = initial_offset + (final_offset - initial_offset) * t2;
                        let res = subdivide_offset(&subsection, offset1, offset2, depth+1);
                        res
                    })
                    .collect()
            }
        } else {
            // Event intersection point
            smallvec![offset_by_scaling(curve, initial_offset, final_offset, intersect_point, normal_start, normal_end)]
        }
    } else {
        // No intersection point (linear curve or depth exhausted): translate instead
        smallvec![offset_by_moving(curve, initial_offset, final_offset, normal_start, normal_end)]
    }
}
///
/// Offsets a curve by scaling around a central point
///
///
/// Offsets a curve by scaling around a central point
///
/// The scaling ratios are chosen so the start and end points land exactly on
/// their offset positions; the control points are interpolated between them.
///
#[inline]
fn offset_by_scaling<CurveIn, CurveOut>(curve: &CurveIn, initial_offset: f64, final_offset: f64, intersect_point: CurveIn::Point, unit_normal_start: CurveIn::Point, unit_normal_end: CurveIn::Point) -> CurveOut
where CurveIn: NormalCurve+BezierCurve,
    CurveOut: BezierCurveFactory<Point=CurveIn::Point>,
    CurveIn::Point: Coordinate2D+Normalize {
    let (cp1, cp2) = curve.control_points();
    let start_point = curve.start_point();
    let end_point = curve.end_point();
    // Where the end points land after being pushed along their unit normals
    let offset_start = start_point + (unit_normal_start * initial_offset);
    let offset_end = end_point + (unit_normal_end * final_offset);
    // Scaling around the intersection point by these ratios moves the start
    // and end exactly onto their offset positions
    let scale_at_start = (intersect_point.distance_to(&offset_start))/(intersect_point.distance_to(&start_point));
    let scale_at_end = (intersect_point.distance_to(&offset_end))/(intersect_point.distance_to(&end_point));
    // The control points are effectively 1/3rd and 2/3rds of the way along the
    // curve, so interpolate the scale accordingly
    let scale_at_cp1 = (scale_at_end - scale_at_start) * (1.0/3.0) + scale_at_start;
    let scale_at_cp2 = (scale_at_end - scale_at_start) * (2.0/3.0) + scale_at_start;
    // Scale the control points away from (or towards) the intersection point
    let offset_cp1 = ((cp1-intersect_point) * scale_at_cp1) + intersect_point;
    let offset_cp2 = ((cp2-intersect_point) * scale_at_cp2) + intersect_point;
    CurveOut::from_points(offset_start, (offset_cp1, offset_cp2), offset_end)
}
///
/// Given a curve where the start and end normals do not intersect at a point, calculates the offset (by moving the start and end points along the normal)
///
///
/// Given a curve where the start and end normals do not intersect at a point, calculates the offset (by moving the start and end points along the normal)
///
#[inline]
fn offset_by_moving<CurveIn, CurveOut>(curve: &CurveIn, initial_offset: f64, final_offset: f64, unit_normal_start: CurveIn::Point, unit_normal_end: CurveIn::Point) -> CurveOut
where CurveIn: NormalCurve+BezierCurve,
    CurveOut: BezierCurveFactory<Point=CurveIn::Point>,
    CurveIn::Point: Coordinate2D+Normalize {
    // Displacement vectors for the two ends of the curve
    let start_shift = unit_normal_start * initial_offset;
    let end_shift = unit_normal_end * final_offset;
    // Translate each end point and its neighbouring control point along its
    // normal to form the first approximation of the offset curve
    let (cp1, cp2) = curve.control_points();
    let moved_start = curve.start_point() + start_shift;
    let moved_cp1 = cp1 + start_shift;
    let moved_cp2 = cp2 + end_shift;
    let moved_end = curve.end_point() + end_shift;
    CurveOut::from_points(moved_start, (moved_cp1, moved_cp2), moved_end)
}
| 47.453061 | 209 | 0.627387 | 3.171875 |
e8f09f4acc1e578e6bbb291e4f8a3a87b3a0b297
| 908 |
py
|
Python
|
chat/views.py
|
xiaoqiao99/chat
|
ca65ed25fbc277828390b890a50ecadf4675cfb4
|
[
"MIT"
] | 2 |
2019-06-21T10:30:18.000Z
|
2019-07-12T07:46:25.000Z
|
chat/views.py
|
xiaoqiao99/chat
|
ca65ed25fbc277828390b890a50ecadf4675cfb4
|
[
"MIT"
] | 8 |
2020-06-05T19:56:53.000Z
|
2022-03-11T23:41:44.000Z
|
chat/views.py
|
xiaoqiao99/chat
|
ca65ed25fbc277828390b890a50ecadf4675cfb4
|
[
"MIT"
] | 3 |
2020-03-13T03:22:40.000Z
|
2020-07-03T03:03:02.000Z
|
from django.shortcuts import render
# Create your views here.
# chat/views.py
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from chat.models import Room
def index(request):
    """Render the chat index page.

    NOTE(review): this view also creates and saves a new Room row on every
    request -- apparently left over from exercising signal behaviour (see
    the commented-out channel-layer broadcast below). Confirm whether this
    side effect should remain in production.
    """
    # from channels.layers import get_channel_layer
    # from asgiref.sync import async_to_sync
    # channel_layer = get_channel_layer()
    # async_to_sync(channel_layer.group_send)(
    #     "chat_lobby",
    #     {
    #         'type': 'chat.message',
    #         'message': "6666666yyyyy66666666"
    #     }
    # )
    # r = Room.objects.filter(id=46).update(name="33333333")  # if the signal uses post_save, an update like this will not trigger the signal mechanism
    r = Room()
    r.name = "xiao"
    r.label = "qq "
    r.save()
    return render(request, 'chat/index.html', {})
def room(request, room_name):
    """Render the chat room page, embedding the JSON-encoded room name.

    ``mark_safe`` keeps the template from escaping the JSON string.
    """
    context = {'room_name_json': mark_safe(json.dumps(room_name))}
    return render(request, 'chat/room.html', context)
| 27.515152 | 93 | 0.654185 | 3.046875 |
ac38b34ca1cf1c50e3ba6de45f68d8caf9ee0cb7
| 11,520 |
sql
|
SQL
|
powa--3.1.2--3.2.0.sql
|
ppetrov91/powa-archivist
|
3f582813c9bad2e98b7c1d2b7d2ca232fde68f5f
|
[
"PostgreSQL"
] | null | null | null |
powa--3.1.2--3.2.0.sql
|
ppetrov91/powa-archivist
|
3f582813c9bad2e98b7c1d2b7d2ca232fde68f5f
|
[
"PostgreSQL"
] | null | null | null |
powa--3.1.2--3.2.0.sql
|
ppetrov91/powa-archivist
|
3f582813c9bad2e98b7c1d2b7d2ca232fde68f5f
|
[
"PostgreSQL"
] | null | null | null |
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
--\echo Use "ALTER EXTENSION powa" to load this file. \quit
/* pg_wait_sampling integration - part 1 */
-- One wait-event sample: capture timestamp plus the cumulative count of
-- times the event was observed.
CREATE TYPE public.wait_sampling_type AS (
    ts timestamptz,
    count bigint
);
/* pg_wait_sampling operator support */
-- Difference between two samples: elapsed interval and count delta.
CREATE TYPE wait_sampling_diff AS (
    intvl interval,
    count bigint
);
-- Component-wise subtraction backing the "-" operator below.
CREATE OR REPLACE FUNCTION wait_sampling_mi(
    a wait_sampling_type,
    b wait_sampling_type)
RETURNS wait_sampling_diff AS
$_$
DECLARE
    res wait_sampling_diff;
BEGIN
    res.intvl = a.ts - b.ts;
    res.count = a.count - b.count;
    return res;
END;
$_$
LANGUAGE plpgsql IMMUTABLE STRICT;
CREATE OPERATOR - (
    PROCEDURE = wait_sampling_mi,
    LEFTARG = wait_sampling_type,
    RIGHTARG = wait_sampling_type
);
-- Rate between two samples: elapsed seconds and events per second.
CREATE TYPE wait_sampling_rate AS (
    sec integer,
    count_per_sec double precision
);
-- Rate computation backing the "/" operator below; the divisor is clamped
-- to one second to avoid division by zero when both samples share a
-- timestamp.
CREATE OR REPLACE FUNCTION wait_sampling_div(
    a wait_sampling_type,
    b wait_sampling_type)
RETURNS wait_sampling_rate AS
$_$
DECLARE
    res wait_sampling_rate;
    sec integer;
BEGIN
    res.sec = extract(EPOCH FROM (a.ts - b.ts));
    IF res.sec = 0 THEN
        sec = 1;
    ELSE
        sec = res.sec;
    END IF;
    res.count_per_sec = (a.count - b.count)::double precision / sec;
    return res;
END;
$_$
LANGUAGE plpgsql IMMUTABLE STRICT;
CREATE OPERATOR / (
    PROCEDURE = wait_sampling_div,
    LEFTARG = wait_sampling_type,
    RIGHTARG = wait_sampling_type
);
/* end of pg_wait_sampling operator support */
-- Coalesced per-query history of wait-event samples.
CREATE TABLE public.powa_wait_sampling_history (
    coalesce_range tstzrange NOT NULL,
    queryid bigint NOT NULL,
    dbid oid NOT NULL,
    event_type text NOT NULL,
    event text NOT NULL,
    records public.wait_sampling_type[] NOT NULL,
    mins_in_range public.wait_sampling_type NOT NULL,
    maxs_in_range public.wait_sampling_type NOT NULL,
    PRIMARY KEY (coalesce_range, queryid, dbid, event_type, event)
);
CREATE INDEX ON public.powa_wait_sampling_history (queryid);
-- Coalesced per-database history of wait-event samples.
CREATE TABLE public.powa_wait_sampling_history_db (
    coalesce_range tstzrange NOT NULL,
    dbid oid NOT NULL,
    event_type text NOT NULL,
    event text NOT NULL,
    records public.wait_sampling_type[] NOT NULL,
    mins_in_range public.wait_sampling_type NOT NULL,
    maxs_in_range public.wait_sampling_type NOT NULL,
    PRIMARY KEY (coalesce_range, dbid, event_type, event)
);
-- Latest per-query samples, not yet coalesced into the history table.
CREATE TABLE public.powa_wait_sampling_history_current (
    queryid bigint NOT NULL,
    dbid oid NOT NULL,
    event_type text NOT NULL,
    event text NOT NULL,
    record wait_sampling_type NOT NULL
);
-- Latest per-database samples, not yet coalesced.
CREATE TABLE public.powa_wait_sampling_history_current_db (
    dbid oid NOT NULL,
    event_type text NOT NULL,
    event text NOT NULL,
    record wait_sampling_type NOT NULL
);
/* end of pg_wait_sampling integration - part 1 */
-- Mark the history tables as user data so pg_dump includes their contents.
SELECT pg_catalog.pg_extension_config_dump('powa_wait_sampling_history','');
SELECT pg_catalog.pg_extension_config_dump('powa_wait_sampling_history_db','');
SELECT pg_catalog.pg_extension_config_dump('powa_wait_sampling_history_current','');
SELECT pg_catalog.pg_extension_config_dump('powa_wait_sampling_history_current_db','');
-- Event trigger body: invoked on CREATE EXTENSION to pick up any newly
-- installed extension that powa knows how to integrate with.
CREATE OR REPLACE FUNCTION public.powa_check_created_extensions()
RETURNS event_trigger
LANGUAGE plpgsql
AS $_$
DECLARE
BEGIN
    /* We have for now no way for a proper handling of this event,
     * as we don't have a table with the list of supported extensions.
     * So just call every powa_*_register() function we know each time an
     * extension is created. Powa should be in a dedicated database and the
     * register function handle to be called several time, so it's not critical
     */
    PERFORM public.powa_kcache_register();
    PERFORM public.powa_qualstats_register();
    PERFORM public.powa_track_settings_register();
    PERFORM public.powa_wait_sampling_register();
END;
$_$; /* end of powa_check_created_extensions */
/* pg_wait_sampling integration - part 2 */
/*
* register pg_wait_sampling extension
*/
CREATE OR REPLACE function public.powa_wait_sampling_register() RETURNS bool AS
$_$
DECLARE
v_func_present bool;
v_ext_present bool;
BEGIN
SELECT COUNT(*) = 1 INTO v_ext_present FROM pg_extension WHERE extname = 'pg_wait_sampling';
IF ( v_ext_present ) THEN
SELECT COUNT(*) > 0 INTO v_func_present FROM public.powa_functions WHERE module = 'pg_wait_sampling';
IF ( NOT v_func_present) THEN
PERFORM powa_log('registering pg_wait_sampling');
INSERT INTO powa_functions (module, operation, function_name, added_manually, enabled)
VALUES ('pg_wait_sampling', 'snapshot', 'powa_wait_sampling_snapshot', false, true),
('pg_wait_sampling', 'aggregate', 'powa_wait_sampling_aggregate', false, true),
('pg_wait_sampling', 'unregister', 'powa_wait_sampling_unregister', false, true),
('pg_wait_sampling', 'purge', 'powa_wait_sampling_purge', false, true),
('pg_wait_sampling', 'reset', 'powa_wait_sampling_reset', false, true);
END IF;
END IF;
RETURN true;
END;
$_$
language plpgsql; /* end of powa_wait_sampling_register */
/*
 * unregister pg_wait_sampling extension
 *
 * Removes every powa_functions hook for the module; the history tables are
 * left in place (use powa_wait_sampling_reset to empty them).
 */
CREATE OR REPLACE function public.powa_wait_sampling_unregister() RETURNS bool AS
$_$
BEGIN
    PERFORM powa_log('unregistering pg_wait_sampling');
    DELETE FROM public.powa_functions WHERE module = 'pg_wait_sampling';
    RETURN true;
END;
$_$
language plpgsql;
/*
 * powa_wait_sampling snapshot collection.
 *
 * Captures the current pg_wait_sampling_profile contents into the per-query
 * and per-database "current" tables in a single statement using
 * data-modifying CTEs.
 */
CREATE OR REPLACE FUNCTION powa_wait_sampling_snapshot() RETURNS void as $PROC$
DECLARE
    -- NOTE(review): "result" is assigned at the end but never used or
    -- returned -- confirm whether a return value was intended.
    result bool;
    v_funcname text := 'powa_wait_sampling_snapshot';
    v_rowcount bigint;
BEGIN
    PERFORM powa_log(format('running %I', v_funcname));
    WITH capture AS (
        -- the various background processes report wait events but don't have
        -- associated queryid. Gather them all under a fake 0 dbid
        SELECT COALESCE(pgss.dbid, 0) AS dbid, s.event_type, s.event, s.queryid,
        sum(s.count) as count
        FROM pg_wait_sampling_profile s
        -- pg_wait_sampling doesn't offer a per (userid, dbid, queryid) view,
        -- only per pid, but pid can be reused for different databases or users
        -- so we cannot deduce db or user from it. However, queryid should be
        -- unique across differet databases, so we retrieve the dbid this way.
        LEFT JOIN pg_stat_statements(false) pgss ON pgss.queryid = s.queryid
        WHERE event_type IS NOT NULL AND event IS NOT NULL
        GROUP BY pgss.dbid, s.event_type, s.event, s.queryid
    ),
    by_query AS (
        -- Per-query snapshot rows, timestamped with now()
        INSERT INTO powa_wait_sampling_history_current (queryid, dbid,
        event_type, event, record)
        SELECT queryid, dbid, event_type, event, (now(), count)::wait_sampling_type
        FROM capture
    ),
    by_database AS (
        -- Per-database rollup of the same capture
        INSERT INTO powa_wait_sampling_history_current_db (dbid,
        event_type, event, record)
        SELECT dbid, event_type, event, (now(), sum(count))::wait_sampling_type
        FROM capture
        GROUP BY dbid, event_type, event
    )
    SELECT COUNT(*) into v_rowcount
    FROM capture;
    perform powa_log(format('%I - rowcount: %s',
    v_funcname, v_rowcount));
    result := true;
END
$PROC$ language plpgsql; /* end of powa_wait_sampling_snapshot */
/*
 * powa_wait_sampling aggregation
 *
 * Coalesces the accumulated "current" rows into the history tables: one
 * history row per (queryid, dbid, event_type, event) holding the whole
 * record array plus min/max bounds, then truncates the current tables.
 */
CREATE OR REPLACE FUNCTION powa_wait_sampling_aggregate() RETURNS void AS $PROC$
DECLARE
    result bool;
    v_funcname text := 'powa_wait_sampling_aggregate';
    v_rowcount bigint;
BEGIN
    PERFORM powa_log(format('running %I', v_funcname));
    -- aggregate history table
    LOCK TABLE powa_wait_sampling_history_current IN SHARE MODE; -- prevent any other update
    INSERT INTO powa_wait_sampling_history (coalesce_range, queryid, dbid,
    event_type, event, records, mins_in_range, maxs_in_range)
    SELECT tstzrange(min((record).ts), max((record).ts),'[]'),
    queryid, dbid, event_type, event, array_agg(record),
    ROW(min((record).ts),
    min((record).count))::wait_sampling_type,
    ROW(max((record).ts),
    max((record).count))::wait_sampling_type
    FROM powa_wait_sampling_history_current
    GROUP BY queryid, dbid, event_type, event;
    GET DIAGNOSTICS v_rowcount = ROW_COUNT;
    perform powa_log(format('%I (powa_wait_sampling_history) - rowcount: %s',
    v_funcname, v_rowcount));
    TRUNCATE powa_wait_sampling_history_current;
    -- aggregate history_db table
    LOCK TABLE powa_wait_sampling_history_current_db IN SHARE MODE; -- prevent any other update
    INSERT INTO powa_wait_sampling_history_db (coalesce_range, dbid,
    event_type, event, records, mins_in_range, maxs_in_range)
    SELECT tstzrange(min((record).ts), max((record).ts),'[]'), dbid,
    event_type, event, array_agg(record),
    ROW(min((record).ts),
    min((record).count))::wait_sampling_type,
    ROW(max((record).ts),
    max((record).count))::wait_sampling_type
    FROM powa_wait_sampling_history_current_db
    GROUP BY dbid, event_type, event;
    GET DIAGNOSTICS v_rowcount = ROW_COUNT;
    perform powa_log(format('%I (powa_wait_sampling_history_db) - rowcount: %s',
    v_funcname, v_rowcount));
    TRUNCATE powa_wait_sampling_history_current_db;
END
$PROC$ language plpgsql; /* end of powa_wait_sampling_aggregate */
/*
 * powa_wait_sampling purge
 *
 * Drops history rows whose coalesce range ended before the configured
 * powa.retention window.
 */
CREATE OR REPLACE FUNCTION powa_wait_sampling_purge() RETURNS void as $PROC$
DECLARE
    v_funcname text := 'powa_wait_sampling_purge';
    v_rowcount bigint;
BEGIN
    PERFORM powa_log(format('running %I', v_funcname));
    DELETE FROM powa_wait_sampling_history WHERE upper(coalesce_range) < (now() - current_setting('powa.retention')::interval);
    GET DIAGNOSTICS v_rowcount = ROW_COUNT;
    perform powa_log(format('%I (powa_wait_sampling_history) - rowcount: %s',
    v_funcname, v_rowcount));
    DELETE FROM powa_wait_sampling_history_db WHERE upper(coalesce_range) < (now() - current_setting('powa.retention')::interval);
    GET DIAGNOSTICS v_rowcount = ROW_COUNT;
    perform powa_log(format('%I (powa_wait_sampling_history_db) - rowcount: %s',
    v_funcname, v_rowcount));
END;
$PROC$ language plpgsql; /* end of powa_wait_sampling_purge */
/*
 * powa_wait_sampling reset
 *
 * Empties all four wait-sampling tables (history and current, per-query and
 * per-database).
 */
CREATE OR REPLACE FUNCTION powa_wait_sampling_reset() RETURNS void as $PROC$
DECLARE
    v_funcname text := 'powa_wait_sampling_reset';
    v_rowcount bigint;
BEGIN
    PERFORM powa_log('running powa_wait_sampling_reset');
    PERFORM powa_log('truncating powa_wait_sampling_history');
    TRUNCATE TABLE powa_wait_sampling_history;
    PERFORM powa_log('truncating powa_wait_sampling_history_db');
    TRUNCATE TABLE powa_wait_sampling_history_db;
    PERFORM powa_log('truncating powa_wait_sampling_history_current');
    TRUNCATE TABLE powa_wait_sampling_history_current;
    PERFORM powa_log('truncating powa_wait_sampling_history_current_db');
    TRUNCATE TABLE powa_wait_sampling_history_current_db;
END;
$PROC$ language plpgsql; /* end of powa_wait_sampling_reset */
-- By default, try to register pg_wait_sampling, in case it's alreay here
SELECT * FROM public.powa_wait_sampling_register();
/* end of pg_wait_sampling integration - part 2 */
| 33.882353 | 130 | 0.715799 | 3.25 |
e8e693322ca4748ac11e7ad6f26ec9749c3ce95e
| 904 |
py
|
Python
|
Python Notebook/Python files/data_utility.py
|
wilfy9249/Capstone-Fall-18
|
832632eb00a10240e0ad16c364449d5020814c83
|
[
"MIT"
] | 2 |
2018-10-24T21:32:17.000Z
|
2019-02-19T21:15:29.000Z
|
Python Notebook/Python files/data_utility.py
|
wilfy9249/Capstone-Fall-18
|
832632eb00a10240e0ad16c364449d5020814c83
|
[
"MIT"
] | null | null | null |
Python Notebook/Python files/data_utility.py
|
wilfy9249/Capstone-Fall-18
|
832632eb00a10240e0ad16c364449d5020814c83
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import os
# In[2]:
#function to get current directory
def getCurrentDirectory():
    """Return the list of entries in the parent directory.

    NOTE(review): despite the name, this lists ``'../'`` (the parent of the
    working directory), not the current directory -- confirm intent.
    """
    return os.listdir('../')
# In[3]:
#function to read csv file
def readCsvFile(path):
    """Load the CSV file at ``path`` into a DataFrame.

    ``low_memory=False`` reads the whole file at once so column dtypes are
    inferred consistently.
    """
    return pd.read_csv(path, low_memory=False)
# In[4]:
#function to filter Data
def filterData(data, column, value):
    """Return the rows of ``data`` whose ``column`` equals ``value``."""
    mask = data[column] == value
    return data.loc[mask]
# In[5]:
#function to get count of a value
def getCount(data, column, columnName):
    """Group ``data`` by ``column`` and return per-group row counts.

    The counts appear in a column named ``columnName``; the group keys are
    restored as a regular column via ``reset_index``.
    """
    counts = data.groupby(column).size()
    return pd.DataFrame({columnName: counts}).reset_index()
# In[7]:
#function to sort
def sortValue(data, column, ascBoolean):
    """Return ``data`` sorted by ``column``.

    ``ascBoolean`` selects ascending (True) or descending (False) order.
    """
    return data.sort_values(column, ascending=ascBoolean)
# In[ ]:
| 14.580645 | 85 | 0.692478 | 3.125 |
504065756633765bd95e3853532cd460651575be
| 3,054 |
go
|
Go
|
gameboy/audio/audio.go
|
scottyw/tetromino
|
6b424ea1dc255b32d3ad5ef8268964164b802ab1
|
[
"Apache-2.0"
] | 4 |
2018-07-30T06:58:37.000Z
|
2020-01-21T08:37:56.000Z
|
gameboy/audio/audio.go
|
scottyw/goomba
|
128e701a2d8a5152a1a7d6710392faab8ab2d8d1
|
[
"Apache-2.0"
] | null | null | null |
gameboy/audio/audio.go
|
scottyw/goomba
|
128e701a2d8a5152a1a7d6710392faab8ab2d8d1
|
[
"Apache-2.0"
] | 1 |
2019-10-08T19:37:43.000Z
|
2019-10-08T19:37:43.000Z
|
package audio
const (
frameSeqPeriod = 4194304 / 512 // 512Hz
samplerPeriod = 4194304 / 44100 // 44100 Hz
)
// Audio models the emulated audio hardware, producing sample pairs on the
// left (l) and right (r) output channels at 44100 Hz.
type Audio struct {
	l             chan float32 // left-channel output samples
	r             chan float32 // right-channel output samples
	ch1           *square      // square channel with frequency sweep
	ch2           *square      // square channel (no sweep)
	ch3           *wave        // wave-pattern channel
	ch4           *noise       // noise channel
	control       *control     // master volume / channel routing registers
	ticks         uint64       // clock cycles since the last wrap (4194304 per second)
	frameSeqTicks uint64       // frame sequencer steps, wraps at 512
}
// New creates the audio unit, wiring the left/right sample channels and
// resetting every NR register to its documented power-on default.
func New(l, r chan float32) *Audio {
	audio := Audio{
		l:   l,
		r:   r,
		ch1: &square{sweep: &sweep{}},
		ch2: &square{},
		ch3: &wave{
			// Default wave pattern RAM contents
			waveram: [16]uint8{0x84, 0x40, 0x43, 0xAA, 0x2D, 0x78, 0x92, 0x3C, 0x60, 0x59, 0x59, 0xB0, 0x34, 0xB8, 0x2E, 0xDA},
		},
		ch4:     &noise{},
		control: &control{},
		ticks:   1,
	}
	// Set default values for the NR registers
	audio.WriteNR10(0x80)
	audio.WriteNR11(0xbf)
	audio.WriteNR12(0xf3)
	audio.WriteNR13(0xff)
	audio.WriteNR14(0xbf)
	audio.WriteNR21(0x3f)
	audio.WriteNR23(0xff)
	audio.WriteNR24(0xbf)
	audio.WriteNR30(0x7f)
	audio.WriteNR31(0xff)
	audio.WriteNR32(0x9f)
	audio.WriteNR33(0xff)
	audio.WriteNR34(0xbf)
	audio.WriteNR41(0xff)
	audio.WriteNR44(0xbf)
	audio.WriteNR50(0x77)
	audio.WriteNR51(0xf3)
	audio.WriteNR52(0xf1)
	return &audio
}
// EndMachineCycle emulates the audio hardware at the end of a machine cycle.
func (a *Audio) EndMachineCycle() {
	// Each machine cycle is four clock cycles; loop instead of repeating
	// the call four times.
	for i := 0; i < 4; i++ {
		a.tickClock()
	}
	// A channel's triggered flag suppresses timer ticks (see tickTimer)
	// only for the machine cycle in which the trigger occurred; clear the
	// flags for the next cycle.
	a.ch1.triggered = false
	a.ch2.triggered = false
	a.ch3.triggered = false
	a.ch4.triggered = false
}
// tickClock advances one clock cycle: it always ticks the channel timers,
// ticks the frame sequencer at 512 Hz and the sampler at 44100 Hz.
func (a *Audio) tickClock() {
	// Wrap the counters after a full emulated second (4194304 clocks) so
	// the modulo arithmetic below stays aligned.
	if a.ticks > 4194304 {
		a.ticks = 1
		a.frameSeqTicks = 0
	}
	// Tick every clock cycle
	a.tickTimer()
	// Tick the frame sequencer at 512 Hz
	if a.ticks%frameSeqPeriod == 0 {
		a.tickFrameSequencer()
		// tickFrameSequencer increments frameSeqTicks; wrap it after the
		// 512th step of the current second.
		if a.frameSeqTicks >= 512 {
			a.frameSeqTicks = 0
		}
	}
	// Tick the sampler at 44100 Hz
	if a.ticks%samplerPeriod == 0 {
		a.tickSampler()
	}
	a.ticks++
}
// tickTimer clocks each channel's frequency timer. A channel that was
// triggered during the current machine cycle is skipped, so the trigger
// write itself does not also clock the timer.
func (a *Audio) tickTimer() {
	if !a.ch1.triggered {
		a.ch1.tickTimer()
	}
	if !a.ch2.triggered {
		a.ch2.tickTimer()
	}
	if !a.ch3.triggered {
		a.ch3.tickTimer()
	}
	if !a.ch4.triggered {
		a.ch4.tickTimer()
	}
}

// tickFrameSequencer advances the 512 Hz frame sequencer one step,
// clocking length counters, volume envelopes and the sweep per the table.
func (a *Audio) tickFrameSequencer() {
	// Step   Length Ctr  Vol Env     Sweep
	// ---------------------------------------
	// 0      Clock       -           -
	// 1      -           -           -
	// 2      Clock       -           Clock
	// 3      -           -           -
	// 4      Clock       -           -
	// 5      -           -           -
	// 6      Clock       -           Clock
	// 7      -           Clock       -
	// ---------------------------------------
	// Rate   256 Hz      64 Hz       128 Hz
	//
	// The subtract-then-modulo tests below rely on Go's remainder keeping
	// the dividend's sign, so e.g. (ticks-7)%8 is negative (non-zero) for
	// ticks < 7 and only hits zero at steps 7, 15, 23, ...
	if a.frameSeqTicks%2 == 0 {
		a.ch1.tickLength()
		a.ch2.tickLength()
		a.ch3.tickLength()
		a.ch4.tickLength()
	}
	if (a.frameSeqTicks-7)%8 == 0 {
		// ch3 (wave) has no volume envelope.
		a.ch1.tickVolumeEnvelope()
		a.ch2.tickVolumeEnvelope()
		a.ch4.tickVolumeEnvelope()
	}
	if (a.frameSeqTicks-2)%4 == 0 {
		// Only channel 1 has a sweep unit.
		a.ch1.tickSweep()
	}
	a.frameSeqTicks++
}

// tickSampler runs at 44100 Hz (see tickClock) and delegates to
// takeSample (defined elsewhere) to produce the next output.
func (a *Audio) tickSampler() {
	a.takeSample()
}
| 20.092105 | 118 | 0.57924 | 3.1875 |
bcd1c88221aff375bdc09950e02f3f770f4c2daf
| 26,918 |
js
|
JavaScript
|
dev/sheetdb.js
|
classroomtechtools/managebac_openapply_gsuite
|
e5d0d4ba839ab2d5b7e3f4f265ef170d73279e1b
|
[
"MIT"
] | 2 |
2019-05-14T03:56:58.000Z
|
2022-02-16T20:08:01.000Z
|
dev/sheetdb.js
|
classroomtechtools/managebac_openapply_gsuite
|
e5d0d4ba839ab2d5b7e3f4f265ef170d73279e1b
|
[
"MIT"
] | null | null | null |
dev/sheetdb.js
|
classroomtechtools/managebac_openapply_gsuite
|
e5d0d4ba839ab2d5b7e3f4f265ef170d73279e1b
|
[
"MIT"
] | null | null | null |
/*
Wrapper for advanced sheets api that provides us common database operations
*/
'use strict';
/*
Build upon the globalContext (passed as this below) to define all our variables in the "app" variable
We'll have all the virtualized stuff there in the local stack (thus, name conflicts are still possible)
*/
(function(globalContext) {
/*
Tranpose an array, if element isn't present, it'll be undefined
https://stackoverflow.com/questions/4492678/swap-rows-with-columns-transposition-of-a-matrix-in-javascript
*/
function transpose(a) {
return Object.keys(a[0]).map(function(c) {
return a.map(function(r) { return r[c]; });
});
}
/*
Convert column number (0-indexed) into spreadsheets
*/
function zeroIndexedToColumnName(n) {
var ordA = 'A'.charCodeAt(0);
var ordZ = 'Z'.charCodeAt(0);
var len = ordZ - ordA + 1;
var s = "";
while(n >= 0) {
s = String.fromCharCode(n % len + ordA) + s;
n = Math.floor(n / len) - 1;
}
return s;
}
/*
Define a block of code that executes code on entry to that block, and on exit
Even if there is an error (although that behavior can be overwritten)
*/
var contextManager = function () {
function _parseOptions(opt) {
var ret = {};
ret.enter = opt.enter || function () { return null; };
ret.exit = opt.exit || function (arg) {};
ret.params = opt.params || [];
if (!Array.isArray(ret.params)) throw new TypeError("options.params must be an array");
ret.onError = opt.onError || function () {};
return ret;
}
if (arguments.length == 1) {
var options = _parseOptions(arguments[0]);
return function (body) {
var ret = options.enter.apply(null, options.params);
try {
ret = body(ret) || ret;
} catch (err) {
if (options.onError(err, ret) !== null)
if (typeof err === 'string')
throw new Error(err);
else
throw new err.constructor(err.message + ' --> ' + (err.stack ? err.stack.toString(): ''));
} finally {
options.exit(ret);
}
return ret;
};
} else if (arguments.length == 2) {
var bodies = arguments[0],
options = _parseOptions(arguments[1]);
options = _parseOptions(options);
if (!Array.isArray(bodies))
bodies = [bodies];
for (var i = 0; i < bodies.length; i++) {
var body = bodies[i];
var ret = options.enter.apply(null, options.params);
try {
ret = body(ret) || ret;
} catch (err) {
if (options.onError(err, ret) !== null)
throw new err.constructor(err.message + ' --> ' + err.stack.toString());
} finally {
options.exit(ret);
}
}
} else {
throw new Error("Pass either one or two arguments");
}
return ret;
};
/*
Formatter
https://gist.github.com/brainysmurf/b4394974047428edccef27b2abcc4fb3
*/
// ValueError :: String -> Error
var ValueError = function(message) {
var err = new Error(message);
err.name = 'ValueError';
return err;
};
// defaultTo :: a,a? -> a
var defaultTo = function(x, y) {
return y == null ? x : y;
};
// create :: Object -> String,*... -> String
var create = function(transformers) {
return function(template) {
var args = Array.prototype.slice.call(arguments, 1);
var idx = 0;
var state = 'UNDEFINED';
return template.replace(
/([{}])\1|[{](.*?)(?:!(.+?))?[}]/g,
function(match, literal, key, xf) {
if (literal != null) {
return literal;
}
if (key.length > 0) {
if (state === 'IMPLICIT') {
throw ValueError('cannot switch from ' +
'implicit to explicit numbering');
}
state = 'EXPLICIT';
} else {
if (state === 'EXPLICIT') {
throw ValueError('cannot switch from ' +
'explicit to implicit numbering');
}
state = 'IMPLICIT';
key = String(idx);
idx += 1;
}
var value = defaultTo('', lookup(args, key.split('.')));
if (xf == null) {
return value;
} else if (Object.prototype.hasOwnProperty.call(transformers, xf)) {
return transformers[xf](value);
} else {
throw ValueError('no transformer named "' + xf + '"');
}
}
);
};
};
var lookup = function(obj, path) {
if (!/^\d+$/.test(path[0])) {
path = ['0'].concat(path);
}
for (var idx = 0; idx < path.length; idx += 1) {
var key = path[idx];
obj = typeof obj[key] === 'function' ? obj[key]() : obj[key];
}
return obj;
};
// format :: String,*... -> String
var format = create({});
// format.create :: Object -> String,*... -> String
format.create = create;
// format.extend :: Object,Object -> ()
format.extend = function(prototype, transformers) {
var $format = create(transformers);
prototype.format = function() {
var args = Array.prototype.slice.call(arguments);
args.unshift(this);
return $format.apply(globalContext, args);
};
};
// Do not pollute the global namespace, seems like a bad idea
//global.format = format;
// ...instead we will polyfill the String.protype, but you may want to modify this
// for the use of transformers, see documentation for that
format.extend(String.prototype);
// END FORMATTER
/*
The private, main constructor
*/
var DBSheets_ = function (_ss) {
// Module pattern, returns an object with methods
// We use _methods to indicate private stuff
// defaults
_dimension = 'ROWS';
_keyHeaderRow = 0;
_destInfo = [];
_cachedSS = null;
/*
* Methods for simple interactions
*
*/
function _getCachedSS () {
//return Sheets.Spreadsheets.get(_getId());
if (!_cachedSS) {
_cachedSS = Sheets.Spreadsheets.get(_getId());
}
return _cachedSS;
}
/*
* _getId
* @return {String} The spreadsheet ID
*/
function _getId () {
return _ss.spreadsheetId;
}
/*
@param {Object} request Request object
@return {Object} Response object
*/
function _valuesBatchUpdate (request) {
return Sheets.Spreadsheets.Values.batchUpdate(request, _getId());
}
/*
@return effective values, otherwise empty [[]]
*/
function _getValues (range) {
var response = Sheets.Spreadsheets.Values.get(_getId(), range, {
majorDimension: _dimension,
valueRenderOption: "UNFORMATTED_VALUE"
});
return response.values || [[]];
}
/*
Clears to all values in the range
@return null
*/
function _clearRange (range) {
Logger.log('Clearing ' + range);
Sheets.Spreadsheets.Values.clear({}, _getId(), range);
}
/*
Clears the entire tab
*/
function _clearTab (tabTitle) {
var sheets = _getSheets();
var targetTab = null;
sheets.forEach(function (sheet) {
if (sheet.properties.title == tabTitle) {
targetTab = sheet;
}
});
if (targetTab) {
_clearRange(tabTitle + '!1:' + targetTab.properties.gridProperties.rowCount.toString());
}
}
/*
*/
String.prototype.to10 = function(base) {
var lvl = this.length - 1;
var val = (base || 0) + Math.pow(26, lvl) * (this[0].toUpperCase().charCodeAt() - 64 - (lvl ? 0 : 1));
return (this.length > 1) ? (this.substr(1, this.length - 1)).to10(val) : val;
}
function _a1notation2gridrange(a1notation) {
var data = a1notation.match(/(^.+)!(.+):(.+$)/);
if (data == null) {
// For cases when only the sheet name is returned
return {
sheetId: _getSheet(a1notation).properties.sheetId
}
}
var co1 = data[2].match(/(\D+)(\d+)/);
var co2 = data[3].match(/(\D+)(\d+)/);
var gridRange = {
sheetId: _getSheet(data[1]).properties.sheetId,
startRowIndex: co1 ? parseInt(co1[2], 10) - 1 : null,
endRowIndex: co2 ? parseInt(co2[2], 10) : null,
startColumnIndex: co1 ? co1[1].to10() : data[2].to10(),
endColumnIndex: co2 ? co2[1].to10(1) : data[3].to10(1),
};
if (gridRange.startRowIndex == null) delete gridRange.startRowIndex;
if (gridRange.endRowIndex == null) delete gridRange.endRowIndex;
return gridRange;
}
/*
@param {Number,String} sheet if number, returns the sheet at index
if name, return the sheet that has that name
@throws {Error} if sheet is not a number or not a string
@return {Object} returns the target sheet object
@TODO: Use network call to update
*/
function _getSheet(sheet) {
var ss = _getCachedSS();
if (typeof sheet == "number") return ss.sheets[sheet] || null;
if (typeof sheet == "string") {
var sheetName = sheet.split("!")[0]; // take out the
for (var i = 0; i < ss.sheets.length; i++) {
if (ss.sheets[i].properties.title == sheetName) return ss.sheets[i];
}
return null;
}
throw new Error("Passed in " + typeof sheet + " into _getSheet");
}
function _getSheets() {
return Sheets.Spreadsheets.get(_getId()).sheets;
}
/*
_toRange: Convenience function to convert variables into a A1Notation string
@return {String} Legal A1Notation
*/
function _toRange(title, left, right) {
if (title.indexOf(' ') !== -1)
title = "'" + title + "'";
if (typeof right === 'undefined')
return title + '!' + left.toString() + ':' + left.toString();
else
return title + '!' + left.toString() + ':' + right.toString();
}
/*
Makes frozen rows, add headers
*/
function _defineHeaders (sheet, headers) {
var sht = _getSheet(sheet);
var response = Sheets.Spreadsheets.batchUpdate({
requests: [
{
updateSheetProperties: {
properties: {
sheetId: sht.properties.id,
gridProperties: {
frozenRowCount: headers.length,
}
},
fields: 'gridProperties.frozenRowCount',
}
},
]
}, _getId());
this.inputValues(_toRange(sht.properties.title, 1, headers.length), headers);
this.setKeyHeadingRow(0);
}
function _getHeaders (sheet) {
var sht = _getSheet(sheet);
if (!sht) // may be either undefined or null
return [[]];
var numHeaders = sht.properties.gridProperties.frozenRowCount || 0;
if (numHeaders == 0)
return [[]];
return _getValues(_toRange(sht.properties.title, 1, numHeaders));
}
function _getRange ( ) {
var ss = SpreadsheetApp.openById(_getId());
return ss.getRange.apply(ss, arguments);
}
/*
Uses the sheet's headers and range values and converts them into the properties
@param {string} rangeA1Notation The range string
@returns {List[Object]}
*/
function _toObjects(rangeA1Notation) {
var headers = _getHeaders(rangeA1Notation);
var numHeaders = headers.length;
var headings = headers[_keyHeaderRow];
headers = transpose(headers); // transpose so we can refehence by column below
var values = _getValues(rangeA1Notation);
var range = _getRange(rangeA1Notation); // TODO: Shortcut method, could we do this manually?
var rowOffset = (range.getRow() - numHeaders - 1); // getRow returns the row number after the
var columnOffset = (range.getColumn() - 1);
var ret = [];
var co, header, obj;
// Loop through the values
// We need to use headings.length in nested loop to ensure that
// every column
for (var r = 0; r < values.length; r++) {
ro = r + rowOffset;
obj = {};
for (var c = 0; c < headings.length; c++) {
co = c + columnOffset;
heading = headings[co];
obj[heading] = {
value: values[r][c],
a1Notation: range.getSheet().getName() + '!' + range.offset(ro, co).getA1Notation(),
headers: headers[co],
column: co,
row: range.getRow() + r,
columnAsName: zeroIndexedToColumnName(co),
rowAsName: range.getRow().toString(),
};
}
obj.columns = {};
var i = 0;
for (key in obj) {
if (key === 'columns')
continue;
obj.columns[key] = zeroIndexedToColumnName(i) + (range.getRow() + r).toString();
i++;
}
ret.push(obj);
}
return ret;
}
_plugins = [];
_oncePlugins = [];
    /*
      Returned object: the public API of a DBSheets instance.
    */
    return {
      getId: _getId,
      clearRange: _clearRange,
      clearTab: _clearTab,
      // Choose the major dimension used for subsequent reads/writes.
      setDimensionAsColumns: function () {
        _dimension = 'COLUMNS';
      },
      setDimensionAsRows: function () {
        _dimension = 'ROWS';
      },
      /*
        This determines which header row supplies object keys in
        _toObjects (0-based index into the frozen header rows).
      */
      setKeyHeadingRow: function (value) {
        _keyHeaderRow = value;
      },
      getHeaders: function (sheet) {
        return _getHeaders(sheet);
      },
      /*
        Light wrapper to spreadsheet app getRange function
      */
      getRange: function () {
        return _getRange.apply(null, arguments);
      },
      a1notation2gridrange: function (a1Notation) {
        return _a1notation2gridrange(a1Notation);
      },
      // Register a transformer applied by overwriteWithPlugins.
      registerPlugin: function (description, func) {
        _plugins.push({description: description, func: func});
      },
      registerOncePlugin: function (description, func) {
        _oncePlugins.push({description: description, func: func});
      },
      /*
        Append `row` after existing data in `range` (Sheets append semantics).
      */
      insertRow: function (range, row) {
        return Sheets.Spreadsheets.Values.append({
          majorDimension: _dimension,
          values: [row]
        }, _getId(), range, {
          valueInputOption: "USER_ENTERED",
          insertDataOption: "INSERT_ROWS",
        });
      },
getPluginsOverwriteBuildRequests: function (rangeA1Notation) {
objs = _toObjects(rangeA1Notation); // convert to A1
var requests = [];
var utils = {
zeroIndexedToColumnName: zeroIndexedToColumnName,
objects: objs
};
// cycle through the plugins and build results array
_plugins.forEach(function (plugin) {
objs.forEach(function (obj) {
for (prop in obj) {
if (prop == 'columns')
continue;
var objValue = obj[prop];
if (plugin.description.entryPoint &&
objValue.headers[plugin.description.entryPoint.header - 1] == plugin.description.name) {
var newValue = plugin.func(objValue, utils);
if (typeof newValue === 'string') {
newValue = newValue.format(objValue); // overwrites
newValue = newValue.format(obj.columns);
}
requests.push({values: [[newValue]], a1Notation: objValue.a1Notation});
}
}
});
});
return requests;
},
      // Apply all registered plugins to the range and flush the resulting
      // value writes through the request builder.
      overwriteWithPlugins: function (rangeA1Notation) {
        var requests = this.getPluginsOverwriteBuildRequests(rangeA1Notation);
        // Queue value requests; they are sent when the builder body exits.
        this.withRequestBuilder(function (rb) {
          requests.forEach(function (item) {
            rb.addValueRequest(rb.utils.valuesRequest(item.values, item.a1Notation));
          });
        });
      },
      /*
        Calls batchUpdate with "USER_ENTERED" so values are parsed as if
        typed by a user (formulas, dates, numbers).
        @return response
      */
      inputValues: function (rangeNotation, values) {
        var request = {
          valueInputOption: 'USER_ENTERED',
          data: [
            {
              range: rangeNotation,
              majorDimension: _dimension,
              values: values
            }
          ]
        };
        return _valuesBatchUpdate(request);
      },
      // Raw unformatted values for the range ([[]] when empty).
      getEffectiveValues: function (range) {
        return _getValues(range);
      },
getColumnValues: function (range, column) {
saved = _dimension;
this.setDimensionAsColumns();
var values = _getValues(range);
_dimension = saved;
return values[column].slice();
},
      // NOTE(review): stub — the body is commented out; confirm whether
      // this is still meant to be implemented.
      addSheets: function (sheets) {
        //Logger.log(_ss.sheets);
      },
      getSheets: function () {
        return _getSheets();
      },
      defineHeaders: _defineHeaders,
      // Records accumulated by setDestinationForForm.
      getDestinationInfo: function () { return _destInfo; },
setDestinationForForm: function (formCreationFunc) {
var before = [];
//
var ctx = contextManager({
enter: function (form) {
_getSheets().forEach(function (b) {
var id = b.properties.sheetId;
before.push(id);
});
return form;
},
exit: function (form) {
if (typeof form === 'undefined') {
_destInfo.push({id: null, sheetId: null, error: "Did not pas form into exit"});
return;
}
form.setDestination(FormApp.DestinationType.SPREADSHEET, _getId());
var after = null;
_getSheets().forEach(function (a) {
if (before.indexOf(a.properties.sheetId) === -1) {
after = a;
}
});
if (after == null) {
_destInfo.push({id: null, sheetId:null, error: "Could not detect after creation."});
} else {
_destInfo.push({id: _getId(), sheet: after, sheetId: after.properties.sheetId, index: after.properties.index, error: false});
}
},
});
ctx(formCreationFunc);
return _destInfo;
},
/*
Chainable convenience methods that builds request objects for execution upon completion
*/
withRequestBuilder: contextManager({
enter: function (obj) {
obj.preSSRequests = [];
obj.sRequests = [];
obj.postSSRequests = [];
return obj;
},
exit: function (obj) {
if (obj.preSSRequests.length > 0) {
Sheets.Spreadsheets.batchUpdate({requests:obj.preSSRequests}, _getId()); // TODO: What about "empty response" error
}
if (obj.sRequests.length > 0) {
if (obj._tabsAutoClear) {
var allSheets = obj.sRequests.reduce(function (acc, item) {
acc.push(item.range.match(/(.*)!/)[1]);
return acc;
}, []);
allSheets.filter(function (i, p, a) {
return a.indexOf(i) == p;
}).forEach(function (sheetName) {
_clearTab(sheetName); // use the
});
}
Logger.log('Update values: ' + obj.sRequests.range + ' -> ' + obj.sRequests.values);
Sheets.Spreadsheets.Values.batchUpdate({
valueInputOption: "USER_ENTERED",
data: obj.sRequests
}, _getId());
}
if (obj.postSSRequests.length > 0) {
Sheets.Spreadsheets.batchUpdate({requests:obj.postSSRequests}, _getId()); // TODO: What about "empty response" error
}
},
params: [{
_valuesSortBy: null,
preSSRequests: [],
sRequests: [],
postSSRequests: [],
_tabsAutoClear: false,
tabsAutoClear: function () {
this._tabsAutoClear = true;
Logger.log(this._tabsAutoClear);
},
setValuesSortByIndex: function (sortBy) {
this._valuesSortBy = sortBy;
},
addValueRequest: function (request) {
Logger.log(request.range + ' -> ' + request.values);
this.sRequests.push(request);
return this;
},
addPropertyRequest: function (request) {
this.preSSRequests.push(request);
return this;
},
addSheetPropertyRequest: function (request) {
this.preSSRequests.push(request);
return this;
},
addSheetRequest: function (request) {
this.preSSRequests.push(request);
return this;
},
addSortRangeRequest: function (request) {
this.postSSRequests.push(request);
return this;
},
utils: {
toRange: function (title, left, right) {
if (title.indexOf(' ') !== -1)
title = "'" + title + "'";
if (typeof right === 'undefined')
return title + '!' + left.toString() + ':' + left.toString();
else
return title + '!' + left.toString() + ':' + right.toString();
},
valuesRequestFromRange: function (values, title, left, right) {
return {
majorDimension: _dimension,
range: this.toRange(title, left, right),
values: values
}
},
valuesRequest: function (values, rangeA1Notation, _dim) {
return {
majorDimension: _dim || _dimension,
range: rangeA1Notation,
values: values
}
},
columnCountRequest: function (id, numCols) {
return {
updateSheetProperties: {
properties: {
sheetId: id,
gridProperties: {
columnCount: numCols,
}
},
fields: 'gridProperties.columnCount',
}
};
},
hideGridlinesRequest: function (id, bool) {
return {
updateSheetProperties: {
properties: {
sheetId: id,
gridProperties: {
hideGridlines: bool,
}
},
fields: 'gridProperties.hideGridlines',
}
};
},
rowCountRequest: function (id, numRows) {
return {
updateSheetProperties: {
properties: {
sheetId: id,
gridProperties: {
rowCount: numRows,
}
},
fields: 'gridProperties.rowCount',
}
};
},
frozenRowsRequest: function (id, numRows) {
var sheet = _getSheet(id);
return {
updateSheetProperties: {
properties: {
sheetId: sheet.properties.sheetId,
gridProperties: {
frozenRowCount: numRows,
}
},
fields: 'gridProperties.frozenRowCount',
}
};
},
frozenColumnsRequest: function (id, numCols) {
return {
updateSheetProperties: {
properties: {
sheetId: id,
gridProperties: {
frozenColumnCount: numCols,
}
},
fields: 'gridProperties.frozenColumnCount',
}
};
},
tabColorRequest: function (id, red, green, blue, alpha) {
if (typeof alpha === 'undefined')
alpha = 1;
return {
updateSheetProperties: {
properties: {
sheetId: id,
tabColor: {
red: red,
green: green,
blue: blue,
alpha: alpha
}
},
fields: 'tabColor',
}
};
},
newTabRequest: function (title) {
return {
addSheet: {
properties: {
title: title
}
},
}
},
tabTitleRequest: function (id, title) {
return {
updateSheetProperties: {
properties: {
sheetId: id,
title: title
},
fields: 'title',
},
}
},
sortRequest: function (range, dimensionIndex, sortOrder) {
return {
sortRange: {
range: _a1notation2gridrange(range),
sortSpecs: {
dimensionIndex: dimensionIndex || 0,
sortOrder: sortOrder || 'ASCENDING',
}
}
}
},
},
}],
})
}; // return
}; // DBSheets()
// ENTRY POINT
globalContext.DBSheets = function (_spreadsheetID) {
_spreadsheetID = _spreadsheetID || SpreadsheetApp.getActiveSpreadsheet().getId();
return DBSheets.fromId(_spreadsheetID);
};
// CONSTRUCTORS:
DBSheets.fromId = function (id) {
return DBSheets_(Sheets.Spreadsheets.get(id));
};
DBSheets.fromRange = function (range) {
var ss = range.getSheet().getParent();
return DBSheets.fromId(ss.getId());
};
DBSheets.createWithTitle = function (title) {
var resource = {properties: {title: title}};
return DBSheets_(Sheets.Spreadsheets.create(resource));
};
DBSheets.createWithProperties = function (resource) {
return DBSheets_(Sheets.Spreadsheets.create(resource));
};
})(this);
| 30.833906 | 139 | 0.506984 | 3.140625 |
7fc1931ea10d12569d9def39e0baa88b96ad9546
| 1,749 |
rs
|
Rust
|
src/l2r/market/mean.rs
|
isabella232/Evokit
|
54c21ff318b623a9c1ac168a2323ab2fdb4598cf
|
[
"Apache-2.0"
] | 60 |
2020-03-10T22:59:02.000Z
|
2021-07-16T16:44:23.000Z
|
src/l2r/market/mean.rs
|
etsy/Evokit
|
54c21ff318b623a9c1ac168a2323ab2fdb4598cf
|
[
"Apache-2.0"
] | 1 |
2021-04-05T09:47:44.000Z
|
2021-04-05T09:47:44.000Z
|
src/l2r/market/mean.rs
|
isabella232/Evokit
|
54c21ff318b623a9c1ac168a2323ab2fdb4598cf
|
[
"Apache-2.0"
] | 6 |
2020-03-13T00:27:54.000Z
|
2021-04-05T09:47:26.000Z
|
use crate::l2r::market::utils::*;
/// Indicator for computing the mean
pub struct MeanIndicator {
    /// name of the indicator. Also the query-level scorer to use; read as
    /// the metric key in `evaluate`.
    name: String,
    /// Value to scale the score by; `None` means no scaling (factor 1.0)
    scale: Option<f32>,
}
impl MeanIndicator {
/// Returns a new MeanIndicator
pub fn new(name: &str, scale: Option<f32>) -> Self {
MeanIndicator {
name: name.into(),
scale,
}
}
}
impl Indicator for MeanIndicator {
    /// Average the named metric across all requests, then apply the
    /// optional scale factor. An empty slice yields NaN (0.0 / 0.0).
    fn evaluate(&self, metrics: &Vec<&Metrics>) -> f32 {
        let total: f32 = metrics.iter().map(|m| m.read_num(&self.name)).sum();
        let mean = total / (metrics.len() as f32);
        // Optional scale. Allows the score to be between [0,1].
        mean * self.scale.unwrap_or(1.0)
    }

    /// Name of indicator
    fn name(&self) -> &str {
        &self.name
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    #[test]
    fn test_mean() {
        // Two requests with avg-price-1 of 1000.0 and 20.0 -> mean 510.0.
        let mapping1: HashMap<String, f32> = [("avg-price-1".to_string(), 1000.0)]
            .iter()
            .cloned()
            .collect();
        let metric1: Metrics = mapping1.into();
        let mapping2: HashMap<String, f32> = [("avg-price-1".to_string(), 20.0)]
            .iter()
            .cloned()
            .collect();
        let metric2: Metrics = mapping2.into();
        let metrics: Vec<&Metrics> = vec![&metric1, &metric2];
        let empty_vector: Vec<&Metrics> = vec![];
        let indicator = MeanIndicator::new("avg-price-1", None);
        assert_eq!(indicator.evaluate(&metrics), 510.0);
        // No metrics divides 0.0 by 0.0, so the score is NaN.
        assert!(indicator.evaluate(&empty_vector).is_nan());
    }
}
| 27.328125 | 82 | 0.554603 | 3.28125 |
6502f4ca30fdd305a49eeefeb8dc2c19d45c0e83
| 2,598 |
py
|
Python
|
dit/divergences/tests/test_jensen_shannon_divergence.py
|
chebee7i/dit
|
59626e34c7938fddeec140522dd2a592ba4f42ef
|
[
"BSD-2-Clause"
] | null | null | null |
dit/divergences/tests/test_jensen_shannon_divergence.py
|
chebee7i/dit
|
59626e34c7938fddeec140522dd2a592ba4f42ef
|
[
"BSD-2-Clause"
] | null | null | null |
dit/divergences/tests/test_jensen_shannon_divergence.py
|
chebee7i/dit
|
59626e34c7938fddeec140522dd2a592ba4f42ef
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Tests for dit.divergences.jensen_shannon_divergence.
"""
from nose.tools import assert_almost_equal, assert_raises
from dit import Distribution
from dit.exceptions import ditException
from dit.divergences.jensen_shannon_divergence import (
jensen_shannon_divergence as JSD,
jensen_shannon_divergence_pmf as JSD_pmf
)
def test_jsd0():
    """ Test the JSD of a distribution but with weights misspecified."""
    d1 = Distribution("AB", [0.5, 0.5])
    # Passing a bare distribution where a list of dists is expected raises.
    assert_raises(ditException, JSD, d1, d1)

def test_jsd1():
    """ Test the JSD of a distribution with itself """
    d1 = Distribution("AB", [0.5, 0.5])
    jsd = JSD([d1, d1])
    # Identical distributions have zero divergence.
    assert_almost_equal(jsd, 0)

def test_jsd2():
    """ Test the JSD with half-overlapping distributions """
    d1 = Distribution("AB", [0.5, 0.5])
    d2 = Distribution("BC", [0.5, 0.5])
    jsd = JSD([d1, d2])
    assert_almost_equal(jsd, 0.5)

def test_jsd3():
    """ Test the JSD with disjoint distributions """
    d1 = Distribution("AB", [0.5, 0.5])
    d2 = Distribution("CD", [0.5, 0.5])
    jsd = JSD([d1, d2])
    # Disjoint supports give the maximum divergence of 1 bit (two dists).
    assert_almost_equal(jsd, 1.0)

def test_jsd4():
    """ Test the JSD with half-overlapping distributions with weights """
    d1 = Distribution("AB", [0.5, 0.5])
    d2 = Distribution("BC", [0.5, 0.5])
    jsd = JSD([d1, d2], [0.25, 0.75])
    assert_almost_equal(jsd, 0.40563906222956625)

def test_jsd5():
    """ Test that JSD fails when more weights than dists are given """
    d1 = Distribution("AB", [0.5, 0.5])
    d2 = Distribution("BC", [0.5, 0.5])
    assert_raises(ditException, JSD, [d1, d2], [0.1, 0.6, 0.3])

# The pmf variants mirror the tests above using raw probability lists.

def test_jsd_pmf1():
    """ Test the JSD of a distribution with itself """
    d1 = [0.5, 0.5]
    jsd = JSD_pmf([d1, d1])
    assert_almost_equal(jsd, 0)

def test_jsd_pmf2():
    """ Test the JSD with half-overlapping distributions """
    d1 = [0.5, 0.5, 0.0]
    d2 = [0.0, 0.5, 0.5]
    jsd = JSD_pmf([d1, d2])
    assert_almost_equal(jsd, 0.5)

def test_jsd_pmf3():
    """ Test the JSD with disjoint distributions """
    d1 = [0.5, 0.5, 0.0, 0.0]
    d2 = [0.0, 0.0, 0.5, 0.5]
    jsd = JSD_pmf([d1, d2])
    assert_almost_equal(jsd, 1.0)

def test_jsd_pmf4():
    """ Test the JSD with half-overlapping distributions with weights """
    d1 = [0.5, 0.5, 0.0]
    d2 = [0.0, 0.5, 0.5]
    jsd = JSD_pmf([d1, d2], [0.25, 0.75])
    assert_almost_equal(jsd, 0.40563906222956625)

def test_jsd_pmf5():
    """ Test that JSD fails when more weights than dists are given """
    d1 = [0.5, 0.5, 0.0]
    d2 = [0.0, 0.5, 0.5]
    assert_raises(ditException, JSD_pmf, [d1, d2], [0.1, 0.6, 0.2, 0.1])
| 30.928571 | 73 | 0.624326 | 3.15625 |
b18bdaaf2161c9d3e5b304ec68d5693505cd1a43
| 1,975 |
asm
|
Assembly
|
tests/assembly/LDY.asm
|
danecreekphotography/6502ts
|
85716cf12f879d7c16c297de3251888c32abba6a
|
[
"MIT"
] | null | null | null |
tests/assembly/LDY.asm
|
danecreekphotography/6502ts
|
85716cf12f879d7c16c297de3251888c32abba6a
|
[
"MIT"
] | null | null | null |
tests/assembly/LDY.asm
|
danecreekphotography/6502ts
|
85716cf12f879d7c16c297de3251888c32abba6a
|
[
"MIT"
] | null | null | null |
; Verifies LDY with all applicable addressing modes.
; NOTE(review): the comments below assume the test harness presets X=$01
; before executing the zeropage,X / absolute,X cases — there is no ldx in
; this file; confirm against the harness.

.segment "VECTORS"

.word $eaea
.word init
.word $eaea

.segment "ZEROPAGE"

; Used for zero page address mode testing
zp:
  .byte $00       ; Padding so remaining bytes can be accessed in zeropage plus tests
  .byte $42       ; Positive
  .byte $00       ; Zero
  .byte %10010101 ; Negative

; Used for indirect x address mode testing
indirectX:
  .byte $00        ; Padding so addresses can be accessed in plus x tests.
  .word data + $01 ; Start of actual test data
  .word data + $02 ; Zero
  .word data + $03 ; Negative

; Used for indirect y address mode testing
indirectY:
  .word data       ; Address of the actual test data start location
  .word data + $FF ; Used for the page boundary test

.data
data:
  .byte $00       ; Padding so remaining bytes can be accessed in absolute plus tests
  .byte $42       ; Positive
  .byte $00       ; Zero
  .byte %10010101 ; Negative

; Implicit here is that memory location data + $FF + $02 will be pre-filled with zeros.
; That location gets used to confirm the cycle count it takes to do an indirect Y
; across a page boundary.

.code
init:
  ; Immediate.
  ldy #$42       ; Positive
  ldy #$00       ; Zero
  ldy #%10010101 ; Negative

  ; Zeropage. Starts with +1 to skip padding.
  ldy zp + $01 ; Positive
  ldy zp + $02 ; Zero
  ldy zp + $03 ; Negative

  ; Zeropage plus X. X will be $01.
  ldy zp,x       ; Positive
  ldy zp + $01,x ; Zero
  ldy zp + $02,x ; Negative

  ; Absolute. Starts with +1 to skip padding.
  ldy data + $01 ; Positive
  ldy data + $02 ; Zero
  ldy data + $03 ; Negative

  ; Absolute plus X. X will be $01.
  ldy data,x       ; Positive
  ldy data + $01,x ; Zero
  ldy data + $02,x ; Negative
  ; NOTE(review): the next two instructions are byte-identical but their
  ; comments claim different expected Y values ($02 vs $03) — one of them
  ; likely needs a different offset; confirm the intended operands.
  ldy data - $01,x ; Positive across page boundary, y will be $02.
  ldy data - $01,x ; Zero across page boundary, y will be $03.
7f30252a8f361a35617999cdf2d6d7c96ecd2a57
| 38,013 |
rs
|
Rust
|
grep-regex/src/matcher.rs
|
tpai/ripgrep
|
09108b7fda7af6db7c1c4f0366301f9a21cc485d
|
[
"MIT",
"Unlicense"
] | null | null | null |
grep-regex/src/matcher.rs
|
tpai/ripgrep
|
09108b7fda7af6db7c1c4f0366301f9a21cc485d
|
[
"MIT",
"Unlicense"
] | null | null | null |
grep-regex/src/matcher.rs
|
tpai/ripgrep
|
09108b7fda7af6db7c1c4f0366301f9a21cc485d
|
[
"MIT",
"Unlicense"
] | 1 |
2020-05-16T03:18:45.000Z
|
2020-05-16T03:18:45.000Z
|
use std::collections::HashMap;
use grep_matcher::{
Captures, LineMatchKind, LineTerminator, Match, Matcher, NoError, ByteSet,
};
use regex::bytes::{CaptureLocations, Regex};
use config::{Config, ConfiguredHIR};
use crlf::CRLFMatcher;
use error::Error;
use multi::MultiLiteralMatcher;
use word::WordMatcher;
/// A builder for constructing a `Matcher` using regular expressions.
///
/// This builder re-exports many of the same options found on the regex crate's
/// builder, in addition to a few other options such as smart case, word
/// matching and the ability to set a line terminator which may enable certain
/// types of optimizations.
///
/// The syntax supported is documented as part of the regex crate:
/// https://docs.rs/regex/*/regex/#syntax
#[derive(Clone, Debug)]
pub struct RegexMatcherBuilder {
    // All accumulated options; `build` borrows self, so one builder can
    // produce many matchers.
    config: Config,
}
impl Default for RegexMatcherBuilder {
    // Equivalent to `RegexMatcherBuilder::new()`.
    fn default() -> RegexMatcherBuilder {
        RegexMatcherBuilder::new()
    }
}
impl RegexMatcherBuilder {
    /// Create a new builder for configuring a regex matcher.
    ///
    /// The builder starts from the default `Config`.
    pub fn new() -> RegexMatcherBuilder {
        RegexMatcherBuilder {
            config: Config::default(),
        }
    }
/// Build a new matcher using the current configuration for the provided
/// pattern.
///
/// The syntax supported is documented as part of the regex crate:
/// https://docs.rs/regex/*/regex/#syntax
pub fn build(&self, pattern: &str) -> Result<RegexMatcher, Error> {
let chir = self.config.hir(pattern)?;
let fast_line_regex = chir.fast_line_regex()?;
let non_matching_bytes = chir.non_matching_bytes();
if let Some(ref re) = fast_line_regex {
trace!("extracted fast line regex: {:?}", re);
}
let matcher = RegexMatcherImpl::new(&chir)?;
trace!("final regex: {:?}", matcher.regex());
Ok(RegexMatcher {
config: self.config.clone(),
matcher: matcher,
fast_line_regex: fast_line_regex,
non_matching_bytes: non_matching_bytes,
})
}
    /// Build a new matcher from a plain alternation of literals.
    ///
    /// Depending on the configuration set by the builder, this may be able to
    /// build a matcher substantially faster than by joining the patterns with
    /// a `|` and calling `build`.
    pub fn build_literals<B: AsRef<str>>(
        &self,
        literals: &[B],
    ) -> Result<RegexMatcher, Error> {
        let slices: Vec<_> = literals.iter().map(|s| s.as_ref()).collect();
        // Fall back to the plain regex path when the config rules out a
        // plain Aho-Corasick automaton or when there are few literals.
        // NOTE(review): 40 looks like a heuristic cut-over threshold — the
        // rationale is not recorded here.
        if !self.config.can_plain_aho_corasick() || literals.len() < 40 {
            return self.build(&slices.join("|"));
        }
        let matcher = MultiLiteralMatcher::new(&slices)?;
        let imp = RegexMatcherImpl::MultiLiteral(matcher);
        Ok(RegexMatcher {
            config: self.config.clone(),
            matcher: imp,
            // The literal matcher has no inner-literal optimization and no
            // known non-matching byte set.
            fast_line_regex: None,
            non_matching_bytes: ByteSet::empty(),
        })
    }
/// Set the value for the case insensitive (`i`) flag.
///
/// When enabled, letters in the pattern will match both upper case and
/// lower case variants.
pub fn case_insensitive(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.case_insensitive = yes;
self
}
/// Whether to enable "smart case" or not.
///
/// When smart case is enabled, the builder will automatically enable
/// case insensitive matching based on how the pattern is written. Namely,
/// case insensitive mode is enabled when both of the following things
/// are true:
///
/// 1. The pattern contains at least one literal character. For example,
/// `a\w` contains a literal (`a`) but `\w` does not.
/// 2. Of the literals in the pattern, none of them are considered to be
/// uppercase according to Unicode. For example, `foo\pL` has no
/// uppercase literals but `Foo\pL` does.
pub fn case_smart(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.case_smart = yes;
self
}
/// Set the value for the multi-line matching (`m`) flag.
///
/// When enabled, `^` matches the beginning of lines and `$` matches the
/// end of lines.
///
/// By default, they match beginning/end of the input.
pub fn multi_line(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.multi_line = yes;
self
}
/// Set the value for the any character (`s`) flag, where in `.` matches
/// anything when `s` is set and matches anything except for new line when
/// it is not set (the default).
///
/// N.B. "matches anything" means "any byte" when Unicode is disabled and
/// means "any valid UTF-8 encoding of any Unicode scalar value" when
/// Unicode is enabled.
pub fn dot_matches_new_line(
&mut self,
yes: bool,
) -> &mut RegexMatcherBuilder {
self.config.dot_matches_new_line = yes;
self
}
/// Set the value for the greedy swap (`U`) flag.
///
/// When enabled, a pattern like `a*` is lazy (tries to find shortest
/// match) and `a*?` is greedy (tries to find longest match).
///
/// By default, `a*` is greedy and `a*?` is lazy.
pub fn swap_greed(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.swap_greed = yes;
self
}
/// Set the value for the ignore whitespace (`x`) flag.
///
/// When enabled, whitespace such as new lines and spaces will be ignored
/// between expressions of the pattern, and `#` can be used to start a
/// comment until the next new line.
pub fn ignore_whitespace(
&mut self,
yes: bool,
) -> &mut RegexMatcherBuilder {
self.config.ignore_whitespace = yes;
self
}
/// Set the value for the Unicode (`u`) flag.
///
/// Enabled by default. When disabled, character classes such as `\w` only
/// match ASCII word characters instead of all Unicode word characters.
pub fn unicode(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.unicode = yes;
self
}
/// Whether to support octal syntax or not.
///
/// Octal syntax is a little-known way of uttering Unicode codepoints in
/// a regular expression. For example, `a`, `\x61`, `\u0061` and
/// `\141` are all equivalent regular expressions, where the last example
/// shows octal syntax.
///
/// While supporting octal syntax isn't in and of itself a problem, it does
/// make good error messages harder. That is, in PCRE based regex engines,
/// syntax like `\0` invokes a backreference, which is explicitly
/// unsupported in Rust's regex engine. However, many users expect it to
/// be supported. Therefore, when octal support is disabled, the error
/// message will explicitly mention that backreferences aren't supported.
///
/// Octal syntax is disabled by default.
pub fn octal(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.octal = yes;
self
}
/// Set the approximate size limit of the compiled regular expression.
///
/// This roughly corresponds to the number of bytes occupied by a single
/// compiled program. If the program exceeds this number, then a
/// compilation error is returned.
pub fn size_limit(&mut self, bytes: usize) -> &mut RegexMatcherBuilder {
self.config.size_limit = bytes;
self
}
/// Set the approximate size of the cache used by the DFA.
///
/// This roughly corresponds to the number of bytes that the DFA will
/// use while searching.
///
/// Note that this is a *per thread* limit. There is no way to set a global
/// limit. In particular, if a regex is used from multiple threads
/// simultaneously, then each thread may use up to the number of bytes
/// specified here.
pub fn dfa_size_limit(
&mut self,
bytes: usize,
) -> &mut RegexMatcherBuilder {
self.config.dfa_size_limit = bytes;
self
}
/// Set the nesting limit for this parser.
///
/// The nesting limit controls how deep the abstract syntax tree is allowed
/// to be. If the AST exceeds the given limit (e.g., with too many nested
/// groups), then an error is returned by the parser.
///
/// The purpose of this limit is to act as a heuristic to prevent stack
/// overflow for consumers that do structural induction on an `Ast` using
/// explicit recursion. While this crate never does this (instead using
/// constant stack space and moving the call stack to the heap), other
/// crates may.
///
/// This limit is not checked until the entire Ast is parsed. Therefore,
/// if callers want to put a limit on the amount of heap space used, then
/// they should impose a limit on the length, in bytes, of the concrete
/// pattern string. In particular, this is viable since this parser
/// implementation will limit itself to heap space proportional to the
/// lenth of the pattern string.
///
/// Note that a nest limit of `0` will return a nest limit error for most
/// patterns but not all. For example, a nest limit of `0` permits `a` but
/// not `ab`, since `ab` requires a concatenation, which results in a nest
/// depth of `1`. In general, a nest limit is not something that manifests
/// in an obvious way in the concrete syntax, therefore, it should not be
/// used in a granular way.
pub fn nest_limit(&mut self, limit: u32) -> &mut RegexMatcherBuilder {
self.config.nest_limit = limit;
self
}
/// Set an ASCII line terminator for the matcher.
///
/// The purpose of setting a line terminator is to enable a certain class
/// of optimizations that can make line oriented searching faster. Namely,
/// when a line terminator is enabled, then the builder will guarantee that
/// the resulting matcher will never be capable of producing a match that
/// contains the line terminator. Because of this guarantee, users of the
/// resulting matcher do not need to slowly execute a search line by line
/// for line oriented search.
///
/// If the aforementioned guarantee about not matching a line terminator
/// cannot be made because of how the pattern was written, then the builder
/// will return an error when attempting to construct the matcher. For
/// example, the pattern `a\sb` will be transformed such that it can never
/// match `a\nb` (when `\n` is the line terminator), but the pattern `a\nb`
/// will result in an error since the `\n` cannot be easily removed without
/// changing the fundamental intent of the pattern.
///
/// If the given line terminator isn't an ASCII byte (`<=127`), then the
/// builder will return an error when constructing the matcher.
pub fn line_terminator(
&mut self,
line_term: Option<u8>,
) -> &mut RegexMatcherBuilder {
self.config.line_terminator = line_term.map(LineTerminator::byte);
self
}
/// Set the line terminator to `\r\n` and enable CRLF matching for `$` in
/// regex patterns.
///
/// This method sets two distinct settings:
///
/// 1. It causes the line terminator for the matcher to be `\r\n`. Namely,
/// this prevents the matcher from ever producing a match that contains
/// a `\r` or `\n`.
/// 2. It translates all instances of `$` in the pattern to `(?:\r??$)`.
/// This works around the fact that the regex engine does not support
/// matching CRLF as a line terminator when using `$`.
///
/// In particular, because of (2), the matches produced by the matcher may
/// be slightly different than what one would expect given the pattern.
/// This is the trade off made: in many cases, `$` will "just work" in the
/// presence of `\r\n` line terminators, but matches may require some
/// trimming to faithfully represent the intended match.
///
/// Note that if you do not wish to set the line terminator but would still
/// like `$` to match `\r\n` line terminators, then it is valid to call
/// `crlf(true)` followed by `line_terminator(None)`. Ordering is
/// important, since `crlf` and `line_terminator` override each other.
pub fn crlf(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
if yes {
self.config.line_terminator = Some(LineTerminator::crlf());
} else {
self.config.line_terminator = None;
}
self.config.crlf = yes;
self
}
/// Require that all matches occur on word boundaries.
///
/// Enabling this option is subtly different than putting `\b` assertions
/// on both sides of your pattern. In particular, a `\b` assertion requires
/// that one side of it match a word character while the other match a
/// non-word character. This option, in contrast, merely requires that
/// one side match a non-word character.
///
/// For example, `\b-2\b` will not match `foo -2 bar` since `-` is not a
/// word character. However, `-2` with this `word` option enabled will
/// match the `-2` in `foo -2 bar`.
pub fn word(&mut self, yes: bool) -> &mut RegexMatcherBuilder {
self.config.word = yes;
self
}
}
/// An implementation of the `Matcher` trait using Rust's standard regex
/// library.
#[derive(Clone, Debug)]
pub struct RegexMatcher {
    /// The configuration specified by the caller.
    config: Config,
    /// The underlying matcher implementation.
    matcher: RegexMatcherImpl,
    /// A regex that never reports false negatives but may report false
    /// positives that is believed to be capable of being matched more quickly
    /// than `regex`. Typically, this is a single literal or an alternation
    /// of literals. Used by `find_candidate_line` to cheaply surface
    /// candidate lines.
    fast_line_regex: Option<Regex>,
    /// A set of bytes that will never appear in a match.
    non_matching_bytes: ByteSet,
}
impl RegexMatcher {
    /// Build a matcher for `pattern` using the default configuration.
    pub fn new(pattern: &str) -> Result<RegexMatcher, Error> {
        let builder = RegexMatcherBuilder::new();
        builder.build(pattern)
    }
    /// Build a matcher for `pattern` using the default configuration, except
    /// that lines are terminated by `\n`.
    ///
    /// This is a convenience for configuring a `RegexMatcherBuilder` with a
    /// [`line_terminator`](struct.RegexMatcherBuilder.html#method.line_terminator)
    /// of `\n`, which unlocks special optimizations for line oriented search.
    /// Those optimizations are only sound when no match can span more than
    /// one line, so construction fails if the given pattern contains a
    /// literal `\n`. Other ways of expressing `\n` (such as in `\s`) are
    /// removed transparently.
    pub fn new_line_matcher(pattern: &str) -> Result<RegexMatcher, Error> {
        let mut builder = RegexMatcherBuilder::new();
        builder.line_terminator(Some(b'\n'));
        builder.build(pattern)
    }
}
/// An encapsulation of the type of matcher we use in `RegexMatcher`.
///
/// The variant is chosen by `RegexMatcherImpl::new` based on the configured
/// HIR.
#[derive(Clone, Debug)]
enum RegexMatcherImpl {
    /// The standard matcher used for all regular expressions.
    Standard(StandardMatcher),
    /// A matcher for an alternation of plain literals.
    MultiLiteral(MultiLiteralMatcher),
    /// A matcher that strips `\r` from the end of matches.
    ///
    /// This is only used when the CRLF hack is enabled and the regex is line
    /// anchored at the end.
    CRLF(CRLFMatcher),
    /// A matcher that only matches at word boundaries. This transforms the
    /// regex to `(^|\W)(...)($|\W)` instead of the more intuitive `\b(...)\b`.
    /// Because of this, the WordMatcher provides its own implementation of
    /// `Matcher` to encapsulate its use of capture groups to make them
    /// invisible to the caller.
    Word(WordMatcher),
}
impl RegexMatcherImpl {
    /// Based on the configuration, create a new implementation of the
    /// `Matcher` trait.
    fn new(expr: &ConfiguredHIR) -> Result<RegexMatcherImpl, Error> {
        // Word matching and CRLF stripping each require their own wrapper.
        if expr.config().word {
            return Ok(RegexMatcherImpl::Word(WordMatcher::new(expr)?));
        }
        if expr.needs_crlf_stripped() {
            return Ok(RegexMatcherImpl::CRLF(CRLFMatcher::new(expr)?));
        }
        // Large plain alternations get a dedicated multi-literal matcher.
        if let Some(lits) = expr.alternation_literals() {
            if lits.len() >= 40 {
                return Ok(RegexMatcherImpl::MultiLiteral(
                    MultiLiteralMatcher::new(&lits)?,
                ));
            }
        }
        Ok(RegexMatcherImpl::Standard(StandardMatcher::new(expr)?))
    }
    /// Return a string rendering of the underlying regex used, if any.
    fn regex(&self) -> String {
        match *self {
            RegexMatcherImpl::Standard(ref m) => m.regex.to_string(),
            RegexMatcherImpl::MultiLiteral(_) => "<N/A>".to_string(),
            RegexMatcherImpl::CRLF(ref m) => m.regex().to_string(),
            RegexMatcherImpl::Word(ref m) => m.regex().to_string(),
        }
    }
}
// This implementation just dispatches on the internal matcher impl except
// for the line terminator optimization, which is possibly executed via
// `fast_line_regex`.
impl Matcher for RegexMatcher {
    type Captures = RegexCaptures;
    type Error = NoError;
    fn find_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<Match>, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.find_at(haystack, at),
            MultiLiteral(ref m) => m.find_at(haystack, at),
            CRLF(ref m) => m.find_at(haystack, at),
            Word(ref m) => m.find_at(haystack, at),
        }
    }
    fn new_captures(&self) -> Result<RegexCaptures, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.new_captures(),
            MultiLiteral(ref m) => m.new_captures(),
            CRLF(ref m) => m.new_captures(),
            Word(ref m) => m.new_captures(),
        }
    }
    fn capture_count(&self) -> usize {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.capture_count(),
            MultiLiteral(ref m) => m.capture_count(),
            CRLF(ref m) => m.capture_count(),
            Word(ref m) => m.capture_count(),
        }
    }
    fn capture_index(&self, name: &str) -> Option<usize> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.capture_index(name),
            MultiLiteral(ref m) => m.capture_index(name),
            CRLF(ref m) => m.capture_index(name),
            Word(ref m) => m.capture_index(name),
        }
    }
    fn find(&self, haystack: &[u8]) -> Result<Option<Match>, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.find(haystack),
            MultiLiteral(ref m) => m.find(haystack),
            CRLF(ref m) => m.find(haystack),
            Word(ref m) => m.find(haystack),
        }
    }
    fn find_iter<F>(
        &self,
        haystack: &[u8],
        matched: F,
    ) -> Result<(), NoError>
    where F: FnMut(Match) -> bool
    {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.find_iter(haystack, matched),
            MultiLiteral(ref m) => m.find_iter(haystack, matched),
            CRLF(ref m) => m.find_iter(haystack, matched),
            Word(ref m) => m.find_iter(haystack, matched),
        }
    }
    fn try_find_iter<F, E>(
        &self,
        haystack: &[u8],
        matched: F,
    ) -> Result<Result<(), E>, NoError>
    where F: FnMut(Match) -> Result<bool, E>
    {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.try_find_iter(haystack, matched),
            MultiLiteral(ref m) => m.try_find_iter(haystack, matched),
            CRLF(ref m) => m.try_find_iter(haystack, matched),
            Word(ref m) => m.try_find_iter(haystack, matched),
        }
    }
    fn captures(
        &self,
        haystack: &[u8],
        caps: &mut RegexCaptures,
    ) -> Result<bool, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.captures(haystack, caps),
            MultiLiteral(ref m) => m.captures(haystack, caps),
            CRLF(ref m) => m.captures(haystack, caps),
            Word(ref m) => m.captures(haystack, caps),
        }
    }
    fn captures_iter<F>(
        &self,
        haystack: &[u8],
        caps: &mut RegexCaptures,
        matched: F,
    ) -> Result<(), NoError>
    where F: FnMut(&RegexCaptures) -> bool
    {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.captures_iter(haystack, caps, matched),
            MultiLiteral(ref m) => m.captures_iter(haystack, caps, matched),
            CRLF(ref m) => m.captures_iter(haystack, caps, matched),
            Word(ref m) => m.captures_iter(haystack, caps, matched),
        }
    }
    fn try_captures_iter<F, E>(
        &self,
        haystack: &[u8],
        caps: &mut RegexCaptures,
        matched: F,
    ) -> Result<Result<(), E>, NoError>
    where F: FnMut(&RegexCaptures) -> Result<bool, E>
    {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.try_captures_iter(haystack, caps, matched),
            MultiLiteral(ref m) => {
                m.try_captures_iter(haystack, caps, matched)
            }
            CRLF(ref m) => m.try_captures_iter(haystack, caps, matched),
            Word(ref m) => m.try_captures_iter(haystack, caps, matched),
        }
    }
    fn captures_at(
        &self,
        haystack: &[u8],
        at: usize,
        caps: &mut RegexCaptures,
    ) -> Result<bool, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.captures_at(haystack, at, caps),
            MultiLiteral(ref m) => m.captures_at(haystack, at, caps),
            CRLF(ref m) => m.captures_at(haystack, at, caps),
            Word(ref m) => m.captures_at(haystack, at, caps),
        }
    }
    fn replace<F>(
        &self,
        haystack: &[u8],
        dst: &mut Vec<u8>,
        append: F,
    ) -> Result<(), NoError>
    where F: FnMut(Match, &mut Vec<u8>) -> bool
    {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.replace(haystack, dst, append),
            MultiLiteral(ref m) => m.replace(haystack, dst, append),
            CRLF(ref m) => m.replace(haystack, dst, append),
            Word(ref m) => m.replace(haystack, dst, append),
        }
    }
    fn replace_with_captures<F>(
        &self,
        haystack: &[u8],
        caps: &mut RegexCaptures,
        dst: &mut Vec<u8>,
        append: F,
    ) -> Result<(), NoError>
    where F: FnMut(&Self::Captures, &mut Vec<u8>) -> bool
    {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => {
                m.replace_with_captures(haystack, caps, dst, append)
            }
            MultiLiteral(ref m) => {
                m.replace_with_captures(haystack, caps, dst, append)
            }
            CRLF(ref m) => {
                m.replace_with_captures(haystack, caps, dst, append)
            }
            Word(ref m) => {
                m.replace_with_captures(haystack, caps, dst, append)
            }
        }
    }
    fn is_match(&self, haystack: &[u8]) -> Result<bool, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.is_match(haystack),
            MultiLiteral(ref m) => m.is_match(haystack),
            CRLF(ref m) => m.is_match(haystack),
            Word(ref m) => m.is_match(haystack),
        }
    }
    fn is_match_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<bool, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.is_match_at(haystack, at),
            MultiLiteral(ref m) => m.is_match_at(haystack, at),
            CRLF(ref m) => m.is_match_at(haystack, at),
            Word(ref m) => m.is_match_at(haystack, at),
        }
    }
    fn shortest_match(
        &self,
        haystack: &[u8],
    ) -> Result<Option<usize>, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.shortest_match(haystack),
            MultiLiteral(ref m) => m.shortest_match(haystack),
            CRLF(ref m) => m.shortest_match(haystack),
            Word(ref m) => m.shortest_match(haystack),
        }
    }
    fn shortest_match_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<usize>, NoError> {
        use self::RegexMatcherImpl::*;
        match self.matcher {
            Standard(ref m) => m.shortest_match_at(haystack, at),
            MultiLiteral(ref m) => m.shortest_match_at(haystack, at),
            CRLF(ref m) => m.shortest_match_at(haystack, at),
            Word(ref m) => m.shortest_match_at(haystack, at),
        }
    }
    /// Return the set of bytes, precomputed by the builder, that can never
    /// appear in a match.
    fn non_matching_bytes(&self) -> Option<&ByteSet> {
        Some(&self.non_matching_bytes)
    }
    /// Report the line terminator from this matcher's configuration, if one
    /// was set.
    fn line_terminator(&self) -> Option<LineTerminator> {
        self.config.line_terminator
    }
    fn find_candidate_line(
        &self,
        haystack: &[u8],
    ) -> Result<Option<LineMatchKind>, NoError> {
        Ok(match self.fast_line_regex {
            // When a cheap filtering regex was extracted at build time, use
            // it. Its hits are only candidates: they may be false positives
            // and must be confirmed by the caller.
            Some(ref regex) => {
                regex.shortest_match(haystack).map(LineMatchKind::Candidate)
            }
            // Otherwise run the real matcher, whose hits are confirmed.
            None => {
                self.shortest_match(haystack)?.map(LineMatchKind::Confirmed)
            }
        })
    }
}
/// The implementation of the standard regex matcher.
#[derive(Clone, Debug)]
struct StandardMatcher {
    /// The regular expression compiled from the pattern provided by the
    /// caller.
    regex: Regex,
    /// A map from capture group name to its corresponding index. Built once
    /// at construction from the regex's capture names.
    names: HashMap<String, usize>,
}
impl StandardMatcher {
    /// Compile the configured HIR and index its named capture groups.
    fn new(expr: &ConfiguredHIR) -> Result<StandardMatcher, Error> {
        let regex = expr.regex()?;
        // Build a lookup table from capture group name to group index,
        // skipping unnamed groups.
        let names = regex
            .capture_names()
            .enumerate()
            .filter_map(|(i, name)| name.map(|n| (n.to_string(), i)))
            .collect();
        Ok(StandardMatcher { regex, names })
    }
}
impl Matcher for StandardMatcher {
    type Captures = RegexCaptures;
    type Error = NoError;
    fn find_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<Match>, NoError> {
        let mat = self.regex.find_at(haystack, at);
        Ok(mat.map(|m| Match::new(m.start(), m.end())))
    }
    fn new_captures(&self) -> Result<RegexCaptures, NoError> {
        let locs = self.regex.capture_locations();
        Ok(RegexCaptures::new(locs))
    }
    fn capture_count(&self) -> usize {
        self.regex.captures_len()
    }
    fn capture_index(&self, name: &str) -> Option<usize> {
        self.names.get(name).copied()
    }
    fn try_find_iter<F, E>(
        &self,
        haystack: &[u8],
        mut matched: F,
    ) -> Result<Result<(), E>, NoError>
    where F: FnMut(Match) -> Result<bool, E>
    {
        // Stop early if the callback errors or asks to quit; the error is
        // surfaced in the inner `Result`.
        for m in self.regex.find_iter(haystack) {
            let keep_going = match matched(Match::new(m.start(), m.end())) {
                Err(err) => return Ok(Err(err)),
                Ok(keep_going) => keep_going,
            };
            if !keep_going {
                return Ok(Ok(()));
            }
        }
        Ok(Ok(()))
    }
    fn captures_at(
        &self,
        haystack: &[u8],
        at: usize,
        caps: &mut RegexCaptures,
    ) -> Result<bool, NoError> {
        let locs = caps.locations_mut();
        Ok(self.regex.captures_read_at(locs, haystack, at).is_some())
    }
    fn shortest_match_at(
        &self,
        haystack: &[u8],
        at: usize,
    ) -> Result<Option<usize>, NoError> {
        Ok(self.regex.shortest_match_at(haystack, at))
    }
}
/// Represents the match offsets of each capturing group in a match.
///
/// The first, or `0`th capture group, always corresponds to the entire match
/// and is guaranteed to be present when a match occurs. The next capture
/// group, at index `1`, corresponds to the first capturing group in the regex,
/// ordered by the position at which the left opening parenthesis occurs.
///
/// Note that not all capturing groups are guaranteed to be present in a match.
/// For example, in the regex, `(?P<foo>\w)|(?P<bar>\W)`, only one of `foo`
/// or `bar` will ever be set in any given match.
///
/// In order to access a capture group by name, you'll need to first find the
/// index of the group using the corresponding matcher's `capture_index`
/// method, and then use that index with `RegexCaptures::get`.
///
/// Internally, this wraps either full capture locations from the regex
/// engine or just the overall match offsets (for literal matchers).
#[derive(Clone, Debug)]
pub struct RegexCaptures(RegexCapturesImp);
/// The internal representation of `RegexCaptures`: either just the overall
/// match (for literal matchers) or full capture locations from the regex
/// engine.
#[derive(Clone, Debug)]
enum RegexCapturesImp {
    AhoCorasick {
        /// The start and end of the match, corresponding to capture group 0.
        mat: Option<Match>,
    },
    Regex {
        /// Where the locations are stored.
        locs: CaptureLocations,
        /// These captures behave as if the capturing groups begin at the given
        /// offset. When set to `0`, this has no effect and capture groups are
        /// indexed like normal.
        ///
        /// This is useful when building matchers that wrap arbitrary regular
        /// expressions. For example, `WordMatcher` takes an existing regex
        /// `re` and creates `(?:^|\W)(re)(?:$|\W)`, but hides the fact that
        /// the regex has been wrapped from the caller. In order to do this,
        /// the matcher and the capturing groups must behave as if `(re)` is
        /// the `0`th capture group.
        offset: usize,
        /// When enabled, the end of a match has `\r` stripped from it, if one
        /// exists.
        strip_crlf: bool,
    },
}
impl Captures for RegexCaptures {
    fn len(&self) -> usize {
        match self.0 {
            // Literal matchers only ever expose the overall match.
            RegexCapturesImp::AhoCorasick { .. } => 1,
            RegexCapturesImp::Regex { ref locs, offset, .. } => {
                // `offset` must never exceed the number of groups; the
                // unwrap enforces that invariant.
                locs.len().checked_sub(offset).unwrap()
            }
        }
    }
    fn get(&self, i: usize) -> Option<Match> {
        match self.0 {
            RegexCapturesImp::AhoCorasick { mat, .. } => {
                // Only group 0 (the overall match) exists here.
                if i == 0 {
                    mat
                } else {
                    None
                }
            }
            RegexCapturesImp::Regex { ref locs, offset, strip_crlf } => {
                if !strip_crlf {
                    let actual = i.checked_add(offset).unwrap();
                    return locs.pos(actual).map(|(s, e)| Match::new(s, e));
                }
                // currently don't support capture offsetting with CRLF
                // stripping
                assert_eq!(offset, 0);
                let m = match locs.pos(i).map(|(s, e)| Match::new(s, e)) {
                    None => return None,
                    Some(m) => m,
                };
                // If the end position of this match corresponds to the end
                // position of the overall match, then we apply our CRLF
                // stripping. Otherwise, we cannot assume stripping is correct.
                // NOTE(review): this shortens the match by one byte
                // unconditionally, so it assumes the overall match really
                // ends with `\r` whenever `strip_crlf` is set — confirm
                // against the CRLF matcher's construction.
                if i == 0 || m.end() == locs.pos(0).unwrap().1 {
                    Some(m.with_end(m.end() - 1))
                } else {
                    Some(m)
                }
            }
        }
    }
}
impl RegexCaptures {
    /// Create captures that only track the overall match (group 0).
    pub(crate) fn simple() -> RegexCaptures {
        RegexCaptures(RegexCapturesImp::AhoCorasick { mat: None })
    }
    /// Create captures backed by the given capture locations.
    pub(crate) fn new(locs: CaptureLocations) -> RegexCaptures {
        RegexCaptures::with_offset(locs, 0)
    }
    /// Create captures backed by the given capture locations, where every
    /// group index behaves as if it starts at `offset`.
    pub(crate) fn with_offset(
        locs: CaptureLocations,
        offset: usize,
    ) -> RegexCaptures {
        let imp = RegexCapturesImp::Regex { locs, offset, strip_crlf: false };
        RegexCaptures(imp)
    }
    /// Borrow the underlying capture locations.
    ///
    /// Panics when these captures are not backed by a regex.
    pub(crate) fn locations(&self) -> &CaptureLocations {
        match self.0 {
            RegexCapturesImp::Regex { ref locs, .. } => locs,
            RegexCapturesImp::AhoCorasick { .. } => {
                panic!("getting locations for simple captures is invalid")
            }
        }
    }
    /// Mutably borrow the underlying capture locations.
    ///
    /// Panics when these captures are not backed by a regex.
    pub(crate) fn locations_mut(&mut self) -> &mut CaptureLocations {
        match self.0 {
            RegexCapturesImp::Regex { ref mut locs, .. } => locs,
            RegexCapturesImp::AhoCorasick { .. } => {
                panic!("getting locations for simple captures is invalid")
            }
        }
    }
    /// Toggle CRLF stripping for subsequent `get` calls.
    ///
    /// Panics when these captures are not backed by a regex.
    pub(crate) fn strip_crlf(&mut self, yes: bool) {
        match self.0 {
            RegexCapturesImp::Regex { ref mut strip_crlf, .. } => {
                *strip_crlf = yes;
            }
            RegexCapturesImp::AhoCorasick { .. } => {
                panic!("setting strip_crlf for simple captures is invalid")
            }
        }
    }
    /// Record the overall match for simple (literal matcher) captures.
    ///
    /// Panics when these captures are backed by a regex.
    pub(crate) fn set_simple(&mut self, one: Option<Match>) {
        match self.0 {
            RegexCapturesImp::AhoCorasick { ref mut mat } => {
                *mat = one;
            }
            RegexCapturesImp::Regex { .. } => {
                panic!("setting simple captures for regex is invalid")
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use grep_matcher::{LineMatchKind, Matcher};
    use super::*;
    // Word matching is subtly different from wrapping a pattern in `\b`
    // assertions; demonstrate the difference on a non-word literal.
    #[test]
    fn word() {
        let word_matcher = RegexMatcherBuilder::new()
            .word(true)
            .build(r"-2")
            .unwrap();
        assert!(word_matcher.is_match(b"abc -2 foo").unwrap());
        let boundary_matcher = RegexMatcherBuilder::new()
            .word(false)
            .build(r"\b-2\b")
            .unwrap();
        assert!(!boundary_matcher.is_match(b"abc -2 foo").unwrap());
    }
    // Setting a line terminator must prevent a match from spanning it.
    #[test]
    fn line_terminator() {
        // With no line terminator configured, `\s` may match `\n`.
        let unrestricted = RegexMatcherBuilder::new()
            .build(r"abc\sxyz")
            .unwrap();
        assert!(unrestricted.is_match(b"abc\nxyz").unwrap());
        // With one configured, the same pattern cannot cross it.
        let restricted = RegexMatcherBuilder::new()
            .line_terminator(Some(b'\n'))
            .build(r"abc\sxyz")
            .unwrap();
        assert!(!restricted.is_match(b"abc\nxyz").unwrap());
    }
    // A pattern that requires matching the line terminator cannot be fixed
    // up automatically, so building it must fail.
    #[test]
    fn line_terminator_error() {
        let result = RegexMatcherBuilder::new()
            .line_terminator(Some(b'\n'))
            .build(r"a\nz");
        assert!(result.is_err())
    }
    // CRLF mode permits `$` to match just before `\r\n`.
    #[test]
    fn line_terminator_crlf() {
        // `$` matches before a bare `\n`...
        let lf_matcher = RegexMatcherBuilder::new()
            .multi_line(true)
            .build(r"abc$")
            .unwrap();
        assert!(lf_matcher.is_match(b"abc\n").unwrap());
        // ...but not at a `\r\n` boundary by default...
        let crlf_off = RegexMatcherBuilder::new()
            .multi_line(true)
            .build(r"abc$")
            .unwrap();
        assert!(!crlf_off.is_match(b"abc\r\n").unwrap());
        // ...until CRLF handling is enabled.
        let crlf_on = RegexMatcherBuilder::new()
            .multi_line(true)
            .crlf(true)
            .build(r"abc$")
            .unwrap();
        assert!(crlf_on.is_match(b"abc\r\n").unwrap());
    }
    // Smart case: an all-lowercase pattern matches case insensitively, while
    // a pattern containing an uppercase literal stays case sensitive.
    #[test]
    fn case_smart() {
        let lower = RegexMatcherBuilder::new()
            .case_smart(true)
            .build(r"abc")
            .unwrap();
        assert!(lower.is_match(b"ABC").unwrap());
        let mixed = RegexMatcherBuilder::new()
            .case_smart(true)
            .build(r"aBc")
            .unwrap();
        assert!(!mixed.is_match(b"ABC").unwrap());
    }
    // Candidate line detection: confirmed matches without a line terminator,
    // and (possibly false positive) candidates when the inner literal
    // optimization kicks in.
    #[test]
    fn candidate_lines() {
        fn is_confirmed(m: LineMatchKind) -> bool {
            if let LineMatchKind::Confirmed(_) = m { true } else { false }
        }
        fn is_candidate(m: LineMatchKind) -> bool {
            if let LineMatchKind::Candidate(_) = m { true } else { false }
        }
        // No line terminator means no optimizations are available, so any
        // reported match must already be confirmed.
        let plain = RegexMatcherBuilder::new()
            .build(r"\wfoo\s")
            .unwrap();
        let m = plain.find_candidate_line(b"afoo ").unwrap().unwrap();
        assert!(is_confirmed(m));
        // With a line terminator and an easy-to-detect inner literal, the
        // fast path reports a candidate instead.
        let fast = RegexMatcherBuilder::new()
            .line_terminator(Some(b'\n'))
            .build(r"\wfoo\s")
            .unwrap();
        let m = fast.find_candidate_line(b"afoo ").unwrap().unwrap();
        assert!(is_candidate(m));
    }
}
| 35.895184 | 83 | 0.583721 | 3.03125 |
fd6934592a67e5625bc0b0a22fadfa4f79b0a225
| 2,223 |
swift
|
Swift
|
Sources/ShapeUp/CornerShape/EnumeratedCornerShape/CornerTriangle.swift
|
ryanlintott/ShapeUp
|
1e6677c8e7cf80525b85519c2e0b880b339dd100
|
[
"MIT"
] | 30 |
2022-03-24T13:39:10.000Z
|
2022-03-31T14:37:32.000Z
|
Sources/ShapeUp/CornerShape/EnumeratedCornerShape/CornerTriangle.swift
|
ryanlintott/ShapeUp
|
1e6677c8e7cf80525b85519c2e0b880b339dd100
|
[
"MIT"
] | null | null | null |
Sources/ShapeUp/CornerShape/EnumeratedCornerShape/CornerTriangle.swift
|
ryanlintott/ShapeUp
|
1e6677c8e7cf80525b85519c2e0b880b339dd100
|
[
"MIT"
] | null | null | null |
//
// CornerTriangle.swift
// ShapeUp
//
// Created by Ryan Lintott on 2022-03-08.
//
import SwiftUI
/**
A triangular shape with an adjustable top point and individually stylable corners, aligned inside the frame of the view containing it.
The top point is positioned relative to the top left corner and the value is a `RelatableValue` relative to the width of the frame provided. The default is in the middle.
This shape can either be used in a SwiftUI View like any other `InsettableShape`
    CornerTriangle(topPoint: .relative(0.6), styles: [
        .top: .straight(radius: 10),
        .bottomRight: .rounded(radius: .relative(0.3)),
        .bottomLeft: .concave(radius: .relative(0.2))
    ])
    .fill()
The corners can be accessed directly for use in a more complex shape
    public func corners(in rect: CGRect) -> [Corner] {
        CornerTriangle(topPoint: 30)
            .corners(in: rect)
            .inset(by: 10)
            .addingNotch(Notch(.rectangle, depth: 5), afterCornerIndex: 0)
    }
*/
public struct CornerTriangle: EnumeratedCornerShape {
    /// Whether the shape's path is closed. (NOTE(review): presumably an
    /// `EnumeratedCornerShape` requirement — confirm against the protocol.)
    public var closed = true
    /// The current inset, used by `InsettableShape` conformance (see the
    /// type doc above); starts at zero.
    public var insetAmount: CGFloat = 0
    /// An enumeration to indicate the three corners of a triangle.
    public enum ShapeCorner: CaseIterable, Hashable {
        case top
        case bottomRight
        case bottomLeft
    }
    /// Position of the top point measured from the top left corner; relative
    /// values resolve against the frame's width.
    public var topPoint: RelatableValue
    /// The corner style for each corner, if any.
    public var styles: [ShapeCorner: CornerStyle?]
    /// Creates a 2d triangular shape with specified top point and styles for each corner.
    /// - Parameters:
    ///   - topPoint: Position of the top point from the top left corner of the frame. Relative values are relative to width.
    ///   - styles: A dictionary describing the style of each shape corner.
    public init(topPoint: RelatableValue = .relative(0.5), styles: [ShapeCorner: CornerStyle] = [:]) {
        self.topPoint = topPoint
        self.styles = styles
    }
    /// Returns the location of each triangle corner inside `rect`.
    public func points(in rect: CGRect) -> [ShapeCorner: CGPoint] {
        [
            // The top point is the top-left corner shifted right by the
            // resolved offset (relative values use the rect's width).
            .top: rect.point(.topLeft).moved(dx: topPoint.value(using: rect.width)),
            .bottomRight: rect.point(.bottomRight),
            .bottomLeft: rect.point(.bottomLeft)
        ]
    }
}
| 34.734375 | 170 | 0.660819 | 3.015625 |
a1b3a896cb50d8d8881e214e879fcc2d4ad725c8
| 6,721 |
go
|
Go
|
vendor/github.com/golang/leveldb/db/options.go
|
Yangjxxxxx/ZNBase
|
dcf993b73250dd5cb63041f4d9cf098941f67b2b
|
[
"MIT",
"BSD-3-Clause"
] | 1,252 |
2015-07-21T14:17:57.000Z
|
2022-03-30T09:19:48.000Z
|
db/options.go
|
huangchulong/leveldb
|
259d9253d71996b7778a3efb4144fe4892342b18
|
[
"BSD-3-Clause"
] | 13 |
2015-07-28T07:06:24.000Z
|
2021-07-08T19:57:54.000Z
|
db/options.go
|
huangchulong/leveldb
|
259d9253d71996b7778a3efb4144fe4892342b18
|
[
"BSD-3-Clause"
] | 176 |
2015-07-21T00:54:58.000Z
|
2022-03-20T05:56:38.000Z
|
// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package db
// Compression is the per-block compression algorithm to use.
type Compression int
const (
DefaultCompression Compression = iota
NoCompression
SnappyCompression
nCompression
)
// FilterPolicy is an algorithm for probabilistically encoding a set of keys.
// The canonical implementation is a Bloom filter.
//
// Every FilterPolicy has a name. This names the algorithm itself, not any one
// particular instance. Aspects specific to a particular instance, such as the
// set of keys or any other parameters, will be encoded in the []byte filter
// returned by NewFilter.
//
// The name may be written to files on disk, along with the filter data. To use
// these filters, the FilterPolicy name at the time of writing must equal the
// name at the time of reading. If they do not match, the filters will be
// ignored, which will not affect correctness but may affect performance.
type FilterPolicy interface {
// Name names the filter policy.
Name() string
// AppendFilter appends to dst an encoded filter that holds a set of []byte
// keys.
AppendFilter(dst []byte, keys [][]byte) []byte
// MayContain returns whether the encoded filter may contain given key.
// False positives are possible, where it returns true for keys not in the
// original set.
MayContain(filter, key []byte) bool
}
// Options holds the optional parameters for leveldb's DB implementations.
// These options apply to the DB at large; per-query options are defined by
// the ReadOptions and WriteOptions types.
//
// Options are typically passed to a constructor function as a struct literal.
// The GetXxx methods are used inside the DB implementations; they return the
// default parameter value if the *Options receiver is nil or the field value
// is zero.
//
// Read/Write options:
// - Comparer
// - FileSystem
// - FilterPolicy
// - MaxOpenFiles
// Read options:
// - VerifyChecksums
// Write options:
// - BlockRestartInterval
// - BlockSize
// - Compression
// - ErrorIfDBExists
// - WriteBufferSize
type Options struct {
// BlockRestartInterval is the number of keys between restart points
// for delta encoding of keys.
//
// The default value is 16.
BlockRestartInterval int
// BlockSize is the minimum uncompressed size in bytes of each table block.
//
// The default value is 4096.
BlockSize int
// Comparer defines a total ordering over the space of []byte keys: a 'less
// than' relationship. The same comparison algorithm must be used for reads
// and writes over the lifetime of the DB.
//
// The default value uses the same ordering as bytes.Compare.
Comparer Comparer
// Compression defines the per-block compression to use.
//
// The default value (DefaultCompression) uses snappy compression.
Compression Compression
// ErrorIfDBExists is whether it is an error if the database already exists.
//
// The default value is false.
ErrorIfDBExists bool
// FileSystem maps file names to byte storage.
//
// The default value uses the underlying operating system's file system.
FileSystem FileSystem
// FilterPolicy defines a filter algorithm (such as a Bloom filter) that
// can reduce disk reads for Get calls.
//
// One such implementation is bloom.FilterPolicy(10) from the leveldb/bloom
// package.
//
// The default value means to use no filter.
FilterPolicy FilterPolicy
// MaxOpenFiles is a soft limit on the number of open files that can be
// used by the DB.
//
// The default value is 1000.
MaxOpenFiles int
// WriteBufferSize is the amount of data to build up in memory (backed by
// an unsorted log on disk) before converting to a sorted on-disk file.
//
// Larger values increase performance, especially during bulk loads. Up to
// two write buffers may be held in memory at the same time, so you may
// wish to adjust this parameter to control memory usage. Also, a larger
// write buffer will result in a longer recovery time the next time the
// database is opened.
//
// The default value is 4MiB.
WriteBufferSize int
// VerifyChecksums is whether to verify the per-block checksums in a DB.
//
// The default value is false.
VerifyChecksums bool
}
func (o *Options) GetBlockRestartInterval() int {
if o == nil || o.BlockRestartInterval <= 0 {
return 16
}
return o.BlockRestartInterval
}
func (o *Options) GetBlockSize() int {
if o == nil || o.BlockSize <= 0 {
return 4096
}
return o.BlockSize
}
func (o *Options) GetComparer() Comparer {
if o == nil || o.Comparer == nil {
return DefaultComparer
}
return o.Comparer
}
func (o *Options) GetCompression() Compression {
if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
// Default to SnappyCompression.
return SnappyCompression
}
return o.Compression
}
func (o *Options) GetErrorIfDBExists() bool {
if o == nil {
return false
}
return o.ErrorIfDBExists
}
func (o *Options) GetFileSystem() FileSystem {
if o == nil || o.FileSystem == nil {
return DefaultFileSystem
}
return o.FileSystem
}
func (o *Options) GetFilterPolicy() FilterPolicy {
if o == nil {
return nil
}
return o.FilterPolicy
}
func (o *Options) GetMaxOpenFiles() int {
if o == nil || o.MaxOpenFiles == 0 {
return 1000
}
return o.MaxOpenFiles
}
func (o *Options) GetWriteBufferSize() int {
if o == nil || o.WriteBufferSize <= 0 {
return 4 * 1024 * 1024
}
return o.WriteBufferSize
}
func (o *Options) GetVerifyChecksums() bool {
if o == nil {
return false
}
return o.VerifyChecksums
}
// ReadOptions hold the optional per-query parameters for Get and Find
// operations.
//
// Like Options, a nil *ReadOptions is valid and means to use the default
// values.
type ReadOptions struct {
// No fields so far.
}
// WriteOptions hold the optional per-query parameters for Set and Delete
// operations.
//
// Like Options, a nil *WriteOptions is valid and means to use the default
// values.
type WriteOptions struct {
// Sync is whether to sync underlying writes from the OS buffer cache
// through to actual disk, if applicable. Setting Sync can result in
// slower writes.
//
// If false, and the machine crashes, then some recent writes may be lost.
// Note that if it is just the process that crashes (and the machine does
// not) then no writes will be lost.
//
// In other words, Sync being false has the same semantics as a write
// system call. Sync being true means write followed by fsync.
//
// The default value is false.
Sync bool
}
func (o *WriteOptions) GetSync() bool {
return o != nil && o.Sync
}
| 28.478814 | 86 | 0.724743 | 3.21875 |
7983c20492b8ecc3e03fa850049bd4ec80d3b75e
| 914 |
asm
|
Assembly
|
test/test.Array.splitb.asm
|
richRemer/atlatl
|
169c0c9c29d277dc1295e6c37b0963af6e02741a
|
[
"MIT"
] | null | null | null |
test/test.Array.splitb.asm
|
richRemer/atlatl
|
169c0c9c29d277dc1295e6c37b0963af6e02741a
|
[
"MIT"
] | null | null | null |
test/test.Array.splitb.asm
|
richRemer/atlatl
|
169c0c9c29d277dc1295e6c37b0963af6e02741a
|
[
"MIT"
] | null | null | null |
global test_case
extern Array.splitb
extern Array.eachb
extern std.outb
extern std.outln
extern sys.error
%include "Array.inc"
section .text
test_case:
mov rax, test_array ; Array to split
mov rbx, 4 ; delimit with value 4
call Array.splitb ; split into two arrays (plus delimiter)
push qword[rcx+Array.length] ; preserve leftover length
mov rbx, std.outb ; fn to call
call Array.eachb ; print values from array
mov rax, empty_str ; empty message
call std.outln ; end line
pop rax ; restore length
call sys.error ; exit with array length
section .data
test_bytes: db 0x1, 0x2, 0x3, 0x4, 0x5, 0x6
empty_str: db 0x0
test_array:
istruc Array
at Array.pdata, dq test_bytes
at Array.length, dq 6
iend
| 25.388889 | 76 | 0.588621 | 3.25 |
581a3eefcf42cafa994e0166fa4d04727746ba72
| 2,363 |
h
|
C
|
src/Segues/PageTurn.h
|
ArthurCose/Swoosh
|
249785d9a0365e52cb81eb63790a7b8b15105bec
|
[
"Zlib"
] | null | null | null |
src/Segues/PageTurn.h
|
ArthurCose/Swoosh
|
249785d9a0365e52cb81eb63790a7b8b15105bec
|
[
"Zlib"
] | null | null | null |
src/Segues/PageTurn.h
|
ArthurCose/Swoosh
|
249785d9a0365e52cb81eb63790a7b8b15105bec
|
[
"Zlib"
] | null | null | null |
#pragma once
#include <Swoosh/Segue.h>
#include <Swoosh/Ease.h>
#include <Swoosh/Game.h>
#include <Swoosh/EmbedGLSL.h>
#include <Swoosh/Shaders.h>
using namespace swoosh;
/**
@class PageTurn
@brief Divides the screen into vertices that acts like a turning page, revealing the next sceen
@warning Even when mobile optimization is used, may choke on mobile hardware due to SFML bottlenecks
If optimized for mobile, will capture the scenes once and use less vertices to increase performance on weak hardware
*/
class PageTurn : public Segue {
private:
glsl::PageTurn shader;
sf::Texture last, next;
bool firstPass{ true };
const int cellsize(const quality& mode) {
switch (mode) {
case quality::realtime:
return 10;
case quality::reduced:
return 50;
}
// quality::mobile
return 100;
}
public:
void onDraw(sf::RenderTexture& surface) override {
double elapsed = getElapsed().asMilliseconds();
double duration = getDuration().asMilliseconds();
double alpha = ease::linear(elapsed, duration, 1.0);
const bool optimized = getController().getRequestedQuality() == quality::mobile;
sf::Texture temp, temp2;
surface.clear(this->getLastActivityBGColor());
if (firstPass || !optimized) {
this->drawLastActivity(surface);
surface.display(); // flip and ready the buffer
last = temp = sf::Texture(surface.getTexture()); // Make a copy of the source texture
}
else {
temp = last;
}
shader.setTexture(&temp);
shader.setAlpha((float)alpha);
shader.apply(surface);
surface.display();
sf::Texture copy(surface.getTexture());
sf::Sprite left(copy); // Make a copy of the effect to render later
surface.clear(this->getNextActivityBGColor());
if (firstPass || !optimized) {
this->drawNextActivity(surface);
surface.display(); // flip and ready the buffer
next = temp2 = sf::Texture(surface.getTexture());
}
else {
temp2 = next;
}
sf::Sprite right(temp2);
surface.draw(right);
surface.draw(left);
firstPass = false;
}
PageTurn(sf::Time duration, Activity* last, Activity* next) : Segue(duration, last, next),
shader(getController().getVirtualWindowSize(), cellsize(getController().getRequestedQuality()))
{
/* ... */
}
~PageTurn() { ; }
};
| 25.684783 | 118 | 0.666102 | 3.140625 |
e71b90e5a5df22cc2a39e90be3a05eadd6987744
| 6,247 |
js
|
JavaScript
|
myGame/myGame2.js
|
JennaWu-Cardona/Alien-Game
|
402e2667623c4d7970e5ce5e7e3c1cbf26ec18c3
|
[
"MIT"
] | null | null | null |
myGame/myGame2.js
|
JennaWu-Cardona/Alien-Game
|
402e2667623c4d7970e5ce5e7e3c1cbf26ec18c3
|
[
"MIT"
] | null | null | null |
myGame/myGame2.js
|
JennaWu-Cardona/Alien-Game
|
402e2667623c4d7970e5ce5e7e3c1cbf26ec18c3
|
[
"MIT"
] | null | null | null |
/*global Phaser game game_state eventFunctions*/
/*global Phaser*/
game_state.mainTwo = function() {};
game_state.mainTwo.prototype = {
preload: function() {
game.load.image('sky', 'assets/sky2.png');
game.load.image('debris', 'assets/planet.png');
game.load.spritesheet('alien', 'assets/spaceship.png', 194, 189);
document.removeEventListener("click", eventFunctions.mainStarter);
game.load.audio('spaceship', 'assets/rocketship.wav');
game.load.audio('shutdown','assets/shutdown.wav' );
game.load.audio('win2', 'assets/win2.wav');
game.load.audio('win1', 'assets/win1.wav');
},
create: function() {
// We're going to be using physics, so enable the Arcade Physics system
game.physics.startSystem(Phaser.Physics.ARCADE);
// a simple background for our game
game.add.sprite(0, 0, 'sky');
//the platforms group contains the ground
this.platforms = game.add.group();
//this is the physics for any object created in this group- the platforms
this.platforms.enableBody = true;
//this is the alien
this.player = game.add.sprite(1, -100, 'alien');
// this.player = game.add.sprite(1, game.world.height - 0, 'alien');
//We need to add physics to this character
game.physics.arcade.enable(this.player);
this.player.enableBody = true;
//Player physics properties. Give the little guy a slight bounce
this.player.body.gravity.y = 290;
this.player.body.collideWorldBounds = true;
//the this.player animation
this.player.animations.add('down', [1], 10, true);
// this.player.animations.add('right', [5], 10, true);
this.player.animations.add('left', [3], 10, true);
this.player.animations.add('up', [0], 10, true);
this.player.animations.add('right', [5], 10, true);
// this makes the alien the size we want
this.player.body.setSize(90, 140, 55, 30);
//Our controls
this.cursors = game.input.keyboard.createCursorKeys();
this.platforms = game.add.group();
this.platforms.enableBody = true;
var movingPlatform = this.platforms.create(0, 64, 'debris');
movingPlatform.body.velocity.y = 100;
movingPlatform.body.immovable = true;
movingPlatform.body.checkCollision.down = false;
movingPlatform.body.checkCollision.left = false;
movingPlatform.body.checkCollision.right = false;
//this makes a moving platform
var _this = this;
setInterval(function() {
// var movingPlatform = this.platforms = this.add.physicsGroup();
var movingPlatform = _this.platforms.create(Math.random() * 700, 64, 'debris');
game.physics.enable(movingPlatform, Phaser.Physics.ARCADE);
movingPlatform.body.velocity.y = 100;
movingPlatform.body.immovable = true;
movingPlatform.body.checkCollision.down = false;
movingPlatform.body.checkCollision.left = false;
movingPlatform.body.checkCollision.right = false;
movingPlatform.scored = false;
}, 2000);
//the score
this.scoreText = game.add.text(16, 16, 'Score: 0', {
fontSize: '30px',
fill: 'white'
});
this.score = 0;
spaceship = game.add.audio('spaceship');
shutdown = game.add.audio('shutdown');
win2 = game.add.audio('win2');
win1 = game.add.audio('win1');
game.sound.setDecodedCallback([spaceship, shutdown, win2], start, this);
audio.addEventListener(playFX, this.cursors.up.isDown);
},
update: function() {
// Collide the player and the platforms
game.physics.arcade.collide(this.player, this.platforms, this.scorePlatform, null, this);
// game.physics.arcade.collide(this.player, this.ground);
// the alien's movement
this.player.body.velocity.x = 0;
// this.player.body.velocity.y = 0;
if (this.cursors.left.isDown) {
//move to the left
this.player.body.velocity.x = -270;
this.player.animations.play('left');
}
else if (this.cursors.right.isDown) {
//Move to the right
this.player.body.velocity.x = 270;
this.player.animations.play('right')
}
else if (this.cursors.down.isDown) {
//Move down
this.player.body.velocity.y = 350;
this.player.animations.play('down');
}
else {
//Stand still
this.player.animations.stop();
this.player.frame = 1;
}
console.log(this.player.body.touching.down);
//Allow the this.player to jump if they are touching the ground.
if (this.cursors.up.isDown && this.player.body.touching.down) {
this.player.body.velocity.y = -350;
spaceship.play();
}
if (this.cursors.up.isDown) {
this.player.animations.play('up');
}
//this is supposed the cue the losing screen if you fall
if (this.player.y > 429.9) {
game.state.start('badEndingTwo');
// shutdown.play();
}
//this is supposed to cue the winning screen if you get a certain score
if (this.score === 20) {
game.state.start('goodEndingTwo');
win2.play();
win1.play();
}
//this plays the sound when you lose
if (this.player.y > 429.9) {
shutdown.play();
}
// this.player.x -= 2;
// if (this.player.x < -this.player.width) {
// this.player.x = game.world.width;
// }
// if(this.player.touching.right)
},
//makes score for landing on platforms
scorePlatform: function(player, platform) {
if (!platform.scored) {
this.score++;
this.scoreText.text = "Score: " + this.score;
platform.scored = true;
}
}
};
// game.state.start('main');
game.state.add('mainTwo', game_state.mainTwo);
| 33.406417 | 97 | 0.581719 | 3 |
7286b341a2b518ce84e72417cd69bc9610fbb0e2
| 6,494 |
rs
|
Rust
|
program/tests/program_test/scenarios.rs
|
dboures/mango-v3
|
193f096af9a2da5a579ca83509e3def751f6cbcd
|
[
"MIT"
] | null | null | null |
program/tests/program_test/scenarios.rs
|
dboures/mango-v3
|
193f096af9a2da5a579ca83509e3def751f6cbcd
|
[
"MIT"
] | null | null | null |
program/tests/program_test/scenarios.rs
|
dboures/mango-v3
|
193f096af9a2da5a579ca83509e3def751f6cbcd
|
[
"MIT"
] | null | null | null |
use crate::*;
use solana_sdk::transport::TransportError;
#[allow(dead_code)]
pub fn arrange_deposit_all_scenario(
test: &mut MangoProgramTest,
user_index: usize,
mint_amount: f64,
quote_amount: f64,
) -> Vec<(usize, usize, f64)> {
let mut user_deposits = Vec::new();
for mint_index in 0..test.num_mints - 1 {
user_deposits.push((user_index, mint_index, mint_amount));
}
user_deposits.push((user_index, test.quote_index, quote_amount));
return user_deposits;
}
#[allow(dead_code)]
pub async fn deposit_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
deposits: &Vec<(usize, usize, f64)>,
) {
mango_group_cookie.run_keeper(test).await;
for deposit in deposits {
let (user_index, mint_index, amount) = deposit;
let mint = test.with_mint(*mint_index);
let deposit_amount = (*amount * mint.unit) as u64;
test.perform_deposit(&mango_group_cookie, *user_index, *mint_index, deposit_amount).await;
}
}
#[allow(dead_code)]
pub async fn withdraw_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
withdraws: &Vec<(usize, usize, f64, bool)>,
) {
mango_group_cookie.run_keeper(test).await;
for (user_index, mint_index, amount, allow_borrow) in withdraws {
let mint = test.with_mint(*mint_index);
let withdraw_amount = (*amount * mint.unit) as u64;
test.perform_withdraw(
&mango_group_cookie,
*user_index,
*mint_index,
withdraw_amount,
*allow_borrow,
)
.await;
}
}
#[allow(dead_code)]
pub async fn withdraw_scenario_with_delegate(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
withdraw: &(usize, usize, usize, f64, bool),
) -> Result<(), TransportError> {
mango_group_cookie.run_keeper(test).await;
let (user_index, delegate_user_index, mint_index, amount, allow_borrow) = withdraw;
let mint = test.with_mint(*mint_index);
let withdraw_amount = (*amount * mint.unit) as u64;
test.perform_withdraw_with_delegate(
&mango_group_cookie,
*user_index,
*delegate_user_index,
*mint_index,
withdraw_amount,
*allow_borrow,
)
.await
}
#[allow(dead_code)]
pub async fn delegate_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
user_index: usize,
delegate_user_index: usize,
) {
test.perform_set_delegate(&mango_group_cookie, user_index, delegate_user_index).await;
}
#[allow(dead_code)]
pub async fn reset_delegate_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
user_index: usize,
) {
test.perform_reset_delegate(&mango_group_cookie, user_index).await;
}
#[allow(dead_code)]
pub async fn place_spot_order_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
spot_orders: &Vec<(usize, usize, serum_dex::matching::Side, f64, f64)>,
) {
mango_group_cookie.run_keeper(test).await;
for spot_order in spot_orders {
let (user_index, market_index, order_side, order_size, order_price) = *spot_order;
let mut spot_market_cookie = mango_group_cookie.spot_markets[market_index];
spot_market_cookie
.place_order(test, mango_group_cookie, user_index, order_side, order_size, order_price)
.await;
mango_group_cookie.users_with_spot_event[market_index].push(user_index);
}
}
#[allow(dead_code)]
pub async fn place_spot_order_scenario_with_delegate(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
spot_order: &(usize, usize, usize, serum_dex::matching::Side, f64, f64),
) -> Result<(), TransportError> {
mango_group_cookie.run_keeper(test).await;
let (user_index, delegate_user_index, market_index, order_side, order_size, order_price) =
*spot_order;
let mut spot_market_cookie = mango_group_cookie.spot_markets[market_index];
mango_group_cookie.users_with_spot_event[market_index].push(user_index);
spot_market_cookie
.place_order_with_delegate(
test,
mango_group_cookie,
user_index,
delegate_user_index,
order_side,
order_size,
order_price,
)
.await
}
#[allow(dead_code)]
pub async fn place_perp_order_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
perp_orders: &Vec<(usize, usize, mango::matching::Side, f64, f64)>,
) {
mango_group_cookie.run_keeper(test).await;
for perp_order in perp_orders {
let (user_index, market_index, order_side, order_size, order_price) = *perp_order;
let mut perp_market_cookie = mango_group_cookie.perp_markets[market_index];
perp_market_cookie
.place_order(
test,
mango_group_cookie,
user_index,
order_side,
order_size,
order_price,
PlacePerpOptions::default(),
)
.await;
mango_group_cookie.users_with_perp_event[market_index].push(user_index);
}
}
#[allow(dead_code)]
pub async fn match_spot_order_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
matched_spot_orders: &Vec<Vec<(usize, usize, serum_dex::matching::Side, f64, f64)>>,
) {
for matched_spot_order in matched_spot_orders {
// place_spot_order_scenario() starts by running the keeper
place_spot_order_scenario(test, mango_group_cookie, matched_spot_order).await;
mango_group_cookie.run_keeper(test).await;
mango_group_cookie.consume_spot_events(test).await;
}
mango_group_cookie.run_keeper(test).await;
}
#[allow(dead_code)]
pub async fn match_perp_order_scenario(
test: &mut MangoProgramTest,
mango_group_cookie: &mut MangoGroupCookie,
matched_perp_orders: &Vec<Vec<(usize, usize, mango::matching::Side, f64, f64)>>,
) {
for matched_perp_order in matched_perp_orders {
// place_perp_order_scenario() starts by running the keeper
place_perp_order_scenario(test, mango_group_cookie, matched_perp_order).await;
mango_group_cookie.run_keeper(test).await;
mango_group_cookie.consume_perp_events(test).await;
}
mango_group_cookie.run_keeper(test).await;
}
| 32.79798 | 99 | 0.68879 | 3 |
f0199c2ddd6cf1a82c3279d8fee04fa2d5d2f015
| 3,674 |
py
|
Python
|
env2048.py
|
qhduan/rl-2048
|
9730d366625ac7ffdd8875586ffbb8615468f110
|
[
"MIT"
] | 3 |
2022-02-10T02:19:58.000Z
|
2022-03-06T14:39:20.000Z
|
env2048.py
|
qhduan/rl-2048
|
9730d366625ac7ffdd8875586ffbb8615468f110
|
[
"MIT"
] | null | null | null |
env2048.py
|
qhduan/rl-2048
|
9730d366625ac7ffdd8875586ffbb8615468f110
|
[
"MIT"
] | null | null | null |
import logic
import numpy as np
import gym
ACTION_MAP = {
0: 'up',
1: 'down',
2: 'left',
3: 'right'
}
class Env2048(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, n=4, max_idle=100, seed=None):
super(Env2048, self).__init__()
self.n = n
self.max_idle = max_idle
self.action_map = ACTION_MAP
# up, down, left, right
self.action_space = gym.spaces.Discrete(4)
self.observation_space = gym.spaces.Box(
low=0, high=255,
shape=(self.n, self.n, 2 ** n), dtype=np.uint8)
self.eye = np.eye(2 ** n)
self.reward_range = (float('-inf'), float('inf'))
if seed is not None:
self.seed(seed)
def seed(self, seed):
np.random.seed(seed)
def reset(self):
self.matrix = logic.new_game(self.n)
self.reward_i = self.i = 0
self.total_reward = 0
return self.obs
@property
def obs(self):
m = np.array(self.matrix)
m = np.clip(m, 1, float('inf')) # from 0, 2, 4, 8, ... to 1, 2, 4, 8
m = np.log2(m).astype(np.int64) # from 1, 2, 4, 8,..., 2048 to 0, 1, 2, 3, ..., 11
m = self.eye[m]
m = m * 255
m = m.astype(np.uint8)
obs = m
return obs
def step(self, action):
if isinstance(action, str) and action in ('up', 'down', 'left', 'right'):
pass
if isinstance(action, (int, np.int64, np.int32)):
action = self.action_map[int(action)]
else:
print(action, type(action))
raise
old_score = np.sort(np.array(self.matrix).flatten())[::-1]
old_matrix = str(self.matrix)
# import pdb; pdb.set_trace()
if action == 'up':
self.matrix, updated = logic.up(self.matrix)
elif action == 'down':
self.matrix, updated = logic.down(self.matrix)
elif action == 'left':
self.matrix, updated = logic.left(self.matrix)
elif action == 'right':
self.matrix, updated = logic.right(self.matrix)
new_matrix = str(self.matrix)
new_score = np.sort(np.array(self.matrix).flatten())[::-1]
reward = np.sum((new_score - old_score) * (new_score >= old_score)) * 4
reward = float(reward)
self.total_reward += reward
self.i += 1
if updated: # matrix有更新
self.matrix = logic.add_two(self.matrix)
if logic.game_state(self.matrix) == 'win':
print('you win')
return self.obs, 10000.0, True, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}
elif logic.game_state(self.matrix) == 'lose':
return self.obs, 100.0, True, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}
idle = False
if old_matrix == new_matrix:
idle = True
if idle:
reward = -1
else:
self.reward_i = self.i
if self.i - self.reward_i > self.max_idle:
return self.obs, -100, True, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}
return self.obs, reward, False, {'i': self.i, 'ri': self.reward_i, 'tr': self.total_reward}
def render(self, mode='human'):
pass
def close(self):
pass
def main():
env = Env2048()
obs = env.reset()
print(obs)
for _ in range(1000):
obs, reward, done, info = env.step(np.random.choice(['right', 'left', 'up', 'down']))
print(obs)
print(reward, done, info)
if done:
break
if __name__ == '__main__':
main()
| 29.15873 | 107 | 0.531301 | 3.296875 |
0cc75fc2057f1d904d4d63b853c8dc9ff11fc8ab
| 987 |
py
|
Python
|
featureflags/config.py
|
enverbisevac/ff-python-server-sdk
|
e7c809229d13517e0bf4b28fc0a556e693c9034e
|
[
"Apache-2.0"
] | null | null | null |
featureflags/config.py
|
enverbisevac/ff-python-server-sdk
|
e7c809229d13517e0bf4b28fc0a556e693c9034e
|
[
"Apache-2.0"
] | null | null | null |
featureflags/config.py
|
enverbisevac/ff-python-server-sdk
|
e7c809229d13517e0bf4b28fc0a556e693c9034e
|
[
"Apache-2.0"
] | null | null | null |
"""Configuration is a base class that has default values that you can change
during the instance of the client class"""
from typing import Callable
BASE_URL = "https://config.feature-flags.uat.harness.io/api/1.0"
MINUTE = 60
PULL_INTERVAL = 1 * MINUTE
class Config(object):
def __init__(self, base_url: str = BASE_URL,
pull_interval: int = PULL_INTERVAL,
cache: object = None,
store: object = None,
enable_stream: bool = False):
self.base_url = base_url
self.pull_interval = pull_interval
self.cache = cache
self.store = store
self.enable_stream = enable_stream
default_config = Config()
def with_base_url(base_url: str) -> Callable:
def func(config: Config) -> None:
config.base_url = base_url
return func
def with_stream_enabled(value: bool) -> Callable:
def func(config: Config) -> None:
config.enable_stream = value
return func
| 25.973684 | 76 | 0.64843 | 3.0625 |
5a30701fa71831ac461b05fd618384ba360897f3
| 1,222 |
rs
|
Rust
|
src/redis_utils.rs
|
nickelc/ocypod
|
fe4b693870f67fb7a8b25aa626af286c6b65d1ca
|
[
"Apache-2.0"
] | 170 |
2019-03-29T13:27:44.000Z
|
2022-03-18T14:55:36.000Z
|
src/redis_utils.rs
|
nickelc/ocypod
|
fe4b693870f67fb7a8b25aa626af286c6b65d1ca
|
[
"Apache-2.0"
] | 27 |
2018-12-04T17:08:10.000Z
|
2022-03-11T08:39:03.000Z
|
src/redis_utils.rs
|
nickelc/ocypod
|
fe4b693870f67fb7a8b25aa626af286c6b65d1ca
|
[
"Apache-2.0"
] | 15 |
2018-12-06T22:08:45.000Z
|
2022-03-03T01:44:16.000Z
|
//! Miscellaneous Redis utilities and helper functions.
use redis::{aio::ConnectionLike, from_redis_value, FromRedisValue, Pipeline, RedisResult, Value};
/// Helper function for getting nested data structures from Redis pipelines.
///
/// Used for e.g. querying for vectors of tuples from:
/// pipe.hget(key1, [x, y, z])
/// .hget(key2, [x, y, z])
/// .hget(key3, [x, y, z])
///
/// let (a, b, c): Vec<(x_type, y_type, z_type)> = vec_from_redis_pipe(pipe, conn)?;
pub async fn vec_from_redis_pipe<C: ConnectionLike, T: FromRedisValue>(
conn: &mut C,
pipe: &Pipeline,
) -> RedisResult<Vec<T>> {
let values: Vec<Value> = pipe.query_async(conn).await?;
let mut results = Vec::with_capacity(values.len());
for v in values {
results.push(from_redis_value::<T>(&v)?);
}
Ok(results)
}
/// Simplifies async Redis transactions.
#[macro_export]
macro_rules! transaction_async {
($conn:expr, $keys:expr, $body:expr) => {
loop {
redis::cmd("WATCH").arg($keys).query_async($conn).await?;
if let Some(response) = $body {
redis::cmd("UNWATCH").query_async($conn).await?;
break response;
}
}
};
}
| 30.55 | 97 | 0.612111 | 3.25 |
6b2e2d629434ad3550714c3ae2005e4258b9f4c7
| 9,766 |
rs
|
Rust
|
src/tests/iteration.rs
|
soro/rusty-hdrhistogram
|
2b0d261027c2fef898def3f4a358edebd4f55cd0
|
[
"MIT"
] | 1 |
2019-07-05T14:23:00.000Z
|
2019-07-05T14:23:00.000Z
|
src/tests/iteration.rs
|
soro/rusty-hdrhistogram
|
2b0d261027c2fef898def3f4a358edebd4f55cd0
|
[
"MIT"
] | null | null | null |
src/tests/iteration.rs
|
soro/rusty-hdrhistogram
|
2b0d261027c2fef898def3f4a358edebd4f55cd0
|
[
"MIT"
] | null | null | null |
use tests::util::*;
#[test]
fn percentiles() {
let histogram = stat_histo();
for value in histogram.percentiles(5) {
let value_at_pctl = histogram.get_value_at_percentile(value.percentile);
assert_eq!(
value.value_iterated_to,
histogram.highest_equivalent_value(value_at_pctl)
);
}
}
#[test]
fn linear_bucket_values() {
let mut index = 0;
let histogram = stat_histo();
let raw_histogram = raw_stat_histo();
for value in raw_histogram.linear_bucket_values(100000) {
let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
if index == 0 {
assert_eq!(
10000,
count_added_in_this_bucket,
"Raw Linear 100 msec bucket # 0 added a count of 10000"
);
} else if index == 999 {
assert_eq!(
1,
count_added_in_this_bucket,
"Raw Linear 100 msec bucket # 999 added a count of 1"
);
} else {
assert_eq!(
0,
count_added_in_this_bucket,
"Raw Linear 100 msec bucket # {} added a count of 0",
index
);
}
index += 1;
}
assert_eq!(1000, index);
index = 0;
let mut total_added_counts = 0;
for value in histogram.linear_bucket_values(10000) {
let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
if index == 0 {
assert_eq!(
10000,
count_added_in_this_bucket,
"Linear 1 sec bucket # 0 [{}..{}] added a count of 10000",
value.value_iterated_from,
value.value_iterated_to
);
}
total_added_counts += value.count_added_in_this_iteration_step;
index += 1;
}
assert_eq!(
10000,
index,
"There should be 10000 linear buckets of size 10000 usec between 0 and 100 sec."
);
assert_eq!(
20000,
total_added_counts,
"Total added counts should be 20000"
);
index = 0;
total_added_counts = 0;
for value in histogram.linear_bucket_values(1000) {
let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
if index == 1 {
assert_eq!(
10000,
count_added_in_this_bucket,
"Linear 1 sec bucket # 0 [{}..{}] added a count of 10000",
value.value_iterated_from,
value.value_iterated_to
);
}
total_added_counts += value.count_added_in_this_iteration_step;
index += 1;
}
assert_eq!(
100007,
index,
"There should be 100007 linear buckets of size 1000 usec between 0 and 100 sec."
);
assert_eq!(
20000,
total_added_counts,
"Total added counts should be 20000"
);
}
#[test]
fn logarithmic_bucket_values() {
let histogram = stat_histo();
let raw_histogram = raw_stat_histo();
let mut index = 0;
for value in raw_histogram.logarithmic_bucket_values(10000, 2.0) {
let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
if index == 0 {
assert_eq!(
10000,
count_added_in_this_bucket,
"Raw Logarithmic 10 msec bucket # 0 added a count of 10000"
);
} else if index == 14 {
assert_eq!(
1,
count_added_in_this_bucket,
"Raw Logarithmic 10 msec bucket # 14 added a count of 1"
);
} else {
assert_eq!(
0,
count_added_in_this_bucket,
"Raw Logarithmic 100 msec bucket # {} added a count of 0",
index
);
}
index += 1;
}
assert_eq!(14, index - 1);
index = 0;
let mut total_added_counts = 0;
for value in histogram.logarithmic_bucket_values(10000, 2.0) {
let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
if index == 0 {
assert_eq!(
10000,
count_added_in_this_bucket,
"Logarithmic 10 msec bucket # 0 [{}..{}] added a count of 10000",
value.value_iterated_from,
value.value_iterated_to
);
}
total_added_counts += value.count_added_in_this_iteration_step;
index += 1;
}
assert_eq!(
14,
index - 1,
"There should be 14 Logarithmic buckets of size 10000 usec between 0 and 100 sec."
);
assert_eq!(
20000,
total_added_counts,
"Total added counts should be 20000"
);
}
// Verifies `recorded_values`: the iterator must visit only non-empty
// buckets — exactly two distinct recorded values in the raw histogram —
// and in the corrected histogram each visited bucket must be non-zero
// with a per-iteration delta equal to that bucket's count.
#[test]
fn recorded_values() {
    let histogram = stat_histo();
    let raw_histogram = raw_stat_histo();
    let mut index = 0;
    for value in raw_histogram.recorded_values() {
        let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
        if index == 0 {
            assert_eq!(
                10000,
                count_added_in_this_bucket,
                "Raw recorded value bucket # 0 added a count of 10000"
            );
        } else {
            assert_eq!(
                1,
                count_added_in_this_bucket,
                "Raw recorded value bucket # {} added a count of 1",
                index
            );
        }
        index += 1;
    }
    // Exactly two distinct values were recorded into the raw histogram.
    assert_eq!(2, index);
    index = 0;
    let mut total_added_counts = 0;
    for value in histogram.recorded_values() {
        let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
        if index == 0 {
            assert_eq!(
                10000,
                count_added_in_this_bucket,
                "Recorded bucket # 0 [{}..{}] added a count of 10000",
                value.value_iterated_from,
                value.value_iterated_to
            );
        }
        // `recorded_values` must skip empty buckets entirely...
        assert!(
            value.count_at_value_iterated_to != 0,
            "The count in recorded bucket #{} is not 0",
            index
        );
        // ...and each step visits exactly one bucket, so the step delta
        // equals that bucket's count.
        assert_eq!(
            value.count_at_value_iterated_to,
            value.count_added_in_this_iteration_step,
            "The count in recorded bucket # {} is exactly the amount added since the last iteration",
            index
        );
        total_added_counts += value.count_added_in_this_iteration_step;
        index += 1;
    }
    assert_eq!(
        20000,
        total_added_counts,
        "Total added counts should be 20000"
    );
}
// Verifies `all_values`: iterates every slot of the counts array (empty or
// not), checking per-step counts plus the iterator's running cumulative
// count and cumulative value totals, for raw and corrected histograms.
#[test]
fn all_values() {
    let histogram = stat_histo();
    let raw_histogram = raw_stat_histo();
    let mut index = 0;
    #[allow(unused_assignments)]
    let mut latest_value_at_index = 0;
    let mut total_count_to_this_point = 0;
    let mut total_value_to_this_point = 0;
    for value in raw_histogram.all_values() {
        let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
        // NOTE(review): the guard checks index 1000 but the message says
        // "bucket # 0" — the message text looks stale relative to the code.
        if index == 1000 {
            assert_eq!(
                10000,
                count_added_in_this_bucket,
                "Raw allValues bucket # 0 added a count of 10000"
            );
        } else if histogram.values_are_equivalent(value.value_iterated_to, 100000000) {
            assert_eq!(
                1,
                count_added_in_this_bucket,
                "Raw allValues value bucket # {} added a count of 1",
                index
            );
        } else {
            assert_eq!(
                0,
                count_added_in_this_bucket,
                "Raw allValues value bucket # {} added a count of 0",
                index
            );
        }
        // Maintain independent running totals and require the iterator's
        // own running totals to agree at every step.
        latest_value_at_index = value.value_iterated_to;
        total_count_to_this_point += value.count_at_value_iterated_to;
        assert_eq!(
            total_count_to_this_point,
            value.total_count_to_this_value,
            "total Count should match"
        );
        total_value_to_this_point += value.count_at_value_iterated_to * latest_value_at_index;
        assert_eq!(
            total_value_to_this_point,
            value.total_value_to_this_value,
            "total Value should match"
        );
        index += 1;
    }
    // all_values visits every slot of the counts array exactly once.
    assert_eq!(
        histogram.counts_array_length(),
        index,
        "index should be equal to countsArrayLength"
    );
    index = 0;
    let mut total_added_counts = 0;
    for value in histogram.all_values() {
        let count_added_in_this_bucket = value.count_added_in_this_iteration_step;
        if index == 1000 {
            assert_eq!(
                10000,
                count_added_in_this_bucket,
                "AllValues bucket # 0 [{}..{}] added a count of 10000",
                value.value_iterated_from,
                value.value_iterated_to
            );
        }
        assert_eq!(
            value.count_at_value_iterated_to,
            value.count_added_in_this_iteration_step,
            "The count in AllValues bucket # {} is exactly the amount added since the last iteration",
            index
        );
        total_added_counts += value.count_added_in_this_iteration_step;
        // Each step's value must round-trip through value_from_index.
        assert!(
            histogram.values_are_equivalent(histogram.value_from_index(index), value.value_iterated_to),
            "valueFromIndex(index) should be equal to getValueIteratedTo()"
        );
        index += 1;
    }
    assert_eq!(
        histogram.counts_array_length(),
        index,
        "index should be equal to countsArrayLength"
    );
    assert_eq!(
        20000,
        total_added_counts,
        "Total added counts should be 20000"
    );
}
| 30.235294 | 104 | 0.558366 | 3.015625 |
abb7ecb34cea94ce1cec160c554422b0d6521ec0
| 2,325 |
lua
|
Lua
|
viewer/view_lap.lua
|
pixeljetstream/r3e-trace-analysis
|
0748715bd0d1f5b630a8f7e556e4547576ba118e
|
[
"MIT"
] | 10 |
2015-10-22T06:12:22.000Z
|
2021-12-20T21:22:07.000Z
|
viewer/view_lap.lua
|
pixeljetstream/r3e-trace-analysis
|
0748715bd0d1f5b630a8f7e556e4547576ba118e
|
[
"MIT"
] | null | null | null |
viewer/view_lap.lua
|
pixeljetstream/r3e-trace-analysis
|
0748715bd0d1f5b630a8f7e556e4547576ba118e
|
[
"MIT"
] | null | null | null |
local wx = require "wx"
local ffi = require "ffi"
local gl = require "glewgl"
local glu = require "glutils"
local utils = require "utils"
local r3e = require "r3e"
local r3etrace = require "r3etrace"
local math3d = require "math3d"
local v3,v4,m4 = math3d.namespaces.v3,math3d.namespaces.v4,math3d.namespaces.m4
local config = gCONFIG
local r3emap = gR3EMAP
local helpers = gHELPERS
local sys = gSYS
local toMS = helpers.toMS
---------------------------------------------
-- Builds the lap-list panel: a wxListCtrl with three columns (lap number,
-- lap time, source trace file) kept in sync via the app's event system.
-- Invalid laps render as "(n)"; the selected lap gets an " ||" suffix.
-- Returns the control, extended with control.getFromIdx.
local function initLapView(frame, ID_LAP)
  local control = wx.wxListCtrl(frame, ID_LAP,
    wx.wxDefaultPosition, wx.wxSize(110, 300),
    wx.wxLC_REPORT + wx.wxLC_SINGLE_SEL)
  -- Formats one lap label: "(n)" when invalid, " ||" suffix when selected.
  local function lapString( trace, i, sel )
    local str = trace.lapData[i].valid and tostring(i) or "("..tostring(i)..")"
    return sel and ""..str.." ||" or str
  end
  local content = {}  -- row index -> {trace, lap}
  local lktrace = {}  -- trace -> first row index belonging to that trace
  -- Previously selected row, so its " ||" marker can be cleared.
  local lastTrace
  local lastLap
  local lastIdx
  -- Event handler: a lap was selected; move the selection marker there.
  local function lap(trace, lap)
    local idx = lktrace[trace] + lap - 1
    if (lastLap) then
      control:SetItem(lastIdx, 0, lapString(lastTrace,lastLap,false))
    end
    control:SetItem(idx, 0, lapString(trace,lap,true))
    lastTrace = trace
    lastLap = lap
    lastIdx = idx
  end
  -- Event handler: a trace was appended; add one row per lap.
  local function append(trace)
    -- NOTE(review): rows are 0-based but `#` counts only keys 1..n, so once
    -- row 0 exists `#content` is rowcount-1; appending a second trace would
    -- start at the previous trace's last row index — verify this offset.
    local offset = #content
    lktrace[trace] = offset
    for i,v in ipairs(trace.lapData) do
      local idx = offset + i - 1
      control:InsertItem(idx, lapString(trace, i))
      control:SetItem(idx, 1, toMS(v.time))
      control:SetItem(idx, 2, helpers.getTraceShortName(trace))
      content[idx] = {trace, i}
    end
  end
  -- Event handler: a new trace was opened; reset columns and repopulate.
  local function open(trace)
    lastLap = nil
    content = {}
    lktrace = {}
    control:ClearAll()
    control:InsertColumn(0, "Lap")
    control:InsertColumn(1, "Time")
    control:InsertColumn(2, "File")
    control:SetColumnWidth(0,36)
    control:SetColumnWidth(1,60)
    control:SetColumnWidth(2,200)
    append(trace)
  end
  -- Maps a list row index back to its {trace, lap} pair.
  function control.getFromIdx(idx)
    local trace, lap = content[idx][1],content[idx][2]
    return trace,lap
  end
  sys.registerHandler(sys.events.lap, lap)
  sys.registerHandler(sys.events.open, open)
  sys.registerHandler(sys.events.append, append)
  return control
end
| 24.734043 | 79 | 0.63828 | 3.125 |
9c40d66869385f33e81b20122abb9a4fb93c228a
| 6,018 |
js
|
JavaScript
|
src/components/SuggestField.js
|
grachet/react-online-doc
|
b58e1bffbb5a0d75b447dfb83b71ed5a2b3624d6
|
[
"MIT"
] | null | null | null |
src/components/SuggestField.js
|
grachet/react-online-doc
|
b58e1bffbb5a0d75b447dfb83b71ed5a2b3624d6
|
[
"MIT"
] | null | null | null |
src/components/SuggestField.js
|
grachet/react-online-doc
|
b58e1bffbb5a0d75b447dfb83b71ed5a2b3624d6
|
[
"MIT"
] | null | null | null |
import React from 'react';
import PropTypes from 'prop-types';
import Autosuggest from 'react-autosuggest';
import parse from 'autosuggest-highlight/parse';
import TextField from '@material-ui/core/TextField';
import Paper from '@material-ui/core/Paper';
import MenuItem from '@material-ui/core/MenuItem';
import {withStyles} from '@material-ui/core/styles';
import InputBase from "@material-ui/core/InputBase";
// Renders the text input used by Autosuggest.
//
// Autosuggest passes its own `ref` plus any caller-supplied `inputRef`;
// both must receive the underlying DOM node, so they are forwarded
// together below.
//
// NOTE(review): `InputProps` is a TextField prop; whether `InputBase`
// honours it the same way should be confirmed — the earlier TextField
// implementation was replaced (see history).
function renderInputComponent(inputProps) {
    const {
        classes, inputRef = () => {
        }, ref, ...other
    } = inputProps;

    return <InputBase
        placeholder="Search…"
        classes={{
            root: classes.inputRoot,
            input: classes.inputInput,
        }}
        InputProps={{
            inputRef: node => {
                ref(node);
                inputRef(node);
            },
            classes: {
                input: classes.input,
            },
        }}
        {...other}
    />
}
// Backslash-escapes every regex metacharacter in `str` so it can be
// embedded verbatim in a RegExp pattern.
function escapeRegexCharacters(str) {
    return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

// Finds the character ranges of `text` matched by each whitespace-separated
// word of `query` (case-insensitive), returned as [start, end) pairs in
// ascending order — the shape expected by autosuggest-highlight's `parse`.
// Each matched span is blanked out so later words cannot re-match it.
function match(text, query) {
    return (
        query
            .trim()
            .split(" ")
            .reduce((result, word) => {
                if (!word.length) return result;

                const wordLen = word.length;
                const regex = new RegExp(escapeRegexCharacters(word), 'i');
                // String.prototype.match returns null when nothing matches;
                // fall back to an empty object so destructuring is safe
                // (the original crashed with a TypeError here).
                const {index = -1} = text.match(regex) || {};

                if (index > -1) {
                    result.push([index, index + wordLen]);

                    // Replace what we just found with spaces so we don't find it again.
                    text =
                        text.slice(0, index) +
                        new Array(wordLen + 1).join(' ') +
                        text.slice(index + wordLen);
                }
                return result;
            }, [])
            .sort((match1, match2) => {
                return match1[0] - match2[0];
            })
    );
}
function renderSuggestion(suggestion, {query, isHighlighted}) {
const matches = match(suggestion.label, query);
const parts = parse(suggestion.label, matches);
return (
<MenuItem selected={isHighlighted} component="div">
<div>
{parts.map((part, index) => {
return part.highlight ? (
<span key={String(index)} style={{fontWeight: 500, color: "#124191"}}>
{part.text}
</span>
) : (
<strong key={String(index)} style={{fontWeight: 300}}>
{part.text}
</strong>
);
})}
</div>
</MenuItem>
);
}
// JSS style rules injected via withStyles; `theme` is the MUI theme.
const styles = theme => ({
    root: {
        flexGrow: 1,
    },
    searchIcon: {
        width: theme.spacing.unit * 9,
        height: '100%',
        position: 'absolute',
        pointerEvents: 'none',
        display: 'flex',
        alignItems: 'center',
        justifyContent: 'center',
    },
    inputRoot: {
        color: 'inherit',
        width: '100%',
    },
    container: {
        position: 'relative',
    },
    // Dropdown floats over following content; zIndex keeps it on top.
    suggestionsContainerOpen: {
        position: 'absolute',
        zIndex: 200,
        marginTop: theme.spacing.unit,
        left: 0,
        right: 0,
    },
    suggestion: {
        display: 'block',
    },
    // Left padding leaves room for the search icon; width animates on focus.
    inputInput: {
        paddingTop: theme.spacing.unit,
        paddingRight: theme.spacing.unit,
        paddingBottom: theme.spacing.unit,
        paddingLeft: theme.spacing.unit * 10,
        transition: theme.transitions.create('width'),
        width: '100%',
        [theme.breakpoints.up('sm')]: {
            width: 120,
            '&:focus': {
                width: 200,
            },
        },
    },
    suggestionsList: {
        margin: 0,
        padding: 0,
        listStyleType: 'none',
    },
    divider: {
        height: theme.spacing.unit * 2,
    },
});
// Autocomplete text field backed by react-autosuggest.
//
// Props (beyond the MUI `classes` prop):
//   data                 — array of {label, value} candidate suggestions
//   setValue             — callback receiving the chosen suggestion's value
//   hideValue            — when true, clear the input after a selection
//   numberSuggestionsMax — cap on the number of suggestions shown
//   placeholder          — input placeholder text
class SuggestField extends React.Component {
    // `single` mirrors the input text; `suggestions` is the current dropdown.
    state = {
        single: '',
        suggestions: [],
    };
    // NOTE(review): Autosuggest expects this to be a pure value getter, but
    // it also pushes the selection to the parent via setValue — confirm the
    // side effect here is intentional.
    getSuggestionValue = (suggestion) => {
        this.props.setValue(suggestion.value);
        return this.props.hideValue ? "" : suggestion.label;
    }
    // Clears the input text; usable by parents holding a ref to this field.
    resetField = () => {
        this.setState({single: ""})
    }
    // Case-insensitive substring filter over labels, capped at
    // numberSuggestionsMax entries; empty input yields no suggestions.
    getSuggestions = (value) => {
        const escapedValue = escapeRegexCharacters(value.trim().toLowerCase());
        if (escapedValue === '') {
            return [];
        }
        const regex = new RegExp(escapedValue, 'i');
        return this.props.data.filter(suggestion => regex.test(suggestion.label)).slice(0, this.props.numberSuggestionsMax);
    }
    handleSuggestionsFetchRequested = ({value}) => {
        this.setState({
            suggestions: this.getSuggestions(value),
        });
    };
    handleSuggestionsClearRequested = () => {
        this.setState({
            suggestions: [],
        });
    };
    // Curried change handler: writes the new input text into state[name].
    handleChange = name => (event, {newValue}) => {
        this.setState({
            [name]: newValue,
        });
    };
    render() {
        const {classes} = this.props;
        const autosuggestProps = {
            renderInputComponent,
            suggestions: this.state.suggestions,
            onSuggestionsFetchRequested: this.handleSuggestionsFetchRequested,
            onSuggestionsClearRequested: this.handleSuggestionsClearRequested,
            getSuggestionValue: this.getSuggestionValue,
            renderSuggestion,
        };
        return (
            <div className={classes.root}>
                <Autosuggest
                    {...autosuggestProps}
                    inputProps={{
                        classes,
                        placeholder: this.props.placeholder,
                        value: this.state.single,
                        onChange: this.handleChange('single'),
                    }}
                    theme={{
                        container: classes.container,
                        suggestionsContainerOpen: classes.suggestionsContainerOpen,
                        suggestionsList: classes.suggestionsList,
                        suggestion: classes.suggestion,
                    }}
                    renderSuggestionsContainer={options => (
                        <Paper {...options.containerProps} square>
                            {options.children}
                        </Paper>
                    )}
                />
            </div>
        );
    }
}
SuggestField.propTypes = {
classes: PropTypes.object.isRequired,
};
export default withStyles(styles,{withTheme: true})(SuggestField);
| 23.880952 | 120 | 0.56331 | 3.0625 |
f05afdbd5aec954079117e24e6a1f75f80dba71c
| 1,523 |
py
|
Python
|
Consumer_test.py
|
image-store-org/image-store-py-web-api-consumer-test
|
59d805e8a7b459a97ede7285f6e4a67e87cfba02
|
[
"MIT"
] | null | null | null |
Consumer_test.py
|
image-store-org/image-store-py-web-api-consumer-test
|
59d805e8a7b459a97ede7285f6e4a67e87cfba02
|
[
"MIT"
] | null | null | null |
Consumer_test.py
|
image-store-org/image-store-py-web-api-consumer-test
|
59d805e8a7b459a97ede7285f6e4a67e87cfba02
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('dependencies/image-store-py-web-api-consumer')
from Consumer import Consumer
class Consumer_test:
    """Smoke-test driver that exercises each Consumer endpoint and prints results.

    Each method prints a green banner, the HTTP response object (status),
    and — for endpoints returning a body — the decoded JSON.
    """

    # ANSI escape sequences for a green section banner and reset.
    _BANNER_ON = '\x1b[6;30;42m'
    _BANNER_OFF = '\x1b[0m'

    def __init__(self):
        self.c = Consumer()

    def _banner(self, label):
        # Print a highlighted section header before each request.
        print(self._BANNER_ON + label + self._BANNER_OFF)

    def get(self):
        """GET the full collection; print status and JSON body."""
        self._banner('GET')
        # Issue the request once; the original fired it twice per call.
        response = self.c.get()
        print(response)
        print('{}\n'.format(response.json()))

    # get a data entry by id
    def get_id(self, id):
        """GET a single entry by id; print status and JSON body."""
        self._banner('GET ID({})'.format(id))
        response = self.c.get_id(id)
        print(response)
        print('{}\n'.format(response.json()))

    # get latest data entry
    def get_latest(self):
        """GET the most recently created entry; print status and JSON body."""
        self._banner('GET LATEST')
        response = self.c.get_latest()
        print(response)
        print('{}\n'.format(response.json()))

    # post a data entry, id incremented by internal mySQL counter
    def post(self):
        """POST a new entry; the id comes from the backend's auto-increment."""
        self._banner('POST')
        # Bug fix: the original called self.c.post() twice, creating TWO
        # records per invocation. Post once and reuse the response.
        response = self.c.post()
        print(response)
        print('{}\n'.format(response.json()))

    # TODO be able to edit payload with keywords e.g: title.TEST
    # edit existing data entry by id
    def put(self, id):
        """PUT (edit) an existing entry by id; prints the status only."""
        self._banner('PUT({})'.format(id))
        print(self.c.put(id))

    # delete data entry by id
    def delete(self, id):
        """DELETE an entry by id; prints the status only."""
        self._banner('DELETE({})'.format(id))
        print(self.c.delete(id))
if __name__ == '__main__':
    # Ad-hoc smoke run: exercises each endpoint in sequence against a live
    # API instance (ids 1 and 10 are assumed to exist — TODO confirm).
    consumer = Consumer_test()
    consumer.get()
    consumer.get_id(1)
    consumer.post()
    consumer.get_latest()
    consumer.put(10)
| 30.46 | 69 | 0.570584 | 3.21875 |
729e0202f2d020991875ed4628ee1c1cceab0f93
| 3,086 |
rs
|
Rust
|
core/tauri-utils/src/html.rs
|
jiusanzhou/tauri
|
aa498e72614f59afcdd1f637b4e3bdf6fe00b137
|
[
"Apache-2.0",
"MIT"
] | 1 |
2021-08-03T18:59:32.000Z
|
2021-08-03T18:59:32.000Z
|
core/tauri-utils/src/html.rs
|
jiusanzhou/tauri
|
aa498e72614f59afcdd1f637b4e3bdf6fe00b137
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
core/tauri-utils/src/html.rs
|
jiusanzhou/tauri
|
aa498e72614f59afcdd1f637b4e3bdf6fe00b137
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
// Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use html5ever::{interface::QualName, namespace_url, ns, LocalName};
use kuchiki::{Attribute, ExpandedName, NodeRef};
/// Injects the invoke key token to each script on the document.
///
/// The invoke key token is replaced at runtime with the actual invoke key value.
///
/// Only inline `<script type="module">` elements are rewritten; external
/// scripts (with `src`) and non-module scripts are left untouched.
pub fn inject_invoke_key_token(document: &mut NodeRef) {
  let mut targets = vec![];
  if let Ok(scripts) = document.select("script") {
    // Collect matches first: the tree is mutated below, so we must not
    // hold the live `select` iterator while detaching/inserting nodes.
    for target in scripts {
      targets.push(target);
    }
    for target in targets {
      let node = target.as_node();
      let element = node.as_element().unwrap();

      let attrs = element.attributes.borrow();
      // if the script is external (has `src`) or its type is not "module", we won't inject the token
      if attrs.get("src").is_some() || attrs.get("type") != Some("module") {
        continue;
      }

      // Build a replacement <script> carrying the original's attributes.
      let replacement_node = NodeRef::new_element(
        QualName::new(None, ns!(html), "script".into()),
        element
          .attributes
          .borrow()
          .clone()
          .map
          .into_iter()
          .collect::<Vec<_>>(),
      );
      let script = node.text_contents();
      // Prepend the token declaration to the original script body; the
      // placeholder is substituted with the real key at runtime.
      replacement_node.append(NodeRef::new_text(format!(
        r#"
  const __TAURI_INVOKE_KEY__ = __TAURI__INVOKE_KEY_TOKEN__;
  {}
  "#,
        script
      )));

      // Swap the rewritten script in place of the original.
      node.insert_after(replacement_node);
      node.detach();
    }
  }
}
/// Injects a content security policy to the HTML.
pub fn inject_csp(document: &mut NodeRef, csp: &str) {
if let Ok(ref head) = document.select_first("head") {
head.as_node().append(create_csp_meta_tag(csp));
} else {
let head = NodeRef::new_element(
QualName::new(None, ns!(html), LocalName::from("head")),
None,
);
head.append(create_csp_meta_tag(csp));
document.prepend(head);
}
}
/// Builds a `<meta http-equiv="Content-Security-Policy" content="...">`
/// element carrying the given policy string.
fn create_csp_meta_tag(csp: &str) -> NodeRef {
  NodeRef::new_element(
    QualName::new(None, ns!(html), LocalName::from("meta")),
    vec![
      (
        ExpandedName::new(ns!(), LocalName::from("http-equiv")),
        Attribute {
          prefix: None,
          value: "Content-Security-Policy".into(),
        },
      ),
      (
        ExpandedName::new(ns!(), LocalName::from("content")),
        Attribute {
          prefix: None,
          value: csp.into(),
        },
      ),
    ],
  )
}
#[cfg(test)]
mod tests {
  use kuchiki::traits::*;
  // Documents with and without an existing <head> must both end up with
  // the CSP meta tag inside a <head> element.
  #[test]
  fn csp() {
    let htmls = vec![
      "<html><head></head></html>".to_string(),
      "<html></html>".to_string(),
    ];
    for html in htmls {
      let mut document = kuchiki::parse_html().one(html);
      let csp = "default-src 'self'; img-src https://*; child-src 'none';";
      super::inject_csp(&mut document, csp);
      assert_eq!(
        document.to_string(),
        format!(
          r#"<html><head><meta content="{}" http-equiv="Content-Security-Policy"></head><body></body></html>"#,
          csp
        )
      );
    }
  }
}
| 27.801802 | 111 | 0.581011 | 3.15625 |
0bbab57a58980cab77be4152c0853746383805da
| 3,265 |
py
|
Python
|
examples/pincell_depletion/restart_depletion.py
|
norberto-schmidt/openmc
|
ff4844303154a68027b9c746300f5704f73e0875
|
[
"MIT"
] | 262 |
2018-08-09T21:27:03.000Z
|
2022-03-24T05:02:10.000Z
|
examples/pincell_depletion/restart_depletion.py
|
norberto-schmidt/openmc
|
ff4844303154a68027b9c746300f5704f73e0875
|
[
"MIT"
] | 753 |
2018-08-03T15:26:57.000Z
|
2022-03-29T23:54:48.000Z
|
examples/pincell_depletion/restart_depletion.py
|
norberto-schmidt/openmc
|
ff4844303154a68027b9c746300f5704f73e0875
|
[
"MIT"
] | 196 |
2018-08-06T13:41:14.000Z
|
2022-03-29T20:47:12.000Z
|
import openmc
import openmc.deplete
import matplotlib.pyplot as plt
###############################################################################
# Load previous simulation results
###############################################################################
# Load geometry from statepoint
statepoint = 'statepoint.100.h5'
with openmc.StatePoint(statepoint) as sp:
    geometry = sp.summary.geometry
# Load previous depletion results
previous_results = openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")
###############################################################################
# Transport calculation settings
###############################################################################
# Instantiate a Settings object, set all runtime parameters
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 10
settings.particles = 10000
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings.source = openmc.source.Source(space=uniform_dist)
# Shannon-entropy mesh used to monitor fission-source convergence
entropy_mesh = openmc.RegularMesh()
entropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50]
entropy_mesh.upper_right = [0.39218, 0.39218, 1.e50]
entropy_mesh.dimension = [10, 10, 1]
settings.entropy_mesh = entropy_mesh
###############################################################################
# Initialize and run depletion calculation
###############################################################################
# Create depletion "operator"
chain_file = './chain_simple.xml'
# Passing previous_results makes this a restart: the new time steps continue
# from the end state of the prior depletion run.
op = openmc.deplete.Operator(geometry, settings, chain_file, previous_results)
# Perform simulation using the predictor algorithm
time_steps = [1.0, 1.0, 1.0, 1.0, 1.0]  # days
power = 174  # W/cm, for 2D simulations only (use W for 3D)
integrator = openmc.deplete.PredictorIntegrator(op, time_steps, power, timestep_units='d')
integrator.integrate()
###############################################################################
# Read depletion calculation results
###############################################################################
# Open results file
results = openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")
# Obtain K_eff as a function of time
time, keff = results.get_eigenvalue()
# Obtain U235 concentration as a function of time ('1' is the material id)
time, n_U235 = results.get_atoms('1', 'U235')
# Obtain Xe135 capture reaction rate as a function of time
time, Xe_capture = results.get_reaction_rate('1', 'Xe135', '(n,gamma)')
###############################################################################
# Generate plots
###############################################################################
# Seconds per day, for converting the time axis
days = 24*60*60
plt.figure()
plt.plot(time/days, keff, label="K-effective")
plt.xlabel("Time (days)")
plt.ylabel("Keff")
plt.show()
plt.figure()
plt.plot(time/days, n_U235, label="U 235")
plt.xlabel("Time (days)")
plt.ylabel("n U5 (-)")
plt.show()
plt.figure()
plt.plot(time/days, Xe_capture, label="Xe135 capture")
plt.xlabel("Time (days)")
plt.ylabel("RR (-)")
plt.show()
plt.close('all')
0bd156c3187afbbb2b5232d091d6df24ad90beaa
| 1,483 |
js
|
JavaScript
|
07-Migrando_Funcionalidades/public/js/chat.js
|
joaonunesdev/websockets
|
b9a49c78ea4f63844e7b05d7a0a39c6cc2ca9ea0
|
[
"ISC"
] | null | null | null |
07-Migrando_Funcionalidades/public/js/chat.js
|
joaonunesdev/websockets
|
b9a49c78ea4f63844e7b05d7a0a39c6cc2ca9ea0
|
[
"ISC"
] | 1 |
2021-05-10T17:50:58.000Z
|
2021-05-10T17:50:58.000Z
|
07-Migrando_Funcionalidades/public/js/chat.js
|
joaonunesdev/websockets
|
b9a49c78ea4f63844e7b05d7a0a39c6cc2ca9ea0
|
[
"ISC"
] | null | null | null |
const socket = io()
// Recupera os elementos da página de chat
const $messageForm = document.querySelector('#message-form')
const $messageFormInput = $messageForm.querySelector('input')
const $messageFormButton = $messageForm.querySelector('button')
const $sendLocationButton = document.querySelector('#send-location')
const $messages = document.querySelector('#messages')
// Templates
const $messageTemplate = document.querySelector('#message-template').innerHTML
const $locationMessageTemplate = document.querySelector('#location-message-template').innerHTML
const $sidebarTemplate = document.querySelector('#sidebar-template').innerHTML
// Builds a random 5-character alphanumeric identifier.
// Temporary stand-in for real usernames; to be removed later.
const generateFakeName = () => {
  const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
  const pick = () => alphabet[Math.floor(Math.random() * alphabet.length)]
  return Array.from({ length: 5 }, pick).join('')
}
// Join the hard-coded room with a throwaway username; the server invokes
// the callback with an error string when the join is rejected, in which
// case we bounce back to the landing page.
socket.emit('join', { username: generateFakeName(), room: 'virtus1' }, error => {
  if (error) {
    alert(error)
    location.href = '/';
  }
})
// Render each incoming chat message through the Mustache template and
// append it to the message list.
socket.on('message', ({ username, text, createdAt }) => {
  const html = Mustache.render($messageTemplate, {
    username,
    text,
    createdAt: moment(createdAt).format('h:mm a')
  });
  $messages.insertAdjacentHTML('beforeend', html);
})
| 32.955556 | 95 | 0.729602 | 3.4375 |
18bc61b3d2966ba56930e9786f8e9a16b6ba1642
| 6,126 |
sql
|
SQL
|
migrate/migrations/20180315113303-strict-rotation-state.sql
|
kanish671/goalert
|
592c2f4ed21be3be78816c377301372e0e88c6a0
|
[
"Apache-2.0"
] | 1,614 |
2019-06-11T19:55:39.000Z
|
2022-03-31T10:34:37.000Z
|
migrate/migrations/20180315113303-strict-rotation-state.sql
|
kanish671/goalert
|
592c2f4ed21be3be78816c377301372e0e88c6a0
|
[
"Apache-2.0"
] | 671 |
2019-06-14T16:01:41.000Z
|
2022-03-30T19:16:29.000Z
|
migrate/migrations/20180315113303-strict-rotation-state.sql
|
kanish671/goalert
|
592c2f4ed21be3be78816c377301372e0e88c6a0
|
[
"Apache-2.0"
] | 179 |
2019-06-11T20:17:06.000Z
|
2022-03-25T06:31:09.000Z
|
-- +migrate Up
ALTER TABLE rotation_state
DROP CONSTRAINT rotation_state_rotation_participant_id_fkey,
ADD CONSTRAINT rotation_state_rotation_participant_id_fkey
FOREIGN KEY (rotation_participant_id)
REFERENCES rotation_participants (id)
ON DELETE RESTRICT,
ALTER rotation_participant_id SET NOT NULL;
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION fn_set_rot_state_pos_on_active_change() RETURNS TRIGGER AS
$$
BEGIN
SELECT position INTO NEW.position
FROM rotation_participants
WHERE id = NEW.rotation_participant_id;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
-- +migrate StatementEnd
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION fn_set_rot_state_pos_on_part_reorder() RETURNS TRIGGER AS
$$
BEGIN
UPDATE rotation_state
SET position = NEW.position
WHERE rotation_participant_id = NEW.id;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
-- +migrate StatementEnd
ALTER TABLE rotations
ADD COLUMN participant_count INT NOT NULL DEFAULT 0;
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION fn_incr_part_count_on_add() RETURNS TRIGGER AS
$$
BEGIN
UPDATE rotations
SET participant_count = participant_count + 1
WHERE id = NEW.rotation_id;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
-- +migrate StatementEnd
-- +migrate StatementBegin
CREATE OR REPLACE FUNCTION fn_decr_part_count_on_del() RETURNS TRIGGER AS
$$
BEGIN
UPDATE rotations
SET participant_count = participant_count - 1
WHERE id = OLD.rotation_id;
RETURN OLD;
END;
$$ LANGUAGE 'plpgsql';
-- +migrate StatementEnd
-- +migrate StatementBegin
-- Trigger body (AFTER INSERT ON rotation_participants): when a participant
-- is added, seed rotation_state so the rotation becomes active, starting
-- the first shift now. Idempotent after the first participant.
CREATE OR REPLACE FUNCTION fn_start_rotation_on_first_part_add() RETURNS TRIGGER AS
$$
DECLARE
    first_part UUID;
BEGIN
    -- Look up the participant currently in position 0 for this rotation.
    SELECT id
    INTO first_part
    FROM rotation_participants
    WHERE rotation_id = NEW.rotation_id AND position = 0;
    -- Seed the state exactly once; ON CONFLICT (rotation_id is unique in
    -- rotation_state) makes subsequent inserts no-ops.
    INSERT INTO rotation_state (
        rotation_id, rotation_participant_id, shift_start
    ) VALUES (
        NEW.rotation_id, first_part, now()
    ) ON CONFLICT DO NOTHING;
    RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
-- +migrate StatementEnd
-- +migrate StatementBegin
-- Trigger body (BEFORE DELETE ON rotation_participants): when the ACTIVE
-- participant is deleted, hand the rotation to a successor, or end the
-- rotation (delete its state) when no other participant remains.
CREATE OR REPLACE FUNCTION fn_advance_or_end_rot_on_part_del() RETURNS TRIGGER AS
$$
DECLARE
    new_part UUID;
    active_part UUID;
BEGIN
    SELECT rotation_participant_id
    INTO active_part
    FROM rotation_state
    WHERE rotation_id = OLD.rotation_id;
    -- Deleting a non-active participant needs no state change.
    IF active_part != OLD.id THEN
        RETURN OLD;
    END IF;
    -- Successor: prefer the participant sharing OLD's position (the one
    -- shifted into its slot), otherwise wrap around to position 0.
    SELECT id
    INTO new_part
    FROM rotation_participants
    WHERE
        rotation_id = OLD.rotation_id AND
        id != OLD.id AND
        position IN (0, OLD.position)
    ORDER BY position DESC
    LIMIT 1;
    -- No successor left: the rotation ends; otherwise advance to it.
    IF new_part ISNULL THEN
        DELETE FROM rotation_state
        WHERE rotation_id = OLD.rotation_id;
    ELSE
        UPDATE rotation_state
        SET rotation_participant_id = new_part
        WHERE rotation_id = OLD.rotation_id;
    END IF;
    RETURN OLD;
END;
$$ LANGUAGE 'plpgsql';
-- +migrate StatementEnd
LOCK rotation_participants;
WITH part_count AS (
SELECT rotation_id, count(*)
FROM rotation_participants
GROUP BY rotation_id
)
UPDATE rotations
SET participant_count = part_count.count
FROM part_count
WHERE part_count.rotation_id = rotations.id;
INSERT INTO rotation_state (rotation_id, rotation_participant_id, shift_start)
SELECT rotation_id, id, now()
FROM rotation_participants
WHERE position = 0
ON CONFLICT (rotation_id) DO NOTHING;
CREATE TRIGGER trg_set_rot_state_pos_on_active_change
BEFORE UPDATE ON rotation_state
FOR EACH ROW
WHEN (NEW.rotation_participant_id != OLD.rotation_participant_id)
EXECUTE PROCEDURE fn_set_rot_state_pos_on_active_change();
CREATE TRIGGER trg_set_rot_state_pos_on_part_reorder
BEFORE UPDATE ON rotation_participants
FOR EACH ROW
WHEN (NEW.position != OLD.position)
EXECUTE PROCEDURE fn_set_rot_state_pos_on_part_reorder();
CREATE TRIGGER trg_incr_part_count_on_add
BEFORE INSERT ON rotation_participants
FOR EACH ROW
EXECUTE PROCEDURE fn_incr_part_count_on_add();
CREATE TRIGGER trg_start_rotation_on_first_part_add
AFTER INSERT ON rotation_participants
FOR EACH ROW
EXECUTE PROCEDURE fn_start_rotation_on_first_part_add();
CREATE TRIGGER trg_10_decr_part_count_on_del
BEFORE DELETE ON rotation_participants
FOR EACH ROW
EXECUTE PROCEDURE fn_decr_part_count_on_del();
DROP TRIGGER trg_decr_rot_part_position_on_delete ON rotation_participants;
CREATE TRIGGER trg_20_decr_rot_part_position_on_delete
BEFORE DELETE ON rotation_participants
FOR EACH ROW
EXECUTE PROCEDURE fn_decr_rot_part_position_on_delete();
CREATE TRIGGER trg_30_advance_or_end_rot_on_part_del
BEFORE DELETE ON rotation_participants
FOR EACH ROW
EXECUTE PROCEDURE fn_advance_or_end_rot_on_part_del();
-- +migrate Down
ALTER TABLE rotation_state
ALTER rotation_participant_id DROP NOT NULL,
DROP CONSTRAINT rotation_state_rotation_participant_id_fkey,
ADD CONSTRAINT rotation_state_rotation_participant_id_fkey
FOREIGN KEY (rotation_participant_id)
REFERENCES rotation_participants (id)
ON DELETE SET NULL;
DROP TRIGGER trg_set_rot_state_pos_on_active_change ON rotation_state;
DROP TRIGGER trg_set_rot_state_pos_on_part_reorder ON rotation_participants;
DROP TRIGGER trg_incr_part_count_on_add ON rotation_participants;
DROP TRIGGER trg_start_rotation_on_first_part_add ON rotation_participants;
DROP TRIGGER trg_10_decr_part_count_on_del ON rotation_participants;
DROP TRIGGER trg_20_decr_rot_part_position_on_delete ON rotation_participants;
CREATE TRIGGER trg_decr_rot_part_position_on_delete
BEFORE DELETE ON rotation_participants
FOR EACH ROW
EXECUTE PROCEDURE fn_decr_rot_part_position_on_delete();
DROP TRIGGER trg_30_advance_or_end_rot_on_part_del ON rotation_participants;
DROP FUNCTION fn_set_rot_state_pos_on_active_change();
DROP FUNCTION fn_set_rot_state_pos_on_part_reorder();
DROP FUNCTION fn_incr_part_count_on_add();
DROP FUNCTION fn_decr_part_count_on_del();
DROP FUNCTION fn_start_rotation_on_first_part_add();
DROP FUNCTION fn_advance_or_end_rot_on_part_del();
ALTER TABLE rotations
DROP COLUMN participant_count;
| 27.106195 | 85 | 0.797094 | 3.09375 |
d98e71d63e7ec329c58b785c1245b11c91bcd862
| 5,374 |
rs
|
Rust
|
src/raw/parse.rs
|
mellon85/brokaw
|
a25c224ea72f70e2139bd54f23745b03ce73377c
|
[
"MIT"
] | 3 |
2020-06-12T23:20:46.000Z
|
2021-06-08T05:44:38.000Z
|
src/raw/parse.rs
|
mellon85/brokaw
|
a25c224ea72f70e2139bd54f23745b03ce73377c
|
[
"MIT"
] | 7 |
2020-06-19T20:49:41.000Z
|
2020-10-19T17:00:30.000Z
|
src/raw/parse.rs
|
mellon85/brokaw
|
a25c224ea72f70e2139bd54f23745b03ce73377c
|
[
"MIT"
] | 2 |
2020-09-11T08:27:07.000Z
|
2020-11-20T03:20:20.000Z
|
use std::convert::TryInto;
use nom::bytes::complete::take_until;
use nom::character::complete::{crlf, one_of};
use nom::combinator::all_consuming;
use nom::sequence::{terminated, tuple};
use nom::IResult;
/// The first line of an NNTP response
///
/// This struct contains data borrowed from a read buffer
#[derive(Clone, Copy, Debug, PartialEq)]
pub(crate) struct InitialResponseLine<'a> {
/// The response code
pub code: &'a [u8; 3],
/// The data within the response NOT including leading whitespace and terminator characters
pub data: &'a [u8],
/// The entire response including the response code and termination characters
pub buffer: &'a [u8],
}
/// Return true if the first character is a digit
fn one_of_digit(b: &[u8]) -> IResult<&[u8], char> {
one_of("0123456789")(b)
}
/// Takes a line from the input buffer
///
/// A "line" is a sequence of bytes terminated by a CRLF (`\r\n`) sequence.
/// The CRLF terminator is consumed from the input but not included in the
/// returned line.
fn take_line(b: &[u8]) -> IResult<&[u8], &[u8]> {
    let (rest, line) = terminated(take_until("\r\n"), crlf)(b)?;
    Ok((rest, line))
}
/// Takes a response code from the buffer
///
/// A valid response code is three ASCII digits where the first digit is between 1 and 5
///
/// On success the remainder begins immediately after the third digit; any
/// trailing bytes (even further digits) are left for the caller.
fn take_response_code(b: &[u8]) -> IResult<&[u8], &[u8]> {
    let res: IResult<_, (char, char, char)> =
        tuple((one_of("12345"), one_of_digit, one_of_digit))(b);
    let (rest, _) = res?;
    // The three parsed chars are ASCII, so the code is exactly b[0..3].
    Ok((rest, &b[0..3]))
}
/// Returns true if the buffer only contains a `.`
///
/// A lone `.` line terminates an NNTP multi-line data block.
pub(crate) fn is_end_of_datablock(b: &[u8]) -> bool {
    matches!(b, [b'.'])
}
/// Parse the first line of an NNTP response
///
/// Per [RFC 3977](https://tools.ietf.org/html/rfc3977#section-3.2), the first line of an
/// NNTP response consists of a three-digit response code, a single space, and then
/// some text terminated with a CRLF.
///
/// `all_consuming` rejects any trailing bytes after the CRLF, so the caller
/// must pass exactly one line.
pub(crate) fn parse_first_line(b: &[u8]) -> IResult<&[u8], InitialResponseLine<'_>> {
    let res = all_consuming(tuple((
        take_response_code,
        nom::character::complete::char(' '),
        take_until("\r\n"),
        crlf,
    )))(b)?;
    let (rest, (code, _, data, _crlf)) = res;
    // take_response_code returned exactly 3 bytes, so the &[u8] -> &[u8; 3]
    // conversion cannot fail unless the parser itself is broken.
    let code = code
        .try_into()
        .expect("Code should be three bytes, there is likely a bug in the parser.");
    Ok((
        rest,
        InitialResponseLine {
            code,
            data,
            buffer: b,
        },
    ))
}
/// Parse a data block line from the buffer
///
/// The input must be exactly one CRLF-terminated line; trailing bytes after
/// the CRLF are an error (`all_consuming`).
pub(crate) fn parse_data_block_line(b: &[u8]) -> IResult<&[u8], &[u8]> {
    all_consuming(take_line)(b)
}
#[cfg(test)]
mod tests {
    use super::*;
    use nom::error::ErrorKind;
    use nom::Err;
    // Realistic server greeting fixture, with and without the CRLF terminator.
    const MOTD: &[u8] =
        b"200 news.example.com InterNetNews server INN 2.5.5 ready (transit mode)\r\n";
    const MOTD_NO_CRLF: &[u8] =
        b"200 news.example.com InterNetNews server INN 2.5.5 ready (transit mode)";
    mod test_parse_initial_response {
        use super::*;
        #[test]
        fn happy_path() {
            let (_remainder, raw_response) = parse_first_line(MOTD).unwrap();
            let expected_resp = InitialResponseLine {
                code: b"200",
                data: &b"news.example.com InterNetNews server INN 2.5.5 ready (transit mode)"[..],
                buffer: &MOTD,
            };
            assert_eq!(raw_response, expected_resp)
        }
        #[test]
        fn test_remaining_data() {
            // `all_consuming` must reject any bytes after the first CRLF.
            let data = [MOTD, &b"SOME MORE DATA\r\n"[..]].concat();
            assert!(parse_first_line(&data).is_err());
        }
    }
    mod test_take_line {
        use super::*;
        #[test]
        fn happy_path() {
            assert_eq!(take_line(MOTD), Ok((&b""[..], MOTD_NO_CRLF)));
        }
        #[test]
        fn test_gzip() {
            // Binary (gzip) fixture: take_line must stop at the CRLF even when
            // the line contains arbitrary non-ASCII bytes.
            let header = include_bytes!(concat!(
                env!("CARGO_MANIFEST_DIR"),
                "/tests/xover_gzip_header"
            ));
            let (rest, data) = take_line(header).unwrap();
            assert_eq!(rest.len(), 0);
            // Everything except the trailing CRLF is returned.
            assert_eq!(data, &header[..header.len() - 2]);
        }
    }
    mod test_parse_data_block {
        use super::*;
        #[test]
        fn happy_path() {
            let msg = b"101 Capability list:\r\n";
            let (_remainder, block) = parse_data_block_line(msg).unwrap();
            assert_eq!(block, b"101 Capability list:")
        }
    }
    mod test_parse_response_code {
        use super::*;
        #[test]
        fn happy_path() {
            // Only the first three digits are consumed; anything after them is
            // left in the remainder.
            [
                &b"200"[..],
                &b"200 "[..],
                &b"2000"[..],
                &b"200000"[..],
                &b"200123"[..],
                &b"200abc"[..],
            ]
            .iter()
            .for_each(|input| {
                let res = take_response_code(input);
                assert!(res.is_ok());
                let (_rest, code) = res.unwrap();
                assert_eq!(code, b"200")
            });
        }
        #[test]
        fn too_short() {
            println!("Testing {:?}", b"5");
            assert_eq!(
                take_response_code(b"5"),
                Err(Err::Error((&b""[..], ErrorKind::OneOf)))
            )
        }
        #[test]
        fn not_enough_digits() {
            assert_eq!(
                take_response_code(b"5ab500"),
                Err(Err::Error((&b"ab500"[..], ErrorKind::OneOf)))
            )
        }
    }
}
| 28.136126 | 98 | 0.533122 | 3.484375 |
18ad84958232a903b6ede66f506cf3b9d327dd72
| 1,428 |
rs
|
Rust
|
src/main.rs
|
OverengineeredOne/oecli
|
4635ae6e2c2fe5a28b49f95ff53cc54d8ca13a86
|
[
"MIT"
] | null | null | null |
src/main.rs
|
OverengineeredOne/oecli
|
4635ae6e2c2fe5a28b49f95ff53cc54d8ca13a86
|
[
"MIT"
] | null | null | null |
src/main.rs
|
OverengineeredOne/oecli
|
4635ae6e2c2fe5a28b49f95ff53cc54d8ca13a86
|
[
"MIT"
] | null | null | null |
//! oecli is a command line interface to provide a productivity boost by
//! handling boilerplate and some operational overhead with development within
//! the Overengineered ecosystem.
use clap::{Args, Parser, Subcommand};
mod github;
mod node;
/// oecli parser: top-level command-line interface definition.
#[derive(Parser)]
#[clap(author, version, about, long_about = None)]
#[clap(propagate_version = true)]
struct Cli {
    /// Subcommand passed into oecli
    #[clap(subcommand)]
    command: Commands,
}
/// Different sub command line options available with oecli
#[derive(Subcommand)]
enum Commands {
    /// Progressive Web App
    PWA(PWA),
}
/// Subcommand for interacting with Progressive Web Apps. Overengineered uses
/// Yew, which is a modern Rust framework for creating multi-threaded front-end
/// web applications using WebAssembly.
#[derive(Args, Debug)]
struct PWA {
    /// Will create a new Github repository with the provided name. This
    /// repository will use a template to create a Yew app using the PatternFly
    /// for a component library.
    #[clap(long)]
    new: String,
}
/// Entry point: parses the CLI arguments and dispatches the chosen subcommand.
fn main() {
    let args = Cli::parse();
    match &args.command {
        Commands::PWA(pwa) => {
            // Create the remote repository, clone it locally as
            // "<logged-in user>/<repo>", then install npm dependencies.
            github::create(&pwa.new);
            let owner = github::logged_in_user();
            github::clone(&format!("{}/{}", owner, pwa.new));
            node::npm_install(&pwa.new);
        }
    }
}
| 27.461538 | 79 | 0.660364 | 3.109375 |
4a3895446295ae81561a0d2d78f8ce0351aee093
| 2,348 |
js
|
JavaScript
|
src/algebras/Maybe.js
|
mandober/electron-bookmarks
|
2822950955da60e0d0e7f219726487dcfe453745
|
[
"MIT"
] | null | null | null |
src/algebras/Maybe.js
|
mandober/electron-bookmarks
|
2822950955da60e0d0e7f219726487dcfe453745
|
[
"MIT"
] | 1 |
2021-05-11T07:18:24.000Z
|
2021-05-11T07:18:24.000Z
|
src/algebras/Maybe.js
|
mandober/electron-bookmarks
|
2822950955da60e0d0e7f219726487dcfe453745
|
[
"MIT"
] | null | null | null |
/*
Maybe
=====
data Maybe a = Nothing | Just a
Maybe has an instance for:
- Semigroup
- Monoid
- Pointed
- Functor
- Applicative
- Monad
- Foldable
- Traversable
*/
/**
 * Class-based Maybe: wraps a possibly-absent value.
 * A wrapped `null`/`undefined` counts as Nothing; anything else is Just.
 * Note: `toString`/`valueOf` make instances participate in string and
 * numeric coercion (Nothing coerces to 0).
 */
class Maybe {
  // -------------------------------------------------------------- internals
  #value // private field holding the wrapped value
  constructor(x) { this.#value = x }
  // Loose == null catches both null and undefined.
  get isNothing() { return this.#value == null }
  get isJust() { return !this.isNothing }
  toString = () =>
    this.isNothing ? "Nothing" : `Just(${this.#value})`
  // Numeric coercion: Nothing -> 0, Just(x) -> Number(x).
  valueOf = () =>
    this.isNothing ? 0 : Number(this.#value)
  // --------------------------------------------------------------------- Eq
  // Compares the *inner* value (strict ===) against a raw value, not another Maybe.
  eq = b => this.#value === b
  // -------------------------------------------------------------------- Ord
  // ---------------------------------------------------------------- Pointed
  static of = x => new Maybe(x)
  // ---------------------------------------------------------------- Functor
  map = f => this.isNothing ? this : Maybe.of(f(this.#value))
  // ------------------------------------------------------------ Applicative
  // Assumes *this* Maybe wraps a function, which is mapped over `f`'s value.
  ap = f => this.isNothing ? this : f.map(this.#value)
  // ------------------------------------------------------------------ Monad
  chain = f => this.map(f).join()
  // join flattens one level: Just(Just(x)) -> Just(x); Nothing stays Nothing.
  join = () => this.isNothing ? this : this.#value
  // ------------------------------------------------------------ Traversable
  sequence = of => this.traverse(of, x => x)
  traverse = (of, f) =>
    this.isNothing ? of(this) : f(this.#value).map(Maybe.of)
}
// ---------------------------------------------------------------------- or
// Lightweight object-literal encoding of Maybe (no class machinery).
const Just = value => ({
  // map :: Maybe a ~> (a -> b) -> Maybe b
  map(fn) { return Just(fn(value)) },
  // fold :: Maybe a ~> (b, a -> b) -> b — applies fn, ignoring the default.
  fold(fallback, fn) { return fn(value) }
})
const Nothing = ({
  // map :: Maybe a ~> (a -> b) -> Maybe b — nothing to map; stays Nothing.
  map(fn) { return Nothing },
  // fold :: Maybe a ~> (b, a -> b) -> b — returns the default value.
  fold(fallback, fn) { return fallback }
})
// fromNullable :: a? -> Maybe a
const fromNullable = value => value == null ? Nothing : Just(value)
// ---------------------------------------------------------------------- check
// Smoke test: `j + j` / `j + n` exercise valueOf-based numeric coercion
// (Nothing coerces to 0), toString() shows the tagged representation, and
// fromNullable(undefined).fold(442) hits the Nothing default path.
let j = new Maybe(42)
let n = new Maybe()
console.log('\n',
  j.toString(), '\n',
  j + j, '\n',
  n.toString(), '\n',
  j + n, '\n',
  fromNullable(undefined).fold(442)
)
| 24.978723 | 79 | 0.366695 | 3.234375 |
1198e1a05933a75b3086364306664ceba999626a
| 3,054 |
lua
|
Lua
|
lua/tanklib/shared/quaternion.lua
|
TankNut/TankLib
|
79276b44a88a5a649f2b5c5766692d0f87ce10d1
|
[
"MIT"
] | null | null | null |
lua/tanklib/shared/quaternion.lua
|
TankNut/TankLib
|
79276b44a88a5a649f2b5c5766692d0f87ce10d1
|
[
"MIT"
] | null | null | null |
lua/tanklib/shared/quaternion.lua
|
TankNut/TankLib
|
79276b44a88a5a649f2b5c5766692d0f87ce10d1
|
[
"MIT"
] | null | null | null |
local class = TankLib.Class:New("TankLib.Quaternion")
-- Most of this is transcribed from wire's E2 implementation
-- Components are stored as a plain array: [1] = w (real), [2..4] = x, y, z.
local deg2rad = math.pi / 180
local rad2deg = 180 / math.pi
-- Hamilton product of two quaternions given as {w, x, y, z} tables.
-- Returns a new plain table (not a class instance). Order matters:
-- quaternion multiplication is not commutative.
local function qmul(a, b)
	local a1, a2, a3, a4 = unpack(a)
	local b1, b2, b3, b4 = unpack(b)
	return {
		a1 * b1 - a2 * b2 - a3 * b3 - a4 * b4,
		a1 * b2 + a2 * b1 + a3 * b4 - a4 * b3,
		a1 * b3 + a3 * b1 + a4 * b2 - a2 * b4,
		a1 * b4 + a4 * b1 + a2 * b3 - a3 * b2
	}
end
-- Constructor: accepts up to four components (w, x, y, z).
-- Missing (or false) components default to 0.
function class:Initialize(...)
	local args = {...}
	for i = 1, 4 do
		self[i] = args[i] or 0
	end
end
-- Static
-- Builds a "pure" quaternion (w = 0) carrying the vector's components.
function class.Static:FromVector(vec)
	return class:New(0, vec.x, vec.y, vec.z)
end
-- Builds a rotation quaternion from Euler angles by composing the three
-- half-angle axis rotations as yaw * (pitch * roll).
function class.Static:FromAngle(ang)
	local p, y, r = ang:Unpack()
	-- Half angles in radians, as required by the quaternion construction.
	p = p * deg2rad * 0.5
	y = y * deg2rad * 0.5
	r = r * deg2rad * 0.5
	local qp = {math.cos(p), 0, math.sin(p), 0}
	local qy = {math.cos(y), 0, 0, math.sin(y)}
	local qr = {math.cos(r), math.sin(r), 0, 0}
	return class:New(unpack(qmul(qy, qmul(qp, qr))))
end
-- Builds a rotation from a forward and up vector: derives pitch/yaw from
-- the forward vector, then recovers roll from the up vector.
function class.Static:FromVectors(forward, up)
	local y = up:Cross(forward):GetNormalized()
	local ang = forward:Angle()
	ang.p = math.NormalizeAngle(ang.p)
	ang.y = math.NormalizeAngle(ang.y)
	-- Reference "right" direction implied by yaw alone (no roll).
	local yyaw = Vector(0, 1, 0)
	yyaw:Rotate(Angle(0, ang.y, 0))
	local roll = math.acos(math.Clamp(y:Dot(yyaw), -1, 1)) * rad2deg
	-- acos loses the sign; recover it from the z component.
	local dot = y.z
	if dot < 0 then
		roll = -roll
	end
	return self:FromAngle(Angle(ang.p, ang.y, roll))
end
-- Axis-angle rotation: `ang` degrees around (normalized) `axis`.
function class.Static:Rotation(axis, ang)
	axis = axis:GetNormalized()
	ang = ang * deg2rad * 0.5
	return class:New(math.cos(ang), axis.x * math.sin(ang), axis.y * math.sin(ang), axis.z * math.sin(ang))
end
-- Meta
-- Unary minus: component-wise negation.
function class.__unm(self)
	local w, x, y, z = unpack(self)
	return class:New(-w, -x, -y, -z)
end
-- Addition: a number is added to the real part only; another quaternion is
-- added component-wise.
function class.__add(a, b)
	if isnumber(b) then
		return class:New(a[1] + b, a[2], a[3], a[4])
	end
	local sum = {}
	for i = 1, 4 do
		sum[i] = a[i] + b[i]
	end
	return class:New(unpack(sum))
end
-- Subtraction: mirrors __add.
function class.__sub(a, b)
	if isnumber(b) then
		return class:New(a[1] - b, a[2], a[3], a[4])
	end
	local diff = {}
	for i = 1, 4 do
		diff[i] = a[i] - b[i]
	end
	return class:New(unpack(diff))
end
-- Multiplication: the Hamilton product (non-commutative).
function class.__mul(a, b)
	return class:New(unpack(qmul(a, b)))
end
-- Methods
-- Converts the quaternion back to a pitch/yaw/roll Angle.
-- The quaternion is normalized first; a zero-length quaternion yields Angle(0,0,0).
function class:Angle()
	local l = math.sqrt((self[1] * self[1]) + (self[2] * self[2]) + (self[3] * self[3]) + (self[4] * self[4]))
	if l == 0 then
		return Angle()
	end
	local q1 = self[1] / l
	local q2 = self[2] / l
	local q3 = self[3] / l
	local q4 = self[4] / l
	-- Rotated basis vectors reconstructed from the (normalized) quaternion.
	local x = Vector(
		(q1 * q1) + (q2 * q2) - (q3 * q3) - (q4 * q4),
		(2 * q3 * q2) + (2 * q4 * q1),
		(2 * q4 * q2) - (2 * q3 * q1)
	)
	local y = Vector(
		(2 * q2 * q3) - (2 * q4 * q1),
		(q1 * q1) - (q2 * q2) + (q3 * q3) - (q4 * q4),
		(2 * q2 * q1) + (2 * q3 * q4)
	)
	local ang = x:Angle()
	ang.p = math.NormalizeAngle(ang.p)
	ang.y = math.NormalizeAngle(ang.y)
	-- Recover roll the same way FromVectors does: compare y against the
	-- roll-less direction implied by yaw alone.
	local yyaw = Vector(0, 1, 0)
	yyaw:Rotate(Angle(0, ang.y, 0))
	local roll = math.acos(math.Clamp(y:Dot(yyaw), -1, 1)) * rad2deg
	-- acos loses the sign; recover it from the z component.
	local dot = y.z
	if dot < 0 then
		roll = -roll
	end
	return Angle(ang.p, ang.y, roll)
end
-- Export the class into the TankLib namespace.
TankLib.Quaternion = class
| 20.635135 | 107 | 0.588736 | 3.359375 |
3f1fec7621ef992d6bd8e668c951c177ff04698c
| 2,097 |
swift
|
Swift
|
Tests/SMStorageTests/UserDefaultsTests.swift
|
siginur/SMStorage
|
9ff6d9ec016797699d4e64557c97490b7e40740e
|
[
"MIT"
] | null | null | null |
Tests/SMStorageTests/UserDefaultsTests.swift
|
siginur/SMStorage
|
9ff6d9ec016797699d4e64557c97490b7e40740e
|
[
"MIT"
] | null | null | null |
Tests/SMStorageTests/UserDefaultsTests.swift
|
siginur/SMStorage
|
9ff6d9ec016797699d4e64557c97490b7e40740e
|
[
"MIT"
] | null | null | null |
import XCTest
import SMStorage
/// Round-trip tests for SMStorage's UserDefaults-backed store, covering the
/// three supported subscript key styles: String, Int, and a custom StorageKey enum.
final class SMStorageTests: XCTestCase {
	// Fixture values — one per supported value type.
	let intValue: Int = 1
	let doubleValue: Double = 2.3
	let stringValue: String = "stringValue"
	let boolValue: Bool = true
	let dataValue: Data = "some data".data(using: .utf8)!
	/// Stores and reads back every value type using plain String keys.
	func testString() throws {
		let storage = SMStorage.userDefaults()
		storage["int"] = intValue
		storage["string"] = stringValue
		storage["double"] = doubleValue
		storage["bool"] = boolValue
		storage["data"] = dataValue
		XCTAssertEqual(intValue, storage["int"])
		XCTAssertEqual(stringValue, storage["string"])
		XCTAssertEqual(doubleValue, storage["double"])
		XCTAssertEqual(boolValue, storage["bool"])
		XCTAssertEqual(dataValue, storage["data"])
	}
	/// Same round trip, keyed by Int subscripts.
	func testInt() throws {
		let storage = SMStorage<Int>.userDefaults()
		storage[1] = intValue
		storage[2] = stringValue
		storage[3] = doubleValue
		storage[4] = boolValue
		storage[5] = dataValue
		XCTAssertEqual(intValue, storage[1])
		XCTAssertEqual(stringValue, storage[2])
		XCTAssertEqual(doubleValue, storage[3])
		XCTAssertEqual(boolValue, storage[4])
		XCTAssertEqual(dataValue, storage[5])
	}
	/// Same round trip, keyed by a custom StorageKey-conforming enum.
	func testEnum() throws {
		enum Key: String, StorageKey {
			case int
			case string
			case double
			case bool
			case dataValue
			// StorageKey conformance: the backing string is the raw value.
			var key: String { rawValue }
		}
		let storage = SMStorage<Key>.userDefaults()
		storage[.int] = intValue
		storage[.string] = stringValue
		storage[.double] = doubleValue
		storage[.bool] = boolValue
		storage[.dataValue] = dataValue
		XCTAssertEqual(intValue, storage[.int])
		XCTAssertEqual(stringValue, storage[.string])
		XCTAssertEqual(doubleValue, storage[.double])
		XCTAssertEqual(boolValue, storage[.bool])
		XCTAssertEqual(dataValue, storage[.dataValue])
	}
}
| 30.391304 | 57 | 0.596567 | 3.03125 |
e0dd6f01cd34c13c1202a3b29b51ac7827032c28
| 1,370 |
kt
|
Kotlin
|
core/src/main/kotlin/net/justmachinery/futility/streams/OverrideInputStream.kt
|
ScottPeterJohnson/futility
|
0f863a873fa6caf3aef136b03c1b5c1fde9bb9ac
|
[
"Apache-2.0"
] | null | null | null |
core/src/main/kotlin/net/justmachinery/futility/streams/OverrideInputStream.kt
|
ScottPeterJohnson/futility
|
0f863a873fa6caf3aef136b03c1b5c1fde9bb9ac
|
[
"Apache-2.0"
] | null | null | null |
core/src/main/kotlin/net/justmachinery/futility/streams/OverrideInputStream.kt
|
ScottPeterJohnson/futility
|
0f863a873fa6caf3aef136b03c1b5c1fde9bb9ac
|
[
"Apache-2.0"
] | null | null | null |
package net.justmachinery.futility.streams
import java.io.InputStream
import java.io.OutputStream
/**
* Unlike FilterInputStream and FilterOutputStream, these don't have hidden stupidities, like writing all the bytes
* in a write(ByteArray,Int,Int) method one by one.
* Note that if you override any of the read() methods, you probably want to override all of them.
*/
/**
 * An [InputStream] wrapper that forwards every call verbatim to [input].
 *
 * Because each method delegates one-to-one, subclasses can override exactly
 * the calls they care about with no hidden fallbacks. If you override any of
 * the read() methods, you probably want to override all of them.
 */
public open class OverrideInputStream(public val input: InputStream) : InputStream() {
    override fun read(): Int = input.read()
    override fun available(): Int = input.available()
    override fun close(): Unit = input.close()
    override fun mark(readlimit: Int): Unit = input.mark(readlimit)
    override fun markSupported(): Boolean = input.markSupported()
    override fun read(b: ByteArray?): Int = input.read(b)
    override fun read(b: ByteArray?, off: Int, len: Int): Int = input.read(b, off, len)
    override fun readAllBytes(): ByteArray = input.readAllBytes()
    override fun readNBytes(len: Int): ByteArray = input.readNBytes(len)
    override fun readNBytes(b: ByteArray?, off: Int, len: Int): Int = input.readNBytes(b, off, len)
    override fun reset(): Unit = input.reset()
    override fun skip(n: Long): Long = input.skip(n)
    override fun transferTo(out: OutputStream?): Long = input.transferTo(out)
    override fun skipNBytes(n: Long): Unit = input.skipNBytes(n)
}
| 52.692308 | 115 | 0.720438 | 3.15625 |
c38c92556fc455a9104e20d37e5f0aac18f268af
| 29,475 |
rs
|
Rust
|
src/app/states.rs
|
GuillaumeGomez/bottom
|
398d52af2e6b001256adef71be8e7c85a943066c
|
[
"MIT"
] | null | null | null |
src/app/states.rs
|
GuillaumeGomez/bottom
|
398d52af2e6b001256adef71be8e7c85a943066c
|
[
"MIT"
] | null | null | null |
src/app/states.rs
|
GuillaumeGomez/bottom
|
398d52af2e6b001256adef71be8e7c85a943066c
|
[
"MIT"
] | null | null | null |
use std::{collections::HashMap, time::Instant};
use unicode_segmentation::GraphemeCursor;
use tui::widgets::TableState;
use crate::{
app::{layout_manager::BottomWidgetType, query::*},
constants,
data_harvester::processes::{self, ProcessSorting},
};
use ProcessSorting::*;
/// Direction a scrollable widget is currently being scrolled.
#[derive(Debug)]
pub enum ScrollDirection {
    // UP means scrolling up --- this usually DECREMENTS
    Up,
    // DOWN means scrolling down --- this usually INCREMENTS
    Down,
}
impl Default for ScrollDirection {
    fn default() -> Self {
        ScrollDirection::Down
    }
}
/// Direction the search cursor moves within the query string.
#[derive(Debug)]
pub enum CursorDirection {
    Left,
    Right,
}
/// AppScrollWidgetState deals with fields for a scrollable app's current state.
#[derive(Default)]
pub struct AppScrollWidgetState {
    pub current_scroll_position: usize,
    pub previous_scroll_position: usize,
    pub scroll_direction: ScrollDirection,
    pub table_state: TableState,
}
/// Selection in the process-kill dialog: either cancel, or kill with the
/// given signal number.
#[derive(PartialEq)]
pub enum KillSignal {
    Cancel,
    Kill(usize),
}
impl Default for KillSignal {
    // Default to signal 15 on Unix targets...
    #[cfg(target_family = "unix")]
    fn default() -> Self {
        KillSignal::Kill(15)
    }
    // ...and to signal 1 on Windows targets.
    #[cfg(target_os = "windows")]
    fn default() -> Self {
        KillSignal::Kill(1)
    }
}
/// State for the process-kill ("delete") confirmation dialog.
#[derive(Default)]
pub struct AppDeleteDialogState {
    pub is_showing_dd: bool,
    pub selected_signal: KillSignal,
    /// tl x, tl y, br x, br y, index/signal
    pub button_positions: Vec<(u16, u16, u16, u16, usize)>,
    pub keyboard_signal_select: usize,
    // Timestamp of the last numeric keypress (for multi-digit signal entry).
    pub last_number_press: Option<Instant>,
    pub scroll_pos: usize,
}
/// State for the help dialog: visibility, scroll position, and per-section
/// scroll offsets.
pub struct AppHelpDialogState {
    pub is_showing_help: bool,
    pub scroll_state: ParagraphScrollState,
    pub index_shortcuts: Vec<u16>,
}
impl Default for AppHelpDialogState {
    fn default() -> Self {
        AppHelpDialogState {
            is_showing_help: false,
            scroll_state: ParagraphScrollState::default(),
            // One offset slot per help-text section.
            index_shortcuts: vec![0; constants::HELP_TEXT.len()],
        }
    }
}
/// AppSearchState deals with generic searching (I might do this in the future).
pub struct AppSearchState {
    pub is_enabled: bool,
    pub current_search_query: String,
    // A blank query matches everything; an invalid one matches nothing.
    pub is_blank_search: bool,
    pub is_invalid_search: bool,
    // Grapheme-aware cursor into `current_search_query`.
    pub grapheme_cursor: GraphemeCursor,
    pub cursor_direction: CursorDirection,
    pub cursor_bar: usize,
    /// This represents the position in terms of CHARACTERS, not graphemes
    pub char_cursor_position: usize,
    /// The query
    pub query: Option<Query>,
    pub error_message: Option<String>,
}
impl Default for AppSearchState {
    fn default() -> Self {
        AppSearchState {
            is_enabled: false,
            current_search_query: String::default(),
            is_invalid_search: false,
            is_blank_search: true,
            grapheme_cursor: GraphemeCursor::new(0, 0, true),
            cursor_direction: CursorDirection::Right,
            cursor_bar: 0,
            char_cursor_position: 0,
            query: None,
            error_message: None,
        }
    }
}
impl AppSearchState {
    /// Returns a reset but still enabled app search state
    pub fn reset(&mut self) {
        *self = AppSearchState {
            is_enabled: self.is_enabled,
            ..AppSearchState::default()
        }
    }
    /// True if the query currently yields no usable filter (blank or invalid).
    pub fn is_invalid_or_blank_search(&self) -> bool {
        self.is_blank_search || self.is_invalid_search
    }
}
/// Meant for canvas operations involving table column widths.
#[derive(Default)]
pub struct CanvasTableWidthState {
    pub desired_column_widths: Vec<u16>,
    pub calculated_column_widths: Vec<u16>,
}
/// ProcessSearchState only deals with process' search's current settings and state.
pub struct ProcessSearchState {
    pub search_state: AppSearchState,
    pub is_ignoring_case: bool,
    pub is_searching_whole_word: bool,
    pub is_searching_with_regex: bool,
}
impl Default for ProcessSearchState {
    fn default() -> Self {
        // Defaults: case-insensitive, substring (not whole-word), non-regex search.
        ProcessSearchState {
            search_state: AppSearchState::default(),
            is_ignoring_case: true,
            is_searching_whole_word: false,
            is_searching_with_regex: false,
        }
    }
}
impl ProcessSearchState {
    /// Flips case-insensitive matching on/off.
    pub fn search_toggle_ignore_case(&mut self) {
        self.is_ignoring_case ^= true;
    }
    /// Flips whole-word matching on/off.
    pub fn search_toggle_whole_word(&mut self) {
        self.is_searching_whole_word ^= true;
    }
    /// Flips regex matching on/off.
    pub fn search_toggle_regex(&mut self) {
        self.is_searching_with_regex ^= true;
    }
}
/// Per-column display metadata: whether it is shown and its sort-shortcut
/// key, if any.
pub struct ColumnInfo {
    pub enabled: bool,
    pub shortcut: Option<&'static str>,
    // FIXME: Move column width logic here!
    // pub hard_width: Option<u16>,
    // pub max_soft_width: Option<f64>,
}
/// Column layout plus scroll/selection state for the process table's sort menu.
pub struct ProcColumn {
    pub ordered_columns: Vec<ProcessSorting>,
    /// The y location of headers. Since they're all aligned, it's just one value.
    pub column_header_y_loc: Option<u16>,
    /// The x start and end bounds for each header.
    pub column_header_x_locs: Option<Vec<(u16, u16)>>,
    pub column_mapping: HashMap<ProcessSorting, ColumnInfo>,
    pub longest_header_len: u16,
    pub column_state: TableState,
    pub scroll_direction: ScrollDirection,
    pub current_scroll_position: usize,
    pub previous_scroll_position: usize,
    pub backup_prev_scroll_position: usize,
}
impl Default for ProcColumn {
    /// Builds the default column set.
    ///
    /// Improvements over the original: iterates `ordered_columns` by reference
    /// (dropping a redundant `Vec` clone) and collapses ~100 lines of
    /// duplicated `HashMap::insert` match arms into a single
    /// `(enabled, shortcut)` lookup table — behavior is unchanged.
    fn default() -> Self {
        // Canonical left-to-right order of the process-table columns.
        let ordered_columns = vec![
            Count,
            Pid,
            ProcessName,
            Command,
            CpuPercent,
            Mem,
            MemPercent,
            ReadPerSecond,
            WritePerSecond,
            TotalRead,
            TotalWrite,
            User,
            State,
        ];
        let mut column_mapping = HashMap::new();
        let mut longest_header_len = 0;
        for column in &ordered_columns {
            longest_header_len = std::cmp::max(longest_header_len, column.to_string().len());
            // (enabled-by-default, sort shortcut). Mutually-exclusive pairs
            // share a shortcut: Mem/MemPercent ("m") and ProcessName/Command
            // ("n") toggle between each other; the disabled half starts off.
            let (enabled, shortcut) = match column {
                CpuPercent => (true, Some("c")),
                MemPercent => (true, Some("m")),
                Mem => (false, Some("m")),
                ProcessName => (true, Some("n")),
                Command => (false, Some("n")),
                Pid => (true, Some("p")),
                Count => (false, None),
                // The User column is only shown on Unix-family targets.
                User => (cfg!(target_family = "unix"), None),
                _ => (true, None),
            };
            column_mapping.insert(column.clone(), ColumnInfo { enabled, shortcut });
        }
        let longest_header_len = longest_header_len as u16;
        ProcColumn {
            ordered_columns,
            column_mapping,
            longest_header_len,
            column_state: TableState::default(),
            scroll_direction: ScrollDirection::default(),
            current_scroll_position: 0,
            previous_scroll_position: 0,
            backup_prev_scroll_position: 0,
            column_header_y_loc: None,
            column_header_x_locs: None,
        }
    }
}
impl ProcColumn {
    /// Flips the enabled state of `column`. Returns its new status, or `None`
    /// if the column is unknown.
    pub fn toggle(&mut self, column: &ProcessSorting) -> Option<bool> {
        self.column_mapping.get_mut(column).map(|mapping| {
            mapping.enabled = !mapping.enabled;
            mapping.enabled
        })
    }
    /// Sets the enabled state of `column` to `setting`. Returns the new
    /// status, or `None` if the column is unknown.
    pub fn try_set(&mut self, column: &ProcessSorting, setting: bool) -> Option<bool> {
        self.column_mapping.get_mut(column).map(|mapping| {
            mapping.enabled = setting;
            mapping.enabled
        })
    }
    /// Enables `column`; equivalent to `try_set(column, true)`.
    pub fn try_enable(&mut self, column: &ProcessSorting) -> Option<bool> {
        self.try_set(column, true)
    }
    /// Disables `column`; equivalent to `try_set(column, false)`.
    pub fn try_disable(&mut self, column: &ProcessSorting) -> Option<bool> {
        self.try_set(column, false)
    }
    /// Whether `column` is currently enabled (unknown columns count as disabled).
    pub fn is_enabled(&self, column: &ProcessSorting) -> bool {
        self.column_mapping
            .get(column)
            .map_or(false, |mapping| mapping.enabled)
    }
    /// Number of currently-enabled columns.
    pub fn get_enabled_columns_len(&self) -> usize {
        self.ordered_columns
            .iter()
            .filter(|column_type| self.is_enabled(column_type))
            .count()
    }
    /// NOTE: ALWAYS call this when opening the sorted window.
    ///
    /// Translates a sort type into its *visual* index (counting only enabled
    /// columns before it) and stores that as the current scroll position.
    pub fn set_to_sorted_index_from_type(&mut self, proc_sorting_type: &ProcessSorting) {
        // TODO [Custom Columns]: If we add custom columns, this may be needed! Since column indices will change, this runs the risk of OOB. So, when you change columns, CALL THIS AND ADAPT!
        let mut true_index = 0;
        for column in &self.ordered_columns {
            if *column == *proc_sorting_type {
                break;
            }
            if self.is_enabled(column) {
                true_index += 1;
            }
        }
        self.current_scroll_position = true_index;
        self.backup_prev_scroll_position = self.previous_scroll_position;
    }
    /// This function sets the scroll position based on the index.
    pub fn set_to_sorted_index_from_visual_index(&mut self, visual_index: usize) {
        self.current_scroll_position = visual_index;
        self.backup_prev_scroll_position = self.previous_scroll_position;
    }
    /// Builds the display header for every enabled column, appending the sort
    /// shortcut (if any) plus a direction arrow on the active sort column
    /// (or a space, so header widths stay consistent).
    pub fn get_column_headers(
        &self, proc_sorting_type: &ProcessSorting, sort_reverse: bool,
    ) -> Vec<String> {
        const DOWN_ARROW: char = '▼';
        const UP_ARROW: char = '▲';
        // TODO: Gonna have to figure out how to do left/right GUI notation if we add it.
        self.ordered_columns
            .iter()
            .filter_map(|column_type| {
                // Unknown columns are skipped rather than panicking.
                let mapping = self.column_mapping.get(column_type)?;
                if !mapping.enabled {
                    return None;
                }
                let command_str = mapping
                    .shortcut
                    .map_or_else(String::default, |command| format!("({})", command));
                let arrow = if proc_sorting_type == column_type {
                    if sort_reverse {
                        DOWN_ARROW
                    } else {
                        UP_ARROW
                    }
                } else {
                    ' '
                };
                Some(format!("{}{}{}", column_type, command_str, arrow))
            })
            .collect()
    }
}
/// Full state for a single process-table widget: search settings, sort
/// state, column configuration, and redraw bookkeeping.
pub struct ProcWidgetState {
    pub process_search_state: ProcessSearchState,
    pub is_grouped: bool,
    pub scroll_state: AppScrollWidgetState,
    pub process_sorting_type: processes::ProcessSorting,
    pub is_process_sort_descending: bool,
    // When true, the Command column replaces the ProcessName column.
    pub is_using_command: bool,
    pub current_column_index: usize,
    pub is_sort_open: bool,
    pub columns: ProcColumn,
    pub is_tree_mode: bool,
    pub table_width_state: CanvasTableWidthState,
    pub requires_redraw: bool,
}
impl ProcWidgetState {
    /// Builds a process widget's initial state from the user's configuration
    /// flags (search options, grouping, memory display, tree mode, command
    /// vs. name display).
    pub fn init(
        is_case_sensitive: bool, is_match_whole_word: bool, is_use_regex: bool, is_grouped: bool,
        show_memory_as_values: bool, is_tree_mode: bool, is_using_command: bool,
    ) -> Self {
        let mut process_search_state = ProcessSearchState::default();
        if is_case_sensitive {
            // By default it's off
            process_search_state.search_toggle_ignore_case();
        }
        if is_match_whole_word {
            process_search_state.search_toggle_whole_word();
        }
        if is_use_regex {
            process_search_state.search_toggle_regex();
        }
        // Tree mode defaults to PID ascending; flat mode to CPU% descending.
        let (process_sorting_type, is_process_sort_descending) = if is_tree_mode {
            (processes::ProcessSorting::Pid, false)
        } else {
            (processes::ProcessSorting::CpuPercent, true)
        };
        // TODO: If we add customizable columns, this should pull from config
        let mut columns = ProcColumn::default();
        columns.set_to_sorted_index_from_type(&process_sorting_type);
        if is_grouped {
            // Normally defaults to showing by PID, toggle count on instead.
            columns.toggle(&ProcessSorting::Count);
            columns.toggle(&ProcessSorting::Pid);
        }
        if show_memory_as_values {
            // Normally defaults to showing by percent, toggle value on instead.
            columns.toggle(&ProcessSorting::Mem);
            columns.toggle(&ProcessSorting::MemPercent);
        }
        if is_using_command {
            // Swap the ProcessName column for the full Command column.
            columns.toggle(&ProcessSorting::ProcessName);
            columns.toggle(&ProcessSorting::Command);
        }
        ProcWidgetState {
            process_search_state,
            is_grouped,
            scroll_state: AppScrollWidgetState::default(),
            process_sorting_type,
            is_process_sort_descending,
            is_using_command,
            current_column_index: 0,
            is_sort_open: false,
            columns,
            is_tree_mode,
            table_width_state: CanvasTableWidthState::default(),
            requires_redraw: false,
        }
    }
    /// Updates sorting when using the column list.
    /// ...this really should be part of the ProcColumn struct (along with the sorting fields),
    /// but I'm too lazy.
    ///
    /// Sorry, future me, you're gonna have to refactor this later. Too busy getting
    /// the feature to work in the first place! :)
    pub fn update_sorting_with_columns(&mut self) {
        // Map the sort menu's visual selection (enabled columns only) back to
        // the ordered_columns index it refers to.
        let mut true_index = 0;
        let mut enabled_index = 0;
        let target_itx = self.columns.current_scroll_position;
        for column in &self.columns.ordered_columns {
            let enabled = self.columns.column_mapping.get(column).unwrap().enabled;
            if enabled_index == target_itx && enabled {
                break;
            }
            if enabled {
                enabled_index += 1;
            }
            true_index += 1;
        }
        if let Some(new_sort_type) = self.columns.ordered_columns.get(true_index) {
            if *new_sort_type == self.process_sorting_type {
                // Just reverse the search if we're reselecting!
                self.is_process_sort_descending = !(self.is_process_sort_descending);
            } else {
                self.process_sorting_type = new_sort_type.clone();
                match self.process_sorting_type {
                    ProcessSorting::State
                    | ProcessSorting::Pid
                    | ProcessSorting::ProcessName
                    | ProcessSorting::Command => {
                        // Also invert anything that uses alphabetical sorting by default.
                        self.is_process_sort_descending = false;
                    }
                    _ => {
                        self.is_process_sort_descending = true;
                    }
                }
            }
        }
    }
    /// Switches between the ProcessName and Command columns: exactly one of
    /// the pair is enabled at a time.
    pub fn toggle_command_and_name(&mut self, is_using_command: bool) {
        if let Some(pn) = self
            .columns
            .column_mapping
            .get_mut(&ProcessSorting::ProcessName)
        {
            pn.enabled = !is_using_command;
        }
        if let Some(c) = self
            .columns
            .column_mapping
            .get_mut(&ProcessSorting::Command)
        {
            c.enabled = is_using_command;
        }
    }
    // Byte offset of the search cursor (grapheme boundary).
    pub fn get_search_cursor_position(&self) -> usize {
        self.process_search_state
            .search_state
            .grapheme_cursor
            .cur_cursor()
    }
    // Cursor position counted in characters (not graphemes).
    pub fn get_char_cursor_position(&self) -> usize {
        self.process_search_state.search_state.char_cursor_position
    }
    pub fn is_search_enabled(&self) -> bool {
        self.process_search_state.search_state.is_enabled
    }
    pub fn get_current_search_query(&self) -> &String {
        &self.process_search_state.search_state.current_search_query
    }
    /// Re-parses the current query string, updating the blank/invalid flags
    /// and error message, and resets the table scroll position.
    pub fn update_query(&mut self) {
        if self
            .process_search_state
            .search_state
            .current_search_query
            .is_empty()
        {
            self.process_search_state.search_state.is_blank_search = true;
            self.process_search_state.search_state.is_invalid_search = false;
            self.process_search_state.search_state.error_message = None;
        } else {
            let parsed_query = self.parse_query();
            // debug!("Parsed query: {:#?}", parsed_query);
            if let Ok(parsed_query) = parsed_query {
                self.process_search_state.search_state.query = Some(parsed_query);
                self.process_search_state.search_state.is_blank_search = false;
                self.process_search_state.search_state.is_invalid_search = false;
                self.process_search_state.search_state.error_message = None;
            } else if let Err(err) = parsed_query {
                self.process_search_state.search_state.is_blank_search = false;
                self.process_search_state.search_state.is_invalid_search = true;
                self.process_search_state.search_state.error_message = Some(err.to_string());
            }
        }
        self.scroll_state.previous_scroll_position = 0;
        self.scroll_state.current_scroll_position = 0;
    }
    pub fn clear_search(&mut self) {
        self.process_search_state.search_state.reset();
    }
    /// Advances the grapheme cursor one boundary right of `start_position`.
    pub fn search_walk_forward(&mut self, start_position: usize) {
        self.process_search_state
            .search_state
            .grapheme_cursor
            .next_boundary(
                &self.process_search_state.search_state.current_search_query[start_position..],
                start_position,
            )
            .unwrap();
    }
    /// Moves the grapheme cursor one boundary left of `start_position`.
    pub fn search_walk_back(&mut self, start_position: usize) {
        self.process_search_state
            .search_state
            .grapheme_cursor
            .prev_boundary(
                &self.process_search_state.search_state.current_search_query[..start_position],
                0,
            )
            .unwrap();
    }
}
/// Aggregate state for every process widget, plus flags requesting that the
/// app re-process data for one widget (`force_update`) or all of them.
pub struct ProcState {
    pub widget_states: HashMap<u64, ProcWidgetState>,
    pub force_update: Option<u64>,
    pub force_update_all: bool,
}
impl ProcState {
    pub fn init(widget_states: HashMap<u64, ProcWidgetState>) -> Self {
        ProcState {
            widget_states,
            force_update: None,
            force_update_all: false,
        }
    }
    // Mutable lookup of a widget's state by widget id.
    pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut ProcWidgetState> {
        self.widget_states.get_mut(&widget_id)
    }
    // Immutable lookup of a widget's state by widget id.
    pub fn get_widget_state(&self, widget_id: u64) -> Option<&ProcWidgetState> {
        self.widget_states.get(&widget_id)
    }
}
/// Per-widget state for a network graph widget.
pub struct NetWidgetState {
    pub current_display_time: u64,
    pub autohide_timer: Option<Instant>,
    // pub draw_max_range_cache: f64,
    // pub draw_labels_cache: Vec<String>,
    // pub draw_time_start_cache: f64,
    // TODO: Re-enable these when we move net details state-side!
    // pub unit_type: DataUnitTypes,
    // pub scale_type: AxisScaling,
}
impl NetWidgetState {
    pub fn init(
        current_display_time: u64,
        autohide_timer: Option<Instant>,
        // unit_type: DataUnitTypes,
        // scale_type: AxisScaling,
    ) -> Self {
        NetWidgetState {
            current_display_time,
            autohide_timer,
            // draw_max_range_cache: 0.0,
            // draw_labels_cache: vec![],
            // draw_time_start_cache: 0.0,
            // unit_type,
            // scale_type,
        }
    }
}
/// Aggregate state for every network widget.
pub struct NetState {
    pub force_update: Option<u64>,
    pub widget_states: HashMap<u64, NetWidgetState>,
}
impl NetState {
    pub fn init(widget_states: HashMap<u64, NetWidgetState>) -> Self {
        NetState {
            force_update: None,
            widget_states,
        }
    }
    pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut NetWidgetState> {
        self.widget_states.get_mut(&widget_id)
    }
    pub fn get_widget_state(&self, widget_id: u64) -> Option<&NetWidgetState> {
        self.widget_states.get(&widget_id)
    }
}
pub struct CpuWidgetState {
pub current_display_time: u64,
pub is_legend_hidden: bool,
pub autohide_timer: Option<Instant>,
pub scroll_state: AppScrollWidgetState,
pub is_multi_graph_mode: bool,
pub table_width_state: CanvasTableWidthState,
}
impl CpuWidgetState {
pub fn init(current_display_time: u64, autohide_timer: Option<Instant>) -> Self {
CpuWidgetState {
current_display_time,
is_legend_hidden: false,
autohide_timer,
scroll_state: AppScrollWidgetState::default(),
is_multi_graph_mode: false,
table_width_state: CanvasTableWidthState::default(),
}
}
}
pub struct CpuState {
pub force_update: Option<u64>,
pub widget_states: HashMap<u64, CpuWidgetState>,
}
impl CpuState {
pub fn init(widget_states: HashMap<u64, CpuWidgetState>) -> Self {
CpuState {
force_update: None,
widget_states,
}
}
pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut CpuWidgetState> {
self.widget_states.get_mut(&widget_id)
}
pub fn get_widget_state(&self, widget_id: u64) -> Option<&CpuWidgetState> {
self.widget_states.get(&widget_id)
}
}
/// Per-widget state for a memory chart: just the visible time window and the
/// optional auto-hide timer.
pub struct MemWidgetState {
    pub current_display_time: u64,
    pub autohide_timer: Option<Instant>,
}
impl MemWidgetState {
    /// Creates memory widget state for the given display window.
    pub fn init(current_display_time: u64, autohide_timer: Option<Instant>) -> Self {
        Self {
            autohide_timer,
            current_display_time,
        }
    }
}
/// All memory widgets, keyed by widget id, plus an optional forced-refresh
/// request.
pub struct MemState {
    /// When `Some`, requests a data refresh (value is a widget id).
    pub force_update: Option<u64>,
    pub widget_states: HashMap<u64, MemWidgetState>,
}
impl MemState {
    /// Wraps the given widget map; no refresh is forced initially.
    pub fn init(widget_states: HashMap<u64, MemWidgetState>) -> Self {
        Self {
            widget_states,
            force_update: None,
        }
    }
    /// Mutable lookup of one widget's state by id.
    pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut MemWidgetState> {
        self.widget_states.get_mut(&widget_id)
    }
    /// Shared lookup of one widget's state by id.
    pub fn get_widget_state(&self, widget_id: u64) -> Option<&MemWidgetState> {
        self.widget_states.get(&widget_id)
    }
}
/// Per-widget state for the temperature table: scroll position plus cached
/// column widths.
pub struct TempWidgetState {
    pub scroll_state: AppScrollWidgetState,
    pub table_width_state: CanvasTableWidthState,
}
impl TempWidgetState {
    /// Creates a fresh table state with default scroll and widths.
    pub fn init() -> Self {
        Self {
            table_width_state: CanvasTableWidthState::default(),
            scroll_state: AppScrollWidgetState::default(),
        }
    }
}
/// All temperature widgets, keyed by widget id.
pub struct TempState {
    pub widget_states: HashMap<u64, TempWidgetState>,
}
impl TempState {
    /// Wraps the given widget map.
    pub fn init(widget_states: HashMap<u64, TempWidgetState>) -> Self {
        Self { widget_states }
    }
    /// Mutable lookup of one widget's state by id.
    pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut TempWidgetState> {
        self.widget_states.get_mut(&widget_id)
    }
    /// Shared lookup of one widget's state by id.
    pub fn get_widget_state(&self, widget_id: u64) -> Option<&TempWidgetState> {
        self.widget_states.get(&widget_id)
    }
}
/// Per-widget state for the disk table: scroll position plus cached column
/// widths.
pub struct DiskWidgetState {
    pub scroll_state: AppScrollWidgetState,
    pub table_width_state: CanvasTableWidthState,
}
impl DiskWidgetState {
    /// Creates a fresh table state with default scroll and widths.
    pub fn init() -> Self {
        Self {
            table_width_state: CanvasTableWidthState::default(),
            scroll_state: AppScrollWidgetState::default(),
        }
    }
}
/// All disk widgets, keyed by widget id.
pub struct DiskState {
    pub widget_states: HashMap<u64, DiskWidgetState>,
}
impl DiskState {
    /// Wraps the given widget map.
    pub fn init(widget_states: HashMap<u64, DiskWidgetState>) -> Self {
        Self { widget_states }
    }
    /// Mutable lookup of one widget's state by id.
    pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut DiskWidgetState> {
        self.widget_states.get_mut(&widget_id)
    }
    /// Shared lookup of one widget's state by id.
    pub fn get_widget_state(&self, widget_id: u64) -> Option<&DiskWidgetState> {
        self.widget_states.get(&widget_id)
    }
}
/// State for the single "basic table" widget shown in basic mode.
pub struct BasicTableWidgetState {
    // Since this is intended (currently) to only be used for ONE widget, that's
    // how it's going to be written. If we want to allow for multiple of these,
    // then we can expand outwards with a normal BasicTableState and a hashmap
    /// Which widget type the basic table is currently displaying.
    pub currently_displayed_widget_type: BottomWidgetType,
    /// Id of the widget currently displayed.
    pub currently_displayed_widget_id: u64,
    pub widget_id: i64,
    // The four coordinate pairs below: presumably top-left (tlc) and
    // bottom-right (brc) corners of the table's left and right halves,
    // populated once drawn — TODO confirm against the canvas/mouse code.
    pub left_tlc: Option<(u16, u16)>,
    pub left_brc: Option<(u16, u16)>,
    pub right_tlc: Option<(u16, u16)>,
    pub right_brc: Option<(u16, u16)>,
}
#[derive(Default)]
pub struct BatteryWidgetState {
    /// Index of the battery whose details are currently selected.
    pub currently_selected_battery_index: usize,
    // Clickable tab regions as ((x0, y0), (x1, y1)) pairs when available —
    // presumably one entry per battery tab; confirm against the canvas code.
    pub tab_click_locs: Option<Vec<((u16, u16), (u16, u16))>>,
}
/// All battery widgets, keyed by widget id.
pub struct BatteryState {
    pub widget_states: HashMap<u64, BatteryWidgetState>,
}
impl BatteryState {
    /// Wraps the given widget map.
    pub fn init(widget_states: HashMap<u64, BatteryWidgetState>) -> Self {
        Self { widget_states }
    }
    /// Mutable lookup of one widget's state by id.
    pub fn get_mut_widget_state(&mut self, widget_id: u64) -> Option<&mut BatteryWidgetState> {
        self.widget_states.get_mut(&widget_id)
    }
    /// Shared lookup of one widget's state by id.
    pub fn get_widget_state(&self, widget_id: u64) -> Option<&BatteryWidgetState> {
        self.widget_states.get(&widget_id)
    }
}
#[derive(Default)]
pub struct ParagraphScrollState {
    /// Current scroll offset within the paragraph.
    pub current_scroll_index: u16,
    /// Largest valid scroll offset.
    pub max_scroll_index: u16,
}
#[derive(Default)]
pub struct ConfigState {
    /// Index into `category_list` of the currently selected category.
    pub current_category_index: usize,
    pub category_list: Vec<ConfigCategory>,
}
#[derive(Default)]
pub struct ConfigCategory {
    /// Display name of the category.
    pub category_name: &'static str,
    pub options_list: Vec<ConfigOption>,
}
/// A single configurable option; applying it runs `set_function`.
pub struct ConfigOption {
    // Boxed closure so heterogeneous options can share one list; returns an
    // anyhow::Result so failures can propagate with context.
    pub set_function: Box<dyn Fn() -> anyhow::Result<()>>,
}
| 31.190476 | 192 | 0.56916 | 3 |
15bd4f3e467957c3df3e573882c990370613e873
| 1,005 |
rb
|
Ruby
|
hello.rb
|
joycedelatorre/SWPM
|
eabaae832941301072b5af1938bfbee9bdabb132
|
[
"MIT"
] | null | null | null |
hello.rb
|
joycedelatorre/SWPM
|
eabaae832941301072b5af1938bfbee9bdabb132
|
[
"MIT"
] | null | null | null |
hello.rb
|
joycedelatorre/SWPM
|
eabaae832941301072b5af1938bfbee9bdabb132
|
[
"MIT"
] | null | null | null |
puts "Hello World"
require 'open-uri'
require 'httparty'
require 'json'
# Each take_picture() call appends one Array of picture URLs here.
$pictures=[]
# Triggers the camera shutter `num_to_take` times via JSON-RPC over HTTP
# (Sony Camera Remote API endpoint at 10.0.0.1:10000), sleeping `num_sleep`
# seconds between shots, and appends the collected picture URLs to $pictures.
def take_picture(num_to_take, num_sleep)
  picture_container = []
  num_of_pictures = 0
  while num_of_pictures < num_to_take
    @urlstring_to_post = 'http://10.0.0.1:10000/sony/camera'
    @result = HTTParty.post(@urlstring_to_post.to_str,
    :body => {
      :method => 'actTakePicture',
      :params => [],
      :id => 1,
      :version => '1.0'
    }.to_json,
    :headers => { 'Content-Type' => 'application/json' } )
    num_of_pictures += 1
    # NOTE(review): assumes the response is {"result" => [[url]]} — confirm
    # against the Sony Camera Remote API actTakePicture documentation.
    picture_container << @result["result"][0][0]
    sleep(num_sleep)
    #clean picture_container.
  end
  $pictures << picture_container
end
# Take two pictures back-to-back with no delay.
take_picture(2,0)
# Downloads every URL from the first batch in $pictures into ./pictures/,
# naming each file from the "pictNNN_NNN.EXT" portion of its URL.
# NOTE(review): only $pictures[0] is processed — later batches are ignored.
def download_pics()
  $pictures[0].each do |pic|
    # Extract e.g. "pict20200101_120000.JPG" from the URL; nil if no match.
    filename_pic = pic.slice(/\bpict\d*\_\d*\.[A-Z]*/)
    open("pictures/" + filename_pic, 'wb') do |file|
      # open-uri's open() fetches the remote URL here.
      file << open(pic).read
      # p file
    end
  end
end
download_pics()
| 23.928571 | 62 | 0.58806 | 3.234375 |
9e0932e759006fabae25a32e0f5eb1755ddedeee
| 16,073 |
lua
|
Lua
|
lua/kakoge/panels/cropper_strip.lua
|
Cryotheus/kakoge
|
5f3e9c6d3dbe6f372bf2622b161b36c68d957c0c
|
[
"MIT"
] | null | null | null |
lua/kakoge/panels/cropper_strip.lua
|
Cryotheus/kakoge
|
5f3e9c6d3dbe6f372bf2622b161b36c68d957c0c
|
[
"MIT"
] | null | null | null |
lua/kakoge/panels/cropper_strip.lua
|
Cryotheus/kakoge
|
5f3e9c6d3dbe6f372bf2622b161b36c68d957c0c
|
[
"MIT"
] | null | null | null |
local PANEL = {}
--accessor functions
AccessorFunc(PANEL, "Font", "Font", FORCE_STRING)
AccessorFunc(PANEL, "MinimumCropSize", "MinimumCropSize", FORCE_NUMBER)
--local functions
--exponent of the smallest power of two that is >= result; used to size
--render targets and to bound captures to the frame buffer
local function get_power(result) return math.ceil(math.log(result, 2)) end
--post function setup
--create the cropper's monospace HUD font exactly once per session
if not KAKOGE.CropperFontMade then
	surface.CreateFont("KakogeCropper", {
		antialias = false,
		name = "Consolas",
		size = 16,
		weight = 500
	})
	KAKOGE.CropperFontMade = true
end
--panel functions
--Creates an annotation child panel over the given region. Coordinates are
--fractions of the strip when `fractional` is true, otherwise pixels (which
--get converted to fractions here). Returns the new annotation panel.
function PANEL:Annotate(x, y, width, height, fractional, z_position)
	local annotation = vgui.Create("KakogeCropperAnnotation", self.AnnotationPanel)
	--default z: above every page image
	local z_position = z_position or self.ImageCount + 1
	table.insert(self.Annotations, annotation)
	if fractional then annotation:SetFractionBounds(x, y, width, height, true)
	else
		local parent_width, parent_height = self:GetSize()
		annotation:SetFractionBounds(x / parent_width, y / parent_height, width / parent_width, height / parent_height, true)
	end
	--placeholder styling; AnnotateCrop overrides the font/text afterwards
	annotation:SetFont("CloseCaption_BoldItalic")
	annotation:SetText("THUNDER!")
	annotation:SetTextColor(color_black)
	annotation:SetZPos(z_position)
	return annotation
end
--Creates an annotation for a saved crop, labeled with the crop's file name
--(extension stripped). All coordinates are fractions of the strip.
function PANEL:AnnotateCrop(x_fraction, y_fraction, width_fraction, height_fraction, file_name)
	local annotation = self:Annotate(x_fraction, y_fraction, width_fraction, height_fraction, true)
	--NOTE(review): leftover debug print
	print(x_fraction, y_fraction, width_fraction, height_fraction, file_name)
	annotation:SetFont("DermaDefaultBold")
	annotation:SetText(file_name)
	return annotation
end
--Normalizes a drag rectangle and clamps its size to the panel and to the
--frame buffer's power-of-two bounds (render.Capture cannot exceed them).
--Returns: max_x, max_y, min_x, min_y, clamped_width, clamped_height.
function PANEL:CalculateCrop(start_x, start_y, end_x, end_y)
	local max_x, max_y, min_x, min_y = self:CalculateMaxes(start_x, start_y, end_x, end_y)
	local clamped_width = math.min(max_x - min_x, self:GetWide(), 2 ^ get_power(ScrW()))
	local clamped_height = math.min(max_y - min_y, self:GetTall(), 2 ^ get_power(ScrH()))
	return max_x, max_y, min_x, min_y, clamped_width, clamped_height
end
--Orders two corner points independently of drag direction: returns the
--bottom-right maximum pair first, then the top-left minimum pair.
function PANEL:CalculateMaxes(start_x, start_y, end_x, end_y)
	return math.max(start_x, end_x), math.max(start_y, end_y), math.min(start_x, end_x), math.min(start_y, end_y)
end
--Removes every annotation child panel and forgets them.
function PANEL:ClearAnnotations()
	self.AnnotationPanel:Clear()
	table.Empty(self.Annotations)
end
--Removes all page images (annotations are cleared first since they overlay
--the images).
function PANEL:ClearImages()
	local images = self.Images
	self:ClearAnnotations()
	--ipairs keeps advancing here: only the already-visited slot is nil'd,
	--so the traversal still reaches every image before stopping
	for index, image in ipairs(images) do
		image:Remove()
		images[index] = nil
	end
end
--Renders the dragged region (which may span several page images) into an
--off-screen render target, saves it as a PNG under <Directory>/crops/ with
--an "x_y_width_height.png" name, and optionally annotates the region.
--Returns true on success, or false plus a reason when the crop is rejected.
function PANEL:Crop(start_image, end_image, start_x, start_y, end_x, end_y, annotate)
	local maximum_x, maximum_y, minimum_x, minimum_y, drag_width, drag_height = self:CalculateCrop(start_x, start_y, end_x, end_y)
	--assertions
	if drag_width == 0 or drag_height == 0 then return self:RejectCrop(start_x, start_y, end_x, end_y, "zero sized crop", 2)
	elseif drag_width < self.MinimumCropSize or drag_height < self.MinimumCropSize then return self:RejectCrop(start_x, start_y, end_x, end_y, "undersized crop", 2) end
	--NOTE(review): crop_images and image_heights are never used below
	local crop_images = {}
	local directory = self.Directory .. "crops/"
	local image_heights = {}
	local end_index, start_index = end_image:GetZPos(), start_image:GetZPos()
	local images = self.Images
	local maximum_width = self.MaximumWidth
	--flip start index should always be lower than end_index
	if start_index > end_index then end_index, start_index = start_index, end_index end
	--first pass to calculate end of the render target's size
	local width, height = self:GetSize()
	--scale from on-screen pixels to source-image pixels
	local scale = maximum_width / width
	local scale_width, scale_height = math.Round(drag_width * scale), math.Round(drag_height * scale)
	--because capture's size cannot exceede the frame buffer >:(
	if scale_width > 2 ^ get_power(ScrW()) or scale_height > 2 ^ get_power(ScrH()) then return self:RejectCrop(start_x, start_y, end_x, end_y, "oversized crop", 3) end
	--file name doubles as the crop's metadata: "x_y_width_height.png"
	local file_name = string.format("%u_%u_%u_%u.png", minimum_x, minimum_y, scale_width, scale_height)
	local power = get_power(math.max(scale_height, maximum_width))
	local scale_x, scale_y = math.Round(minimum_x * scale), math.Round(minimum_y * scale)
	local y_offset = math.Round(images[start_index]:GetY() * scale) - scale_y
	local target_info = hook.Call("KakogeRenderTarget", KAKOGE, power, true, function()
		render.Clear(0, 0, 255, 255)
		render.Clear(0, 0, 255, 255, true, true)
		surface.SetDrawColor(0, 255, 0)
		surface.DrawRect(0, 0, 100, 100)
		--we make the capture's x and y the image's 0, 0 so we can fit more
		for index = start_index, end_index do
			local image = images[index]
			local image_height = maximum_width / image.ActualWidth * image.ActualHeight
			--DImage:PaintAt has scaling loss
			surface.SetDrawColor(255, 255, 255)
			surface.SetMaterial(image:GetMaterial())
			surface.DrawTexturedRect(-scale_x, y_offset, maximum_width, image_height)
			y_offset = y_offset + image_height
		end
		--unfortunately this seems to return an empty or malformed string when beyond the frame buffer >:(
		--the frame buffer's ratio is sometimes be 2:1, but in normal play is 1:1
		file.CreateDir(directory)
		file.Write(directory .. file_name, render.Capture{
			alpha = false,
			format = "png",
			x = 0,
			y = 0,
			w = scale_width,
			h = scale_height
		})
	end, MATERIAL_RT_DEPTH_NONE, IMAGE_FORMAT_RGB888)
	--NOTE(review): this HUDPaint hook looks like leftover debugging — it
	--draws the raw render target on screen for ten seconds after each crop
	local debug_expires = RealTime() + 10
	hook.Add("HUDPaint", "Kakoge", function()
		if RealTime() > debug_expires then hook.Remove("HUDPaint", "Kakoge") end
		surface.SetDrawColor(255, 255, 255)
		surface.SetMaterial(target_info.Material)
		surface.DrawTexturedRect(0, 0, 2 ^ power, 2 ^ power)
	end)
	if annotate then annotate = self:AnnotateCrop(minimum_x / width, minimum_y / height, drag_width / width, drag_height / height, string.StripExtension(file_name)) end
	self:OnCrop(scale_x, scale_y, scale_width, scale_height, annotate)
	return true
end
--Parses an "x_y_width_height" crop file name (extension already stripped)
--into four numbers. Returns nothing when the name does not have exactly four
--underscore-separated parts; non-numeric parts come back as nil, which
--callers are expected to check for.
function PANEL:GetCropFromFile(file_name_stripped)
	local pieces = string.Split(file_name_stripped, "_")
	if #pieces == 4 then
		local x, y, width, height = tonumber(pieces[1]), tonumber(pieces[2]), tonumber(pieces[3]), tonumber(pieces[4])
		return x, y, width, height
	end
end
--Sets up default state plus two full-size child layers: the annotation panel
--(z 29999) and a mouse-transparent overlay (z 30000) that paints the drag
--rectangle and rejection flashes above everything else.
function PANEL:Init()
	self.Annotations = {}
	self.CropRejections = {}
	self.Font = "KakogeCropper"
	self.Images = {}
	self.Pressing = {}
	self.MinimumCropSize = 16
	do --annotation panel
		local panel = vgui.Create("DPanel", self)
		--back-reference so the child hooks can call into this strip
		panel.IndexingParent = self
		panel:SetPaintBackground(false)
		panel:SetMouseInputEnabled(false)
		panel:SetZPos(29999)
		function panel:PerformLayout(width, height) self.IndexingParent:PerformLayoutAnnotations(self, width, height) end
		self.AnnotationPanel = panel
	end
	do --overlay panel
		local panel = vgui.Create("DPanel", self)
		panel.IndexingParent = self
		panel:SetMouseInputEnabled(false)
		panel:SetZPos(30000)
		function panel:Paint(width, height) self.IndexingParent:PaintOverlay(self, width, height) end
		function panel:PerformLayout(width, height) self.IndexingParent:PerformLayoutOverlay(self, width, height) end
		self.OverlayPanel = panel
	end
end
--Override hook: called after a successful crop with source-image-space
--coordinates (and the annotation as a fifth argument when one was made).
function PANEL:OnCrop(scale_x, scale_y, scale_width, scale_height) end
--Records where (in strip-local coordinates) and on which image a mouse
--button went down, keyed by button code.
function PANEL:OnMousePressedImage(image, code)
	local pressing = self.Pressing
	local x, y = self:ScreenToLocal(gui.MouseX(), gui.MouseY())
	pressing[code] = {
		Image = image,
		X = x,
		Y = y
	}
end
--Pairs a release with its recorded press and forwards both endpoints of the
--drag to OnMouseClickedImage, then clears the press record.
function PANEL:OnMouseReleasedImage(image, code)
	local pressing = self.Pressing
	local press_data = pressing[code]
	local x, y = self:ScreenToLocal(gui.MouseX(), gui.MouseY())
	if press_data then
		self:OnMouseClickedImage(code, press_data.Image, image, press_data.X, press_data.Y, x, y)
		pressing[code] = nil
	end
end
--Completed drag handler.
function PANEL:OnMouseClickedImage(code, start_image, end_image, start_x, start_y, end_x, end_y)
	--crop with left click, but cancel by holding right click
	--probably will add a new mode in the future
	if code == MOUSE_LEFT and not self.Pressing[MOUSE_RIGHT] then self:Crop(start_image, end_image, start_x, start_y, end_x, end_y, true) end
end
--On teardown, rebuilds crops/roster.txt from the crop PNGs whose names parse
--as valid "x_y_width_height" metadata, so crops persist across sessions.
function PANEL:OnRemove()
	local directory = self.Directory
	local files = file.Find(directory .. "crops/*.png", "DATA")
	--NOTE(review): print/PrintTable calls below look like leftover debugging
	print(directory .. "crops/*.png")
	if files then
		local roster = {}
		PrintTable(files, 1)
		for index, file_name in ipairs(files) do
			local x, y, width, height = self:GetCropFromFile(string.StripExtension(file_name))
			--only well-formed crop names make it into the roster
			if x and y and width and height then table.insert(roster, file_name) end
		end
		PrintTable(roster, 1)
		if next(roster) then file.Write(directory .. "crops/roster.txt", table.concat(roster, "\n")) end
	end
end
--Draws the live selection rectangle while the user drags: a translucent
--fill, a black outline with a colour-cycling inner border, and the current
--drag dimensions as text clipped to the selection.
function PANEL:PaintCrop(start_x, start_y, width, height)
	--right click means cancel, so turn white if they are cancelling
	local disco = self.Pressing[MOUSE_RIGHT] and color_white or HSVToColor(math.floor(RealTime() * 2) * 30, 0.7, 1)
	local font = self.Font
	local screen_end_x, screen_end_y = gui.MouseX(), gui.MouseY()
	local end_x, end_y = self:ScreenToLocal(screen_end_x, screen_end_y)
	local screen_start_x, screen_start_y = self:LocalToScreen(start_x, start_y)
	--NOTE(review): maximum_x/maximum_y are unused below
	local maximum_x, maximum_y, minimum_x, minimum_y, drag_width, drag_height = self:CalculateCrop(start_x, start_y, end_x, end_y)
	local screen_minimum_x, screen_minimum_y = math.min(screen_end_x, screen_start_x), math.min(screen_end_y, screen_start_y)
	surface.SetDrawColor(0, 0, 0, 64)
	surface.DrawRect(minimum_x, minimum_y, drag_width, drag_height)
	surface.SetDrawColor(0, 0, 0, 255)
	surface.DrawOutlinedRect(minimum_x, minimum_y, drag_width, drag_height, 3)
	surface.SetDrawColor(disco)
	surface.DrawOutlinedRect(minimum_x + 1, minimum_y + 1, drag_width - 2, drag_height - 2, 1)
	--scissor rect bad!
	render.SetScissorRect(screen_minimum_x + 3, screen_minimum_y + 3, screen_minimum_x + drag_width - 3, screen_minimum_y + drag_height - 3, true)
	draw.SimpleTextOutlined(drag_width .. " width", font, minimum_x + drag_width - 4, minimum_y + 2, disco, TEXT_ALIGN_RIGHT, TEXT_ALIGN_TOP, 1, color_black)
	draw.SimpleTextOutlined(drag_height .. " height", font, minimum_x + drag_width - 4, minimum_y + 16, disco, TEXT_ALIGN_RIGHT, TEXT_ALIGN_TOP, 1, color_black)
	render.SetScissorRect(0, 0, 0, 0, false)
end
--Overlay paint hook: draws rejection flashes first, then the active drag
--rectangle when the left button is held.
function PANEL:PaintOverlay(overlay_panel, width, height)
	local cropping = self.Pressing[MOUSE_LEFT]
	self:PaintRejects(width, height)
	if cropping then self:PaintCrop(cropping.X, cropping.Y, width, height) end
end
--Draws (and expires, in place) the queued crop rejections as flashing red
--rectangles with a centred reason message; entries fade out over their final
--half second.
function PANEL:PaintRejects(width, height)
	local font = self.Font
	local real_time = RealTime()
	local rejection_index = 1
	local rejections = self.CropRejections
	--manual index walk because expired entries are removed mid-iteration
	while rejection_index <= #rejections do
		local reject_data = rejections[rejection_index]
		local expires = reject_data.Expires
		if real_time > expires then table.remove(rejections, rejection_index)
		else
			local difference = expires - real_time
			local fraction = math.Clamp(difference, 0, 1)
			local fraction_510 = fraction * 510
			local message = reject_data.Message
			--toggles between 0 and 64 twice per second for the flash effect
			local saturation = math.ceil(difference * 2) % 2 * 64
			--NOTE(review): these locals shadow the function's width/height parameters
			local x, y, width, height = reject_data.X, reject_data.Y, reject_data.Width, reject_data.Height
			surface.SetDrawColor(255, saturation, saturation, fraction * 192)
			surface.DrawRect(x, y, width, height)
			--a little bit hacky, but an alpha above 255 is treated as 255, so we can make this fade 0.5 seconds before the expiration by making it double 255
			surface.SetDrawColor(0, 0, 0, fraction_510)
			surface.DrawOutlinedRect(x, y, width, height, 3)
			surface.SetDrawColor(255, saturation, saturation, fraction_510)
			surface.DrawOutlinedRect(x + 1, y + 1, width - 2, height - 2, 1)
			if message then
				--text may extend past the panel, so clipping is suspended
				local clipping = DisableClipping(true)
				local message_saturation = saturation + 32
				draw.SimpleTextOutlined(message, font, x + width * 0.5, y + height * 0.5, Color(255, message_saturation, message_saturation, fraction_510), TEXT_ALIGN_CENTER, TEXT_ALIGN_CENTER, 1, Color(0, 0, 0, fraction_510))
				DisableClipping(clipping)
			end
			rejection_index = rejection_index + 1
		end
	end
end
--Sizes each page image to the panel's width (preserving aspect ratio), then
--fits the panel, annotation layer, and overlay to the resulting total height.
function PANEL:PerformLayout(width, height)
	local annotation_panel = self.AnnotationPanel
	local overlay = self.OverlayPanel
	--collapse the child layers first so SizeToChildren only measures the
	--images; 1 instead of 0, because I'm scared of dividing by 0...
	--never again, that sh*t is like a plague
	annotation_panel:SetTall(1)
	overlay:SetTall(1)
	for index, image in ipairs(self.Images) do
		image:SetCursor("crosshair")
		--scale height so the image fills the panel width at its aspect ratio
		image:SetTall(width / image.ActualWidth * image.ActualHeight)
	end
	self:SizeToChildren(false, true)
	--now, resize the child layers to cover the whole strip again
	annotation_panel:SetSize(self:GetSize())
	overlay:SetSize(self:GetSize())
end
--Rescales every annotation to the annotation layer's new size. The caller
--(the annotation panel's PerformLayout hook) passes itself as annotation_parent.
function PANEL:PerformLayoutAnnotations(annotation_parent, width, height)
	for index, annotation in ipairs(self.Annotations) do annotation:ScaleToPanel(annotation_parent, width, height) end
end
--Intentionally empty: the overlay needs no layout of its own.
function PANEL:PerformLayoutOverlay(overlay, width, height) end
--Queues a failed crop attempt so PaintRejects can flash it on screen for
--`duration` seconds, then returns false plus the rejection message so Crop
--can propagate the failure to its caller.
function PANEL:RejectCrop(start_x, start_y, end_x, end_y, message, duration)
	local _, _, left, top, rejection_width, rejection_height = self:CalculateCrop(start_x, start_y, end_x, end_y)
	local rejection = {
		Expires = RealTime() + duration,
		Width = rejection_width,
		Height = rejection_height,
		Message = message,
		X = left,
		Y = top,
	}
	table.insert(self.CropRejections, rejection)
	return false, message
end
--Toggles whether the annotation layer receives mouse input.
function PANEL:SetAnnotationsEditable(state) self.AnnotationPanel:SetMouseInputEnabled(state) end
--Loads a page-image strip from a data directory: reads roster.txt for the
--ordered image list, stacks the images vertically, then restores any saved
--crops (and their metadata) from the crops/ subfolder.
function PANEL:SetDirectory(directory)
	if not string.EndsWith(directory, "/") then directory = directory .. "/" end
	local files, folders = file.Find(directory .. "*", "DATA")
	local images = self.Images
	self.Directory = directory
	self:ClearImages()
	assert(files, "KakogeCropperStrip had an invalid directory set")
	--build name -> index lookups alongside the arrays
	for index, file_name in ipairs(files) do files[file_name] = index end
	for index, folder_name in ipairs(folders) do folders[folder_name] = index end
	--trustee generated roster
	if files["roster.txt"] then
		local total_height = 0
		local maximum_width = 0
		local image_count = 0
		local image_names = string.Split(file.Read(directory .. "roster.txt", "DATA"), "\n")
		for index, image_name in ipairs(image_names) do
			local image = vgui.Create("KakogeCropperImage", self)
			image_count = image_count + 1
			image.CropperStrip = self
			table.insert(images, image)
			image:Dock(TOP)
			image:SetMaterial("data/" .. directory .. image_name)
			image:SetZPos(index)
			maximum_width = math.max(maximum_width, image.ActualWidth)
			total_height = total_height + image.ActualHeight
		end
		self.TotalHeight = total_height
		self.MaximumWidth = maximum_width
		self.ImageCount = image_count
		self:InvalidateLayout(true)
		--crop fractions are relative to the source-image dimensions
		local parent_width, parent_height = maximum_width, total_height
		--we store our crops in a folder alongside its OCR data
		--easy way to store meta without having duplication: use the file's name!
		if folders.crops then
			local crop_files, crop_folders = file.Find(directory .. "crops/*", "DATA")
			for index, file_name in ipairs(crop_files) do crop_files[file_name] = index end
			for index, file_name in ipairs(crop_files) do
				local file_name_stripped = string.StripExtension(file_name)
				local x, y, width, height = self:GetCropFromFile(file_name_stripped)
				if x and y and width and height then
					local extension = string.GetExtensionFromFilename(file_name)
					if extension == "png" then self:AnnotateCrop(x / parent_width, y / parent_height, width / parent_width, height / parent_height, file_name_stripped)
					elseif extension == "txt" then
						--more!
						--NOTE(review): DescribeAnnotation is not defined in this file — confirm it exists elsewhere
						self:DescribeAnnotation(file_name_stripped)
					else print("bonus file: " .. file_name) end
				else print("malformed file in crops folder: " .. file_name_stripped, x, y, width, height) end
			end
		end
	end
end
--Sets the HUD font, falling back to the default cropper font on nil.
function PANEL:SetFont(font) self.Font = font and tostring(font) or "KakogeCropper" end
--post
derma.DefineControl("KakogeCropperStrip", "", PANEL, "DSizeToContents")
| 33.837895 | 214 | 0.745225 | 3.046875 |
58374ac411f643cd9d641daf193fd22943afa51c
| 1,248 |
swift
|
Swift
|
Swift/LeetCode/LeetCode/PermutationsII.swift
|
TonnyL/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 205 |
2017-11-16T08:38:46.000Z
|
2022-03-06T05:50:03.000Z
|
Swift/LeetCode/LeetCode/PermutationsII.swift
|
santosh241/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 3 |
2018-04-10T10:17:52.000Z
|
2020-12-11T08:00:09.000Z
|
Swift/LeetCode/LeetCode/PermutationsII.swift
|
santosh241/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 28 |
2018-04-10T06:42:42.000Z
|
2021-09-14T14:15:39.000Z
|
//
// PermutationsII.swift
// LeetCode
//
// Created by 黎赵太郎 on 05/12/2017.
// Copyright © 2017 lizhaotailang. All rights reserved.
//
// Given a collection of numbers that might contain duplicates, return all possible unique permutations.
//
// For example,
// [1,1,2] have the following unique permutations:
// [
// [1,1,2],
// [1,2,1],
// [2,1,1]
// ]
//
// Accepted. See [PermutationsIITests](./LeetCodeTests/PermutationsIITests.swift) for test cases.
//
import Foundation
class PermutationsII {
    /// Returns all unique permutations of `nums`, which may contain
    /// duplicates. Output order is unspecified.
    ///
    /// Builds permutations bottom-up: each permutation of the first n-1
    /// numbers is extended by inserting the last number at every position.
    /// A `Set<[Int]>` removes the duplicates that equal elements produce —
    /// arrays of `Int` are `Hashable`, so there is no need for the fragile
    /// (and allocating) `String(describing:)` keys the old version hashed on.
    func permuteUnique(_ nums: [Int]) -> [[Int]] {
        if nums.isEmpty {
            return []
        }
        if nums.count == 1 {
            return [[nums[0]]]
        }
        let rest = Array(nums[0..<nums.count - 1])
        let last = nums[nums.count - 1]
        var unique = Set<[Int]>()
        for list in permuteUnique(rest) {
            // `list.count` insertion slots: before each element and at the end.
            for i in 0...list.count {
                var candidate = list
                candidate.insert(last, at: i)
                unique.insert(candidate)
            }
        }
        return Array(unique)
    }
}
| 23.54717 | 105 | 0.526442 | 3.234375 |
6f41343ddd5b8a87549d292306a404506e1d7ef2
| 3,589 |
rs
|
Rust
|
src/affine.rs
|
sradley/cipher
|
6af859b6e0d204b5904d0cbd8a31ab12d3b7ed63
|
[
"MIT"
] | 2 |
2019-08-22T17:23:37.000Z
|
2019-08-25T11:07:39.000Z
|
src/affine.rs
|
sradley/cipher
|
6af859b6e0d204b5904d0cbd8a31ab12d3b7ed63
|
[
"MIT"
] | 1 |
2019-08-24T17:45:47.000Z
|
2019-08-24T17:45:47.000Z
|
src/affine.rs
|
sradley/cipher
|
6af859b6e0d204b5904d0cbd8a31ab12d3b7ed63
|
[
"MIT"
] | null | null | null |
//! # Affine Cipher
//!
//! Implements the functionality for the Affine cipher.
//!
//! The following is an excerpt from [Wikipedia](https://en.wikipedia.org/wiki/Affine_cipher).
//! > The affine cipher is a type of monoalphabetic substitution cipher, wherein each letter in an
//! alphabet is mapped to its numeric equivalent, encrypted using a simple mathematical function,
//! and converted back to a letter.
//!
//! > The formula used means that each letter encrypts to one other letter, and back again, meaning
//! the cipher is essentially a standard substitution cipher with a rule governing which letter goes
//! to which.
//!
//! > As such, it has the weaknesses of all substitution ciphers. Each letter is enciphered with the
//! function (ax + b) mod 26, where b is the magnitude of the shift.
use crate::{input, Cipher, CipherResult};
// The twelve values in [1, 26) that are coprime with 26 — the only valid
// multiplicative keys, since `a` must be invertible mod 26 to decipher.
static RELATIVE_PRIMES: [i32; 12] = [1, 3, 5, 7, 9, 11, 15, 17, 19, 21, 23, 25];
/// An Affine cipher implementation.
pub struct Affine {
    /// Multiplicative key; must be coprime with 26.
    a: i32,
    /// Additive key (the shift amount); in the range [0, 26).
    b: i32,
}
impl Affine {
    /// Takes the two keys for the Affine cipher and returns a
    /// corresponding Affine struct.
    ///
    /// # Panics
    /// * If `a` is outside the range [1, 26).
    /// * If `b` is outside the range [0, 26).
    /// * If `a` is not relatively prime to 26.
    pub fn new(a: i32, b: i32) -> Self {
        assert!((1..26).contains(&a), "`a` must be in the range [1, 26)");
        assert!((0..26).contains(&b), "`b` must be in the range [0, 26)");
        assert!(
            RELATIVE_PRIMES.contains(&a),
            "`a` must be relatively prime to 26"
        );
        Self { a, b }
    }
}
impl Cipher for Affine {
    /// Enciphers the given plaintext (a str reference) using the Affine cipher
    /// and returns the ciphertext as a `CipherResult`.
    ///
    /// # Example
    /// ```
    /// use ciphers::{Cipher, Affine};
    ///
    /// let affine = Affine::new(7, 11);
    ///
    /// let ctext = affine.encipher("DEFENDTHEEASTWALLOFTHECASTLE");
    /// assert_eq!(ctext.unwrap(), "GNUNYGOINNLHOJLKKFUOINZLHOKN");
    /// ```
    fn encipher(&self, ptext: &str) -> CipherResult {
        input::is_alpha(ptext)?;
        let ptext = ptext.to_ascii_uppercase();
        // E(x) = (a*x + b) mod 26, on 0-based letter indices (ASCII byte - 65).
        // The operand is always non-negative here, so a plain `%` suffices.
        let ctext = ptext
            .bytes()
            .map(move |c| ((self.a * (c as i32 - 65) + self.b) % 26) as u8 + 65)
            .collect();
        Ok(String::from_utf8(ctext).unwrap())
    }
    /// Deciphers the given ciphertext (a str reference) using the Affine cipher
    /// and returns the plaintext as a `CipherResult`.
    ///
    /// # Example
    /// ```
    /// use ciphers::{Cipher, Affine};
    ///
    /// let affine = Affine::new(7, 11);
    ///
    /// let ptext = affine.decipher("GNUNYGOINNLHOJLKKFUOINZLHOKN");
    /// assert_eq!(ptext.unwrap(), "DEFENDTHEEASTWALLOFTHECASTLE");
    /// ```
    fn decipher(&self, ctext: &str) -> CipherResult {
        input::is_alpha(ctext)?;
        let ctext = ctext.to_ascii_uppercase();
        // unwrap is safe: `new` guarantees `a` is coprime with 26, so the
        // modular inverse always exists.
        let a_inv = invmod(self.a, 26).unwrap();
        // D(y) = a^-1 * (y - b) mod 26. `(y - b)` can be negative, so the
        // `% 26 + 26) % 26` dance normalises the result into [0, 26).
        let ptext = ctext
            .bytes()
            .map(move |c| (((a_inv * (c as i32 - 65 - self.b)) % 26 + 26) % 26) as u8 + 65)
            .collect();
        Ok(String::from_utf8(ptext).unwrap())
    }
}
/// Extended Euclidean algorithm: returns `(g, x, y)` such that
/// `a * x + b * y == g == gcd(a, b)`.
fn egcd(a: i32, b: i32) -> (i32, i32, i32) {
    if a == 0 {
        (b, 0, 1)
    } else {
        // Recurse on (b mod a, a), then back-substitute the coefficients.
        let (g, x, y) = egcd(b % a, a);
        (g, y - (b / a) * x, x)
    }
}
fn invmod(a: i32, m: i32) -> Option<i32> {
let (g, x, _) = egcd(a, m);
match g {
1 => Some((x % m + m) % m),
_ => None,
}
}
| 30.675214 | 100 | 0.561716 | 3.265625 |
5f2c4ec227b30ffc450facc3aaa9bcf4febd6d86
| 3,443 |
sql
|
SQL
|
sqlnexus/SQLNexus_PostProcessing.sql
|
asavioli/SqlNexus
|
f0980dd6095ed64a112d130ac7fb456a9b5bd6dc
|
[
"MIT"
] | null | null | null |
sqlnexus/SQLNexus_PostProcessing.sql
|
asavioli/SqlNexus
|
f0980dd6095ed64a112d130ac7fb456a9b5bd6dc
|
[
"MIT"
] | null | null | null |
sqlnexus/SQLNexus_PostProcessing.sql
|
asavioli/SqlNexus
|
f0980dd6095ed64a112d130ac7fb456a9b5bd6dc
|
[
"MIT"
] | null | null | null |
set nocount on
--add extra columns that represent local server time, computed based on offset if the data is available
--these will facilitate joins between ReadTrace.* tables and other tbl_* tables - the latter storing datetime in local server time
--each ALTER is guarded by COLUMNPROPERTY checks so re-running the script is a no-op
if (OBJECT_ID('[ReadTrace].[tblBatches]') is not null)
begin
	--add columns to tblBatches
	if ((COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblBatches]'), 'StartTime_local', 'ColumnId' ) IS NULL)
	and (COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblBatches]'), 'EndTime_local', 'ColumnId' ) IS NULL))
	begin
		ALTER TABLE [ReadTrace].[tblBatches] ADD StartTime_local datetime, EndTime_local datetime;
	end
end
if (OBJECT_ID('[ReadTrace].[tblStatements]') is not null)
begin
	--add columns to tblStatements
	if ((COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblStatements]'), 'StartTime_local', 'ColumnId' ) IS NULL)
	and (COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblStatements]'), 'EndTime_local', 'ColumnId' ) IS NULL))
	begin
		ALTER TABLE [ReadTrace].[tblStatements] ADD StartTime_local datetime, EndTime_local datetime;
	end
end
if (OBJECT_ID('[ReadTrace].[tblConnections]') is not null)
begin
	--add columns to tblConnections
	if ((COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblConnections]'), 'StartTime_local', 'ColumnId' ) IS NULL)
	and (COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblConnections]'), 'EndTime_local', 'ColumnId' ) IS NULL))
	begin
		ALTER TABLE [ReadTrace].[tblConnections] ADD StartTime_local datetime, EndTime_local datetime;
	end
end
--batch separator: the columns above must exist before the next batch compiles
go
if ( (OBJECT_ID('tbl_ServerProperties') is not null) or (OBJECT_ID('tbl_server_times') is not null) )
begin
	--get the offset from one of two possible tables
	--tbl_ServerProperties stores the UTC-to-local offset directly;
	--tbl_server_times stores the inverse delta, hence the * -1 below
	declare @utc_to_local_offset numeric(3,0) = 0
	if OBJECT_ID('tbl_ServerProperties') is not null
	begin
		select @utc_to_local_offset = PropertyValue from tbl_ServerProperties
		where PropertyName = 'UTCOffset_in_Hours'
	end
	else if OBJECT_ID('tbl_server_times') is not null
	begin
		select top 1 @utc_to_local_offset = time_delta_hours * -1 from tbl_server_times
	end
	--update the new columns in tblBatches with local times
	if ((COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblBatches]'), 'StartTime_local', 'ColumnId' ) IS NOT NULL)
	and (COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblBatches]'), 'EndTime_local', 'ColumnId' ) IS NOT NULL))
	begin
		update [ReadTrace].[tblBatches]
		set StartTime_local = DATEADD(hour, @utc_to_local_offset, StartTime) ,
		EndTime_local = DATEADD (hour, @utc_to_local_offset, EndTime) ;
	end
	--update the new columns in tblStatements with local times
	if ((COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblStatements]'), 'StartTime_local', 'ColumnId' ) IS NOT NULL)
	and (COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblStatements]'), 'EndTime_local', 'ColumnId' ) IS NOT NULL))
	begin
		update [ReadTrace].[tblStatements]
		set StartTime_local = DATEADD(hour, @utc_to_local_offset, StartTime) ,
		EndTime_local = DATEADD (hour, @utc_to_local_offset, EndTime) ;
	end
	--update the new columns in tblConnections with local times
	if ((COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblConnections]'), 'StartTime_local', 'ColumnId' ) IS NOT NULL)
	and (COLUMNPROPERTY (OBJECT_ID('[ReadTrace].[tblConnections]'), 'EndTime_local', 'ColumnId' ) IS NOT NULL))
	begin
		update [ReadTrace].[tblConnections]
		set StartTime_local = DATEADD(hour, @utc_to_local_offset, StartTime) ,
		EndTime_local = DATEADD (hour, @utc_to_local_offset, EndTime) ;
	end
end
| 42.506173 | 133 | 0.75138 | 3.125 |
01edb56298fe98d15043545dd7b84fd617d1fbd5
| 2,214 |
rs
|
Rust
|
src/day1.rs
|
peterdn/advent-of-code-2020
|
fd820e57c591f207ceaaacd078aa9c61cc344d53
|
[
"BSD-2-Clause"
] | null | null | null |
src/day1.rs
|
peterdn/advent-of-code-2020
|
fd820e57c591f207ceaaacd078aa9c61cc344d53
|
[
"BSD-2-Clause"
] | null | null | null |
src/day1.rs
|
peterdn/advent-of-code-2020
|
fd820e57c591f207ceaaacd078aa9c61cc344d53
|
[
"BSD-2-Clause"
] | null | null | null |
extern crate aoc_macros;
use aoc_macros::aoc_main;
mod aoc;
// NOTE(review): assumed from the macro's name and usage — `aoc_main!`
// presumably generates `main`, feeding the parsed input to `part1`/`part2`;
// confirm in the aoc_macros crate.
aoc_main!(parse_expenses);
/// Parses one expense per line into a `Vec<u32>`, sorted ascending (the
/// pairwise search below relies on the ordering).
///
/// # Panics
/// Panics if any non-blank line does not parse as a `u32`.
fn parse_expenses(input: &str) -> Vec<u32> {
    let mut expenses = input
        .trim()
        .lines()
        .map(|line| line.trim().parse::<u32>().unwrap())
        .collect::<Vec<u32>>();
    // `sort_unstable` over `sort`: faster and allocation-free, and stability
    // is irrelevant for plain integers.
    expenses.sort_unstable();
    expenses
}
/// Day 1 part 1: print the pair of expenses summing to 2020 and its product.
fn part1(expenses: &Vec<u32>) {
    match find_pairwise_goal(expenses, 2020, None) {
        Some((lo, hi)) => println!("Found {}, {} = {}", lo, hi, lo * hi),
        None => println!("Failed to find!"),
    }
}
/// Day 1 part 2: print the triple of expenses summing to 2020 and its product.
fn part2(expenses: &Vec<u32>) {
    // Subset-sum problem; this dataset is small enough to brute-force.
    // Fix one element, then reuse the pairwise search on the remainder,
    // excluding the fixed index so an expense is not paired with itself.
    for i in 0..expenses.len() {
        let goal = 2020 - expenses[i];
        if let Some((lo, hi)) = find_pairwise_goal(&expenses, goal, Some(i)) {
            println!(
                "Found {}, {}, {} = {}",
                lo,
                hi,
                expenses[i],
                lo * hi * expenses[i]
            );
            return;
        }
    }
    println!("Didn't find!");
}
/// Searches the **sorted** slice `expenses` for two *distinct* entries that
/// sum to `goal`, optionally ignoring the entry at `exclude_idx`.
///
/// Returns the pair as `Some((smaller, larger))`, or `None` when no such
/// pair exists — including for slices with fewer than two entries.
fn find_pairwise_goal(
    expenses: &[u32],
    goal: u32,
    exclude_idx: Option<usize>,
) -> Option<(u32, u32)> {
    // Guard: `expenses.len() - 1` below would underflow (and panic) on an
    // empty slice, and a single entry can never form a pair.
    if expenses.len() < 2 {
        return None;
    }
    let mut idx_lo = 0;
    let mut idx_hi = expenses.len() - 1;
    while idx_lo < idx_hi {
        // Step over the excluded entry (used by part 2 so an expense is not
        // paired with itself).
        if Some(idx_lo) == exclude_idx {
            idx_lo += 1;
        }
        if Some(idx_hi) == exclude_idx {
            idx_hi -= 1;
        }
        // Skipping may collapse the window onto one element; without this
        // check the same entry could be summed with itself and returned.
        if idx_lo >= idx_hi {
            break;
        }
        let lo = expenses[idx_lo];
        let hi = expenses[idx_hi];
        let result = lo + hi;
        if result == goal {
            return Some((lo, hi));
        } else if result > goal {
            // Sum too large: drop the high end and restart the low scan.
            idx_hi -= 1;
            idx_lo = 0;
        } else {
            idx_lo += 1;
        }
    }
    None
}
#[cfg(test)]
mod test {
    use super::*;
    // Covers the worked example from the puzzle statement (part 1).
    #[test]
    fn test_find_pairwise_goal() {
        // Test case from https://adventofcode.com/2020/day/1 part 1.
        let expenses = parse_expenses(
            "1721
            979
            366
            299
            675
            1456",
        );
        let goal = find_pairwise_goal(&expenses, 2020, None);
        assert_eq!(goal, Some((299, 1721)));
    }
}
| 23.553191 | 78 | 0.485095 | 3.265625 |
bcac93b63b8d137c83f12aeb51a4fce7d3845f6b
| 934 |
js
|
JavaScript
|
day4-2/route1.js
|
AmosZhang77/nodeJsLearnBase
|
a0cb3ad1377385a0e7651934b91215a16b8d7edd
|
[
"MIT"
] | null | null | null |
day4-2/route1.js
|
AmosZhang77/nodeJsLearnBase
|
a0cb3ad1377385a0e7651934b91215a16b8d7edd
|
[
"MIT"
] | null | null | null |
day4-2/route1.js
|
AmosZhang77/nodeJsLearnBase
|
a0cb3ad1377385a0e7651934b91215a16b8d7edd
|
[
"MIT"
] | null | null | null |
// Minimal Express app demonstrating a hand-rolled body parser and
// modular sub-routers (see ./routes/user.js and ./routes/article.js).
let express = require('express')
let app = express()
app.listen(8070)
// Returns middleware that buffers the raw request body and parses it as a
// urlencoded querystring onto req.body.
function bodyParser(){
    return function (req,res,next) {
        let str =''
        req.on('data',function (chunk) {
            str += chunk
        })
        req.on('end',function (chunk) {
            // console.log(str)
            req.body = require('querystring').parse(str) // querystring is a built-in module; parsed data is stored on a custom `body` property. Prefer the third-party body-parser package — see route2.js.
            next() // next() must be called here, inside 'end', not outside: only continue once all data has arrived, otherwise downstream handlers cannot see req.body.
        })
    }
}
app.use(bodyParser())
// /user/login
// Before splitting into modular sub-routers:
/*app.get('/login', function (req, res) {
    res.send('登录')
})
app.get('/reg', function (req, res) {
    res.send('注册')
})*/
// Modular sub-router for /user
let user = require('./routes/user')
app.use('/user', user)
// /article/ post
/*app.get('/post', function (req, res) {
    res.send('发布文章')
})
app.get('/delete', function (req, res) {
    res.send('删除文章')
})*/
// Modular sub-router for /article
let article = require('./routes/article')
app.use('/article', article)
| 19.87234 | 111 | 0.617773 | 3.015625 |
7a5f635df2eda24b97db87def6b9738a2a33765c
| 1,209 |
sql
|
SQL
|
sql/N184_department_highest_salary.sql
|
MikuSugar/LeetCode
|
2b87898853bf48a1f94e7b35dd0584047481801f
|
[
"Apache-2.0"
] | 3 |
2019-01-19T03:01:25.000Z
|
2020-06-06T12:11:29.000Z
|
sql/N184_department_highest_salary.sql
|
MikuSugar/LeetCode
|
2b87898853bf48a1f94e7b35dd0584047481801f
|
[
"Apache-2.0"
] | null | null | null |
sql/N184_department_highest_salary.sql
|
MikuSugar/LeetCode
|
2b87898853bf48a1f94e7b35dd0584047481801f
|
[
"Apache-2.0"
] | null | null | null |
-- LeetCode 184 "Department Highest Salary": for each department, return the
-- employee(s) earning that department's maximum salary (ties included).
select d.Name as Department, e.Name as Employee, e.Salary as Salary
from Employee e join Department d on e.DepartmentId=d.Id
where (DepartmentId,Salary) in (
    -- One (department, max salary) pair per department; the tuple-IN test
    -- keeps exactly the employees who reach their department's maximum.
    select DepartmentId,max(Salary)
    from Employee
    group by DepartmentId
)
--The Employee table holds all employees; each has an Id, Name, Salary and DepartmentId.
--
--+----+-------+--------+--------------+
--| Id | Name  | Salary | DepartmentId |
--+----+-------+--------+--------------+
--| 1  | Joe   | 70000  | 1            |
--| 2  | Henry | 80000  | 2            |
--| 3  | Sam   | 60000  | 2            |
--| 4  | Max   | 90000  | 1            |
--+----+-------+--------+--------------+
--The Department table holds information about all of the company's departments.
--
--+----+----------+
--| Id | Name     |
--+----+----------+
--| 1  | IT       |
--| 2  | Sales    |
--+----+----------+
--Write a SQL query that finds the employee with the highest salary in each
--department. For the tables above, Max has the highest salary in IT and
--Henry has the highest salary in Sales.
--
--+------------+----------+--------+
--| Department | Employee | Salary |
--+------------+----------+--------+
--| IT         | Max      | 90000  |
--| Sales      | Henry    | 80000  |
--+------------+----------+--------+
--
--Source: LeetCode
--Link: https://leetcode-cn.com/problems/department-highest-salary
--Copyright belongs to LeetCode China. Commercial reuse requires official
--authorization; non-commercial reuse must credit the source.
| 32.675676 | 80 | 0.445823 | 3.015625 |
9bf5f25315e73651bb35d5e8bb5f6f6ba505d4d9
| 1,874 |
js
|
JavaScript
|
src/modules/auth/login/index.js
|
farhadrbb/saham-time
|
b715699fe9efd9d42a2736fd93b8c2225c0875cd
|
[
"MIT"
] | null | null | null |
src/modules/auth/login/index.js
|
farhadrbb/saham-time
|
b715699fe9efd9d42a2736fd93b8c2225c0875cd
|
[
"MIT"
] | null | null | null |
src/modules/auth/login/index.js
|
farhadrbb/saham-time
|
b715699fe9efd9d42a2736fd93b8c2225c0875cd
|
[
"MIT"
] | null | null | null |
import React, { useState } from 'react';
import Login from './components/cardLogin';
import { useFormik } from "formik";
import * as Yup from "yup";
import { login } from './../../../redux/auth/login/index';
import { useHistory } from "react-router-dom";
import { setLocalStorageLogin } from './components/localstorage';
import auth from '../../../redux/auth/login/auth';
// Login screen container: wires Formik validation/submission to the
// presentational <Login> card and redirects to /dashboard on success.
export default function Index() {
    const history = useHistory();
    // Drives the card's spinner/disabled state while the request is in flight.
    const [loading, setLoading] = useState(false);
    const initialValues = {
        userName: "",
        password: "",
    };
    // Both fields are required; validation messages are shown in Persian.
    const LoginSchema = Yup.object().shape({
        userName:
            Yup.string()
                .required('فیلد مورد نظر را پر نمایید'),
        password:
            Yup.string()
                .required('فیلد مورد نظر را پر نمایید'),
    });
    const formik = useFormik({
        initialValues,
        validationSchema: LoginSchema,
        onSubmit: (values, { setStatus, setSubmitting }) => {
            setSubmitting(true)
            setLoading(true);
            login(values.userName, values.password)
                .then((res) => {
                    // An empty data object means the credentials were rejected;
                    // status=true is read by the card as "show login error".
                    if (!Object.keys(res.data.response.data).length) {
                        setStatus(true)
                        return;
                    }
                    let data = {
                        authenticated: true,
                        value: res.data.response.data,
                        timestamp: 1000 * 60 * 60 * 12,//12 hours
                    }
                    // Persist the session, then flip the auth flag and navigate
                    // once the localStorage write has completed.
                    setLocalStorageLogin(
                        "persist:root",
                        data,
                        () => {
                            auth.login(
                                () => history.push("/dashboard")
                            )
                        })
                    setStatus(false)
                })
                .catch(() => {
                    alert("در ارتباط با سرور مشکلی پیش آمده");
                })
                .finally(() => {
                    setSubmitting(false);
                    setLoading(false);
                });
        },
    })
    return (
        <>
            <Login
                loading={loading}
                formik={formik}
            />
        </>
    );
}
| 22.309524 | 65 | 0.52508 | 3.171875 |
4a3f473ef512b9ce11ab519362466b1fdbec2de5
| 1,973 |
js
|
JavaScript
|
web/gui/js/mqtt.js
|
NuwanJ/co326-project
|
4dbd4766ad43a7b31d17aae5bde3e17c662d6f60
|
[
"Apache-2.0"
] | null | null | null |
web/gui/js/mqtt.js
|
NuwanJ/co326-project
|
4dbd4766ad43a7b31d17aae5bde3e17c662d6f60
|
[
"Apache-2.0"
] | null | null | null |
web/gui/js/mqtt.js
|
NuwanJ/co326-project
|
4dbd4766ad43a7b31d17aae5bde3e17c662d6f60
|
[
"Apache-2.0"
] | 2 |
2020-10-05T15:08:13.000Z
|
2020-10-20T06:21:59.000Z
|
// MQTT bridge for the web GUI: connects to the broker over websockets,
// publishes commands on TOPIC_WEB2COM and mirrors traffic from
// TOPIC_COM2WEB into the on-page "serial" panes.
var client = new Paho.MQTT.Client(mqtt_server, mqtt_port,"/socket.io");
// Initiates the broker connection and shows progress in the send pane.
function mqttConnect(){
    client.connect({onSuccess:onConnect});
    const txtSendBox = document.getElementById("serialSend");
    txtSendBox.innerHTML = "Trying to connect...";
}
// Connection callback: registers handlers and subscribes to device output.
function onConnect(){
    document.getElementById("serialSend").innerHTML = "Connected\n"
    client.onMessageArrived = onMessageArrived;
    client.onConnectionLost = onConnectionLost;
    client.subscribe(TOPIC_COM2WEB);
}
// Publishes one newline-terminated command and prepends it to the send log.
function sendCommand(text) {
    message = new Paho.MQTT.Message(text+ '\n'); // NOTE(review): `message` is assigned without var/let/const, creating an implicit global — confirm and scope it.
    message.destinationName = TOPIC_WEB2COM;
    client.send(message);
    const txtSendBox = document.getElementById("serialSend");
    txtSendBox.innerHTML = text + '\n' + txtSendBox.innerHTML;
}
// called when the client loses its connection
function onConnectionLost(responseObject) {
    if (responseObject.errorCode !== 0) {
        console.log("onConnectionLost:"+responseObject.errorMessage);
        const txtSendBox = document.getElementById("serialSend");
        txtSendBox.innerHTML = "Connection Lost\n"+ responseObject.errorMessage+ "\n" + txtSendBox.innerHTML;
    }
}
// Handles inbound messages. Payloads shaped like "<STATE|...:x,y,z" update the
// coordinate/status labels; every non-empty payload is echoed to the receive pane.
function onMessageArrived(message) {
    const result = message.payloadString.trim();
    const topic = message.destinationName;
    //if(topic == TOPIC_WEB2COM){
    if(result != ""){
        if(result.startsWith("<")){
            console.log("Coordinate Result");
            var state = result.split('|')[0].substring(1);
            var coordinates = result.split('|')[1].split(':')[1].split(',');
            console.log(coordinates);
            $('#lblXCord').text(coordinates[0]);
            $('#lblYCord').text(coordinates[1]);
            $('#lblZCord').text(coordinates[2]);
            $('#lblSatus').text(state);
        }
        // NOTE(review): `result` is appended here AND prepended again two lines
        // below, so every message appears twice in the pane — confirm intent.
        document.getElementById("serialReceive").innerHTML += result;
        const txtReceiveBox = document.getElementById("serialReceive");
        txtReceiveBox.innerHTML = result + '\n' + txtReceiveBox.innerHTML
    }
    //}
}
| 29.014706 | 107 | 0.675621 | 3.25 |
0ceb15471ca6941f1a3c2803a1bcd3575ac7f39e
| 5,306 |
py
|
Python
|
PyPowerStore/utils/helpers.py
|
dell/python-powerstore
|
04d6d73e4c926cf0d347cf68b24f8f11ff80f565
|
[
"Apache-2.0"
] | 15 |
2020-05-06T23:46:44.000Z
|
2021-12-14T08:04:48.000Z
|
PyPowerStore/utils/helpers.py
|
dell/python-powerstore
|
04d6d73e4c926cf0d347cf68b24f8f11ff80f565
|
[
"Apache-2.0"
] | 2 |
2020-06-09T15:19:25.000Z
|
2020-08-18T18:58:59.000Z
|
PyPowerStore/utils/helpers.py
|
dell/python-powerstore
|
04d6d73e4c926cf0d347cf68b24f8f11ff80f565
|
[
"Apache-2.0"
] | 5 |
2020-05-06T23:46:22.000Z
|
2021-05-08T03:03:07.000Z
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019-2021, Dell EMC
"""Helper module for PowerStore"""
import logging
from pkg_resources import parse_version
provisioning_obj = None
def set_provisioning_obj(val):
    """Cache the provisioning client in a module-level global.

    :param val: Provisioning instance used later by
        :func:`is_foot_hill_or_higher` to query the array version.
    """
    global provisioning_obj
    provisioning_obj = val
def prepare_querystring(*query_arguments, **kw_query_arguments):
    """Prepare a querystring dict containing all query_arguments and
    kw_query_arguments passed.

    Positional arguments that are not dicts are silently ignored; keyword
    arguments are merged last, so they win on key collisions.
    :return: Querystring dict.
    :rtype: dict
    """
    querystring = {}
    for candidate in query_arguments:
        if isinstance(candidate, dict):
            querystring.update(candidate)
    querystring.update(kw_query_arguments)
    return querystring
def get_logger(module_name, enable_log=False):
    """Return a DEBUG-level logger with the specified name.

    :param module_name: Name of the module
    :type module_name: str
    :param enable_log: (optional) Whether to enable log or not
    :type enable_log: bool
    :return: Logger object
    :rtype: logging.Logger
    """
    logger = logging.getLogger(module_name)
    logger.setLevel(logging.DEBUG)
    # A single assignment replaces the original if/else branches.
    logger.disabled = not enable_log
    return logger
def is_foot_hill_or_higher():
    """Return a true if the array version is foot hill or higher.

    Requires :func:`set_provisioning_obj` to have been called first;
    otherwise ``provisioning_obj`` is ``None`` and this raises
    ``AttributeError``.
    :return: True if foot hill or higher
    :rtype: bool
    """
    # "Foothill" corresponds to the 2.0.0.0 release.
    foot_hill_version = '2.0.0.0'
    array_version = provisioning_obj.get_array_version()
    # NOTE(review): pkg_resources.parse_version is deprecated upstream;
    # consider packaging.version.parse when dependency constraints allow.
    if array_version and (
            parse_version(array_version) >= parse_version(foot_hill_version)):
        return True
    return False
def filtered_details(filterable_keys, filter_dict, resource_list,
                     resource_name):
    """
    Get the filtered output.
    :filterable_keys: Keys on which filters are supported.
    :type filterable_keys: list
    :filter_dict: Dict containing the filters, operators and value.
    :type filter_dict: dict
    :resource_list: The response of the REST api call on which
                    filter_dict is to be applied.
    :type resource_list: list
    :resource_name: Name of the resource
    :type resource_name: str
    :return: Dict, containing filtered values.
    :rtype: dict
    """
    err_msg = "Entered key {0} is not supported for filtering. " \
              "For {1}, filters can be applied only on {2}. "
    response = list()
    for resource in resource_list:
        # A resource is kept only when every filter in filter_dict matched
        # (apply_operators bumps `count` once per satisfied filter).
        count = 0
        for key in filter_dict:
            # Check if the filters can be applied on the key or not
            if key not in filterable_keys:
                raise Exception(err_msg.format(
                    key, resource_name, str(filterable_keys)))
            count = apply_operators(filter_dict, key, resource, count)
        if count == len(filter_dict):
            temp_dict = dict()
            temp_dict['id'] = resource['id']
            # check if resource has 'name' parameter or not.
            if resource_name not in ["CHAP config", "service config"]:
                temp_dict['name'] = resource['name']
            response.append(temp_dict)
    return response
def apply_operators(filter_dict, key, resource, count):
    """
    Returns the count for the filters applied on the keys

    :param filter_dict: Maps resource keys to "<operator>.<operand>" filter
        strings, e.g. ``{"size": "gt.5"}``. Supported operators: eq, neq,
        ilike (with ``*`` wildcards), gt, lt.
    :param key: The key of ``filter_dict``/``resource`` to evaluate.
    :param resource: The resource dict whose value is being filtered.
    :param count: Running number of satisfied filters; incremented by one
        when this filter matches.
    :return: The (possibly incremented) count.
    """
    # Split only on the FIRST '.' so operands containing dots (e.g.
    # "gt.2.5" or "eq.10.0.0.1") keep their full value. The previous
    # unbounded split truncated such operands at the second dot.
    operator, _, operand = filter_dict[key].partition(".")
    if operator == 'eq' and str(resource[key]) == str(operand):
        count += 1
    elif operator == 'neq' and str(resource[key]) != str(operand):
        count += 1
    elif operator == 'ilike':
        if not isinstance(resource[key], str):
            raise Exception('like can be applied on string type'
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
        search_val = operand.replace("*", "")
        value = resource[key]
        # "*x*" -> substring match, "*x" -> suffix match,
        # "x" / "x*" -> prefix match.
        if operand.startswith("*") and operand.endswith("*") and \
                value.count(search_val) > 0:
            count += 1
        elif operand.startswith("*") and value.endswith(search_val):
            count += 1
        elif value.startswith(search_val):
            count += 1
    elif operator == 'gt':
        if not isinstance(resource[key], (int, float)):
            raise Exception('greater can be applied on int type'
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
        # float() accepts both int and float operands, so one comparison
        # replaces the original's separate int/float branches.
        if float(operand) < resource[key]:
            count += 1
    elif operator == 'lt':
        if not isinstance(resource[key], (int, float)):
            raise Exception('lesser can be applied on int type'
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
        if float(operand) > resource[key]:
            count += 1
    return count
| 35.373333 | 78 | 0.602714 | 3.015625 |
0241dac1277c9b93cb329a25e5931d78270c35e5
| 5,487 |
swift
|
Swift
|
Decred Wallet/Features/History/TransactionTableViewCell.swift
|
itswisdomagain/dcrios
|
90f3e8594e48b9230500cc18229ceb5283dd534a
|
[
"ISC"
] | null | null | null |
Decred Wallet/Features/History/TransactionTableViewCell.swift
|
itswisdomagain/dcrios
|
90f3e8594e48b9230500cc18229ceb5283dd534a
|
[
"ISC"
] | 4 |
2019-05-23T15:05:21.000Z
|
2019-09-02T15:27:16.000Z
|
Decred Wallet/Features/History/TransactionTableViewCell.swift
|
itswisdomagain/dcrios
|
90f3e8594e48b9230500cc18229ceb5283dd534a
|
[
"ISC"
] | null | null | null |
//
// TransactionTableViewCell.swift
// Decred Wallet
//
// Copyright (c) 2018-2019 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
import Foundation
import UIKit
/// Table cell rendering one wallet transaction: a direction/type icon, an
/// amount or type label, a confirmation status and a formatted timestamp.
class TransactionTableViewCell: BaseTableViewCell {
    @IBOutlet weak var dataImage: UIImageView!
    @IBOutlet weak var dataText: UILabel!
    @IBOutlet weak var status: UILabel!
    @IBOutlet weak var dateT: UILabel!
    var count = 0
    override func awakeFromNib() {}
    /// Fixed row height used by the table view.
    override class func height() -> CGFloat {
        return 60
    }
    /// Populates the cell from a `Transaction` value; ignores other payloads.
    override func setData(_ data: Any?) {
        if let transaction = data as? Transaction {
            // Confirmations = blocks above the tx's block, plus one for the
            // containing block itself; Height == -1 means still unmined.
            let bestBlock = AppDelegate.walletLoader.wallet?.getBestBlock()
            var confirmations = 0
            if(transaction.Height != -1){
                confirmations = Int(bestBlock!) - transaction.Height
                confirmations += 1
            }
            if (transaction.Height == -1) {
                self.status.textColor = UIColor(hex:"#3d659c")
                self.status.text = LocalizedStrings.pending
            } else {
                // With "spend unconfirmed" enabled, a single confirmation is
                // enough to display the tx as confirmed.
                if (Settings.spendUnconfirmed || confirmations > 1) {
                    self.status.textColor = UIColor(hex:"#2DD8A3")
                    self.status.text = LocalizedStrings.confirmed
                } else {
                    self.status.textColor = UIColor(hex:"#3d659c")
                    self.status.text = LocalizedStrings.pending
                }
            }
            // Render the unix timestamp as e.g. "Jan 02, 2019 03:04pm".
            let Date2 = NSDate.init(timeIntervalSince1970: TimeInterval(transaction.Timestamp) )
            let dateformater = DateFormatter()
            dateformater.locale = Locale(identifier: "en_US_POSIX")
            dateformater.dateFormat = "MMM dd, yyyy hh:mma"
            dateformater.amSymbol = "am"
            dateformater.pmSymbol = "pm"
            dateformater.string(from: Date2 as Date)
            self.dateT.text = dateformater.string(from: Date2 as Date)
            // Amount is stored in atoms; 1e8 atoms = 1 DCR.
            let amount = Decimal(transaction.Amount / 100000000.00) as NSDecimalNumber
            let requireConfirmation = Settings.spendUnconfirmed ? 0 : 2
            if (transaction.Type.lowercased() == "regular") {
                // Direction: 0 = sent (debit), 1 = received (credit),
                // 2 = transfer between own accounts.
                if (transaction.Direction == 0) {
                    let attributedString = NSMutableAttributedString(string: "-")
                    attributedString.append(Utils.getAttributedString(str: amount.round(8).description, siz: 13.0, TexthexColor: GlobalConstants.Colors.TextAmount))
                    self.dataText.attributedText = attributedString
                    self.dataImage?.image = UIImage(named: "debit")
                } else if(transaction.Direction == 1) {
                    let attributedString = NSMutableAttributedString(string: " ")
                    attributedString.append(Utils.getAttributedString(str: amount.round(8).description, siz: 13.0, TexthexColor: GlobalConstants.Colors.TextAmount))
                    self.dataText.attributedText = attributedString
                    self.dataImage?.image = UIImage(named: "credit")
                } else if(transaction.Direction == 2) {
                    let attributedString = NSMutableAttributedString(string: " ")
                    attributedString.append(Utils.getAttributedString(str: amount.round(8).description, siz: 13.0, TexthexColor: GlobalConstants.Colors.TextAmount))
                    self.dataText.attributedText = attributedString
                    self.dataImage?.image = UIImage(named: "account")
                }
            } else if(transaction.Type.lowercased() == "vote") {
                self.dataText.text = " \(LocalizedStrings.vote)"
                self.dataImage?.image = UIImage(named: "vote")
            } else if (transaction.Type.lowercased() == "ticket_purchase") {
                self.dataText.text = " \(LocalizedStrings.ticket)"
                self.dataImage?.image = UIImage(named: "immature")
                // Ticket lifecycle: pending -> immature -> live, keyed off the
                // confirmation count vs. the chain's ticket maturity.
                if (confirmations < requireConfirmation){
                    self.status.textColor = UIColor(hex:"#3d659c")
                    self.status.text = LocalizedStrings.pending
                } else if (confirmations > BuildConfig.TicketMaturity) {
                    let statusText = LocalizedStrings.confirmedLive
                    let range = (statusText as NSString).range(of: "/")
                    let attributedString = NSMutableAttributedString(string: statusText)
                    attributedString.addAttribute(NSAttributedString.Key.foregroundColor, value: UIColor.black , range: range)
                    self.status.textColor = UIColor(hex:"#2DD8A3")
                    self.status.attributedText = attributedString
                    self.dataImage?.image = UIImage(named: "live")
                } else {
                    let statusText = LocalizedStrings.confirmedImmature
                    let range = (statusText as NSString).range(of: "/")
                    let attributedString = NSMutableAttributedString(string: statusText)
                    attributedString.addAttribute(NSAttributedString.Key.foregroundColor, value: UIColor.black , range: range)
                    self.status.textColor = UIColor.orange
                    self.status.attributedText = attributedString
                    self.dataImage?.image = UIImage(named: "immature")
                }
            }
        }
    }
}
| 49.881818 | 164 | 0.58994 | 3 |
a837de479e223b3cd4a565b33677bec9026cbf36
| 787 |
rs
|
Rust
|
src/utils.rs
|
zutils/create-protocols-plugin
|
048f49bd9651750a5139054debbf68a98aa029d9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
src/utils.rs
|
zutils/create-protocols-plugin
|
048f49bd9651750a5139054debbf68a98aa029d9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
src/utils.rs
|
zutils/create-protocols-plugin
|
048f49bd9651750a5139054debbf68a98aa029d9
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
extern crate failure;
use std::path::PathBuf;
use failure::Error;
/// Blocks the current thread for `ms` milliseconds.
pub fn sleep_ms(ms: u64) {
    std::thread::sleep(std::time::Duration::from_millis(ms));
}
/// Appends `contents` to `new_file`, logging the target path to stdout.
///
/// NOTE(review): the file is opened with `append` but without `create`,
/// so the call fails with `NotFound` unless the file already exists —
/// confirm this is intended before adding `.create(true)`.
pub fn append_to_file(new_file: &PathBuf, contents: &str) -> Result<(), Error> {
    use std::fs::OpenOptions;
    use std::io::Write;
    let mut file = OpenOptions::new()
        .write(true)
        .append(true)
        .open(new_file)?;
    println!("Writing to: {:?}", new_file);
    file.write_all(contents.as_bytes())?;
    Ok(())
}
/// Returns `s` with its first character upper-cased; the remaining
/// characters are left untouched. An empty input yields an empty `String`.
pub fn uppercase_first_letter(s: &str) -> String {
    let mut chars = s.chars();
    match chars.next() {
        Some(first) => first.to_uppercase().chain(chars).collect(),
        None => String::new(),
    }
}
| 24.59375 | 80 | 0.559085 | 3.34375 |
9c206de424831909c285e68228bc188cf68f5884
| 8,393 |
js
|
JavaScript
|
sources/js/ViewStates.js
|
heschel6/Magic_Experiment
|
ceb8b81e7bcb210cc9f1775323f6afe4da6bdbb1
|
[
"MIT"
] | 201 |
2016-11-14T23:13:21.000Z
|
2019-04-17T19:55:12.000Z
|
sources/js/ViewStates.js
|
heschel6/Magic_Experiment
|
ceb8b81e7bcb210cc9f1775323f6afe4da6bdbb1
|
[
"MIT"
] | 10 |
2016-12-23T06:06:23.000Z
|
2021-04-19T21:04:55.000Z
|
sources/js/ViewStates.js
|
heschel6/Magic_Experiment
|
ceb8b81e7bcb210cc9f1775323f6afe4da6bdbb1
|
[
"MIT"
] | 19 |
2016-12-23T10:08:35.000Z
|
2021-08-16T08:08:22.000Z
|
// Compiled CoffeeScript output: a Framer-style state machine attached to a
// view. The `__…` bindings below are runtime helpers emitted by the
// CoffeeScript compiler (prototype inheritance, indexOf shim, slice).
var ViewStates, ViewStatesIgnoredKeys,
  __hasProp = {}.hasOwnProperty,
  __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; },
  __indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; },
  __slice = [].slice;

// Property names that are never applied/animated when switching states.
ViewStatesIgnoredKeys = ["ignoreEvent"];

ViewStates = (function(_super) {
  __extends(ViewStates, _super);

  // Captures the view's current props as the implicit "default" state.
  function ViewStates(view) {
    this.view = view;
    this._states = {};
    this._orderedStates = [];
    this.animationOptions = {};
    this._currentState = "default";
    this._previousStates = [];
    this.add("default", this.view.props);
    ViewStates.__super__.constructor.apply(this, arguments);
  }

  // Registers a named state; accepts (name, props) or a {name: props} map.
  ViewStates.prototype.add = function(stateName, properties) {
    var error, k, v;
    if (Utils.isObject(stateName)) {
      for (k in stateName) {
        v = stateName[k];
        this.add(k, v);
      }
      return;
    }
    error = function() {
      throw Error("Usage example: view.states.add(\"someName\", {x:500})");
    };
    if (!Utils.isString(stateName)) {
      error();
    }
    if (!Utils.isObject(properties)) {
      error();
    }
    this._orderedStates.push(stateName);
    return this._states[stateName] = ViewStates.filterStateProperties(properties);
  };

  // Unregisters a named state (no-op if unknown).
  ViewStates.prototype.remove = function(stateName) {
    if (!this._states.hasOwnProperty(stateName)) {
      return;
    }
    delete this._states[stateName];
    return this._orderedStates = Utils.without(this._orderedStates, stateName);
  };

  // Switches to `stateName`, animating unless `instant` is true. A callback
  // may be supplied in any trailing argument position; it fires after the
  // transition completes. Emits StateWillSwitch/StateDidSwitch.
  ViewStates.prototype["switch"] = function(stateName, animationOptions, instant) {
    var animatablePropertyKeys, animatingKeys, args, callback, k, properties, propertyName, v, value, _ref, _ref1;
    if (instant == null) {
      instant = false;
    }
    args = arguments;
    callback = NULL;
    if (Utils.isFunction(arguments[1])) {
      callback = arguments[1];
    } else if (Utils.isFunction(arguments[2])) {
      callback = arguments[2];
    } else if (Utils.isFunction(arguments[3])) {
      callback = arguments[3];
    }
    if (!this._states.hasOwnProperty(stateName)) {
      throw Error("No such state: '" + stateName + "'");
    }
    this.emit(Event.StateWillSwitch, this._currentState, stateName, this);
    this._previousStates.push(this._currentState);
    this._currentState = stateName;
    properties = {};
    animatingKeys = this.animatingKeys();
    _ref = this._states[stateName];
    for (propertyName in _ref) {
      value = _ref[propertyName];
      if (__indexOf.call(ViewStatesIgnoredKeys, propertyName) >= 0) {
        continue;
      }
      if (__indexOf.call(animatingKeys, propertyName) < 0) {
        continue;
      }
      // Function-valued state properties are resolved lazily per switch.
      if (Utils.isFunction(value)) {
        value = value.call(this.view, this.view, propertyName, stateName);
      }
      properties[propertyName] = value;
    }
    // Only numbers, colors and nulls can be tweened; anything else forces
    // an instant switch.
    animatablePropertyKeys = [];
    for (k in properties) {
      v = properties[k];
      if (Utils.isNumber(v)) {
        animatablePropertyKeys.push(k);
      } else if (Color.isColorObject(v)) {
        animatablePropertyKeys.push(k);
      } else if (v === null) {
        animatablePropertyKeys.push(k);
      }
    }
    if (animatablePropertyKeys.length === 0) {
      instant = true;
    }
    if (instant === true) {
      this.view.props = properties;
      return this.emit(Event.StateDidSwitch, Utils.last(this._previousStates), this._currentState, this);
    } else {
      if (animationOptions == null) {
        animationOptions = this.animationOptions;
      }
      animationOptions.properties = properties;
      if ((_ref1 = this._animation) != null) {
        _ref1.stop();
      }
      this._animation = this.view.animate(animationOptions);
      return this._animation.once("stop", (function(_this) {
        return function() {
          // Non-animatable values are applied once the tween finishes.
          for (k in properties) {
            v = properties[k];
            if (!(Utils.isNumber(v) || Color.isColorObject(v))) {
              _this.view[k] = v;
            }
          }
          if (callback) {
            callback(Utils.last(_this._previousStates), _this._currentState, _this);
          }
          if (Utils.last(_this._previousStates) !== stateName) {
            return _this.emit(Event.StateDidSwitch, Utils.last(_this._previousStates), _this._currentState, _this);
          }
        };
      })(this));
    }
  };

  // Convenience wrapper: switch without animation.
  ViewStates.prototype.switchInstant = function(stateName, callback) {
    return this["switch"](stateName, null, true, callback);
  };

  // Read-only accessors for the current state name and state list.
  ViewStates.define("state", {
    get: function() {
      return this._currentState;
    }
  });

  ViewStates.define("current", {
    get: function() {
      return this._currentState;
    }
  });

  ViewStates.define("all", {
    get: function() {
      return Utils.clone(this._orderedStates);
    }
  });

  ViewStates.prototype.states = function() {
    return Utils.clone(this._orderedStates);
  };

  // Union of all property names appearing in any registered state.
  ViewStates.prototype.animatingKeys = function() {
    var keys, state, stateName, _ref;
    keys = [];
    _ref = this._states;
    for (stateName in _ref) {
      state = _ref[stateName];
      keys = Utils.union(keys, Utils.keys(state));
    }
    return keys;
  };

  // Switches to the state preceding the current one in `states`.
  ViewStates.prototype.previous = function(states, animationOptions) {
    var args, callback, last;
    args = arguments;
    last = Utils.last(args);
    callback = NULL;
    if (Utils.isFunction(last)) {
      args = Array.prototype.slice.call(arguments);
      callback = args.pop();
      if (states === callback) {
        states = NULL;
      }
      if (animationOptions === callback) {
        animationOptions = {};
      }
    }
    if (states == null) {
      states = this.states();
    }
    return this["switch"](Utils.arrayPrev(states, this._currentState), animationOptions, callback);
  };

  // Cycles forward through the given states (or all states), switching to
  // each in turn; the optional callback fires after the last switch.
  ViewStates.prototype.next = function() {
    var args, callback, index, last, states, that;
    args = arguments;
    last = Utils.last(args);
    callback = NULL;
    that = this;
    if (Utils.isFunction(last)) {
      args = Array.prototype.slice.call(arguments);
      callback = args.pop();
    }
    states = Utils.arrayFromArguments(args);
    if (!states.length) {
      states = this.states();
      index = states.indexOf(this._currentState);
      if (index + 1 > states.length) {
        states = [states[0]];
      } else {
        states = [states[index + 1]];
      }
    }
    return this["switch"](Utils.arrayNext(states, this._currentState), function() {
      states.shift();
      if (states.length > 0) {
        if (callback) {
          return that.next(states, callback);
        } else {
          return that.next(states);
        }
      } else if (callback) {
        return callback();
      }
    });
  };

  // Switches back to the most recently visited state.
  ViewStates.prototype.last = function(animationOptions) {
    var args, callback, last, state;
    args = arguments;
    last = Utils.last(args);
    callback = NULL;
    state = NULL;
    if (Utils.isFunction(last)) {
      args = Array.prototype.slice.call(arguments);
      callback = args.pop();
      if (animationOptions === callback) {
        animationOptions = {};
      }
    }
    if (!this._previousStates.length) {
      state = this.states();
    } else {
      state = this._previousStates;
    }
    return this["switch"](Utils.last(state), animationOptions, callback);
  };

  // Re-emits every event on the owning view as well.
  ViewStates.prototype.emit = function() {
    var args, _ref;
    args = 1 <= arguments.length ? __slice.call(arguments, 0) : [];
    ViewStates.__super__.emit.apply(this, arguments);
    return (_ref = this.view).emit.apply(_ref, args);
  };

  // Keeps only storable property values; color strings on *color keys are
  // promoted to Color objects so they can be tweened.
  ViewStates.filterStateProperties = function(properties) {
    var k, stateProperties, v;
    stateProperties = {};
    for (k in properties) {
      v = properties[k];
      if (Utils.isString(v) && Utils.endsWith(k.toLowerCase(), "color") && Color.isColorString(v)) {
        stateProperties[k] = new Color(v);
      } else if (Utils.isNumber(v) || Utils.isFunction(v) || Utils.isBoolean(v) || Utils.isString(v) || Color.isColorObject(v) || v === null) {
        stateProperties[k] = v;
      }
    }
    return stateProperties;
  };

  return ViewStates;

})(Element);
56015e9fa8333eb6ce82b37e0e0bdc5c2764504a
| 2,795 |
kt
|
Kotlin
|
CustomViewTest/app/src/main/java/com/example/lt/customviewtest/view/CameraView.kt
|
maxdylan/rwx_test
|
db3ee7b07c8d1fd35d24d53c6d79bb8fa13eb7c8
|
[
"Apache-2.0"
] | null | null | null |
CustomViewTest/app/src/main/java/com/example/lt/customviewtest/view/CameraView.kt
|
maxdylan/rwx_test
|
db3ee7b07c8d1fd35d24d53c6d79bb8fa13eb7c8
|
[
"Apache-2.0"
] | null | null | null |
CustomViewTest/app/src/main/java/com/example/lt/customviewtest/view/CameraView.kt
|
maxdylan/rwx_test
|
db3ee7b07c8d1fd35d24d53c6d79bb8fa13eb7c8
|
[
"Apache-2.0"
] | null | null | null |
package com.example.lt.customviewtest.view
import android.content.Context
import android.graphics.*
import android.util.AttributeSet
import android.view.View
import androidx.core.graphics.withSave
import com.example.lt.customviewtest.R
import com.example.lt.customviewtest.extension.px
import kotlin.math.max
/**
 * Custom view that renders an avatar bitmap split into two halves, each of
 * which can be 3D-rotated independently via [android.graphics.Camera] to
 * produce a "page fold" effect along an axis rotated by [flipAngle].
 */
class CameraView(context: Context, attrs: AttributeSet?) : View(context, attrs) {
    private val bitmap = getAvatar(300f.px.toInt())
    private val paint = Paint(Paint.ANTI_ALIAS_FLAG)
    // Pull the virtual camera back proportionally to screen density so the
    // perspective distortion looks the same on all devices.
    private val camera = Camera().apply {
        setLocation(0f,0f,-6f*resources.displayMetrics.density)
    }
    // Rotation (degrees) of the top half; setters redraw on change.
    var leftCameraAngle = 0f
        set(value) {
            field = value
            invalidate()
        }
    // Rotation (degrees) of the bottom half.
    var rightCameraAngle = 0f
        set(value) {
            field = value
            invalidate()
        }
    // Orientation (degrees) of the fold axis itself.
    var flipAngle = 0f
        set(value) {
            field = value
            invalidate()
        }
    override fun onDraw(canvas: Canvas) {
        super.onDraw(canvas)
        // First pass: draw the half ABOVE the fold axis. The canvas is moved
        // so the fold axis passes through the view center, rotated, clipped,
        // then un-rotated/un-translated before the bitmap is drawn.
        canvas.save()
        canvas.translate(width/2f,height/2f)
        canvas.rotate(-flipAngle)
        camera.save()
        camera.rotateX(leftCameraAngle)
        camera.applyToCanvas(canvas)
        camera.restore()
        canvas.clipRect(
            - bitmap.width.toFloat(),
            - bitmap.height.toFloat() ,
            bitmap.width.toFloat(),
            0f
        )
        canvas.rotate(flipAngle)
        canvas.translate(-width/2f,-height/2f)
        canvas.drawBitmap(
            bitmap,
            width / 2f - bitmap.width / 2f,
            height / 2f - bitmap.height / 2f,
            paint
        )
        canvas.restore()
        // Second pass: same dance for the half BELOW the fold axis.
        canvas.save()
        canvas.translate(width / 2f, height / 2f)
        canvas.rotate(-flipAngle)
        camera.save()
        camera.rotateX(rightCameraAngle)
        camera.applyToCanvas(canvas)
        camera.restore()
        canvas.clipRect(
            - bitmap.width.toFloat(),
            0f,
            bitmap.width.toFloat(),
            bitmap.height.toFloat()
        )
        canvas.rotate(flipAngle)
        canvas.translate(-width/2f,- height / 2f)
        canvas.drawBitmap(
            bitmap,
            width / 2f - bitmap.width / 2f,
            height / 2f - bitmap.height / 2f,
            paint
        )
        canvas.restore()
    }
    // Decodes R.drawable.squire scaled so its longer side equals `width`
    // pixels, using the density/targetDensity trick to avoid a second
    // scaling pass.
    private fun getAvatar(width:Int):Bitmap{
        val options = BitmapFactory.Options()
        options.inJustDecodeBounds = true
        BitmapFactory.decodeResource(resources, R.drawable.squire, options)
        options.inJustDecodeBounds = false
        options.inDensity = max(options.outWidth,options.outHeight)
        options.inTargetDensity = width
        return BitmapFactory.decodeResource(resources,R.drawable.squire,options)
    }
}
| 29.734043 | 81 | 0.595707 | 3.109375 |
9baaff69d1d21a5ac53edd2eac42e665f7c553e8
| 3,634 |
js
|
JavaScript
|
src/res/js/under/extend/stage/physics/VariableGravityWorld.js
|
Expine/Under
|
8c9f2b884b1a8ce4bdf2fff948cac4cebbd5fdc3
|
[
"MIT"
] | null | null | null |
src/res/js/under/extend/stage/physics/VariableGravityWorld.js
|
Expine/Under
|
8c9f2b884b1a8ce4bdf2fff948cac4cebbd5fdc3
|
[
"MIT"
] | null | null | null |
src/res/js/under/extend/stage/physics/VariableGravityWorld.js
|
Expine/Under
|
8c9f2b884b1a8ce4bdf2fff948cac4cebbd5fdc3
|
[
"MIT"
] | null | null | null |
/**
 * Gravity world
 * - Performs a physical operation
 * - Registers entities and apply a physical operation
 * - Continually perform collision processing
 * - ### Applies a scheduled, time-varying gravity direction to actors
 * @extends {SplitWorld}
 * @classdesc Gravity world whose gravity direction changes over a scripted
 *            timeline (see {@link VariableGravityWorld#addGravity}); while a
 *            change is imminent it renders a flashing arrow hinting at the
 *            upcoming direction.
 */
class VariableGravityWorld extends SplitWorld { // eslint-disable-line no-unused-vars
    /**
     * Gravity world constructor
     * @constructor
     * @param {number} stageWidth Stage width (pixel)
     * @param {number} stageHeight Stage height (pixel)
     * @param {number} [gravity=9.8] gravity of the world
     */
    constructor(stageWidth, stageHeight, gravity = 9.8) {
        super(stageWidth, stageHeight, gravity);
        /**
         * Gravity x direction
         * @protected
         * @type {number}
         */
        this.gravityX = 0;
        /**
         * Gravity y direction
         * @protected
         * @type {number}
         */
        this.gravityY = 1;
        // Parallel arrays forming the gravity timeline: entry i applies
        // direction (gravityXs[i], gravityYs[i]) for deltas[i] seconds.
        this.gravityXs = [];
        this.gravityYs = [];
        this.deltas = [];
        // Index of the currently active timeline entry.
        this.number = 0;
    }
    /**
     * Add gravity change time
     * @param {number} gravityX Gravity x direction
     * @param {number} gravityY Gravity y direction
     * @param {number} delta Duration of this gravity phase (seconds)
     */
    addGravity(gravityX, gravityY, delta) {
        this.gravityXs.push(gravityX);
        this.gravityYs.push(gravityY);
        this.deltas.push(delta);
    }
    /**
     * Update external force
     * @protected
     * @override
     * @param {number} dt Delta time
     */
    updateExternalForce(dt) {
        // Advance to the next timeline entry once the current one expires.
        if (this.deltas[this.number] < 0) {
            this.number++;
        }
        if (this.number < this.deltas.length) {
            this.gravityX = this.gravityXs[this.number];
            this.gravityY = this.gravityYs[this.number];
            this.deltas[this.number] -= dt / 1000;
            // During the final second of a phase, revert to normal downward
            // gravity (matches the flashing warning drawn in render()).
            if (this.deltas[this.number] < 1) {
                this.gravityX = 0;
                this.gravityY = 1;
            }
        }
        for (const target of this.actors) {
            if (target.body !== null) {
                const g = this.gravity * target.material.mass * target.body.material.gravityScale;
                target.body.enforce(g * this.gravityX, g * this.gravityY);
            }
        }
    }
    /**
     * Render world: flashes an arrow at the screen edge during the last
     * second before a gravity change, pointing toward the next direction.
     * @override
     * @param {Context} ctx Canvas context
     * @param {number} [shiftX = 0] Shift x position
     * @param {number} [shiftY = 0] Shift y position
     */
    render(ctx, shiftX = 0, shiftY = 0) {
        if (this.number < this.deltas.length) {
            const delta = this.deltas[this.number];
            // Blink: draw only on alternating milliseconds.
            if (delta < 1 && Math.floor(delta * 1000) % 2 === 0) {
                if (this.number < this.deltas.length - 1) {
                    const x = this.gravityXs[this.number + 1];
                    const y = this.gravityYs[this.number + 1];
                    if (x > 0) {
                        ctx.fillText(`>`, GameScreen.it.width - 10, GameScreen.it.height / 2, 1.0, 0.5, 100, `red`);
                    }
                    if (x < 0) {
                        ctx.fillText(`<`, 10, GameScreen.it.height / 2, 0.0, 0.5, 100, `red`);
                    }
                    if (y > 0) {
                        ctx.fillText(`|`, GameScreen.it.width / 2, GameScreen.it.height - 10, 0.5, 1.0, 100, `red`);
                    }
                    if (y < 0) {
                        ctx.fillText(`^`, GameScreen.it.width / 2, 10, 0.5, 0.0, 100, `red`);
                    }
                }
            }
        }
    }
}
| 32.159292 | 116 | 0.504953 | 3.265625 |
cb8ef700df14e74ec7235f8d60b33a7a0239854b
| 3,275 |
go
|
Go
|
conv32_test.go
|
sirkon/decconv
|
4d07560f2c6d19d59702db9105000b4201583702
|
[
"MIT"
] | null | null | null |
conv32_test.go
|
sirkon/decconv
|
4d07560f2c6d19d59702db9105000b4201583702
|
[
"MIT"
] | null | null | null |
conv32_test.go
|
sirkon/decconv
|
4d07560f2c6d19d59702db9105000b4201583702
|
[
"MIT"
] | null | null | null |
package decconv
import (
"testing"
)
// TestDecode32 exercises Decode32 against a table of valid and invalid
// decimal strings, and round-trips each successful decode through Encode32.
//
// Fix: the table previously reused the subtest name "generic-empty-fraction"
// for two different cases; the second case ("12.02") has a non-empty
// fraction and is now named "generic-with-fraction" so `go test -run`
// can address each subtest individually.
func TestDecode32(t *testing.T) {
	type args struct {
		precision int
		scale     int
		input     string
	}
	tests := []struct {
		name    string
		args    args
		want    int32
		conv    string
		wantErr bool
	}{
		{
			name: "only-integral-positive",
			args: args{
				precision: 9,
				scale:     0,
				input:     "12",
			},
			want:    12,
			conv:    "12",
			wantErr: false,
		},
		{
			name: "only-integral-negative",
			args: args{
				precision: 9,
				scale:     0,
				input:     "-123",
			},
			want:    -123,
			conv:    "-123",
			wantErr: false,
		},
		{
			name: "generic-empty-fraction",
			args: args{
				precision: 9,
				scale:     1,
				input:     "12.0",
			},
			want:    120,
			conv:    "12",
			wantErr: false,
		},
		{
			name: "generic-with-fraction",
			args: args{
				precision: 9,
				scale:     2,
				input:     "12.02",
			},
			want:    1202,
			conv:    "12.02",
			wantErr: false,
		},
		{
			name: "check-input-positive",
			args: args{
				precision: 9,
				scale:     5,
				input:     "3015.07654",
			},
			want:    301507654,
			conv:    "3015.07654",
			wantErr: false,
		},
		{
			name: "check-input-negative",
			args: args{
				precision: 9,
				scale:     5,
				input:     "-3015.07654",
			},
			want:    -301507654,
			conv:    "-3015.07654",
			wantErr: false,
		},
		{
			name: "passing-leading-zeroes",
			args: args{
				precision: 9,
				scale:     2,
				input:     "0000123.25",
			},
			want:    12325,
			conv:    "123.25",
			wantErr: false,
		},
		{
			name: "passing-trailing-zeroes",
			args: args{
				precision: 9,
				scale:     3,
				input:     "123.02500000000000",
			},
			want:    123025,
			conv:    "123.025",
			wantErr: false,
		},
		{
			name: "passing-both-sides-zeroes",
			args: args{
				precision: 9,
				scale:     4,
				input:     "-0000000123.12300000000",
			},
			want:    -1231230,
			conv:    "-123.123",
			wantErr: false,
		},
		{
			name: "error-empty-input",
			args: args{
				precision: 9,
				scale:     4,
				input:     "",
			},
			want:    0,
			wantErr: true,
		},
		{
			name: "error-invalid-input-integral-part",
			args: args{
				precision: 9,
				scale:     4,
				input:     "1A2.3",
			},
			want:    0,
			wantErr: true,
		},
		{
			name: "error-invalid-input-fraction-part",
			args: args{
				precision: 9,
				scale:     4,
				input:     "12.b3",
			},
			want:    0,
			wantErr: true,
		},
		{
			name: "error-overflow-integral",
			args: args{
				precision: 9,
				scale:     6,
				input:     "1234.12",
			},
			want:    0,
			wantErr: true,
		},
		{
			name: "error-overflow-fraction",
			args: args{
				precision: 9,
				scale:     3,
				input:     "1234.1234",
			},
			want:    0,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := Decode32(tt.args.precision, tt.args.scale, []byte(tt.args.input))
			if (err != nil) != tt.wantErr {
				t.Errorf("Decode32() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("Decode32() = %v, want %v", got, tt.want)
			}
			// Round-trip only meaningful when the decode succeeded.
			if conv := Encode32(tt.args.scale, got); conv != tt.conv && err == nil {
				t.Errorf("Encode32() = %v, want %v", conv, tt.conv)
			}
		})
	}
}
| 17.607527 | 80 | 0.500458 | 3.046875 |
b6da0bb4ae81472c509218aeb58ecfda6e71bc87
| 2,940 |
lua
|
Lua
|
init.lua
|
vincens2005/lite-xl-discord
|
438d7231e95b29a46abf7d13aca3c6261d425bef
|
[
"MIT"
] | 2 |
2021-06-11T20:24:31.000Z
|
2021-09-02T14:54:40.000Z
|
init.lua
|
vincens2005/lite-xl-discord
|
438d7231e95b29a46abf7d13aca3c6261d425bef
|
[
"MIT"
] | 4 |
2021-06-03T02:18:14.000Z
|
2021-12-15T00:39:27.000Z
|
init.lua
|
vincens2005/lite-xl-discord
|
438d7231e95b29a46abf7d13aca3c6261d425bef
|
[
"MIT"
] | 2 |
2021-07-31T02:47:45.000Z
|
2021-08-29T16:13:37.000Z
|
-- mod-version:1 lite-xl 2.00
-- lite-xl plugin: publishes the currently edited file / project as a
-- Discord Rich Presence status via a companion Python script.
local core = require "core"
local command = require "core.command"
local common = require "core.common"
local config = require "core.config"
-- function replacements:
-- keep the original core.quit/core.restart so we can wrap them and stop
-- the RPC helper process before the editor exits.
local quit = core.quit
local restart = core.restart
-- last state pushed to Discord; used to avoid redundant updates.
local status = {filename = nil, space = nil}
-- stolen from https://stackoverflow.com/questions/1426954/split-string-in-lua
local function split_string(inputstr, sep)
  if sep == nil then sep = "%s" end
  local t = {}
  for str in string.gmatch(inputstr, "([^" .. sep .. "]+)") do
    table.insert(t, str)
  end
  return t
end
-- tells the background Python process to clear presence and terminate.
local function tell_discord_to_stop()
  local cmd = "python3 " .. USERDIR ..
 "/plugins/lite-xl-discord/update_presence.py --state='no' --details='no' --die-now='yes' --pickle=" ..
 USERDIR .. "/plugins/lite-xl-discord/discord_data.pickle"
  -- core.log("running command ".. command)
  core.log("stopping discord rpc...")
  system.exec(cmd)
end
-- pushes the current file name and project directory to Discord.
local function update_status()
  local filename = "unsaved file"
  -- return if doc isn't active
  if not core.active_view then return end
  if not core.active_view.doc then return end
  if core.active_view.doc.filename then
    filename = core.active_view.doc.filename
    -- keep only the basename of the path
    filename = split_string(filename, "/")
    filename = filename[#filename]
  end
  local details = "editing " .. filename
  local dir = common.basename(core.project_dir)
  local state = "in " .. dir
  -- remember what we sent so the polling thread can detect changes
  status.filename = filename
  status.space = dir
  local cmd = "python3 " .. USERDIR ..
  "/plugins/lite-xl-discord/update_presence.py --state='" ..
  state .. "' --details='" .. details ..
  "' --die-now='no' --pickle=" .. USERDIR ..
  "/plugins/lite-xl-discord/discord_data.pickle"
  system.exec(cmd)
end
-- launches the long-running presence daemon, then sends the first update.
local function start_rpc()
  core.log("discord plugin: starting python script")
  system.exec("python3 " .. USERDIR ..
  "/plugins/lite-xl-discord/presence.py --pickle=" .. USERDIR ..
  "/plugins/lite-xl-discord/discord_data.pickle --pidfile=" .. USERDIR .. "/plugins/lite-xl-discord/pidfile.pid")
  update_status()
end
core.quit = function(force)
  tell_discord_to_stop()
  return quit(force)
end
core.restart = function()
  tell_discord_to_stop()
  return restart()
end
-- background poller: re-sends presence whenever the active file or
-- project directory changes.
core.add_thread(function()
  while true do
    -- skip loop if doc isn't active
    if not core.active_view then goto continue end
    if not core.active_view.doc then goto continue end
    if not (common.basename(core.project_dir) == status.space and
    core.active_view.doc.filename == status.filename) then
      update_status()
    end
    ::continue::
    coroutine.yield(config.project_scan_rate)
  end
end)
command.add("core.docview",
            {["discord-presence:stop-RPC"] = tell_discord_to_stop})
command.add("core.docview", {["discord-presence:update-RPC"] = update_status})
command.add("core.docview", {["discord-presence:start-RPC"] = start_rpc})
start_rpc()
| 30.625 | 127 | 0.677551 | 3.234375 |
7502cc388a55322a4f38d0c4291c68a6cc4d8a7f
| 1,368 |
rs
|
Rust
|
src/stss/mod.rs
|
nagashi/The-Traveling-Politician-Problem
|
78bdbf6759ca017a8c863505025b4db7b7734ef0
|
[
"Apache-2.0",
"MIT"
] | 2 |
2020-08-14T13:58:19.000Z
|
2021-04-27T17:34:21.000Z
|
src/stss/mod.rs
|
nagashi/The-Traveling-Politician-Problem
|
78bdbf6759ca017a8c863505025b4db7b7734ef0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
src/stss/mod.rs
|
nagashi/The-Traveling-Politician-Problem
|
78bdbf6759ca017a8c863505025b4db7b7734ef0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
// Converts an owned String into a `&'static str` by leaking the allocation.
// NOTE: each call permanently leaks the string's heap memory — acceptable
// here only because callers (e.g. `vec_row`) need `'static` row data for a
// one-shot CSV export.
fn string_to_static_str(s: String) -> &'static str {
    Box::leak(s.into_boxed_str())
}
/// Builds the cypher.csv header row: `KEY`, `STATE_1` … `STATE_{n-2}`, `DISTANCE`.
///
/// `vec_len` is the total number of columns. Returns an empty vector for
/// `vec_len == 0` and `["KEY"]` for `vec_len == 1` (same as before).
///
/// Fix: the previous version routed every state name through
/// `string_to_static_str`, leaking one heap allocation per column for no
/// benefit — the header is a `Vec<String>`, so `format!` builds the owned
/// name directly without any leak.
pub fn title(vec_len: usize) -> Vec<String> {
    let mut header: Vec<String> = Vec::with_capacity(vec_len);
    for i in 0..vec_len {
        match i {
            0 => header.push("KEY".to_owned()), // First header column
            a if a == (vec_len - 1) => header.push("DISTANCE".to_owned()), // Last header column
            // Header for each state column in between
            _ => header.push(format!("STATE_{}", i)),
        }
    }
    header
}
/// Assembles one cypher.csv data row: key column, the state columns from
/// `vec_row`, then the distance column (formatted to one decimal place).
///
/// The key and distance strings are leaked into `'static` lifetime via
/// `string_to_static_str` so they can live alongside the borrowed state
/// slices in the returned vector.
pub fn vec_row(row_num: usize, distance: f64, mut vec_row: Vec<&str>) -> Vec<&str> {
    let key: &'static str = string_to_static_str(format!("{:?}", row_num));
    let dist: &'static str = string_to_static_str(format!("{:.1}", distance));

    let mut row: Vec<&str> = Vec::with_capacity(vec_row.len() + 2);
    row.push(key); // Key: row number
    row.append(&mut vec_row); // States
    row.push(dist); // Distance
    row
}
| 33.365854 | 96 | 0.567982 | 3.234375 |
f06e06965b7f1b5220c96fb081f4691448e98901
| 1,437 |
kt
|
Kotlin
|
kotlin/src/main/kotlin/modules/GuestBookModule.kt
|
mandm-pt/languages
|
eecc58a631059a2f1594fe696f10c4b350ce5de0
|
[
"MIT"
] | 1 |
2020-08-05T13:49:55.000Z
|
2020-08-05T13:49:55.000Z
|
kotlin/src/main/kotlin/modules/GuestBookModule.kt
|
mandm-pt/languages
|
eecc58a631059a2f1594fe696f10c4b350ce5de0
|
[
"MIT"
] | null | null | null |
kotlin/src/main/kotlin/modules/GuestBookModule.kt
|
mandm-pt/languages
|
eecc58a631059a2f1594fe696f10c4b350ce5de0
|
[
"MIT"
] | null | null | null |
package modules
import HttpUtils.Companion.writeResponseText
import com.sun.net.httpserver.HttpExchange
import java.net.URLDecoder
import java.nio.charset.StandardCharsets
import java.text.SimpleDateFormat
import java.util.*
/**
 * Simple in-memory guest book served at /Guest.
 *
 * GET renders the stored messages plus a submit form; POST with a
 * `message=` form body appends a timestamped entry.
 *
 * Fixes over the previous version:
 *  - `split("=")` dropped everything after a literal '=' inside the value;
 *    `limit = 2` keeps the full payload.
 *  - The deprecated one-arg `URLDecoder.decode(String)` used the platform
 *    default charset; the charset is now explicit.
 *  - User input is HTML-escaped before being embedded in the page,
 *    closing a stored-XSS hole.
 */
class GuestBookModule : BaseModule("GuestBookModule") {
    private val path = "/Guest"

    // NOTE(review): plain MutableList — not safe under concurrent requests;
    // confirm the surrounding HttpServer dispatches single-threaded.
    private val guestsMessages = mutableListOf<String>()

    override fun canProcess(http: HttpExchange) = http.requestURI.rawPath.equals(path, true)

    override fun processingLogic(http: HttpExchange) {
        if (http.requestMethod == "POST") {
            tryAddGuest(http)
        }
        showPage(http)
    }

    /** Parses a `message=` form body and stores the message with a timestamp. */
    private fun tryAddGuest(http: HttpExchange) {
        val text = String(http.requestBody.readAllBytes(), StandardCharsets.ISO_8859_1)
        if (text.startsWith("message=", true)) {
            // limit = 2: only split on the first '=' so values containing
            // '=' survive intact.
            val message = URLDecoder.decode(text.split("=", limit = 2)[1], "UTF-8")
            val currentTime = SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(Date())
            guestsMessages += "$currentTime - $message"
        }
    }

    /** Renders the message list and the submit form. */
    private fun showPage(http: HttpExchange) {
        var pageHtml = "<ul>"
        for (message in guestsMessages)
            pageHtml += "<li>${escapeHtml(message)}</li>"
        pageHtml += """</ul>
            <form method='post'>
                <label for='message'>Message</label>
                <input name='message' id='message' autofocus>
            </form>"""
        http.writeResponseText(pageHtml)
    }

    /** Minimal HTML entity escaping for untrusted text embedded in markup. */
    private fun escapeHtml(s: String): String = s
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
        .replace("\"", "&quot;")
}
| 28.74 | 92 | 0.64579 | 3.125 |
9c1eeb4dda8cfacb60430120611ad230055397b2
| 16,814 |
kt
|
Kotlin
|
api/src/main/java/com/brein/domain/BreinUser.kt
|
Breinify/brein-api-library-android-kotlin
|
055e20c35ffb722aa6c3dc78f7092106571cdd42
|
[
"MIT"
] | null | null | null |
api/src/main/java/com/brein/domain/BreinUser.kt
|
Breinify/brein-api-library-android-kotlin
|
055e20c35ffb722aa6c3dc78f7092106571cdd42
|
[
"MIT"
] | null | null | null |
api/src/main/java/com/brein/domain/BreinUser.kt
|
Breinify/brein-api-library-android-kotlin
|
055e20c35ffb722aa6c3dc78f7092106571cdd42
|
[
"MIT"
] | null | null | null |
package com.brein.domain
import android.Manifest
import android.app.Application
import android.content.Context
import android.content.Context.WIFI_SERVICE
import android.content.pm.PackageManager
import android.location.Location
import android.location.LocationManager
import android.net.wifi.SupplicantState
import android.net.wifi.WifiInfo
import android.net.wifi.WifiManager
import android.util.Log
import androidx.core.app.ActivityCompat
import com.brein.api.Breinify
import com.brein.api.BreinifyManager
import com.brein.util.BreinUtil
import com.google.gson.JsonObject
import java.math.BigInteger
import java.net.InetAddress
import java.net.UnknownHostException
import java.nio.ByteOrder
/**
* A plain object specifying the user information the activity belongs to
*
* create a brein user with field email.
*
* @param email of the user
*/
class BreinUser(private var email: String?) {
    // Field identifiers usable by callers when referring to user attributes.
    enum class UserInfo {
        FIRST_NAME,
        LAST_NAME,
        PHONE_NUMBER,
        EMAIL
    }
    /**
     * contains further fields in the user section
     */
    private val userMap = HashMap<String, Any?>()
    /**
     * contains further fields in the user additional section
     */
    private val additionalMap = HashMap<String, Any?>()
    // NOTE: all set* methods below mirror the value into userMap or
    // additionalMap (the payload actually sent in prepareRequestData) and
    // return `this` for builder-style chaining.
    /**
     * contains the first name of the user
     */
    private var firstName: String = ""
    /**
     * contains the last name of the user
     */
    private var lastName: String = ""
    /**
     * contains the sessionId (if set)
     */
    private var sessionId: String = ""
    /**
     * contains the date of birth
     */
    private var dateOfBirth: String = ""
    fun setDateOfBirth(dateOfBirth: String): BreinUser {
        this.dateOfBirth = dateOfBirth
        this.userMap["dateOfBirth"] = this.dateOfBirth
        return this
    }
    fun getDateOfBirth(): String {
        return this.dateOfBirth
    }
    /**
     * Set's the date of birth
     * There is no check if the month - day combination is valid, only
     * the range for day, month and year will be checked.
     * Out-of-range values reset the date of birth to the empty string.
     *
     * @param month int month (1..12)
     * @param day int day (1..31)
     * @param year int year (1900..2100)
     * @return BreinUser the object itself
     */
    fun setDateOfBirthValue(month: Int, day: Int, year: Int): BreinUser {
        if (month in 1..12 &&
            day in 1..31 &&
            year in 1900..2100
        ) {
            this.dateOfBirth = String.format("%s/%d/%d", month, day, year)
            this.userMap["dateOfBirth"] = this.dateOfBirth
        } else {
            this.dateOfBirth = ""
            this.userMap["dateOfBirth"] = this.dateOfBirth
        }
        return this
    }
    /**
     * contains imei (International Mobile Equipment Identity)
     */
    private var imei: String = ""
    /**
     * contains the deviceid
     */
    private var deviceId: String = ""
    /**
     * contains the userId
     */
    private var userId: String = ""
    fun setUserId(s: String): BreinUser {
        this.userId = s
        this.userMap["userId"] = this.userId
        return this
    }
    /**
     * contains the phone number
     */
    private var phone: String = ""
    /**
     * the additional userAgent value (generated lazily if never set)
     */
    private var userAgent: String = ""
    /**
     * contains the ipAddress (additional part)
     */
    private var ipAddress: String = ""
    fun getIpAddress(): String {
        return this.ipAddress
    }
    /**
     * contains the detected ipAddress (additional part), filled by detectNetwork()
     */
    private var detectedIp: String = ""
    fun getDetectedIp(): String {
        return this.detectedIp
    }
    /**
     * contains the additional referrer value
     */
    private var referrer: String = ""
    /**
     * contains the timezone
     */
    private var timezone: String = ""
    fun getTimezone(): String {
        return this.timezone
    }
    /**
     * contains the additional url
     */
    private var url: String = ""
    fun getUrl(): String {
        return this.url
    }
    /**
     * contains the localDateTime
     */
    private var localDateTime: String = ""
    fun getLocalDateTime(): String {
        return this.localDateTime
    }
    /**
     * the push notification device registration token
     */
    private var pushDeviceRegistration: String = ""
    fun getPushDeviceRegistration(): String {
        return this.pushDeviceRegistration
    }
    fun setEmail(e: String): BreinUser {
        this.email = e
        this.userMap["email"] = this.email
        return this
    }
    fun setIpAddress(s: String): BreinUser {
        this.ipAddress = s
        this.additionalMap["ipAddress"] = this.ipAddress
        return this
    }
    fun setDetectedIp(s: String): BreinUser {
        this.detectedIp = s
        this.additionalMap["detectedIp"] = this.detectedIp
        return this
    }
    fun setUserAgent(s: String): BreinUser {
        this.userAgent = s
        this.additionalMap["userAgent"] = this.userAgent
        return this
    }
    fun setDeviceId(s: String): BreinUser {
        this.deviceId = s
        this.userMap["deviceId"] = this.deviceId
        return this
    }
    fun getDeviceId(): String {
        return this.deviceId
    }
    fun setImei(s: String): BreinUser {
        this.imei = s
        this.userMap["imei"] = this.imei
        return this
    }
    fun setSessionId(s: String): BreinUser {
        this.sessionId = s
        this.userMap["sessionId"] = this.sessionId
        return this
    }
    fun getSessionId(): String {
        return this.sessionId
    }
    fun setUrl(s: String): BreinUser {
        this.url = s
        this.additionalMap["url"] = this.url
        return this
    }
    fun setReferrer(s: String): BreinUser {
        this.referrer = s
        this.additionalMap["referrer"] = this.referrer
        return this
    }
    fun getReferrer(): String {
        return this.referrer
    }
    fun setLastName(s: String): BreinUser {
        this.lastName = s
        this.userMap["lastName"] = this.lastName
        return this
    }
    fun getLastName(): String {
        return this.lastName
    }
    fun setFirstName(s: String): BreinUser {
        this.firstName = s
        this.userMap["firstName"] = this.firstName
        return this
    }
    fun getFirstName(): String {
        return this.firstName
    }
    fun setTimezone(timezone: String): BreinUser {
        this.timezone = timezone
        this.additionalMap["timezone"] = this.timezone
        return this
    }
    fun setLocalDateTime(localDateTime: String): BreinUser {
        this.localDateTime = localDateTime
        this.additionalMap["localDateTime"] = this.localDateTime
        return this
    }
    /**
     * Stores the Android push token under
     * additional.identifiers.androidPushDeviceRegistration; a null token
     * is silently ignored.
     */
    fun setPushDeviceRegistration(deviceToken: String?): BreinUser {
        if (deviceToken != null) {
            this.pushDeviceRegistration = deviceToken
            val identifierMap = HashMap<String, Any?>()
            identifierMap["androidPushDeviceRegistration"] = this.pushDeviceRegistration
            this.additionalMap["identifiers"] = identifierMap
        }
        return this
    }
    // NOTE(review): clears only the local field, not the "dateOfBirth"
    // entry already written into userMap — confirm whether that is intended.
    fun resetDateOfBirth(): BreinUser {
        this.dateOfBirth = ""
        return this
    }
    fun setPhone(phone: String): BreinUser {
        this.phone = phone
        this.userMap["phone"] = this.phone
        return this
    }
    // Convenience constructor: user without an email.
    constructor() : this("") {
    }
    /**
     * Creates the userAgent String in Android standard format and adds the
     * app name.
     *
     * @return String userAgent, or "" when the "http.agent" system
     *         property is unavailable
     */
    private fun createUserAgent(): String {
        val appCtx: Application? = BreinifyManager.getApplication()
        var appName = ""
        if (appCtx != null) {
            appName = appCtx.applicationInfo.loadLabel(appCtx.packageManager).toString()
        }
        // add the app
        val httpAgent = System.getProperty("http.agent")
        return if (httpAgent != null) {
            String.format("%s/(%s)", httpAgent, appName)
        } else {
            ""
        }
    }
    /**
     * detects the GPS coordinates and adds this to the user.additional.location section
     *
     * NOTE(review): `location` below is declared null and never assigned —
     * the permission loop only bails out early, so the `location != null`
     * branch is dead and no location data is ever attached. It looks like a
     * `locationManager.getLastKnownLocation(provider)` call is missing from
     * the loop; confirm against the original intent before relying on this.
     */
    private fun detectGpsCoordinates() {
        // firstly get the context
        val applicationContext: Application = Breinify.config?.getApplication() ?: return
        val locationManager: LocationManager =
            applicationContext.getSystemService(Context.LOCATION_SERVICE) as LocationManager
        val providers: List<String> = locationManager.allProviders // getProviders(true);
        // Loop over the array backwards, and if you get an accurate location, then break out the loop
        val location: Location? = null
        for (index in providers.indices.reversed()) {
            val accessFineLocationPermission: Int = ActivityCompat.checkSelfPermission(
                applicationContext,
                Manifest.permission.ACCESS_FINE_LOCATION
            )
            val accessCoarseLocationPermission: Int = ActivityCompat.checkSelfPermission(
                applicationContext,
                Manifest.permission.ACCESS_COARSE_LOCATION
            )
            if (accessCoarseLocationPermission != PackageManager.PERMISSION_GRANTED ||
                accessFineLocationPermission != PackageManager.PERMISSION_GRANTED
            ) {
                return
            }
        }
        if (location != null) {
            val locationData = JsonObject()
            locationData.addProperty("accuracy", location.accuracy)
            locationData.addProperty("speed", location.speed)
            locationData.addProperty("latitude", location.latitude)
            locationData.addProperty("longitude", location.longitude)
            this.additionalMap["location"] = locationData
        }
    }
    /**
     * Provides network information within the user additional request.
     * Reads the connected Wi-Fi's SSID/BSSID/IP/signal data (requires the
     * ACCESS_WIFI_STATE permission) and stores it under additional.network.
     */
    private fun detectNetwork() {
        // firstly get the context
        val applicationContext: Application = Breinify.config?.getApplication() ?: return
        // only possible if permission has been granted
        if (ActivityCompat.checkSelfPermission(
                applicationContext,
                Manifest.permission.ACCESS_WIFI_STATE
            ) == PackageManager.PERMISSION_GRANTED
        ) {
            val wifiManager: WifiManager = applicationContext
                .applicationContext
                .getSystemService(WIFI_SERVICE) as WifiManager
            val wifiInfo: WifiInfo = wifiManager.connectionInfo
            val wifiInfoStatus = wifiInfo.supplicantState
            if (wifiInfoStatus == SupplicantState.COMPLETED) {
                var ssid = ""
                var bssid = ""
                var ip = 0
                // contains double quotes
                wifiInfo.ssid?.let { ssid = wifiInfo.ssid.replace("\"", "") }
                wifiInfo.bssid?.let { bssid = wifiInfo.bssid }
                wifiInfo.ipAddress.let { ip = wifiInfo.ipAddress }
                // Convert little-endian to big-endian if needed
                if (ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN) {
                    ip = Integer.reverseBytes(ip)
                }
                val detectedIpAddress = try {
                    val ipByteArray: ByteArray = BigInteger.valueOf(ip.toLong()).toByteArray()
                    InetAddress.getByAddress(ipByteArray).hostAddress
                } catch (ex: UnknownHostException) {
                    Log.e("WIFIIP", "Breinify - unable to get host address.")
                    null
                }
                if (detectedIpAddress != null) {
                    if (detectedIpAddress.isNotEmpty()) {
                        setDetectedIp(detectedIpAddress)
                    }
                }
                val linkSpeed: Int = wifiInfo.linkSpeed
                // macAddress is intentionally left blank (not readable on modern Android)
                val macAddress = ""
                val rssi: Int = wifiInfo.rssi
                val networkId: Int = wifiInfo.networkId
                val state: String = wifiInfo.supplicantState.toString()
                val networkData = JsonObject()
                networkData.addProperty("ssid", ssid)
                networkData.addProperty("bssid", bssid)
                networkData.addProperty("ipAddress", this.ipAddress)
                networkData.addProperty("linkSpeed", linkSpeed)
                networkData.addProperty("macAddress", macAddress)
                networkData.addProperty("rssi", rssi)
                networkData.addProperty("networkId", networkId)
                networkData.addProperty("state", state)
                this.additionalMap["network"] = networkData
            }
        }
    }
    /**
     * Sets the users value and overrides any current value. Cannot used to override the `additional` field.
     *
     * @param key String the name of the value to be set
     * @param value Object the value to be set
     * @param additional Boolean true writes into the additional section
     * @return BreinUser the object itself
     */
    operator fun set(key: String, value: Any?, additional: Boolean): BreinUser {
        if (additional) {
            this.additionalMap[key] = value
        } else {
            this.userMap[key] = value
        }
        return this
    }
    /**
     * Retrieves the value stored under the given key in the user section.
     *
     * @param key String, contains the key
     * @return the stored value, or null
     */
    operator fun get(key: String): Any? {
        return get(key, false)
    }
    /**
     * Retrieves for a given key within the additional or userMap the value.
     *
     * @param key String, contains the key
     * @param additional boolean true if additional part should be used
     * @return the stored value, or null
     */
    operator fun get(key: String, additional: Boolean): Any? {
        return if (additional) {
            this.additionalMap[key]
        } else {
            this.userMap[key]
        }
    }
    /**
     * Retrieves the additional value stored under the given key.
     *
     * @param key String contains the key
     * @return the stored value, or null
     */
    fun getAdditional(key: String): Any? {
        return get(key, true)
    }
    /**
     * Sets an additional value.
     *
     * @param key String, the name of the additional value to be set
     * @param value Object the value to be set
     * @return BreinUser the object itself
     */
    fun setAdditional(key: String, value: Any?): BreinUser {
        this.additionalMap[key] = value
        return this
    }
    /**
     * Prepares the request data: copies the populated user fields into
     * requestData["user"], runs GPS/network detection, ensures a userAgent
     * exists, and attaches the additional section if non-empty.
     *
     * @param config BreinConfig, contains the configuration (if necessary)
     * @param requestData Map request destination
     */
    @Suppress("UNUSED_PARAMETER")
    fun prepareRequestData(config: BreinConfig?, requestData: HashMap<String, Any?>) {
        val userRequestData = HashMap<String, Any?>()
        requestData["user"] = userRequestData
        // add the user-data, if there is any
        if (this.userMap.isNotEmpty()) {
            // loop a Map
            for ((key, value) in this.userMap) {
                if (BreinUtil.containsValue(value)) {
                    userRequestData[key] = value
                }
            }
        }
        // tries to detect gps and network data this will be added to property additionalMap
        detectGpsCoordinates()
        detectNetwork()
        // check or create userAgent
        generateUserAgent()
        // add the additional-data, if there is any
        if (this.additionalMap.isNotEmpty()) {
            userRequestData["additional"] = this.additionalMap
        }
    }
    /**
     * Checks if a userAgent has been set; if not it will be generated and set.
     */
    private fun generateUserAgent() {
        if (this.userAgent.isEmpty()) {
            this.userAgent = createUserAgent()
        }
        if (this.userAgent.isNotEmpty()) {
            this.additionalMap["userAgent"] = this.userAgent
        }
    }
}
| 28.498305 | 108 | 0.587665 | 3.1875 |
83fda2a2fbfa471c7d5dee908d310a7b5545549d
| 1,935 |
swift
|
Swift
|
_SwiftUI-By-Tutorials/SwiftUI-Handling-User-Input/Sources/App/Practice/DeckBuilder.swift
|
luannguyen252/my-swift-journey
|
788d66f256358dc5aefa2f3093ef74fd572e83b3
|
[
"MIT"
] | 14 |
2020-12-09T08:53:39.000Z
|
2021-12-07T09:15:44.000Z
|
_SwiftUI-By-Tutorials/SwiftUI-Handling-User-Input/Sources/App/Practice/DeckBuilder.swift
|
luannguyen252/my-swift-journey
|
788d66f256358dc5aefa2f3093ef74fd572e83b3
|
[
"MIT"
] | null | null | null |
_SwiftUI-By-Tutorials/SwiftUI-Handling-User-Input/Sources/App/Practice/DeckBuilder.swift
|
luannguyen252/my-swift-journey
|
788d66f256358dc5aefa2f3093ef74fd572e83b3
|
[
"MIT"
] | 8 |
2020-12-10T05:59:26.000Z
|
2022-01-03T07:49:21.000Z
|
import Foundation
import Assessing
import Learning
/// Builds word-card decks from a translated-word file and produces
/// multiple-choice assessments over the not-yet-completed cards.
final class DeckBuilder {
    // MARK: - Static variables
    // NOTE(review): `default` and `learning` are two independent instances
    // with separate `cards` state — confirm that is intended and not meant
    // to be a single shared singleton.
    static var `default` = DeckBuilder()
    static var `learning` = DeckBuilder()
    // MARK: - Variables
    // All possible answers: the translation of every card in the deck.
    var answers: [String] { return cards.map { $0.word.translation }}
    var cards = [WordCard]()
    // MARK: - Initializers
    private init() { }
    // MARK: - Methods
    /// Rebuilds `cards` from the "jp" translated-word file; returns the
    /// fresh deck (empty if the file cannot be loaded).
    func build() -> [WordCard] {
        self.cards = [WordCard]()
        if let words = LanguageLoader.loadTranslatedWords(from: "jp") {
            words.forEach { word in
                cards.append(WordCard(from: word))
            }
        }
        return self.cards
    }
    /// Picks up to `count` random uncompleted cards and wraps each in an
    /// assessment with 3 answer choices (the correct one included).
    func assess(upTo count: Int) -> [WordAssessment] {
        let cards = self.cards.filter { $0.completed == false }
        var randomCards: Set<WordCard>
        // If there are not enough cards, return them all
        if cards.count < count {
            randomCards = Set(cards)
        } else {
            randomCards = Set()
            while randomCards.count < count {
                guard let randomCard = cards.randomElement() else { continue }
                randomCards.insert(randomCard)
            }
        }
        let tests = randomCards.map({
            WordAssessment(
                card: $0,
                answers: getRandomAnswers(count: 3, including: $0.word.translation)
            )
        })
        return tests.shuffled()
    }
    // MARK: - Private Methods
    /// Returns `count` distinct answers (shuffled) that include
    /// `includedAnswer`.
    /// NOTE(review): if the deck contains duplicate translations, the
    /// number of DISTINCT answers can be smaller than `count` even when
    /// `answers.count >= count`, and the `while` loop below would never
    /// terminate — confirm translations are unique in the source data.
    private func getRandomAnswers(count: Int, including includedAnswer: String) -> [String] {
        let answers = self.answers
        // If there are not enough answers, return them all
        guard count < answers.count else {
            return answers.shuffled()
        }
        var randomAnswers = Set<String>()
        randomAnswers.insert(includedAnswer)
        while randomAnswers.count < count {
            guard let randomAnswer = answers.randomElement() else { continue }
            randomAnswers.insert(randomAnswer)
        }
        return Array(randomAnswers).shuffled()
    }
}
| 23.035714 | 91 | 0.618605 | 3.40625 |
508922b976c619092a5e36861a594794430247ba
| 2,893 |
go
|
Go
|
docs/coding/minimal-sandbox2.go
|
philip/dbdeployer
|
bc1005265d9e71b0b7ef3b8187976714761daf96
|
[
"Apache-2.0"
] | null | null | null |
docs/coding/minimal-sandbox2.go
|
philip/dbdeployer
|
bc1005265d9e71b0b7ef3b8187976714761daf96
|
[
"Apache-2.0"
] | null | null | null |
docs/coding/minimal-sandbox2.go
|
philip/dbdeployer
|
bc1005265d9e71b0b7ef3b8187976714761daf96
|
[
"Apache-2.0"
] | null | null | null |
// This is a sample source file that shows how
// to create two MySQL sandboxes using dbdeployer code
// from another Go program.
package main
import (
"github.com/datacharmer/dbdeployer/common"
"github.com/datacharmer/dbdeployer/defaults"
"github.com/datacharmer/dbdeployer/sandbox"
"os"
)
// main demonstrates driving dbdeployer programmatically: it deploys two
// single MySQL sandboxes (5.7.22 with the standard basedir layout, 5.6.25
// with a custom basedir name), runs their self-tests, then tears both down
// and removes them from the dbdeployer catalog.
func main() {
	// Searches for expanded sandboxes in $HOME/opt/mysql
	sandbox_binary := os.Getenv("HOME") + "/opt/mysql"
	// Creates sandboxes in $HOME/sandboxes
	sandbox_home := os.Getenv("HOME") + "/sandboxes"
	// For this to work, we need to have
	// a MySQL tarball expanded in $HOME/opt/mysql/5.7.22
	version1 := "5.7.22"
	version2 := "5.6.25"
	sandbox_name1 := "msb_5_7_22"
	sandbox_name2 := "msb_5_6_25"
	// The unique ports for these sandboxes
	port1 := 5722
	port2 := 5625
	// MySQL will look for binaries in $HOME/opt/mysql/5.7.22
	basedir1 := sandbox_binary + "/" + version1 // This is what dbdeployer expects
	// i.e. a name containing the full version
	// MySQL will look for binaries in $HOME/opt/mysql/my-5.6
	basedir2 := sandbox_binary + "/my-5.6" // This is a deviation from dbdeployer
	// paradigm, using a non-standard name
	// for the base directory
	// Username and password for this sandbox
	user := "msandbox"
	password := "msandbox"
	// Creates the base target directory if it doesn't exist
	if !common.DirExists(sandbox_home) {
		common.Mkdir(sandbox_home)
	}
	// Minimum data to be filled for a simple sandbox.
	// See sandbox/sandbox.go for the full description
	// of this data structure
	var sdef = sandbox.SandboxDef{
		Version:    version1,
		Basedir:    basedir1,
		SandboxDir: sandbox_home,
		DirName:    sandbox_name1,
		LoadGrants: true,
		// This is the list of ports to ignore
		// when checking if the designated port is
		// used or not.
		// Try changing the Port item to 3306.
		// You will see that the sandbox will install using 3307
		InstalledPorts: []int{1186, 3306, 33060},
		Port:           port1,
		DbUser:         user,
		DbPassword:     password,
		RplUser:        "r" + user,
		RplPassword:    "r" + password,
		RemoteAccess:   "127.%",
		BindAddress:    "127.0.0.1",
	}
	// Calls the sandbox creation
	sandbox.CreateSingleSandbox(sdef)
	// Reuse the same definition for the second sandbox,
	// changing only version, basedir, name, and port.
	sdef.Version = version2
	sdef.Basedir = basedir2
	sdef.DirName = sandbox_name2
	sdef.Port = port2
	// Calls the sandbox creation for the second sandbox
	sandbox.CreateSingleSandbox(sdef)
	// Invokes the sandbox self-testing script
	common.Run_cmd(sandbox_home + "/" + sandbox_name1 + "/test_sb")
	common.Run_cmd(sandbox_home + "/" + sandbox_name2 + "/test_sb")
	// Removes the sandbox from disk
	sandbox.RemoveSandbox(sandbox_home, sandbox_name1, false)
	sandbox.RemoveSandbox(sandbox_home, sandbox_name2, false)
	// Removes the sandbox from dbdeployer catalog
	defaults.DeleteFromCatalog(sandbox_home + "/" + sandbox_name1)
	defaults.DeleteFromCatalog(sandbox_home + "/" + sandbox_name2)
}
| 29.824742 | 79 | 0.712064 | 3.21875 |
67c5341359e1165e78d9d9a19f8f87ecda0e6a33
| 1,546 |
sql
|
SQL
|
domain/src/main/resources/db/migration/V1__initial.sql
|
LoganPhan/zenhome-challenge
|
65e6343ef1046b466e3206a492cd483fc45abab9
|
[
"MIT"
] | null | null | null |
domain/src/main/resources/db/migration/V1__initial.sql
|
LoganPhan/zenhome-challenge
|
65e6343ef1046b466e3206a492cd483fc45abab9
|
[
"MIT"
] | null | null | null |
domain/src/main/resources/db/migration/V1__initial.sql
|
LoganPhan/zenhome-challenge
|
65e6343ef1046b466e3206a492cd483fc45abab9
|
[
"MIT"
] | null | null | null |
-- Base audit columns shared (via PostgreSQL table inheritance) by every
-- table below.
-- NOTE(review): this table is created without a schema qualifier, but the
-- INHERITS clauses below reference it as zenhomes.persistent_audit — this
-- presumably relies on the migration running with search_path set to
-- "zenhomes"; confirm the Flyway configuration.
CREATE TABLE persistent_audit
(
  created_by VARCHAR(255),
  created_date TIMESTAMP without TIME ZONE NOT NULL DEFAULT now(),
  last_modified_by VARCHAR (255),
  last_modified_date TIMESTAMP without TIME ZONE NOT NULL DEFAULT now()
);
-- Application users. "user" is a reserved word, hence the quoting.
CREATE TABLE "user"
(
  id SERIAL NOT NULL,
  name VARCHAR(255),
  CONSTRAINT user_pkey PRIMARY KEY (id)
)INHERITS (zenhomes.persistent_audit);
-- Properties owned by a user; parent_id allows a tree of sub-properties
-- (0 = top-level, not a real FK).
CREATE TABLE user_property
(
  id SERIAL NOT NULL,
  name VARCHAR(255),
  type VARCHAR(255),
  parent_id BIGINT DEFAULT '0',
  user_id BIGINT,
  CONSTRAINT user_property_pKey PRIMARY KEY (id),
  CONSTRAINT user_property_user_fkey FOREIGN KEY (user_id)
      REFERENCES zenhomes.user (id) MATCH SIMPLE
      ON UPDATE NO ACTION ON DELETE NO ACTION
)INHERITS (zenhomes.persistent_audit);
-- A rental contract held by a tenant.
CREATE TABLE rental_contract
(
  id SERIAL NOT NULL,
  tenant_id BIGINT, -- 'id of user'
  CONSTRAINT rental_contract_pkey PRIMARY KEY (id)
)INHERITS (zenhomes.persistent_audit);
-- Join table linking rental contracts to the properties they cover.
CREATE TABLE rental_property
(
  id SERIAL NOT NULL,
  rental_contract_id BIGINT,
  user_property_id BIGINT,
  CONSTRAINT rental_property_pkey PRIMARY KEY (id),
  CONSTRAINT rental_property_rental_contract_fkey FOREIGN KEY (rental_contract_id)
      REFERENCES zenhomes.rental_contract (id) MATCH SIMPLE
      ON UPDATE NO ACTION ON DELETE NO ACTION,
  CONSTRAINT rental_property_user_property_fkey FOREIGN KEY (user_property_id)
      REFERENCES zenhomes.user_property (id) MATCH SIMPLE
      ON UPDATE NO ACTION ON DELETE NO ACTION
)INHERITS (zenhomes.persistent_audit);
| 30.92 | 82 | 0.763907 | 3.015625 |
0b89d5110511e9a326a0adf1605527ae76c9199c
| 1,220 |
py
|
Python
|
1SiteRanking/create_kernel_density_map_arcpy.py
|
HCH2CHO/EmotionMap
|
bc572b4182637dcdd65e9a13c92f2fa0d9a3d680
|
[
"MIT"
] | 3 |
2021-07-15T15:58:52.000Z
|
2021-07-16T13:22:47.000Z
|
1SiteRanking/create_kernel_density_map_arcpy.py
|
HCH2CHO/EmotionMap
|
bc572b4182637dcdd65e9a13c92f2fa0d9a3d680
|
[
"MIT"
] | null | null | null |
1SiteRanking/create_kernel_density_map_arcpy.py
|
HCH2CHO/EmotionMap
|
bc572b4182637dcdd65e9a13c92f2fa0d9a3d680
|
[
"MIT"
] | 4 |
2017-08-04T12:41:06.000Z
|
2019-01-31T14:55:10.000Z
|
# coding:utf-8
# version:python2.7.3
# author:kyh
# import x,y data from txt and create kernel density map
import arcpy
from arcpy.sa import *
from arcpy import env
def read_point_data(filepath, i=0):
    """Read tab-separated point data and write it out as a shapefile.

    Args:
        filepath: Path to a tab-separated text file; columns 7 and 8
            hold the X and Y coordinates of each point.
        i: Index used to name the output shapefile (World_Flickr{i}.shp).
            Defaults to 0 so legacy one-argument calls still work.

    Bug fixed: the parameter ``i`` was previously shadowed by a local
    line counter (and the caller passed no second argument at all, which
    raised a TypeError), so the saved shapefile name never matched the
    ``World_Flickr{i}.shp`` the caller later feeds to KernelDensity.
    """
    point_geometries = []
    with open(filepath, 'r') as pt_file:
        for line in pt_file:
            fields = line.split('\t')
            # A fresh Point per row; reusing one mutable Point for every
            # geometry is fragile (PointGeometry presumably copies it —
            # TODO confirm against the arcpy docs).
            pt = arcpy.Point()
            pt.X = float(fields[7])
            pt.Y = float(fields[8])
            point_geometries.append(arcpy.PointGeometry(pt))
    arcpy.CopyFeatures_management(point_geometries, "D://Users//KYH//Documents//ArcGIS//FlickrPhoto//World_Flickr{0}.shp".format(i))
if __name__ == '__main__':
arcpy.CheckOutExtension('Spatial')
env.workspace=("D:\Users\KYH\Documents\ArcGIS\FlickrPhoto")
for i in range(0,25):
if (i==5) or (i==22):
continue
read_point_data("D:\\Users\\KYH\\Desktop\\EmotionMap\\FlickrEmotionData\\3faces_emotion\\faceflickr{0}.txt".format(i))
# Kernel Density Analysis
out_kernel_density=KernelDensity("World_Flickr{0}.shp".format(i),"NONE")
out_kernel_density.save("D:\Users\KYH\Documents\ArcGIS\FlickrPhoto\kd_Face{0}".format(i))
| 33.888889 | 127 | 0.645902 | 3 |
6478a75ba49a8bf199e1e3ba3aa90c8eccd2c981
| 4,166 |
rs
|
Rust
|
aoc/src/y2021/d18/mod.rs
|
matiaslindgren/advent-of-code-rust
|
1cf4ce91f00a6f63a59751dfcd139ff28ee64930
|
[
"MIT"
] | null | null | null |
aoc/src/y2021/d18/mod.rs
|
matiaslindgren/advent-of-code-rust
|
1cf4ce91f00a6f63a59751dfcd139ff28ee64930
|
[
"MIT"
] | null | null | null |
aoc/src/y2021/d18/mod.rs
|
matiaslindgren/advent-of-code-rust
|
1cf4ce91f00a6f63a59751dfcd139ff28ee64930
|
[
"MIT"
] | null | null | null |
/// Solve both puzzle parts and return the answers joined by a space.
pub fn main(input: &str) -> String {
    format!("{} {}", find_a(input), find_b(input))
}
/// Part 1: add every snailfish number in order, then take the magnitude.
fn find_a(input: &str) -> i32 {
    let numbers = input.lines().map(str::to_owned);
    let total = numbers.reduce(add).unwrap();
    magnitude(&total)
}
/// Part 2: largest magnitude of any ordered sum of two distinct numbers.
fn find_b(input: &str) -> i32 {
    let numbers: Vec<String> = input.lines().map(str::to_owned).collect();
    let mut best = 0;
    for (i, first) in numbers.iter().enumerate() {
        for (j, second) in numbers.iter().enumerate() {
            if i == j {
                continue;
            }
            // Snailfish addition is not commutative, so try both orders
            // via the full i/j cross product.
            let sum = add(first.clone(), second.clone());
            best = best.max(magnitude(&sum));
        }
    }
    best
}
/// Concatenate two snailfish numbers into a pair, then reduce:
/// explode has priority over split; repeat until neither applies.
fn add(s1: String, s2: String) -> String {
    let mut result = format!("[{},{}]", s1, s2);
    loop {
        if let Some(at) = find_exploding(&result) {
            result = explode(&result, at);
        } else if let Some(at) = find_splitting(&result) {
            result = split(&result, at);
        } else {
            return result;
        }
    }
}
/// Index of the `[` opening the leftmost pair nested more than four deep
/// (its first element must be a digit), or `None` if nothing explodes.
fn find_exploding(s: &str) -> Option<usize> {
    let cs: Vec<char> = s.chars().collect();
    let mut nesting = 0;
    for i in 0..cs.len().saturating_sub(1) {
        match cs[i] {
            '[' => {
                nesting += 1;
                if nesting > 4 && cs[i + 1].is_numeric() {
                    return Some(i);
                }
            }
            ']' => nesting -= 1,
            _ => {}
        }
    }
    None
}
/// Index of the first two-digit number (only values >= 10 span two
/// digits after reduction), or `None` if nothing needs splitting.
fn find_splitting(s: &str) -> Option<usize> {
    let cs: Vec<char> = s.chars().collect();
    cs.windows(2)
        .position(|w| w[0].is_numeric() && w[1].is_numeric())
}
// Explode the pair whose '[' sits at `pair_begin`: the pair's left value is
// added to the nearest number to its left, the right value to the nearest
// number to its right, and the pair itself is replaced by "0".
// The rewrite is done purely with string slicing, so index arithmetic below
// is exact and order-sensitive.
fn explode(s: &str, pair_begin: usize) -> String {
    // One past the ']' closing the exploding pair.
    let pair_end = pair_begin + s[pair_begin..].find(']').unwrap() + 1;
    // Everything left of the pair, with the nearest number (if any)
    // replaced by number + pair's left element.
    let left = if let Some(l) = find_left(s, pair_begin) {
        let left = parse_number(s, l);
        let x = parse_number(s, pair_begin + 1);
        let res = format!("{}", left + x);
        let after_left = find_next_non_numeric(s, l);
        [&s[..l], &res, &s[after_left..pair_begin]].join("")
    } else {
        s[..pair_begin].to_string()
    };
    // Everything right of the pair, with the nearest number (if any)
    // replaced by number + pair's right element.
    let right = if let Some(r) = find_right(s, pair_end) {
        // Start index of the pair's right element (scan back from the ']').
        let y_pos = 1 + &s[..pair_end - 1].rfind(not_numeric).unwrap();
        let y = parse_number(s, y_pos);
        let right = parse_number(s, r);
        let res = format!("{}", y + right);
        let after_right = find_next_non_numeric(s, r);
        [&s[pair_end..r], &res, &s[after_right..]].join("")
    } else {
        s[pair_end..].to_string()
    };
    // The exploded pair itself collapses to "0".
    [&left, "0", &right].join("")
}
/// Absolute index of the first non-digit character at or after `i`,
/// or `s.len()` if the rest of the string is all digits.
fn find_next_non_numeric(s: &str, i: usize) -> usize {
    s[i..]
        .find(|c: char| !c.is_numeric())
        .map_or(s.len(), |offset| i + offset)
}
/// Pattern helper: true for any character that is not a digit.
fn not_numeric(c: char) -> bool {
    !char::is_numeric(c)
}
/// Start index of the nearest number strictly left of `pair_begin`,
/// or `None` when no digit precedes it.
fn find_left(s: &str, pair_begin: usize) -> Option<usize> {
    let last_digit = s[..pair_begin].rfind(char::is_numeric)?;
    let before_number = s[..last_digit + 1].rfind(|c: char| !c.is_numeric())?;
    Some(before_number + 1)
}
/// Index of the first digit at or after `pair_end`, or `None`.
fn find_right(s: &str, pair_end: usize) -> Option<usize> {
    s[pair_end..]
        .find(char::is_numeric)
        .map(|offset| pair_end + offset)
}
/// Split the number starting at `num_begin` into the pair
/// `[floor(v/2), ceil(v/2)]`, leaving the rest of the string intact.
fn split(s: &str, num_begin: usize) -> String {
    let num_end = find_next_non_numeric(s, num_begin);
    let value = parse_number(s, num_begin);
    format!(
        "{}[{},{}]{}",
        &s[..num_begin],
        value / 2,
        (value + 1) / 2,
        &s[num_end..]
    )
}
/// Parse the run of digits starting at index `i`.
/// Panics if no digit starts there (mirrors the original `unwrap`).
fn parse_number(s: &str, i: usize) -> i32 {
    let digits: String = s[i..].chars().take_while(|c| c.is_numeric()).collect();
    digits.parse().unwrap()
}
/// Magnitude of a snailfish number: a bare value is itself; a pair is
/// 3 * magnitude(left) + 2 * magnitude(right), applied recursively.
fn magnitude(s: &str) -> i32 {
    if s.chars().all(char::is_numeric) {
        return parse_number(s, 0);
    }
    let (left, right) = split_root_pair(s).unwrap();
    3 * magnitude(left) + 2 * magnitude(right)
}
/// Split `"[left,right]"` at the top-level comma, returning the two
/// halves without the outer brackets. `None` if no such comma exists.
fn split_root_pair(s: &str) -> Option<(&str, &str)> {
    let inner = &s[1..s.len() - 1];
    let mut level = 0i32;
    for (i, c) in inner.char_indices() {
        match c {
            '[' => level += 1,
            ']' => level -= 1,
            ',' if level == 0 => return Some((&inner[..i], &inner[i + 1..])),
            _ => {}
        }
    }
    None
}
| 27.959732 | 72 | 0.507921 | 3.421875 |
0b649e46fb5914bfe7b320bbcd19fe8e80f42ef7
| 1,624 |
py
|
Python
|
code_trunk/emb.py
|
chris4540/DD2430-ds-proj
|
b876efabe949392b27a7ebd4afb2be623174e287
|
[
"MIT"
] | null | null | null |
code_trunk/emb.py
|
chris4540/DD2430-ds-proj
|
b876efabe949392b27a7ebd4afb2be623174e287
|
[
"MIT"
] | null | null | null |
code_trunk/emb.py
|
chris4540/DD2430-ds-proj
|
b876efabe949392b27a7ebd4afb2be623174e287
|
[
"MIT"
] | null | null | null |
import torch
from network.siamese import SiameseNet
from network.resnet import ResidualEmbNetwork
import os
import numpy as np
from utils.datasets import DeepFashionDataset
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize
from torch.utils.data import Subset
from torch.utils.data import DataLoader
from utils import extract_embeddings
import pickle
from cuml.manifold import TSNE
# Build the siamese model around a ResNet-based embedding network and load
# pre-trained weights from disk.
emb_net = ResidualEmbNetwork()
model = SiameseNet(emb_net)
# Input pipeline: resize to 224x224 and normalize with dataset statistics
# (presumably computed on DeepFashion — TODO confirm).
trans = Compose(
    [
        Resize((224, 224)),
        ToTensor(),
        Normalize([0.7511, 0.7189, 0.7069], [0.2554, 0.2679, 0.2715]),
    ])
model.load_state_dict(torch.load('siamese_resnet18.pth'))
deep_fashion_root_dir = "./deepfashion_data"
train_ds = DeepFashionDataset(
    deep_fashion_root_dir, 'train', transform=trans)
# Only the embedding branch is needed for feature extraction; move it to GPU.
emb_net = model.emb_net
emb_net.cuda()
# subset
# Sample 25k distinct training indices so the t-SNE stays tractable.
n_samples = 25000
sel_idx = np.random.choice(
    list(range(len(train_ds))),
    n_samples, replace=False)
assert len(set(sel_idx)) == n_samples
ds = Subset(train_ds, sel_idx)
loader = DataLoader(
    ds, batch_size=100, pin_memory=True, num_workers=os.cpu_count())
print("extracting...")
# Run the subset through the embedding network, then project the embeddings
# to 2-D with GPU t-SNE (cuml) for visualization.
embeddings, labels = extract_embeddings(emb_net, loader)
tsne = TSNE(n_iter=400, metric="euclidean")
projected_emb = tsne.fit_transform(embeddings)
# Persist projections and labels for later plotting.
with open('projected_emb.pkl', 'wb') as handle:
    pickle.dump(projected_emb, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('labels.pkl', 'wb') as handle:
    pickle.dump(labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 28.491228 | 72 | 0.76601 | 3.125 |
bca80f5a08ca840e9bc1635b04c4ed92fee3844d
| 1,311 |
js
|
JavaScript
|
src/movie-app/results.controller.js
|
rbelow/angularjs-movie-app
|
e2069292bb2edea9ca2209ca4b388fb9bcc29b9a
|
[
"MIT"
] | null | null | null |
src/movie-app/results.controller.js
|
rbelow/angularjs-movie-app
|
e2069292bb2edea9ca2209ca4b388fb9bcc29b9a
|
[
"MIT"
] | null | null | null |
src/movie-app/results.controller.js
|
rbelow/angularjs-movie-app
|
e2069292bb2edea9ca2209ca4b388fb9bcc29b9a
|
[
"MIT"
] | null | null | null |
angular // eslint-disable-line
  .module('movieApp')
  .controller('ResultsController', function ($scope, $location, $exceptionHandler, $log, omdbApi) {
    // Controller for the results view: searches OMDb for the `q` query
    // parameter and exposes the matches on the scope.
    var query = $location.search().q
    $log.debug('Controller loaded with query: ', query)

    function showResults (data) {
      $log.debug('Data returned for query: ', query, data)
      $scope.results = data.Search
    }

    // Route failures through Angular's exception handler instead of
    // throwing from inside the promise chain.
    function reportFailure (e) {
      $exceptionHandler(e)
    }

    omdbApi.search(query).then(showResults).catch(reportFailure)

    // Fetch full details for one search result and flag its row as open.
    $scope.expand = function expand (index, id) {
      omdbApi.find(id).then(function (details) {
        $scope.results[index].data = details
        $scope.results[index].open = true
      })
    }
  })
dd2379cb4aca0ca7256779aead1d8b1340a3d091
| 4,002 |
go
|
Go
|
cmd/douban.go
|
linuxing3/gospider
|
28f2d2747cd7f318d0589391ef28a64120a8d780
|
[
"Apache-2.0"
] | null | null | null |
cmd/douban.go
|
linuxing3/gospider
|
28f2d2747cd7f318d0589391ef28a64120a8d780
|
[
"Apache-2.0"
] | null | null | null |
cmd/douban.go
|
linuxing3/gospider
|
28f2d2747cd7f318d0589391ef28a64120a8d780
|
[
"Apache-2.0"
] | null | null | null |
package cmd
import (
"context"
"fmt"
"log"
"net/http"
"regexp"
"strconv"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/linuxing3/gospider/config"
"github.com/linuxing3/gospider/prisma/db"
)
// DoubanMovie holds one scraped entry from the Douban top-250 listing.
type DoubanMovie struct {
	Title    string
	Subtitle string
	Other    string
	Desc     string
	Year     string
	Area     string
	Tag      string
	Star     string // rating score, kept as the raw text
	Comment  string // number of ratings, digits only
	Quote    string
}

// Page is one pagination link of the listing.
type Page struct {
	Page int
	Url  string
}

var (
	// DoubanBaseUrl is the first page of the top-250 list; relative
	// pagination hrefs are resolved against it.
	DoubanBaseUrl         = "https://movie.douban.com/top250"
	// DoubanTopPageSelector matches the paginator links on a listing page.
	DoubanTopPageSelector = "#content > div > div.article > div.paginator > a"
)
// GetPages fetches the given listing page and extracts its pagination links.
//
// Bug fixed: the url parameter was previously ignored and DoubanBaseUrl was
// always fetched. Relative pagination hrefs are still resolved against
// DoubanBaseUrl, which is what they are relative to on the site.
func GetPages(url string) (pages []Page) {
	htmlContent, err := config.GetHTTPHtmlContent(url, DoubanTopPageSelector, config.DocBodySelector)
	if err != nil {
		log.Fatal(err)
	}
	pageList, err := config.GetDataList(htmlContent, DoubanTopPageSelector)
	if err != nil {
		log.Fatal("No list")
	}
	pageList.Each(func(i int, selection *goquery.Selection) {
		// Each paginator anchor's text is the page number, its href the
		// relative query string (e.g. "?start=25&filter=").
		pageNumber, _ := strconv.Atoi(selection.Text())
		href, _ := selection.Attr("href")
		pages = append(pages, Page{
			Page: pageNumber,
			Url:  strings.Join([]string{DoubanBaseUrl, href}, ""),
		})
	})
	return pages
}
// ParseMovies extracts the movie entries from one top-250 listing page.
// NOTE(review): the DescInfo/movieDesc indexing below assumes the
// description always has at least two lines and three '/'-separated
// fields; a layout change would panic with index out of range.
func ParseMovies(doc *goquery.Document) (movies []DoubanMovie) {
	movieSelector := "#content > div > div.article > ol > li"
	doc.Find(movieSelector).Each(func(i int, s *goquery.Selection) {
		fmt.Printf("获取第 %d 个电影\n", i)
		// Title spans: 0 = main title, 1 = subtitle, 2 = alternative names.
		title := s.Find(".hd > a > span").Eq(0).Text()
		subtitle := s.Find(".hd > a > span").Eq(1).Text()
		subtitle = strings.TrimLeft(subtitle, " / ")
		other := s.Find(".hd > a > span").Eq(2).Text()
		other = strings.TrimLeft(other, " / ")
		// First description line: credits; second: year / area / tags.
		desc := strings.TrimSpace(s.Find(".bd > p").Eq(0).Text())
		DescInfo := strings.Split(desc, "\n")
		desc = DescInfo[0]
		movieDesc := strings.Split(DescInfo[1], "/")
		year := strings.TrimSpace(movieDesc[0])
		area := strings.TrimSpace(movieDesc[1])
		tag := strings.TrimSpace(movieDesc[2])
		star := s.Find(".bd > .star > .rating_num").Text()
		// The fourth star span holds the rating count; keep digits only.
		comment := strings.TrimSpace(s.Find(".bd > .star > span").Eq(3).Text())
		compile := regexp.MustCompile("[0-9]")
		comment = strings.Join(compile.FindAllString(comment, -1), "")
		quote := s.Find(".quote > .inq").Text()
		movie := DoubanMovie{
			Title:    title,
			Subtitle: subtitle,
			Other:    other,
			Desc:     desc,
			Year:     year,
			Area:     area,
			Tag:      tag,
			Star:     star,
			Comment:  comment,
			Quote:    quote,
		}
		// log.Printf("i: %d, movie: %v", i, movie)
		movies = append(movies, movie)
	})
	return movies
}
// SaveMovies persists the scraped movies through the Prisma client.
// Insert failures are logged and skipped rather than aborting the batch.
// NOTE(review): Other, Comment and Quote are not persisted — confirm the
// Prisma schema intentionally omits them.
func SaveMovies(movies []DoubanMovie) {
	client := db.NewClient()
	if err := client.Prisma.Connect(); err != nil {
		log.Fatalln(err)
	}
	// Always disconnect, even on early panic.
	defer func() {
		if err := client.Prisma.Disconnect(); err != nil {
			panic(err)
		}
	}()
	ctx := context.Background()
	for _, movie := range movies {
		_, err := client.Movies.CreateOne(
			db.Movies.Title.Set(movie.Title),
			db.Movies.Subtitle.Set(movie.Subtitle),
			db.Movies.Desc.Set(movie.Desc),
			db.Movies.Area.Set(movie.Area),
			db.Movies.Year.Set(movie.Year),
			db.Movies.Tag.Set(movie.Tag),
			db.Movies.Star.Set(movie.Star),
		).Exec(ctx)
		if err != nil {
			fmt.Println(err)
		}
	}
}
// ExampleScrape demonstrates a basic goquery scrape against a live site
// (taken from the goquery README); it is not part of the Douban pipeline.
func ExampleScrape() {
	// Request the HTML page.
	res, err := http.Get("http://metalsucks.net")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		log.Fatalf("status code error: %d %s", res.StatusCode, res.Status)
	}
	// Load the HTML document
	doc, err := goquery.NewDocumentFromReader(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Example scrapy")
	// Find the review items
	doc.Find(".sidebar-reviews article .content-block").Each(func(i int, s *goquery.Selection) {
		// For each item found, get the band and title
		band := s.Find("a").Text()
		title := s.Find("i").Text()
		fmt.Printf("Review %d: %s - %s\n", i, band, title)
	})
}
| 22.868571 | 108 | 0.64068 | 3.125 |
f6c5545d82731a2f0bdc424dd850c5a773cb2b00
| 1,451 |
lua
|
Lua
|
KkthnxUI/Modules/Misc/MassGuildKick.lua
|
mrrosh/KkthnxUI_WotLK
|
59d36f99bebce53b90fbfd3806911b7a61133dab
|
[
"MIT"
] | 1 |
2022-02-09T22:49:43.000Z
|
2022-02-09T22:49:43.000Z
|
KkthnxUI/Modules/Misc/MassGuildKick.lua
|
mopd/KkthnxUI_WotLK
|
bf1199cce61ba3d465872f5b2a0839b787b1eb95
|
[
"MIT"
] | null | null | null |
KkthnxUI/Modules/Misc/MassGuildKick.lua
|
mopd/KkthnxUI_WotLK
|
bf1199cce61ba3d465872f5b2a0839b787b1eb95
|
[
"MIT"
] | 2 |
2018-09-19T22:32:21.000Z
|
2022-01-01T20:51:30.000Z
|
-- KkthnxUI framework handles (K = core, C = config, L = locale).
local K, C, L, _ = select(2, ...):unpack()

local split = string.split
local tonumber = tonumber

-- /cleanguild <minLevel>, <minDays>[, <minRankIndex>] — bulk-remove inactive
-- low-level guild members at or below a given rank.
SLASH_MASSGUILDKICK1 = "/cleanguild"
SLASH_MASSGUILDKICK2 = "/MassGuildKick"
SlashCmdList["MASSGUILDKICK"] = function(msg)
	local minLevel, minDays, minRankIndex = split(",", msg)
	minRankIndex = tonumber(minRankIndex)
	minLevel = tonumber(minLevel)
	minDays = tonumber(minDays)
	if not minLevel or not minDays then
		K.Print("Usage: /cleanguild <minLevel>, <minDays>, [<minRankIndex>]")
		return
	end
	-- GetGuildRosterLastOnline caps the day component at a month.
	if minDays > 31 then
		K.Print("Maximum days value must be below 32.")
		return
	end
	-- Default: only the lowest guild rank is eligible for removal.
	if not minRankIndex then minRankIndex = GuildControlGetNumRanks() - 1 end
	for i = 1, GetNumGuildMembers() do
		-- NOTE(review): `class` is captured twice here; the 11th return
		-- (English class token, e.g. "DEATHKNIGHT") wins, which is what the
		-- comparison below relies on.
		local name, _, rankIndex, level, class, _, note, officerNote, connected, _, class = GetGuildRosterInfo(i)
		-- Death knights start at level 55, so shift the threshold for them.
		local minLevelx = minLevel
		if class == "DEATHKNIGHT" then
			minLevelx = minLevelx + 55
		end
		-- Only offline members are considered for removal.
		if not connected then
			local years, months, days, hours = GetGuildRosterLastOnline(i)
			if days ~= nil and ((years > 0 or months > 0 or days >= minDays) and rankIndex >= minRankIndex) and note ~= nil and officerNote ~= nil and (level <= minLevelx) then
				GuildUninvite(name)
			end
		end
	end
	SendChatMessage("Guild Cleanup Results: Removed all guild members below rank "..GuildControlGetRankName(minRankIndex)..", that have a minimal level of "..minLevel..", and have not been online for at least: "..minDays.." days.", "GUILD")
end
| 33.744186 | 237 | 0.718815 | 3.109375 |
d9e7d37a2ea61d28e0f701afeefbf49fecdbced9
| 4,033 |
rs
|
Rust
|
linkerd/buffer/src/lib.rs
|
javaducky/linkerd2-proxy
|
b1b6b5376e200dc4d681ed78f2f2be26cc20c2ea
|
[
"Apache-2.0"
] | null | null | null |
linkerd/buffer/src/lib.rs
|
javaducky/linkerd2-proxy
|
b1b6b5376e200dc4d681ed78f2f2be26cc20c2ea
|
[
"Apache-2.0"
] | null | null | null |
linkerd/buffer/src/lib.rs
|
javaducky/linkerd2-proxy
|
b1b6b5376e200dc4d681ed78f2f2be26cc20c2ea
|
[
"Apache-2.0"
] | null | null | null |
use futures::Async;
use linkerd2_error::Error;
use std::time::Duration;
use tokio::sync::{mpsc, watch};
mod dispatch;
pub mod error;
mod layer;
mod service;
pub use self::{dispatch::Dispatch, layer::SpawnBufferLayer, service::Buffer};
/// A queued request together with the oneshot channel used to hand the
/// inner service's response future back to the caller.
struct InFlight<Req, F> {
    request: Req,
    tx: tokio::sync::oneshot::Sender<Result<F, linkerd2_error::Error>>,
}
/// Wire up a `Buffer` front-end and its `Dispatch` back-end around `inner`.
///
/// Requests flow over a bounded mpsc channel of the given `capacity`;
/// readiness is broadcast back from the dispatch task through a watch
/// cell, initialized to `NotReady`. The caller must drive the returned
/// `Dispatch` for the `Buffer` to make progress.
pub(crate) fn new<Req, S>(
    inner: S,
    capacity: usize,
    idle_timeout: Option<Duration>,
) -> (Buffer<Req, S::Future>, Dispatch<S, Req, S::Future>)
where
    Req: Send + 'static,
    S: tower::Service<Req> + Send + 'static,
    S::Error: Into<Error>,
    S::Response: Send + 'static,
    S::Future: Send + 'static,
{
    let (ready_tx, ready_rx) = watch::channel(Ok(Async::NotReady));
    let (req_tx, req_rx) = mpsc::channel(capacity);
    let buffer = Buffer::new(req_tx, ready_rx);
    let dispatcher = Dispatch::new(inner, req_rx, ready_tx, idle_timeout);
    (buffer, dispatcher)
}
#[cfg(test)]
mod test {
    use futures::{future, Async, Future, Poll};
    use std::sync::Arc;
    use tower::util::ServiceExt;
    use tower::Service;
    use tower_test::mock;

    // The buffer must mirror the inner service's readiness: not ready while
    // the mock allows no calls, ready once it allows one, and erroring after
    // the inner service fails.
    #[test]
    fn propagates_readiness() {
        run(|| {
            let (service, mut handle) = mock::pair::<(), ()>();
            let (mut service, mut dispatch) = super::new(service, 1, None);
            handle.allow(0);
            assert!(dispatch.poll().expect("never fails").is_not_ready());
            assert!(service.poll_ready().expect("must not fail").is_not_ready());
            handle.allow(1);
            assert!(dispatch.poll().expect("never fails").is_not_ready());
            assert!(service.poll_ready().expect("must not fail").is_ready());
            // Consume the allowed call.
            drop(service.call(()));
            handle.send_error(Bad);
            assert!(dispatch.poll().expect("never fails").is_ready());
            // The inner error must surface through poll_ready's source chain.
            assert!(service
                .poll_ready()
                .expect_err("must fail")
                .source()
                .unwrap()
                .is::<Bad>());
            Ok::<(), ()>(())
        })
    }

    // The dispatcher must re-poll a service that notifies the task from
    // poll_ready, and must drop the inner service once it errors (tracked
    // here via a Weak handle).
    #[test]
    fn repolls_ready_on_notification() {
        struct ReadyNotify {
            notified: bool,
            _handle: Arc<()>,
        }

        impl tower::Service<()> for ReadyNotify {
            type Response = ();
            type Error = Bad;
            type Future = future::FutureResult<(), Bad>;

            // First poll: self-notify and report ready; second poll: fail.
            fn poll_ready(&mut self) -> Poll<(), Bad> {
                println!("Polling");
                if self.notified {
                    return Err(Bad);
                }
                println!("Notifying");
                futures::task::current().notify();
                self.notified = true;
                Ok(Async::Ready(()))
            }

            fn call(&mut self, _: ()) -> Self::Future {
                unimplemented!("not called");
            }
        }

        run(|| {
            let _handle = Arc::new(());
            let handle = Arc::downgrade(&_handle);
            let (service, dispatch) = super::new(
                ReadyNotify {
                    _handle,
                    notified: false,
                },
                1,
                None,
            );
            tokio::spawn(dispatch.map_err(|_| ()));
            service.ready().then(move |ret| {
                assert!(ret.is_err());
                assert!(
                    handle.upgrade().is_none(),
                    "inner service must be dropped on error"
                );
                Ok::<(), ()>(())
            })
        })
    }

    // Minimal error type used by both tests.
    #[derive(Debug)]
    struct Bad;
    impl std::error::Error for Bad {}
    impl std::fmt::Display for Bad {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "bad")
        }
    }

    // Drive a lazily-constructed future on a current-thread runtime,
    // panicking if it resolves to an error.
    fn run<F, R>(f: F)
    where
        F: FnOnce() -> R + 'static,
        R: future::IntoFuture<Item = ()> + 'static,
    {
        tokio::runtime::current_thread::run(future::lazy(f).map_err(|_| panic!("Failed")));
    }
}
| 28.602837 | 91 | 0.491198 | 3.109375 |
f027e6207f84d89378cfacc9c580753614b7155a
| 4,245 |
py
|
Python
|
visualization.py
|
Tommy-Johannessen/MovementRecognition
|
be84d7d014a272987dd20d03194336a9244eb900
|
[
"MIT"
] | null | null | null |
visualization.py
|
Tommy-Johannessen/MovementRecognition
|
be84d7d014a272987dd20d03194336a9244eb900
|
[
"MIT"
] | null | null | null |
visualization.py
|
Tommy-Johannessen/MovementRecognition
|
be84d7d014a272987dd20d03194336a9244eb900
|
[
"MIT"
] | 1 |
2019-02-13T12:42:39.000Z
|
2019-02-13T12:42:39.000Z
|
import itertools
import os
from collections import defaultdict
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
from matplotlib.ticker import FuncFormatter
import pickle
import os
import numpy as np
def calculate_cm(pred_vals, true_vals, classes):
    """Compute per-class recall, precision, F1 and the confusion matrix.

    Args:
        pred_vals: Sequence of predicted class indices.
        true_vals: Sequence of ground-truth class indices (same length).
        classes: Sequence of class labels; only its length is used.

    Returns:
        Tuple ``(recall, precision, f1, cm)`` where ``cm`` is an ndarray
        with rows indexed by true class and columns by predicted class.

    Raises:
        ValueError: If the two input sequences differ in length.
    """
    if len(pred_vals) != len(true_vals):
        raise ValueError("Dimensions do not match")
    n_classes = len(classes)
    cm = np.zeros((n_classes, n_classes), dtype=int)
    for guess, ground_truth in zip(pred_vals, true_vals):
        cm[ground_truth][guess] += 1
    recall = []
    precision = []
    f1 = []
    # Recall: per true class (rows); 0 when the class never occurs.
    for index, row in enumerate(cm):
        recall.append(0 if row.sum() == 0 else row[index] / row.sum())
    # Precision: per predicted class (columns); 0 when never predicted.
    for index, col in enumerate(cm.transpose()):
        precision.append(0 if col.sum() == 0 else col[index] / col.sum())
    # F1 is the harmonic mean of precision and recall. (The previous
    # version used the arithmetic mean, which is not F1.)
    for r, p in zip(recall, precision):
        f1.append(0 if (r + p) == 0 else 2 * r * p / (r + p))
    return recall, precision, f1, cm
def plot_confusion_matrix(cm, classes, path, name, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    The figure is written to ``path + name``; the directory is created
    if missing. Note that ``path`` and ``name`` are concatenated as-is,
    so ``path`` should end with a separator.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.figure(figsize=(12, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Annotate each cell, switching text color past the midpoint for contrast.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.savefig(path + name)
    # Release figure resources to avoid leaking memory across calls.
    plt.clf()
    plt.close()
def plot_data_distribution(filename, move_type='basic', is_sliding_window=False):
    """Plot a bar chart of samples per category for one processed pickle.

    ``filename`` is only used in the chart title (split on underscores);
    NOTE(review): the pickle path and figure name are hard-coded to
    "(unknown)" — looks like an anonymization artifact; confirm the
    intended per-file naming.
    """
    image_folder = os.path.join('figures', 'data_distribution')
    figure_name = f'{move_type}_(unknown).png'
    data_folder = 'data/processed_data'
    movement_type = f'{move_type}_movement'
    pickle_file = os.path.join(data_folder, movement_type, f'(unknown).p')
    with open(pickle_file, 'rb') as bin_file:
        data = pickle.load(bin_file)
    x_labels = []
    y_labels = []
    if is_sliding_window:
        # Sliding-window pickles store (windows, categories); regroup the
        # windows under their category to match the default layout.
        sliding_windows, categories = data
        data = defaultdict(list)
        for category, sliding_window in zip(categories, sliding_windows):
            data[category].append([sliding_window.tolist()])
    # Count the data points per category for the bar heights.
    for category, data_lists in data.items():
        data_points_count = 0
        for data_list in data_lists:
            data_points_count += len(data_list)
        x_labels.append(category)
        y_labels.append(data_points_count)
    x_labels = np.arange(len(x_labels))
    fig, ax = plt.subplots()
    # Thousands separators on the y axis.
    formatter = FuncFormatter(lambda x, p: format(int(x), ','))
    ax.yaxis.set_major_formatter(formatter)
    plt.title(f'Data distribution for {move_type} {filename.split("_")[0]} {filename.split("_")[1]}')
    plt.ylabel('Number of data elements')
    plt.xlabel('Movement Categories')
    plt.bar(x_labels, y_labels)
    plt.xticks(x_labels)
    plt.tight_layout()
    plt.savefig(os.path.join(image_folder, figure_name))
    # Release figure resources before the next call.
    plt.clf()
    plt.close()
if __name__ == '__main__':
    # Generate distribution plots only for the custom movement folder;
    # the other folders were plotted in an earlier run.
    search_folder = 'data/processed_data'
    for folder in os.listdir(search_folder):
        if folder == 'custom_movement':
            for file in os.listdir(os.path.join(search_folder, folder)):
                # Sliding-window pickles are flagged by their filename.
                plot_data_distribution(file.split('.')[0],
                                       folder.split('_')[0],
                                       True if 'sliding_window' in file else False)
        else:
            print(f'Image created for {folder} at an earlier stage')
| 30.321429 | 113 | 0.640047 | 3.296875 |
0b84d29786d1202df0158d5a5b88910f8c8196a5
| 1,314 |
py
|
Python
|
weather_alarm/main.py
|
Cs4r/weather_alarm
|
b78b6f11f91e3b81aa43a1bfaa55074a0626a036
|
[
"MIT"
] | null | null | null |
weather_alarm/main.py
|
Cs4r/weather_alarm
|
b78b6f11f91e3b81aa43a1bfaa55074a0626a036
|
[
"MIT"
] | null | null | null |
weather_alarm/main.py
|
Cs4r/weather_alarm
|
b78b6f11f91e3b81aa43a1bfaa55074a0626a036
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import os
from apscheduler.schedulers.blocking import BlockingScheduler
from weather_alarm.constants import *
from weather_alarm.forecaster import Forecaster
from weather_alarm.sender import NotificationSender
# Telegram sender and weather backend, configured from constants.
sender = NotificationSender(BOT_TOKEN, TELEGRAM_USER_ID)
forecaster = Forecaster(OWM_API_KEY)

def send_tomorrow_forecast(hour, time):
    # Push tomorrow's forecast for the configured city at the given slot.
    sender.send_message(forecaster.tomorrow_forecast_at(CITY, hour, time))

def send_current_observed_weather():
    # Push the currently observed weather for the configured city.
    sender.send_message(forecaster.current_observed_weather(CITY))

# Anchor both daily jobs to today's configured alarm times; the interval
# trigger then repeats them every day.
now = datetime.datetime.now()
nightly_alarm_time = datetime.datetime(now.year, now.month, now.day, *NIGHTLY_ALARM_TIME)
daily_alarm_time = datetime.datetime(now.year, now.month, now.day, *DAILY_ALARM_TIME)
scheduler = BlockingScheduler()
# misfire_grace_time lets a job still fire up to 30s late.
scheduler.add_job(func=send_tomorrow_forecast, args=FORECAST_TIME, trigger='interval', next_run_time=nightly_alarm_time,
                  misfire_grace_time=30, days=1)
scheduler.add_job(func=send_current_observed_weather, trigger='interval', next_run_time=daily_alarm_time,
                  misfire_grace_time=30, days=1)
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
    # BlockingScheduler.start() blocks until interrupted.
    scheduler.start()
except (KeyboardInterrupt, SystemExit):
    pass
| 32.04878 | 120 | 0.780822 | 3.09375 |
93d5ad722fa26e30bfb76bb4dcd89a9cb6598bfc
| 2,057 |
asm
|
Assembly
|
core/core.asm
|
paulscottrobson/nextForth
|
3725af76cedd403e1e468fc8392e819fc0feb744
|
[
"MIT"
] | 2 |
2019-02-17T02:39:13.000Z
|
2020-06-10T09:57:22.000Z
|
core/core.asm
|
paulscottrobson/nextForth
|
3725af76cedd403e1e468fc8392e819fc0feb744
|
[
"MIT"
] | 1 |
2019-03-03T20:40:02.000Z
|
2020-07-02T09:39:48.000Z
|
core/core.asm
|
paulscottrobson/nextForth
|
3725af76cedd403e1e468fc8392e819fc0feb744
|
[
"MIT"
] | null | null | null |
; **********************************************************************************************************
;
; Name: core.asm
; Purpose: Main file for Z80 Forth Core
; Author: Paul Robson ([email protected])
; Date: 1st February 2018
;
; **********************************************************************************************************
BaseAddress: equ $6000
CodeBaseAddress:equ $8000
org BaseAddress ; where we start
opt zxnext ; on our ZXNext
include "dictionary.asm" ; dictionary and other vectors
org CodeBaseAddress ; words have to go $8000-$FFFF
include "io.asm" ; I/O Routines
include "words.asm" ; Words
; **********************************************************************************************************
; Initialisation/Support code
; **********************************************************************************************************
; Entry point: set up the machine, then jump into the Forth __main word.
initialise:
		di 							; no interrupts
		ld 		sp,$FFFF 			; initialise our stack just so this will work.
		call 	ResetStack 			; then properly initialise them :)
		call 	IO_ClearScreen 		; clear the screen/border
		ld 		de,$0000 			; top of stack value.
		ld 		hl,(VectorMainAddress) 	; and run the __main word.
		jp 		(hl)
; **********************************************************************************************************
;										Stack reset code
; **********************************************************************************************************
; Saves the return address in HL before clobbering SP, so the routine can
; still "return" with jp (hl) after the return stack pointer is reset.
ResetStack:
		pop 	hl 					; so we can go back.
		ld 		sp,$0000 			; return stack
		jp 		(hl)
__NextFreeProgramAddress: ; marks the end of code
; **********************************************************************************************************
;
; write out .sna and .bin
;
; **********************************************************************************************************
savesna "core.sna",start,$5BFE
savebin "core.bin",start,__NextFreeProgramAddress-start
| 38.092593 | 108 | 0.357803 | 3.015625 |
3896343061a9c982fe10949642c046885b9a8b04
| 2,054 |
h
|
C
|
Math/Rho.h
|
FranciscoThiesen/contest-notebook
|
e856796e9aeb2cde1623b64efae9716997d6a117
|
[
"WTFPL"
] | 278 |
2017-03-09T02:41:51.000Z
|
2022-02-08T23:38:28.000Z
|
Math/Rho.h
|
vsourav/Algorithm-DataStructures
|
9399f43dd0d13e5771c42c2540cd36079f5bae43
|
[
"WTFPL"
] | 1 |
2017-12-30T19:58:00.000Z
|
2017-12-30T19:58:00.000Z
|
Math/Rho.h
|
vsourav/Algorithm-DataStructures
|
9399f43dd0d13e5771c42c2540cd36079f5bae43
|
[
"WTFPL"
] | 87 |
2017-05-28T17:38:39.000Z
|
2022-02-10T15:52:31.000Z
|
#include <cstdint>
#include <vector>
#include <algorithm>
#include <numeric>
#include "MillerRabin.h"
// Modular multiplication via a 128-bit intermediate so a * b cannot
// overflow before the reduction mod m.
long long mult(long long a, long long b, long long m) {
    __uint128_t wide = (__uint128_t)a;
    wide *= b;
    return (long long)(wide % m);
}
// One Pollard-rho polynomial step: (x^2 + c) mod m, assuming c < m so a
// single conditional subtraction completes the reduction.
long long f(long long x, long long c, long long m) {
    long long next = mult(x, x, m) + c;
    return next >= m ? next - m : next;
}
// Brent's variant of Pollard's rho: returns a non-trivial factor of the
// odd composite n. x0 seeds the iteration and c is the polynomial offset;
// callers retry with a different c when the walk degenerates (g == n).
long long brent(long long n, long long x0=2, long long c=1) {
    long long x = x0;
    long long g = 1;
    long long q = 1;      // running product of |y - x| mod n (batched gcd)
    long long xs, y;

    int m = 128;          // gcd is taken once per m steps to save time
    int l = 1;            // current cycle-length bound (doubles each round)
    while (g == 1) {
        y = x;
        // Advance x to the start of the next window of length l.
        for (int i = 1; i < l; i++)
            x = f(x, c, n);
        int k = 0;
        while (k < l && g == 1) {
            xs = x;       // backtrack point in case the batch overshoots
            for (int i = 0; i < m && i < l - k; i++) {
                x = f(x, c, n);
                q = mult(q, std::abs(y - x), n);
            }
            g = std::gcd(q, n);
            k += m;
        }
        l *= 2;
    }
    // The batched product absorbed the factor; replay one step at a time
    // from xs to recover it.
    if (g == n) {
        do {
            xs = f(xs, c, n);
            g = std::gcd(std::abs(xs - y), n);
        } while (g == 1);
    }
    return g;
}
// Factorizes 64-bit integers: trial division by primes < 100, then
// Miller-Rabin + Brent's rho for the remaining cofactor.
class Factorizer {
  public:
    // Returns the prime factors of x in ascending order (with multiplicity).
    // NOTE(review): x == 1 after trial division is pushed as a factor of 1 —
    // confirm callers never pass 1 (or prime-power inputs reduced to 1).
    std::vector<long long> factorize(long long x) {
        factors.clear();
        static std::vector<int> primes_100 =
            {2, 3, 5, 7, 11, 13, 17, 19, 23,
             29, 31, 37, 41, 43, 47, 53, 59,
             61, 67, 71, 73, 79, 83, 89, 97};
        // Strip small primes first; guarantees the cofactor has no factor
        // below 100, which the 100*100 primality shortcut below relies on.
        for (int p : primes_100) {
            while (x % p == 0) {
                factors.push_back(p);
                x /= p;
            }
        }
        recursive_factorize(x);
        sort(factors.begin(), factors.end());
        return factors;
    }

    // Splits x recursively with Brent's rho until only primes remain.
    void recursive_factorize(long long x) {
        // x < 100^2 must be prime here (no factor < 100 survived above).
        if (x < 100 * 100 || MillerRabin(x)) {
            factors.push_back(x);
        } else {
            int c = 1;
            long long g = x;
            // Retry rho with increasing polynomial offsets until a proper
            // factor is found.
            while (g == x) {
                g = brent(x, 2, ++c);
            }
            recursive_factorize(g);
            recursive_factorize(x / g);
        }
    }

  private:
    std::vector<long long> factors;  // accumulator for the current call
};
| 23.078652 | 61 | 0.419182 | 3.09375 |
e7ffb07502a866daacad535d6c162c3df47ed0fa
| 1,075 |
py
|
Python
|
001-050/029-divide-two-integers.py
|
bbram10/leetcode-master
|
565f5f0cb3c9720e59a78ddf2e5e6e829c70bac6
|
[
"MIT"
] | 134 |
2017-01-16T11:17:44.000Z
|
2022-03-16T17:13:26.000Z
|
001-050/029-divide-two-integers.py
|
bbram10/leetcode-master
|
565f5f0cb3c9720e59a78ddf2e5e6e829c70bac6
|
[
"MIT"
] | 1 |
2019-11-18T02:10:51.000Z
|
2019-11-18T02:10:51.000Z
|
001-050/029-divide-two-integers.py
|
bbram10/leetcode-master
|
565f5f0cb3c9720e59a78ddf2e5e6e829c70bac6
|
[
"MIT"
] | 54 |
2017-07-17T01:24:00.000Z
|
2022-02-06T05:28:44.000Z
|
"""
STATEMENT
Divide two integers without using multiplication, division and mod operator.
CLARIFICATIONS
- Do I have to handle 32-bit integer overflow? Yes, return the MAX_INT in that case.
- Can the divisor be zero? Yes, return the MAX_INT.
EXAMPLES
34/3 -> 11
COMMENTS
- This solution is by tusizi in Leetcode (picked up from https://discuss.leetcode.com/topic/8714/clear-python-code)
"""
def divide(dividend, divisor):
    """Divide two integers without *, / or %, truncating toward zero.

    :type dividend: int
    :type divisor: int
    :rtype: int, clamped to the signed 32-bit range; INT_MAX is returned
        for division by zero and for the INT_MIN / -1 overflow case.

    Bug fixed: the overflow guard used ``dividend < INT_MIN`` *after*
    ``abs()`` had been applied, so it could never trigger; the checks now
    run on the original signed values.
    """
    INT_MIN, INT_MAX = -2147483648, 2147483647
    # Division by zero and the single overflowing quotient (2**31).
    if divisor == 0 or (dividend == INT_MIN and divisor == -1):
        return INT_MAX
    negative = (dividend < 0) != (divisor < 0)
    dividend, divisor = abs(dividend), abs(divisor)
    quotient = 0
    # Subtract doubling multiples of the divisor (shift-based long division).
    while dividend >= divisor:
        temp, i = divisor, 1
        while dividend >= temp:
            dividend -= temp
            quotient += i
            i <<= 1
            temp <<= 1
    if negative:
        quotient = -quotient
    return min(max(INT_MIN, quotient), INT_MAX)
| 27.564103 | 115 | 0.613953 | 3.25 |
dde2ed7a2be523b85ef5d53d56618aa731d519bb
| 1,775 |
kt
|
Kotlin
|
app/src/androidTest/java/com/waryozh/simplestepcounter/ResetStepsDialogTest.kt
|
Waryozh/simple-step-counter
|
9c1d619d51c2fcdc5f041ea3a84a6ed36771a4d1
|
[
"MIT"
] | null | null | null |
app/src/androidTest/java/com/waryozh/simplestepcounter/ResetStepsDialogTest.kt
|
Waryozh/simple-step-counter
|
9c1d619d51c2fcdc5f041ea3a84a6ed36771a4d1
|
[
"MIT"
] | null | null | null |
app/src/androidTest/java/com/waryozh/simplestepcounter/ResetStepsDialogTest.kt
|
Waryozh/simple-step-counter
|
9c1d619d51c2fcdc5f041ea3a84a6ed36771a4d1
|
[
"MIT"
] | null | null | null |
package com.waryozh.simplestepcounter
import androidx.test.espresso.Espresso
import androidx.test.espresso.Espresso.onView
import androidx.test.espresso.action.ViewActions.click
import androidx.test.espresso.assertion.ViewAssertions.matches
import androidx.test.espresso.matcher.ViewMatchers.withId
import androidx.test.espresso.matcher.ViewMatchers.withText
import androidx.test.ext.junit.runners.AndroidJUnit4
import kotlinx.coroutines.runBlocking
import org.junit.Assert.assertEquals
import org.junit.Before
import org.junit.Test
import org.junit.runner.RunWith
@RunWith(AndroidJUnit4::class)
class ResetStepsDialogTest : MainActivityBaseTest() {
    /**
     * Seeds the repository (step length 70, 2000 raw steps with a 1000-step
     * correction → 1000 shown, 700 distance), verifies the displayed values,
     * then opens the "reset steps" confirmation dialog from the options menu.
     */
    @Before
    fun initResetStepsDialogTest() {
        setStepsCorrection(1000)
        runBlocking {
            repository.setStepLength(70)
            repository.setStepsTaken(2000)
        }
        onView(withId(R.id.tv_steps_taken)).check(matches(withText("1000")))
        onView(withId(R.id.tv_distance_walked)).check(matches(withText("700")))
        Espresso.openActionBarOverflowOrOptionsMenu(applicationContext)
        onView(withText(R.string.menu_reset_steps)).perform(click())
    }

    // Cancelling must leave counters and step length untouched.
    @Test
    fun cancelDialog() {
        onView(withText(R.string.cancel)).perform(click())
        onView(withId(R.id.tv_steps_taken)).check(matches(withText("1000")))
        onView(withId(R.id.tv_distance_walked)).check(matches(withText("700")))
        assertEquals(70, repository.getStepLength())
    }

    // Confirming must zero the counters but preserve the step length.
    @Test
    fun confirmDialog() {
        onView(withText(R.string.reset)).perform(click())
        onView(withId(R.id.tv_steps_taken)).check(matches(withText("0")))
        onView(withId(R.id.tv_distance_walked)).check(matches(withText("0")))
        assertEquals(70, repository.getStepLength())
    }
}
| 36.22449 | 79 | 0.733521 | 3.015625 |
6b4f454553e9d2b01da0f48ed0d09c91582dae65
| 1,018 |
c
|
C
|
src/memory.c
|
sagalpreet/Dispatcher-Simulator
|
28d901f018cd1935cc6b0a64a75c5cf6e616f972
|
[
"MIT"
] | null | null | null |
src/memory.c
|
sagalpreet/Dispatcher-Simulator
|
28d901f018cd1935cc6b0a64a75c5cf6e616f972
|
[
"MIT"
] | null | null | null |
src/memory.c
|
sagalpreet/Dispatcher-Simulator
|
28d901f018cd1935cc6b0a64a75c5cf6e616f972
|
[
"MIT"
] | null | null | null |
#include <stdlib.h>
#include <string.h>
/* Deep-copy a NUL-terminated string into freshly allocated memory.
 *
 * source: string to copy (must be NUL-terminated, not NULL).
 * Returns a newly allocated copy the caller must free, or NULL if
 * allocation fails (the original wrote through a NULL pointer on
 * calloc failure).
 */
char* duplicate_sptr(char *source)
{
    size_t size = strlen(source);
    char *destination = (char *) calloc(size + 1, sizeof(char));
    if (destination == NULL) return NULL;
    memcpy(destination, source, size);  /* calloc already NUL-terminated it */
    return destination;
}
/* Deep-copy a NULL-terminated array of strings.
 *
 * source: array of string pointers terminated by a NULL entry.
 * Returns a newly allocated NULL-terminated array whose strings are
 * themselves deep copies, or NULL if allocating the array fails.
 * Free the result with free_dptr.
 */
char** duplicate_dptr(char **source)
{
    size_t size = 0;
    while (source[size] != NULL) size++;  /* count entries up to terminator */
    char **destination = (char **) calloc(size + 1, sizeof(char *));
    if (destination == NULL) return NULL;
    for (size_t i = 0; i < size; i++) destination[i] = duplicate_sptr(source[i]);
    return destination;
}
/* Free a heap-allocated string and null the caller's pointer.
 *
 * Taking char ** (rather than char *) lets us clear the caller's
 * pointer, preventing accidental use-after-free / double-free through
 * a dangling pointer.
 */
void free_sptr(char **source)
{
    free(*source);
    *source = NULL;
}
/* Free a NULL-terminated array of heap-allocated strings and the array
 * itself, then null the caller's pointer (see free_sptr rationale).
 */
void free_dptr(char ***source)
{
    for (size_t i = 0; (*source)[i] != NULL; i++)
        free_sptr(&((*source)[i]));
    free(*source);
    *source = NULL;
}
| 21.659574 | 78 | 0.544204 | 3.359375 |
363edb346cfc198dc66b3e456df4aac50b582125
| 6,592 |
rs
|
Rust
|
nlprule/src/rule/grammar.rs
|
drahnr/nlprule
|
ae208aa911cf46c96731ed4012aba9b03fa6242e
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
nlprule/src/rule/grammar.rs
|
drahnr/nlprule
|
ae208aa911cf46c96731ed4012aba9b03fa6242e
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
nlprule/src/rule/grammar.rs
|
drahnr/nlprule
|
ae208aa911cf46c96731ed4012aba9b03fa6242e
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
use super::engine::composition::{GraphId, MatchGraph, PosMatcher};
use crate::types::*;
use crate::{
tokenizer::Tokenizer,
utils::{self, regex::SerializeRegex},
};
use onig::Captures;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
impl std::cmp::PartialEq for Suggestion {
    /// Two suggestions are considered equal when they cover the same span
    /// and share at least one replacement string.
    fn eq(&self, other: &Suggestion) -> bool {
        if self.start != other.start || self.end != other.end {
            return false;
        }
        let ours: HashSet<&String> = self.replacements.iter().collect();
        other.replacements.iter().any(|r| ours.contains(r))
    }
}
/// Case conversion applied to matched text (see `Conversion::convert`).
#[derive(Debug, Serialize, Deserialize)]
pub enum Conversion {
    /// Leave the text unchanged.
    Nop,
    /// Lowercase the entire string.
    AllLower,
    /// Lowercase only the first character.
    StartLower,
    /// Uppercase the entire string.
    AllUpper,
    /// Uppercase only the first character.
    StartUpper,
}
impl Conversion {
    /// Applies this case conversion to `input` and returns the result.
    fn convert(&self, input: &str) -> String {
        match self {
            Conversion::Nop => input.to_string(),
            Conversion::AllLower => input.to_lowercase(),
            Conversion::AllUpper => input.to_uppercase(),
            Conversion::StartLower => {
                utils::apply_to_first(input, |c| c.to_lowercase().collect())
            }
            Conversion::StartUpper => {
                utils::apply_to_first(input, |c| c.to_uppercase().collect())
            }
        }
    }
}
/// An example associated with a [Rule][crate::rule::Rule].
#[derive(Debug, Serialize, Deserialize)]
pub struct Example {
    // The example sentence.
    pub(crate) text: String,
    // Expected suggestion; `None` means the rule must not trigger on `text`.
    pub(crate) suggestion: Option<Suggestion>,
}
impl Example {
    /// Gets the text of this example.
    pub fn text(&self) -> &str {
        &self.text
    }
    /// Gets the suggestion for this example.
    /// * If this is `None`, the associated rule should not trigger for this example.
    /// * If it is `Some`, the associated rule should return a suggestion with equivalent range and suggestions.
    pub fn suggestion(&self) -> Option<&Suggestion> {
        self.suggestion.as_ref()
    }
}
/// Replaces a word with a group member whose part-of-speech tag matches
/// `matcher` (see `PosReplacer::apply`).
#[derive(Serialize, Deserialize, Debug)]
pub struct PosReplacer {
    pub(crate) matcher: PosMatcher,
}
impl PosReplacer {
    /// Finds a replacement for `text`: for each tag of `text`, collects the
    /// members of its lemma group together with the position of the first tag
    /// of that member which matches `self.matcher`, and returns the candidate
    /// with the lowest such position (ties broken by the reversed tag order,
    /// exactly as the previous stable sort + `remove(0)` did).
    ///
    /// Returns `None` when no group member has a matching tag.
    fn apply(&self, text: &str, tokenizer: &Tokenizer) -> Option<String> {
        let best = tokenizer
            .tagger()
            .get_tags(
                text,
                tokenizer.options().always_add_lower_tags,
                tokenizer.options().use_compound_split_heuristic,
            )
            .iter()
            .map(|x| {
                let group_words = tokenizer
                    .tagger()
                    .get_group_members(&x.lemma.as_ref().to_string());
                let mut data = Vec::new();
                for word in group_words {
                    if let Some(i) = tokenizer
                        .tagger()
                        .get_tags(
                            word,
                            tokenizer.options().always_add_lower_tags,
                            tokenizer.options().use_compound_split_heuristic,
                        )
                        .iter()
                        .position(|x| self.matcher.is_match(&x.pos))
                    {
                        data.push((word.to_string(), i));
                    }
                }
                data
            })
            .rev()
            .flatten()
            // Single O(n) pass keeping the FIRST element with the minimal
            // position, replacing the previous O(n log n) stable sort plus
            // O(n) `remove(0)`. (Note: `Iterator::min_by_key` would return
            // the LAST minimum on ties, so a fold is used instead.)
            .fold(None::<(String, usize)>, |best, cand| match best {
                Some(b) if b.1 <= cand.1 => Some(b),
                _ => Some(cand),
            });
        best.map(|(word, _)| word)
    }
}
/// A reference to matched text plus the transformations to apply to it
/// (POS-based replacement, regex replacement, then case conversion).
#[derive(Serialize, Deserialize, Debug)]
pub struct Match {
    // Graph node whose text this match resolves to.
    pub(crate) id: GraphId,
    // Case conversion applied last.
    pub(crate) conversion: Conversion,
    // Optional POS-driven word replacement, applied first.
    pub(crate) pos_replacer: Option<PosReplacer>,
    // Optional (pattern, replacement) regex substitution, applied second.
    pub(crate) regex_replacer: Option<(SerializeRegex, String)>,
}
impl Match {
    /// Resolves this match against `graph`: takes the referenced node's text,
    /// applies the optional POS replacement, then the optional regex
    /// replacement, and finally the case conversion.
    fn apply(&self, graph: &MatchGraph, tokenizer: &Tokenizer) -> Option<String> {
        let raw = graph.by_id(self.id).text(graph.tokens()[0].sentence);
        let mut text = match &self.pos_replacer {
            Some(replacer) => replacer.apply(raw, tokenizer)?,
            None => raw.to_string(),
        };
        if let Some((regex, replacement)) = &self.regex_replacer {
            text = regex.replace_all(&text, |caps: &Captures| {
                utils::dollar_replace(replacement.to_string(), caps)
            });
        }
        // TODO: maybe return a vector here and propagate accordingly
        Some(self.conversion.convert(&text))
    }

    /// Whether this match applies any case conversion (i.e. is not `Nop`).
    fn has_conversion(&self) -> bool {
        !matches!(self.conversion, Conversion::Nop)
    }
}
/// One piece of a suggestion template: literal text, or a match to be
/// resolved against the match graph.
#[derive(Debug, Serialize, Deserialize)]
pub enum SynthesizerPart {
    Text(String),
    Match(Match),
}
/// Builds a suggestion string from a sequence of parts (see `Synthesizer::apply`).
#[derive(Debug, Serialize, Deserialize)]
pub struct Synthesizer {
    // If set, title-cases the suggestion when the replaced text starts uppercase.
    pub(crate) use_titlecase_adjust: bool,
    // Template parts concatenated (and whitespace-normalized) to form the output.
    pub(crate) parts: Vec<SynthesizerPart>,
}
impl Synthesizer {
    /// Renders the suggestion for `graph`: concatenates all parts (resolving
    /// `Match` parts via [`Match::apply`]), normalizes whitespace, and
    /// optionally uppercases the first character (see comment below).
    /// Returns `None` if any match part fails to resolve.
    pub fn apply(
        &self,
        graph: &MatchGraph,
        tokenizer: &Tokenizer,
        start: GraphId,
        _end: GraphId,
    ) -> Option<String> {
        let mut output = Vec::new();
        // whether the template itself begins with an explicit case conversion;
        // if so, the automatic title-casing below is skipped
        let starts_with_conversion = match &self.parts[..] {
            [SynthesizerPart::Match(m), ..] => m.has_conversion(),
            _ => false,
        };
        for part in &self.parts {
            match part {
                SynthesizerPart::Text(t) => output.push(t.clone()),
                SynthesizerPart::Match(m) => {
                    output.push(m.apply(graph, tokenizer)?);
                }
            }
        }
        let suggestion = utils::normalize_whitespace(&output.join(""));
        // if the suggestion does not start with a case conversion match, make it title case if:
        // * at sentence start
        // * the replaced text is title case
        let make_uppercase = !starts_with_conversion
            && graph.groups()[graph.get_index(start)..]
                .iter()
                .find_map(|x| x.tokens(graph.tokens()).next())
                .map(|first_token| {
                    (self.use_titlecase_adjust
                        && first_token
                            .word
                            .text
                            .as_ref()
                            .chars()
                            .next()
                            .expect("token must have at least one char")
                            .is_uppercase())
                        || first_token.byte_span.0 == 0
                })
                .unwrap_or(false);
        if make_uppercase {
            Some(utils::apply_to_first(&suggestion, |x| {
                x.to_uppercase().collect()
            }))
        } else {
            Some(suggestion)
        }
    }
}
| 30.948357 | 112 | 0.526092 | 3.015625 |
f04d1937650ea1b63befb73eea4d0e4baa59f849
| 1,541 |
js
|
JavaScript
|
routes/api/apt.js
|
v-mauna/SLC
|
d551cb6181964d3879bbaad579463e39c97e1656
|
[
"MIT"
] | null | null | null |
routes/api/apt.js
|
v-mauna/SLC
|
d551cb6181964d3879bbaad579463e39c97e1656
|
[
"MIT"
] | null | null | null |
routes/api/apt.js
|
v-mauna/SLC
|
d551cb6181964d3879bbaad579463e39c97e1656
|
[
"MIT"
] | null | null | null |
const router = require ("express").Router();
const mongoose =require("mongoose");
const path =require("path");
const { brotliDecompress } = require("zlib");
const db = require("../../models");
// how do i structure this?
// POST /aptform — create a new appointment event owned by the current user.
router.post("/aptform", async ({ body, user }, res, next) => {
    try {
        console.log(`user === ${user}`)
        // stamp the event with the authenticated user's id
        body.userId = mongoose.Types.ObjectId(user._id)
        const aptForm = await db.Event.create(body)
        res.json(aptForm)
    } catch (error) {
        console.log(`Error in apt.js router: ${error.message}`)
        // BUG FIX: previously responded with the undefined name `err`
        // (the catch binds `error`), which threw a ReferenceError
        // instead of sending the intended 400.
        res.status(400).json(error);
    }
})
// PUT /aptform — update an existing appointment event.
router.put("/aptform", async (req, res, next) => {
    try {
        console.log(`body in aptForm request === ${req.body}`)
        // BUG FIX: the handler destructured `{ body }` but then referenced
        // the undefined names `req` and `Workout`; use the real request
        // object and the Event model, and actually apply the update payload.
        // NOTE(review): the route has no `:id` segment, so `req.params.id`
        // is undefined here — confirm whether the path should be
        // "/aptform/:id" or the id should come from the body.
        const updatedForm = await db.Event.findByIdAndUpdate(
            req.params.id,
            req.body,
            { new: true }
        )
        res.json(updatedForm)
    } catch (error) {
        console.log(`error in aptForm router put request === ${error.message}`)
        next(error)
    }
})

module.exports = router
const Schema = mongoose.Schema;

// Mongoose schema for an appointment event.
const EventSchema = new Schema({
  location: {
    type: String,
    trim: true,
    required: true
  },
  contact: {
    type: String,
    trim: true,
    required: true
  },
  service: {
    type: String,
    trim: true
  },
  time:{
    type: String,
    required: true
  },
  day:{
    type: Date,
    // BUG FIX: `new Date().setDate(new Date().getDate())` was evaluated
    // once at module load and produced a numeric timestamp, not a Date.
    // Passing the `Date.now` function makes Mongoose evaluate the default
    // per document, the conventional "default to now" idiom.
    default: Date.now
  },
  note:{
    type: String,
    trim: true
  },
  userId:{
    type : Schema.Types.ObjectId,
    ref: "User"
  }
});

const Event = mongoose.model("event", EventSchema);
// NOTE(review): this reassignment clobbers the earlier
// `module.exports = router`, so requiring this routes file yields the
// model instead of the router — confirm intent; the model likely belongs
// in its own file under models/.
module.exports = Event;
| 18.566265 | 75 | 0.605451 | 3.015625 |
f29ec76cb02dfd09985046c0a9c5054f028c5e8a
| 1,764 |
sql
|
SQL
|
db/migrate/V4__fix_get_folders.sql
|
andrewrmiller/reimas
|
3c1b76971fc04b6663c47270fdcf9d9d4a56932a
|
[
"MIT"
] | null | null | null |
db/migrate/V4__fix_get_folders.sql
|
andrewrmiller/reimas
|
3c1b76971fc04b6663c47270fdcf9d9d4a56932a
|
[
"MIT"
] | 4 |
2020-04-05T23:19:05.000Z
|
2022-01-22T09:55:53.000Z
|
db/migrate/V4__fix_get_folders.sql
|
andrewrmiller/reimas
|
3c1b76971fc04b6663c47270fdcf9d9d4a56932a
|
[
"MIT"
] | null | null | null |
DELIMITER $$
-- Fix potentially uninitialized @parent_id_compressed in pst_get_folders.
-- Returns the child folders of p_parent_id (or the library's root folders
-- when p_parent_id is NULL) that p_user_id is allowed to see.
-- Result set 1: (err_code, err_context); result set 2: the folder rows.
DROP PROCEDURE IF EXISTS `pst_get_folders` $$
CREATE PROCEDURE `pst_get_folders`(
                    IN p_user_id VARCHAR(254),
                    IN p_library_id VARCHAR(36),
                    IN p_parent_id VARCHAR(36))
this_proc:BEGIN
  -- Explicitly initialize in both branches: session variables persist
  -- between calls, so skipping the ELSE would leak a stale value.
  IF p_parent_id IS NOT NULL THEN
    SET @parent_id_compressed = pst_compress_guid(p_parent_id);
  ELSE
    SET @parent_id_compressed = NULL;
  END IF;
	SET @library_id_compressed = pst_compress_guid(p_library_id);
  -- Make sure the user has permission to see pst_folders under the parent.
  SET @role = pst_get_user_role(
        p_user_id,
        @library_id_compressed,
        IF (p_parent_id IS NULL, NULL, @parent_id_compressed));
  -- No role at all means the folder is invisible to this user.
  IF (@role IS NULL) THEN
    SELECT
      1 AS err_code,        /* Not Found */
      'Folder does not exist.' AS err_context;
    LEAVE this_proc;
  END IF;
	SELECT
		0 AS err_code,
		NULL AS err_context;
	SELECT
		pst_expand_guid(f.library_id) AS library_id,
		pst_expand_guid(f.folder_id) AS folder_id,
		f.name,
		pst_expand_guid(f.parent_id) AS parent_id,
    f.type,
		f.`path`,
    f.file_count,
    f.file_size,
    f.file_size_sm,
    f.file_size_md,
    f.file_size_lg,
    f.file_size_cnv_video,
    f.data,
    f.`where`,
    f.order_by,
    ur.role AS user_role
	FROM
    pst_folders f
      LEFT JOIN (SELECT * FROM pst_folder_user_roles WHERE user_id = p_user_id) ur
      ON f.library_id = ur.library_id AND f.folder_id = ur.folder_id
	WHERE
		f.library_id = pst_compress_guid(p_library_id) AND
    -- NULL never equals NULL, so root folders need the explicit IS NULL arm.
    (f.parent_id = @parent_id_compressed OR (p_parent_id IS NULL AND f.parent_id IS NULL))
  ORDER BY
    name;
END $$
DELIMITER ;
653326363f171f61656c55ab1ac0a5e07a6afbd8
| 18,085 |
pyw
|
Python
|
quizme.pyw
|
dmahugh/quizme
|
edd5340db4524855c7e0dea0340339dafb10a78a
|
[
"MIT"
] | null | null | null |
quizme.pyw
|
dmahugh/quizme
|
edd5340db4524855c7e0dea0340339dafb10a78a
|
[
"MIT"
] | null | null | null |
quizme.pyw
|
dmahugh/quizme
|
edd5340db4524855c7e0dea0340339dafb10a78a
|
[
"MIT"
] | null | null | null |
"""GUI for taking tests based on quizme-xxx.json files.
"""
import os
import sys
import json
from random import randint
import tkinter as tk
from tkinter import font
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from widgetrefs import widgets
def center_window(window):
    """Center a Tk window on the screen.

    window -- the toplevel window to reposition
    """
    window.update_idletasks()
    screen_w = window.winfo_screenwidth()
    screen_h = window.winfo_screenheight()
    # geometry() returns e.g. '900x400+10+10'; the part before '+' is WxH
    win_w, win_h = (int(part) for part in
                    window.geometry().split('+')[0].split('x'))
    x_offset = screen_w / 2 - win_w / 2
    y_offset = screen_h / 2 - win_h / 2
    window.geometry("%dx%d+%d+%d" % (win_w, win_h, x_offset, y_offset))
def display_help():
    """Display the help screen.

    Shows a modal info dialog listing the keyboard shortcuts
    (answer hotkeys 1-5, C/Enter to check, P/N/R navigation).
    """
    helpmsg = (
        'QuizMe is an interactive tool for testing your ability to answer\n'
        'a set of multi-choice questions.\n\n'
        'Use the underlined hotkeys 1-5 to select your answer, then\n'
        'press C to check your answer. You can also press Enter to\n'
        'check your answer.\n\n'
        'To select a different question, press P, N, or R for Previous,\n'
        'Next or Random. You can also use the left/right arrow keys to\n'
        'move through the questions in order.\n\n'
        'Have fun!')
    messagebox.showinfo('Help', helpmsg)
def display_score():
    """Show a dialog with the running score (count and percent correct)."""
    total_questions = len(widgets.questions)
    answered = widgets.totAnswered
    correct = widgets.totCorrect
    # guard against division by zero before any question is answered
    percent = 100 * (correct / answered) if answered else 0
    title = widgets.topic + ' - {0} total questions'.format(total_questions)
    msg = "You have {0} out of {1} correct for {2:.0f}%."
    messagebox.showinfo(title, msg.format(correct, answered, percent))
def display_question():
    """Refresh display for current question.
    Note: question-related state info is stored in widgets. properties.

    Updates the header, question text, answer radiobuttons, the
    correct/incorrect feedback box, the explanation box, and the
    optional question/answer image, based on whether the current
    question has already been answered.
    """
    q_num = widgets.currentq # current question#
    u_answered = (q_num in widgets.answered) # whether answered
    u_answer = widgets.answered.get(q_num, '') # user's answer (if any)
    question = widgets.questions[q_num] # current question dict()
    q_corrnum = question['correct'] # the correct answer ('1' through '5')
    q_corrtext = question['answers'].get(q_corrnum, '') # text of answer
    u_correct = (u_answer == q_corrnum) # whether user's answer is correct
    widgets.lblHeader.configure(text='Topic:\n'+widgets.topic)
    # Text widgets are kept disabled so the user can't type in them;
    # temporarily enable to rewrite the content.
    widgets.txtQuestion.config(state="normal")
    widgets.txtQuestion.delete(1.0, tk.END)
    widgets.txtQuestion.insert(tk.END, question['question'])
    widgets.txtQuestion.focus_set() # set focus to the question
    widgets.txtQuestion.config(state="disabled")
    # radiobuttons lock once the question has been answered
    currentstate = 'disabled' if u_answered else 'normal'
    display_radiobuttons(rbstate=currentstate, rbselected=u_answer)
    # "correct answer" textbox
    widgets.txtCorrect.config(state="normal")
    widgets.txtCorrect.delete(1.0, tk.END)
    widgets.txtCorrect.config(bg="white")
    if u_answered:
        if u_correct:
            msg = '#' + u_answer + ' is CORRECT - ' + q_corrtext
        else:
            msg = '#' + u_answer + \
                ' is INCORRECT - correct answer is #' + \
                q_corrnum + ': ' + q_corrtext
        widgets.txtCorrect.insert(tk.END, msg)
        # green feedback for correct, red for incorrect
        bgcolor = "#B1ECB1" if u_correct else "#FFC6C5"
    else:
        bgcolor = 'white' # white background if question not answered yet
    widgets.txtCorrect.config(bg=bgcolor)
    widgets.txtCorrect.config(state="disabled")
    widgets.txtExplanation.config(state="normal")
    widgets.txtExplanation.delete(1.0, tk.END)
    if u_answered:
        widgets.txtExplanation.insert(tk.END, question.get('explanation', ''))
    widgets.txtExplanation.config(state="disabled")
    # after answering, prefer the answer image (if any) over the question image
    image = question.get('image', '')
    answerimage = question.get('answerimage', '')
    displayedimage = answerimage if (u_answered and answerimage) else image
    if displayedimage:
        displayedimage = 'images/' + displayedimage
        # PhotoImage() needs a reference to avoid garbage collection
        widgets.image = tk.PhotoImage(file=displayedimage)
        widgets.lblImage.configure(image=widgets.image)
    else:
        widgets.image = None
        widgets.lblImage['image'] = None
def display_radiobuttons(rbstate='normal', rbselected=''):
    """Set radiobuttons to the answer options for the current question.

    rbstate -- state for buttons that have answer text ('normal'/'disabled')
    rbselected -- the radiobutton to select (e.g. '1', or '' for none)
    """
    question = widgets.questions[widgets.currentq]
    # One loop over the five fixed answer slots replaces five copy-pasted
    # stanzas. Unused radiobuttons are hidden by lowering them in the
    # Z-order so that they sit behind widgets.rbframe.
    for num in '12345':
        button = getattr(widgets, 'answer' + num)
        text = question['answers'].get(num, '')
        if text:
            button.configure(text=num + ': ' + text, state=rbstate)
            button.lift(widgets.rbframe)
        else:
            button.configure(text='', state='disabled')
            button.lower(widgets.rbframe)
    # select the user's answer (or clear selection if rbselected=='')
    widgets.answerSelection.set(rbselected)
def initialize_score():
    """Reset all scoring state: the counters and the answered-question map."""
    widgets.totAnswered = 0
    widgets.totCorrect = 0
    # key = question number (str), value = the user's answer ('1'-'5')
    widgets.answered = {}
def keystroke_bindings():
    """Assign keyboard shortcuts for answer selection and navigation."""
    # keys 1-5 select the matching answer radiobutton; the default-argument
    # lambda pins each digit at definition time (avoids late binding)
    for digit in '12345':
        root.bind(digit, lambda event, d=digit: widgets.answerSelection.set(d))
    # map each action to all of its trigger keys (upper/lower case + specials);
    # this data-driven loop replaces 18 copy-pasted root.bind calls
    actions = (
        (('c', 'C', '<Return>'), save_answer),
        (('<Left>', 'p', 'P'), move_previous),
        (('<Right>', 'n', 'N'), move_next),
        (('r', 'R'), move_random),
        (('s', 'S'), display_score),
        (('h', 'H', '<F1>'), display_help),
    )
    for keys, action in actions:
        for key in keys:
            root.bind(key, lambda event, act=action: act())
    root.bind('t', lambda event: select_topic(gui=True))
    root.bind('T', lambda event: select_topic(gui=True))
    root.bind("<Key-Escape>", lambda event: root.quit())  # Esc=quit
def move_next():
    """Advance to the next question, unless already on the last one."""
    current = int(widgets.currentq)  # question numbers are stored as strings
    if current >= widgets.totalquestions:
        return
    widgets.currentq = str(current + 1)
    display_question()
def move_previous():
    """Go back to the previous question, unless already on the first one."""
    current = int(widgets.currentq)  # question numbers are stored as strings
    if current <= 1:
        return
    widgets.currentq = str(current - 1)
    display_question()
def move_random():
    """Jump to a randomly chosen unanswered question."""
    # handle the case where all questions have been answered
    if len(widgets.answered) == widgets.totalquestions:
        topic_completed()
        return
    # remaining unanswered question numbers (as strings, matching the
    # keys of widgets.answered) — comprehension replaces the append loop
    unanswered = [str(qnum) for qnum in range(1, widgets.totalquestions + 1)
                  if str(qnum) not in widgets.answered]
    # pick a random one (unanswered is non-empty thanks to the guard above)
    widgets.currentq = unanswered[randint(0, len(unanswered) - 1)]
    display_question()
def pythonw_setup():
    """Handle default folder location if running under pythonw.exe.

    The pythonw.exe launcher starts from the Windows System32 folder as
    the default location, which isn't typically what's desired. When the
    running interpreter is pythonw, chdir to this program's folder;
    otherwise do nothing.
    """
    executable_name = os.path.split(sys.executable)[1]
    interpreter = executable_name.split('.')[0].lower()
    if interpreter != 'pythonw':
        return
    program_folder = os.path.dirname(os.path.realpath(sys.argv[0]))
    os.chdir(program_folder)
def read_datafile():
    """Load widgets.dataFile (JSON) into widgets.questions and reset position.

    NOTE: the data file must include questions numbered 1-N with no gaps.
    """
    with open(widgets.dataFile, 'r') as jsonfile:
        widgets.questions = json.load(jsonfile)
    widgets.totalquestions = len(widgets.questions)
    widgets.currentq = '1'  # start at the first question
def save_answer():
    """Record the selected answer for the current question and refresh."""
    q_num = widgets.currentq
    if q_num in widgets.answered:
        return  # already answered; ignore repeat submissions
    answer = widgets.answerSelection.get()
    if not answer or answer not in '12345':
        return  # nothing (valid) selected yet
    # update totals
    question = widgets.questions[q_num]
    widgets.totAnswered += 1
    if answer == question['correct']:
        widgets.totCorrect += 1
    widgets.answered[q_num] = answer
    # refresh the display based on current status
    display_question()
    # once every question is answered, show the final score
    if len(widgets.answered) == widgets.totalquestions:
        topic_completed()
def select_topic(gui=True):
    """Select a topic (.json file) via a file-open dialog.

    gui -- True when the quizme app window exists; the window is
    refreshed if a topic is selected.
    Returns the selected filename ('' if none selected); the global
    widgets object's properties are updated on selection.
    """
    tempwindow = None
    if not gui:
        # no app window yet: create a hidden temporary root for the dialog
        tempwindow = tk.Tk()
        tempwindow.withdraw()
    newtopic = filedialog.askopenfilename(title='select QuizMe file',
                                          initialdir='data',
                                          filetypes=[('JSON files', '.json')])
    if tempwindow is not None:
        tempwindow.destroy()
    if not newtopic:
        return newtopic  # dialog cancelled
    widgets.dataFile = newtopic
    # by convention the topic name is the filename stem,
    # e.g. quizme-TopicName.json -> quizme-TopicName
    widgets.topic = os.path.splitext(os.path.basename(newtopic))[0]
    widgets.currentq = '1'  # start with first topic
    widgets.answered = {}   # reset answered questions
    if gui:
        read_datafile()
        display_question()
    return newtopic
def topic_completed():
    """Tell the user the topic is finished; show score and offer a restart."""
    messagebox.showwarning(
        'Topic Completed',
        'You have already answered all of the questions in this topic!')
    display_score()
    restart = messagebox.askyesno(
        'Topic Completed', 'Do you want to start over with this topic?')
    if restart:
        initialize_score()
        widgets.currentq = '1'
        display_question()
class MainApplication(ttk.Frame):
    """Root application class.

    Builds the main window: the question frame on the left, the control
    buttons on the right, and an image label overlaid via place().
    """
    def __init__(self, parent, *args, **kwargs):
        ttk.Frame.__init__(self, parent, *args, **kwargs)
        self.grid(sticky="nsew")
        self.parent = parent
        self.parent.title('QuizMe')
        self.parent.iconbitmap('quizme.ico')
        initialize_score()
        self.widgets_create()
        display_question() # display first question in selected topic
        # customize styles
        style = ttk.Style()
        style.configure("TButton", font=('Verdana', 12))
        style.configure("TRadiobutton", font=('Verdana', 12))
        keystroke_bindings()
    def widgets_create(self):
        """Create all widgets in the main application window.
        """
        # configure resizing behavior
        top = self.winfo_toplevel()
        top.rowconfigure(1, weight=1)
        top.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.columnconfigure(0, weight=1)
        # create the widgets
        self.frm_question = FrameQuestion(self)
        self.frm_controls = FrameControls(self)
        self.frm_question.grid(row=1, column=0, sticky="w", padx=5, pady=5)
        self.frm_controls.grid(row=1, column=1, sticky="w", padx=5, pady=5)
        self.parent.columnconfigure(0, weight=1)
        # image label positioned absolutely over the layout
        widgets.lblImage = tk.Label(self)
        widgets.lblImage.place(x=511, y=50, height=300, width=300)
class FrameControls(ttk.Frame):
    """Frame for the controls (buttons) and the topic header label."""
    def __init__(self, parent, *args, **kwargs):
        ttk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        widgets.lblHeader = tk.Label(self, text='Topic:\n???',
                                     font=font.Font(family="Verdana", size=12),
                                     bg="#6FD2F4", height=4, width=12)
        widgets.lblHeader.pack(fill=tk.Y, padx=10, pady=10, expand=True)
        btnpadding = dict(padx=10, pady=5)
        # (label, command) pairs replace seven copy-pasted button stanzas;
        # underline=0 marks each button's first letter as its hotkey
        buttons = (
            ("Check Answer", save_answer),
            ("Next", move_next),
            ("Previous", move_previous),
            ("Random", move_random),
            ("Score", display_score),
            ("Topic", lambda: select_topic(gui=True)),
            ("Help", display_help),
        )
        for label, command in buttons:
            ttk.Button(self, underline=0, text=label,
                       command=command).pack(**btnpadding)
class FrameQuestion(ttk.Frame):
    """Frame for the question, answers, and explanation."""
    def __init__(self, parent, *args, **kwargs):
        ttk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        widgets.txtQuestion = tk.Text(self, height=2, border=0,
                                      font=font.Font(family="Verdana", size=12))
        widgets.txtQuestion.pack(anchor=tk.W, padx=5, pady=5, expand=tk.Y)
        widgets.txtQuestion.config(state="disabled")
        widgets.answerSelection = tk.StringVar()
        # create a frame to be used for hiding/showing radiobuttons
        widgets.rbframe = tk.Frame(self)
        widgets.rbframe.pack(side="top", fill="both", expand=True)
        rbops = dict(variable=widgets.answerSelection, underline=0)
        packoptions = dict(in_=widgets.rbframe, anchor=tk.W, padx=15, pady=17)
        # build the five answer radiobuttons (widgets.answer1..answer5) in a
        # loop instead of five copy-pasted stanzas
        for num in '12345':
            button = ttk.Radiobutton(self, value=num, text=num + ":", **rbops)
            button.pack(**packoptions)
            setattr(widgets, 'answer' + num, button)
        widgets.txtCorrect = tk.Text(self, border=0, height=2,
                                     font=('Verdana', 12))
        widgets.txtCorrect.pack(anchor=tk.W, padx=5, pady=8, expand=tk.Y)
        widgets.txtCorrect.config(state="disabled")
        widgets.txtExplanation = tk.Text(
            self, height=2, border=0, font=font.Font(family="Verdana", size=12))
        widgets.txtExplanation.pack(anchor=tk.W, padx=5, pady=5, expand=tk.Y)
# if running standalone, launch the app
if __name__ == "__main__":
    pythonw_setup()
    # ask for a topic before building the main window
    filename = select_topic(gui=False) # pylint: disable=C0103
    if not filename:
        sys.exit(0)  # no topic chosen -> quit quietly
    read_datafile() # read in the selected data file
    root = tk.Tk() # pylint: disable=C0103
    MainApplication(root)
    root.minsize(width=900, height=400)
    root.resizable(width=False, height=False) # app window not resizable
    root.attributes("-topmost", True) # force app window to top
    root.attributes("-topmost", False)
    root.focus_force() # give app window focus
    center_window(root)
    root.mainloop()
| 38.397028 | 80 | 0.648825 | 3.3125 |
63fdeeb769910c1fdd2ebed776cf3c52092b9a01
| 5,424 |
lua
|
Lua
|
lua/core/plugins.lua
|
ackerr/nvim
|
f598fd94eaaac522c1c43b2ebec5e50e0ed072df
|
[
"MIT"
] | null | null | null |
lua/core/plugins.lua
|
ackerr/nvim
|
f598fd94eaaac522c1c43b2ebec5e50e0ed072df
|
[
"MIT"
] | null | null | null |
lua/core/plugins.lua
|
ackerr/nvim
|
f598fd94eaaac522c1c43b2ebec5e50e0ed072df
|
[
"MIT"
] | 1 |
2022-01-28T06:49:25.000Z
|
2022-01-28T06:49:25.000Z
|
local vim = vim
local fn = vim.fn
-- Automatically install packer
-- NOTE(review): the variable name is misspelled ("bootstarp"); it is kept
-- as-is because the post-startup sync check below uses the same spelling.
local install_path = fn.stdpath("data") .. "/site/pack/packer/start/packer.nvim"
local packer_bootstarp
if fn.empty(fn.glob(install_path)) > 0 then
  -- clone packer on first run; fn.system blocks until the clone finishes
  packer_bootstarp = fn.system({
    "git",
    "clone",
    "--depth",
    "1",
    "https://github.com/wbthomason/packer.nvim",
    install_path,
  })
  print("Installing packer close and reopen Neovim...")
  vim.cmd([[packadd packer.nvim]])
end
-- bail out silently when packer is still unavailable (e.g. clone failed)
local status_ok, packer = pcall(require, "packer")
if not status_ok then
  return
end
-- Plugin
-- Plugin
-- Declarative plugin specification; packer installs/loads these.
packer.startup({
  function(use)
    use("wbthomason/packer.nvim")
    use("nvim-lua/popup.nvim")
    use("nvim-lua/plenary.nvim")
    use("kyazdani42/nvim-web-devicons")
    -- colorscheme
    use({
      "rebelot/kanagawa.nvim",
      config = function()
        require("kanagawa").setup()
      end,
    })
    use({ "goolord/alpha-nvim", requires = { "nvim-telescope/telescope.nvim" } })
    use({ "kevinhwang91/nvim-hlslens" })
    use({ "tpope/vim-surround" })
    use({ "tpope/vim-repeat" })
    use({
      "numToStr/Comment.nvim",
      config = function()
        require("Comment").setup()
      end,
    })
    use({ "itchyny/vim-cursorword" })
    use({ "junegunn/vim-easy-align" })
    use({ "editorconfig/editorconfig-vim" })
    use({ "terryma/vim-multiple-cursors" })
    use({ "mg979/vim-visual-multi" })
    use({ "Vimjas/vim-python-pep8-indent", ft = "python" })
    use({
      "norcalli/nvim-colorizer.lua",
      config = function()
        require("colorizer").setup({ "*" })
      end,
    })
    use({ "github/copilot.vim" })
    use({
      "lewis6991/gitsigns.nvim",
      event = "BufRead",
      config = function()
        require("core.gitsigns")
      end,
    })
    use({ "andrewstuart/vim-kubernetes", ft = { "yaml", "yml" } })
    use({ "cespare/vim-toml", ft = "toml" })
    -- testing: vim-test runners plus the ultest summary UI
    use({
      "vim-test/vim-test",
      cmd = { "TestNearest", "TestSuite", "TestVisit", "TestFile", "TestLast" },
    })
    use({
      "rcarriga/vim-ultest",
      cmd = { "UltestSummary", "ultest" },
      requires = { "vim-test/vim-test" },
      run = ":UpdateRemoteuseins",
    })
    use({ "romainl/vim-cool" })
    use({ "psliwka/vim-smoothie" })
    use({ "wakatime/vim-wakatime" })
    use({ "voldikss/vim-translator", cmd = { "TranslateW" } })
    -- terminal
    use({ "voldikss/vim-floaterm" })
    use({ "akinsho/toggleterm.nvim", tag = "v1.*" })
    use({ "kyazdani42/nvim-tree.lua" })
    use({ "akinsho/bufferline.nvim", tag = "v2.*", requires = "kyazdani42/nvim-web-devicons" })
    use("nvim-lualine/lualine.nvim")
    -- lsp
    use("neovim/nvim-lspconfig")
    use("williamboman/nvim-lsp-installer")
    use("hrsh7th/nvim-cmp")
    use("hrsh7th/cmp-nvim-lsp")
    use({ "hrsh7th/cmp-buffer", after = "nvim-cmp" })
    use({ "hrsh7th/cmp-path", after = "nvim-cmp" })
    use({ "hrsh7th/cmp-cmdline", after = "nvim-cmp" })
    use("windwp/nvim-autopairs")
    -- lsp icon
    use("onsails/lspkind-nvim")
    -- snippet.
    use("rafamadriz/friendly-snippets")
    use("L3MON4D3/LuaSnip")
    use({ "saadparwaiz1/cmp_luasnip", after = "nvim-cmp" })
    -- lsp format
    use({
      "jose-elias-alvarez/null-ls.nvim",
      event = "BufRead",
      config = function()
        require("core.null-ls")
      end,
    })
    -- syntax
    use({ "nvim-treesitter/nvim-treesitter", run = ":TSUpdate" })
    use({ "nvim-treesitter/nvim-treesitter-textobjects", requires = { "nvim-treesitter/nvim-treesitter" } })
    use({ "romgrk/nvim-treesitter-context", requires = { "nvim-treesitter/nvim-treesitter" } })
    -- search
    use("nvim-telescope/telescope.nvim")
    use("nvim-telescope/telescope-project.nvim")
  end,
  config = {
    display = {
      -- show packer's status window in a rounded floating window
      open_fn = function()
        return require("packer.util").float({ border = "rounded" })
      end,
    },
  },
})
-- first run only: sync plugins right after bootstrapping packer
if packer_bootstarp then
  require("packer").sync()
end
vim.cmd([[
  silent! colorscheme kanagawa
  highlight VertSplit guibg=NONE
]])
-- comment.nvim
Keymap("n", "<C-_>", "gcc", { noremap = false })
Keymap("v", "<C-_>", "gc", { noremap = false })
-- vim-easy-align
Keymap("x", "ga", ":EasyAlign<CR>")
Keymap("n", "ga", ":EasyAlign<CR>")
-- custom alignment delimiters for >, / and # keys
vim.g.easy_align_delimiters = {
  [">"] = {
    pattern = ">>\\|\\|=>\\|>",
  },
  ["/"] = {
    pattern = "//\\+\\|/\\*\\|\\*/",
    delimiter_align = "l",
    ignore_groups = { "!Comment" },
  },
  ["#"] = {
    pattern = "#\\+",
    delimiter_align = "l",
    ignore_groups = { "String" },
  },
}
-- vim-translator
Keymap("n", "<M-t>", ":TranslateW<CR>")
Keymap("v", "<M-t>", ":TranslateW<CR>")
-- vim-test and vim-ultest
Keymap("n", "tn", ":TestNearest<CR>")
Keymap("n", "tf", ":TestFile<CR>")
Keymap("n", "ts", ":TestSuite<CR>")
Keymap("n", "tl", ":TestLast<CR>")
Keymap("n", "tg", ":TestVisit<CR>")
Keymap("n", "tt", ":UltestSummary<CR>")
vim.g["test#strategy"] = "floaterm"
vim.g["test#python#runner"] = "pytest"
vim.g["test#go#runner"] = "gotest"
vim.g["ultest_use_pty"] = 1
-- github copilot
vim.g.copilot_no_tab_map = true
vim.g.copilot_assume_mapped = true
vim.g.copilot_tab_fallback = ""
-- hlslens
require("hlslens").setup({
  nearest_only = true,
})
-- wrap the search motions so hlslens refreshes its virtual-text counters
Keymap("n", "n", [[<Cmd>execute('normal! ' . v:count1 . 'n')<CR><Cmd>lua require('hlslens').start()<CR>]])
Keymap("n", "N", [[<Cmd>execute('normal! ' . v:count1 . 'N')<CR><Cmd>lua require('hlslens').start()<CR>]])
Keymap("n", "*", [[*<Cmd>lua require('hlslens').start()<CR>]])
Keymap("n", "#", [[#<Cmd>lua require('hlslens').start()<CR>]])
-- integrate hlslens with vim-visual-multi sessions
vim.cmd([[
  aug VMlens
    au!
    au User visual_multi_start lua require('vmlens').start()
    au User visual_multi_exit lua require('vmlens').exit()
  aug END
]])
| 25.952153 | 106 | 0.615044 | 3.125 |
f02d80a4afeebaf1a2e3f75631b09c3fc74059e3
| 2,538 |
py
|
Python
|
src/flask_easy/auth.py
|
Josephmaclean/flask-easy
|
64cb647b0dbcd031cb8d27cc60889e50c959e1ca
|
[
"MIT"
] | 1 |
2021-12-30T12:25:05.000Z
|
2021-12-30T12:25:05.000Z
|
src/flask_easy/auth.py
|
Josephmaclean/flask-easy
|
64cb647b0dbcd031cb8d27cc60889e50c959e1ca
|
[
"MIT"
] | null | null | null |
src/flask_easy/auth.py
|
Josephmaclean/flask-easy
|
64cb647b0dbcd031cb8d27cc60889e50c959e1ca
|
[
"MIT"
] | null | null | null |
"""
auth.py
Author: Joseph Maclean Arhin
"""
import os
import inspect
from functools import wraps
import jwt
from flask import request
from jwt.exceptions import ExpiredSignatureError, InvalidTokenError, PyJWTError
from .exc import Unauthorized, ExpiredTokenException, OperationError
def auth_required(other_roles=None):
    """Decorator factory: require a valid JWT bearer token (and role) on a view.

    :param other_roles: optional ``|``-separated string of additional role
        names that are allowed to invoke the wrapped view.
    :return: a decorator suitable for wrapping Flask view functions.
    """

    def authorize_user(func):
        """
        A wrapper to authorize an action using
        :param func: {function}` the function to wrap around
        :return:
        """

        @wraps(func)
        def view_wrapper(*args, **kwargs):
            authorization_header = request.headers.get("Authorization")
            if not authorization_header:
                raise Unauthorized("Missing authentication token")

            # Header is expected as "Bearer <token>". The original indexed
            # split()[1] directly, which raised IndexError on a malformed
            # header; reject it explicitly instead.
            header_parts = authorization_header.split()
            if len(header_parts) < 2:
                raise Unauthorized("Invalid authorization header")
            token = header_parts[1]
            try:
                key = os.getenv("JWT_SECRET")  # noqa E501
                payload = jwt.decode(
                    token, key=key, algorithms=["HS256", "RS256"]
                )  # noqa E501

                # Realm roles from the token payload. "realm_access" may be
                # absent from the claims; default to no roles rather than
                # crashing with AttributeError on None.
                available_roles = payload.get("realm_access", {}).get("roles", [])

                # NOTE(review): placeholder role. The commented-out line shows
                # the intended value (service_name + "_" + func.__name__);
                # confirm before relying on role-based authorization.
                # generated_role = service_name + "_" + func.__name__
                generated_role = "s"

                authorized_roles = []
                if other_roles:
                    authorized_roles = other_roles.split("|")
                authorized_roles.append(generated_role)

                if is_authorized(authorized_roles, available_roles):
                    # Inject the caller's identity when the view declares a
                    # ``user_id`` parameter.
                    if "user_id" in inspect.getfullargspec(func).args:
                        kwargs["user_id"] = payload.get(
                            "preferred_username"
                        )  # noqa E501
                    return func(*args, **kwargs)
            except ExpiredSignatureError as error:
                raise ExpiredTokenException("Token Expired") from error
            except InvalidTokenError as error:
                raise OperationError("Invalid Token") from error
            except PyJWTError as error:
                raise OperationError("Error decoding token") from error

            # Token decoded fine but carried none of the authorized roles.
            raise Unauthorized(status_code=403)

        return view_wrapper

    return authorize_user
def is_authorized(access_roles, available_roles):
    """Return True if any requested role is present in the available roles."""
    return any(role in available_roles for role in access_roles)
| 32.126582 | 79 | 0.593775 | 3.046875 |
7adf8e67d42aa76cde76421b1599701d958d1b95
| 2,722 |
rs
|
Rust
|
src/design/param.rs
|
jobtijhuis/tydi
|
326c0636ac185ad97e9780d2047c44626050f1a3
|
[
"Apache-2.0"
] | null | null | null |
src/design/param.rs
|
jobtijhuis/tydi
|
326c0636ac185ad97e9780d2047c44626050f1a3
|
[
"Apache-2.0"
] | 26 |
2021-09-06T04:29:55.000Z
|
2022-02-16T04:23:42.000Z
|
src/design/param.rs
|
jobtijhuis/tydi
|
326c0636ac185ad97e9780d2047c44626050f1a3
|
[
"Apache-2.0"
] | 1 |
2021-09-21T14:26:19.000Z
|
2021-09-21T14:26:19.000Z
|
use std::collections::HashMap;
use std::convert::TryInto;
///! Generic parameter type
use crate::design::{ParamHandle, ParamKey, ParamStoreKey};
use crate::logical::LogicalType;
use crate::{Document, Error, Identify, Result, UniqueKeyBuilder};
/// The value carried by a generic parameter.
///
/// Extend this enum (see the `//...` marker) as more parameter kinds are
/// needed.
#[derive(Debug, PartialEq)]
pub enum ParameterVariant {
    /// A logical (hardware) type parameter.
    Type(LogicalType),
    /// A string-valued parameter.
    String(String),
    /// An unsigned-integer parameter.
    UInt(u32),
    //...
}
/// A parameter value bound to a key, with optional documentation.
#[derive(Debug, PartialEq)]
pub struct NamedParameter {
    // Key the parameter is identified by within a store.
    key: ParamKey,
    // The parameter's value.
    item: ParameterVariant,
    // Optional user documentation, surfaced through the `Document` trait.
    doc: Option<String>,
}
impl NamedParameter {
    /// Builds a `NamedParameter` from anything convertible into a `ParamKey`.
    ///
    /// # Errors
    /// Returns an error when `key` fails to convert into a valid `ParamKey`.
    pub fn try_new(
        key: impl TryInto<ParamKey, Error = impl Into<Box<dyn std::error::Error>>>,
        item: ParameterVariant,
        doc: Option<&str>,
    ) -> Result<Self> {
        let key = key.try_into().map_err(Into::into)?;
        Ok(NamedParameter {
            key,
            item,
            doc: doc.map(|s| s.to_string()),
        })
    }

    /// The key this parameter is identified by.
    pub fn key(&self) -> &ParamKey {
        &self.key
    }

    /// The parameter's value variant.
    pub fn item(&self) -> &ParameterVariant {
        &self.item
    }
}
impl Identify for NamedParameter {
    /// A parameter is identified by its key's string form.
    fn identifier(&self) -> &str {
        self.key.as_ref()
    }
}
impl Document for NamedParameter {
    /// Returns a copy of the optional user documentation.
    fn doc(&self) -> Option<String> {
        self.doc.clone()
    }
}
/// A keyed collection of named parameters.
#[derive(Debug, PartialEq)]
pub struct ParameterStore {
    // Key identifying this store itself.
    key: ParamStoreKey,
    // Parameters indexed by their own keys.
    params: HashMap<ParamKey, NamedParameter>,
}
impl Identify for ParameterStore {
    /// A store is identified by its key's string form.
    fn identifier(&self) -> &str {
        self.key.as_ref()
    }
}
impl ParameterStore {
pub fn from_builder(
key: ParamStoreKey,
builder: UniqueKeyBuilder<NamedParameter>,
) -> Result<Self> {
Ok(ParameterStore {
key,
params: builder
.finish()?
.into_iter()
.map(|p| (p.key().clone(), p))
.collect::<HashMap<ParamKey, NamedParameter>>(),
})
}
pub fn add(&mut self, param: NamedParameter) -> Result<ParamHandle> {
let key = param.key().clone();
match self.params.insert(param.key().clone(), param) {
None => Ok(ParamHandle {
lib: self.key.clone(),
param: key.clone(),
}),
Some(_lib) => Err(Error::ProjectError(format!(
"Error while adding {} to the library",
key,
))),
}
}
pub fn get(&self, key: ParamKey) -> Result<&NamedParameter> {
self.params.get(&key).ok_or_else(|| {
Error::LibraryError(format!(
"Parameter {} not found in store {}",
key,
self.identifier()
))
})
}
pub fn key(&self) -> &ParamStoreKey {
&self.key
}
}
| 24.088496 | 83 | 0.541881 | 3.046875 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.