Dataset Viewer
hexsha
stringlengths 40
40
| size
int64 38
969k
| ext
stringclasses 1
value | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
106
| max_stars_repo_name
stringlengths 8
104
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
sequencelengths 1
4
| max_stars_count
int64 1
38.8k
β | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
β | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
β | max_issues_repo_path
stringlengths 4
106
| max_issues_repo_name
stringlengths 8
104
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
sequencelengths 1
4
| max_issues_count
int64 1
53.3k
β | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
β | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
β | max_forks_repo_path
stringlengths 4
106
| max_forks_repo_name
stringlengths 8
104
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
sequencelengths 1
4
| max_forks_count
int64 1
6.24k
β | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
β | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
β | content
stringlengths 38
969k
| avg_line_length
float64 6.33
6.5k
| max_line_length
int64 15
269k
| alphanum_fraction
float64 0.18
0.91
| test_functions
sequencelengths 1
212
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7005682f45127ca851469ce7a4ea680c9da5dc4 | 49 | jl | Julia | AE/test/runtests.jl | foldfelis/ML101.jl | b4b217ac4af88ba460ec26c5c8a1ce322edae64a | [
"MIT"
] | 6 | 2021-02-23T05:48:18.000Z | 2021-02-23T11:52:24.000Z | AE/test/runtests.jl | foldfelis/ML101.jl | b4b217ac4af88ba460ec26c5c8a1ce322edae64a | [
"MIT"
] | 5 | 2021-02-22T21:59:07.000Z | 2021-05-05T07:29:55.000Z | AE/test/runtests.jl | foldfelis/ML101.jl | b4b217ac4af88ba460ec26c5c8a1ce322edae64a | [
"MIT"
] | 1 | 2021-02-28T07:04:06.000Z | 2021-02-28T07:04:06.000Z | using AE
using Test
@testset "AE.jl" begin
end
| 7 | 22 | 0.714286 | [
"@testset \"AE.jl\" begin\n\nend"
] |
f702b1e015efb351ee28034b390c3e7d1c8857bc | 7,933 | jl | Julia | src/OrthoPolynomials.jl | OpenLibMathSeq/Sequences | e53c1f30b7bf81669805f21d408d407b727615b5 | [
"MIT"
] | 6 | 2019-06-25T08:54:44.000Z | 2021-11-07T04:52:29.000Z | src/OrthoPolynomials.jl | OpenLibMathSeq/Sequences | e53c1f30b7bf81669805f21d408d407b727615b5 | [
"MIT"
] | 3 | 2019-04-30T19:07:41.000Z | 2019-06-04T15:51:34.000Z | src/OrthoPolynomials.jl | PeterLuschny/IntegerSequences.jl | 1b9440bc8b86e3ae74fd26ee48fba412befbbdb5 | [
"MIT"
] | 4 | 2019-04-30T17:00:10.000Z | 2020-02-08T11:32:39.000Z | # This file is part of IntegerSequences.
# Copyright Peter Luschny. License is MIT.
(@__DIR__) β LOAD_PATH && push!(LOAD_PATH, (@__DIR__))
module OrthoPolynomials
using Nemo, Triangles
export ModuleOrthoPolynomials
export OrthoPoly, InvOrthoPoly
export T053121, T216916, T217537, T064189, T202327, T111062, T099174
export T066325, T049310, T137338, T104562, T037027, T049218, T159834, T137286
export T053120, T053117, T111593, T059419
export L217924, L005773, L108624, L005425, L000085, L001464, L003723, L006229
"""
* OrthoPoly, InvOrthoPoly, T053121, T216916, T217537, T064189, T202327, T111062, T099174, T066325, T049310, T137338, T104562, T037027, T049218, T159834, T137286, T053120, T053117, T111593, T059419, L217924, L005773, L108624, L005425, L000085, L001464, L003723, L006229
"""
const ModuleOrthoPolynomials = ""
# Cf. http://oeis.org/wiki/User:Peter_Luschny/AignerTriangles
"""
By the theorem of Favard an orthogonal polynomial systems ``p_{n}(x)`` is a sequence of real polynomials with deg``(p_{n}(x)) = n`` for all ``n`` if and only if
`` p_{n+1}(x) = (x - s_n)p_n(x) - t_n p_{n-1}(x) ``
with ``p_{0}(x)=1`` for some pair of seq's ``s_k`` and ``t_k``. Return the coefficients of the polynomials as a triangular array with `dim` rows.
"""
function OrthoPoly(dim::Int, s::Function, t::Function)
dim β€ 0 && return ZZ[]
T = fill(ZZ(0), dim, dim)
for n β 1:dim T[n, n] = 1 end
for n β 2:dim, k β 1:n-1
T[n, k] = ((k > 1 ? T[n-1, k-1] : 0)
+ s(k - 1) * T[n-1, k] + t(k) * T[n-1, k+1])
end
[T[n, k] for n β 1:dim for k β 1:n] # flatt format
# [[T[n, k] for k β 1:n] for n β 1:dim] # triangle format
end
"""
Return the inverse of the coefficients of the orthogonal polynomials generated by ``s`` and ``t`` as a triangular array with `dim` rows.
"""
function InvOrthoPoly(dim::Int, s::Function, t::Function)
dim β€ 0 && return ZZ[]
T = fill(ZZ(0), dim, dim)
for n β 1:dim T[n, n] = 1 end
for n β 1:dim-1, k β 1:n+1
T[n+1, k] = ((k > 1 ? T[n, k-1] : 0)
- s(n - 1) * T[n, k] - (n > 1 ? t(n - 1) * T[n-1, k] : 0))
end
[T[n, k] for n β 1:dim for k β 1:n]
end
"""
Return the Catalan triangle (with 0's) read by rows.
"""
T053121(dim::Int) = OrthoPoly(dim, n -> 0, n -> 1)
# """
# binomial(n, floor(n/2)).
# """
# L001405(len::Int) = RowSums(T053121(len))
"""
Return the coefficients of some orthogonal polynomials related to set partitions without singletons (cf. A000296).
"""
T216916(dim::Int) = OrthoPoly(dim, n -> n + 1, n -> n + 1)
"""
Return the triangle ``T(n,k)`` of tangent numbers, coefficient of ``x^n/n!`` in the expansion of ``(tan x)^k/k!``.
"""
T059419(dim::Int) = OrthoPoly(dim, n -> 0, n -> n * (n - 1))
"""
Return the expansion of exp(tan(x)).
"""
L006229(len::Int) = RowSums(T059419(len))
"""
Return the first len integers defined as ``a(n) = n! [x^n] \\exp(2 \\exp (x) - x - 2)``.
"""
L217924(len::Int) = RowSums(T217537(len))
"""
Return the coefficients of some orthogonal polynomials related to indecomposable set partitions without singletons (cf. A098742).
"""
T217537(dim::Int) = OrthoPoly(dim, n -> n, n -> n)
"""
Return the (reflected) Motzkin triangle.
"""
T064189(dim::Int) = OrthoPoly(dim, n -> 1, n -> 1)
"""
Return the number of directed animals of size n as an array of length len.
"""
L005773(len::Int) = RowSums(T064189(len))
"""
Return the coefficients of ``x^n`` in the expansion of ``((-1-x+β(1+2x+5x^2))/2)^k`` as a triangle with dim rows.
"""
T202327(dim::Int) = OrthoPoly(dim, n -> -1, n -> -1)
"""
Return the sequence with generating function satisfying ``x = (A(x)+(A(x))^2)/(1-A(x)-(A(x))^2)``.
"""
L108624(len::Int) = RowSums(T202327(len))
"""
Return the triangle ``T(n, k) = \\binom{n}{k} \\times`` involutions``(n - k)``.
"""
T111062(dim::Int) = OrthoPoly(dim, n -> 1, n -> n)
"""
Return the number of self-inverse partial permutations.
"""
L005425(len::Int) = RowSums(T111062(len))
"""
Return the coefficients of the modified Hermite polynomials.
"""
T099174(dim::Int) = OrthoPoly(dim, n -> 0, n -> n)
# Also
# T099174(dim::Int) = InvOrthoPoly(dim, n -> 0, n -> -n)
"""
Return the number of involutions.
"""
L000085(len::Int) = RowSums(T099174(len))
"""
Return the coefficients of unitary Hermite polynomials He``_n(x)``.
"""
T066325(dim::Int) = InvOrthoPoly(dim, n -> 0, n -> n)
"""
Return the sequence defined by ``a(n) = n! [x^n] \\exp(-x-(x^2)/2)``.
"""
L001464(len::Int) = RowSums(T066325(len), true)
"""
Return the triangle of tanh numbers.
"""
T111593(dim::Int) = OrthoPoly(dim, n -> 0, n -> -n * (n - 1))
"""
Return the sequence defined by ``A(n) = n! [x^n] \\exp \\tan(x)`` as an array of length `len`.
"""
L003723(len::Int) = RowSums(T111593(len))
"""
Return the coefficients of Chebyshev's U``(n, x/2)`` polynomials.
"""
T049310(dim::Int) = InvOrthoPoly(dim, n -> 0, n -> 1)
"""
Return the coefficients of the Charlier polynomials with parameter ``a = 1``.
"""
T137338(dim::Int) = InvOrthoPoly(dim, n -> n + 1, n -> n + 1)
"""
Return the inverse of the Motzkin triangle (cf. A064189).
"""
T104562(dim::Int) = InvOrthoPoly(dim, n -> 1, n -> 1)
"""
Return the skew Fibonacci-Pascal triangle with `dim` rows.
"""
T037027(dim::Int) = InvOrthoPoly(dim, n -> -1, n -> -1)
"""
Return the arctangent numbers (expansion of arctan``(x)^n/n!``).
"""
T049218(dim::Int) = InvOrthoPoly(dim, n -> 0, n -> n * (n + 1))
"""
Return the coefficients of Hermite polynomials ``H(n, (x-1)/β(2))/(β(2))^n``.
"""
T159834(dim::Int) = InvOrthoPoly(dim, n -> 1, n -> n)
"""
Return the coefficients of a variant of the Hermite polynomials.
"""
T137286(dim::Int) = InvOrthoPoly(dim, n -> 0, n -> n + 1)
"""
Return the coefficients of the Chebyshev-T polynomials.
"""
function T053120(len)
T = ZTriangle(len)
R, x = PolynomialRing(ZZ, "x")
m = 1
for n β 0:len-1
f = chebyshev_t(n, x)
for k β 0:n
T[m] = coeff(f, k)
m += 1
end
end
T
end
"""
Return the coefficients of the Chebyshev-U polynomials.
"""
function T053117(len)
T = ZTriangle(len)
R, x = PolynomialRing(ZZ, "x")
m = 1
for n β 0:len-1
f = chebyshev_u(n, x)
for k β 0:n
T[m] = coeff(f, k)
m += 1
end
end
T
end
#START-TEST-########################################################
using Test, SeqTests
function test()
@testset "OrthoPoly" begin
@test isa(OrthoPoly(10, n -> 1, n -> n + 1)[end], fmpz)
@test isa(InvOrthoPoly(10, n -> 1, n -> n + 1)[end], fmpz)
@test RowSums(T217537(8)) == L217924(8)
if data_installed()
T = [
T066325,
T049310,
T137338,
T104562,
T037027,
T049218,
T159834,
T137286,
T053120,
T053117,
T053121,
T216916,
T217537,
T064189,
T202327,
T111062,
T099174,
T111593,
T064189
]
SeqTest(T, 'T')
L = [L217924, L005425, L000085, L001464, L003723, L108624, L006229]
SeqTest(L, 'L')
end
end
end
function demo()
T = T111593(8)
ShowAsΞ(T)
println(RowSums(T))
T = T217537(8)
ShowAsΞ(T)
println(RowSums(T))
T = T053117(8)
ShowAsΞ(T)
println(RowSums(T))
end
"""
T111062(500) :: 0.339080 seconds (750.52 k allocations: 15.375 MiB)
T066325(500) :: 0.157202 seconds (751.50 k allocations: 13.374 MiB)
T053120(500) :: 0.061058 seconds (375.75 k allocations: 6.705 MiB)
"""
function perf()
GC.gc()
@time T111062(500)
GC.gc()
@time T066325(500)
GC.gc()
@time T053120(500)
end
function main()
test()
demo()
perf()
end
main()
end # module
| 23.680597 | 268 | 0.577209 | [
"@testset \"OrthoPoly\" begin\n\n @test isa(OrthoPoly(10, n -> 1, n -> n + 1)[end], fmpz)\n @test isa(InvOrthoPoly(10, n -> 1, n -> n + 1)[end], fmpz)\n @test RowSums(T217537(8)) == L217924(8)\n\n if data_installed()\n\n T = [\n T066325,\n T049310,\n T137338,\n T104562,\n T037027,\n T049218,\n T159834,\n T137286,\n T053120,\n T053117,\n T053121,\n T216916,\n T217537,\n T064189,\n T202327,\n T111062,\n T099174,\n T111593,\n T064189\n ]\n SeqTest(T, 'T')\n\n L = [L217924, L005425, L000085, L001464, L003723, L108624, L006229]\n SeqTest(L, 'L')\n end\n end"
] |
f702dea5779e5262fac4b7f8e161329cc0c3f6d4 | 2,303 | jl | Julia | test/runtests.jl | felipenoris/SplitIterators.jl | 6ad384e290feed1339e94ff169b58922a3785359 | [
"MIT"
] | 2 | 2021-08-22T14:45:30.000Z | 2022-03-19T19:34:46.000Z | test/runtests.jl | felipenoris/SplitIterators.jl | 6ad384e290feed1339e94ff169b58922a3785359 | [
"MIT"
] | null | null | null | test/runtests.jl | felipenoris/SplitIterators.jl | 6ad384e290feed1339e94ff169b58922a3785359 | [
"MIT"
] | null | null | null |
using Test
import SplitIterators
@testset "split 11 by 3" begin
x = collect(1:11)
for (i, part) in enumerate(SplitIterators.split(x, 3))
if i == 1
@test part == collect(1:4)
elseif i == 2
@test part == collect(5:8)
elseif i == 3
@test part == collect(9:11)
else
@test false
end
end
@test length(SplitIterators.split(x, 3)) == 3
end
@testset "split range 11 by 3" begin
x = 1:11
for (i, part) in enumerate(SplitIterators.split(x, 3))
if i == 1
@test part == 1:4
elseif i == 2
@test part == 5:8
elseif i == 3
# TODO: should yield `9:11`
@test part == collect(9:11)
else
@test false
end
end
@test length(SplitIterators.split(x, 3)) == 3
end
@testset "split 11 by 11" begin
x = collect(1:11)
for (i, part) in enumerate(SplitIterators.split(x, 11))
@test part == [i]
end
end
@testset "split 11 by 15" begin
x = collect(1:11)
for (i, part) in enumerate(SplitIterators.split(x, 15))
@test part == [i]
end
end
@testset "split 11 by 1" begin
x = collect(1:11)
for (i, part) in enumerate(SplitIterators.split(x, 1))
if i == 1
@test part == collect(1:11)
else
@test false
end
end
end
@testset "split 12 by 2" begin
x = collect(1:12)
for (i, part) in enumerate(SplitIterators.split(x, 2))
if i == 1
@test part == collect(1:6)
elseif i == 2
@test part == collect(7:12)
else
@test false
end
end
end
@testset "split empty itr" begin
x = []
@test_throws ArgumentError SplitIterators.split(x, 10)
end
@testset "eltype" begin
x = [1]
if VERSION < v"1.4"
@test eltype(SplitIterators.split(x, 1)) == Vector{Int}
else
@test eltype(SplitIterators.split(x, 1)) == Union{SubArray{Int64, 1, Vector{Int64}, Tuple{UnitRange{Int64}}, true}, Vector{Int64}}
end
x = 1:2
if VERSION < v"1.4"
@test eltype(SplitIterators.split(x, 1)) == Vector{Int}
else
@test eltype(SplitIterators.split(x, 1)) == Union{UnitRange{Int64}, Vector{Int64}}
end
end
| 21.933333 | 138 | 0.539731 | [
"@testset \"split 11 by 3\" begin\n x = collect(1:11)\n\n for (i, part) in enumerate(SplitIterators.split(x, 3))\n if i == 1\n @test part == collect(1:4)\n elseif i == 2\n @test part == collect(5:8)\n elseif i == 3\n @test part == collect(9:11)\n else\n @test false\n end\n end\n\n @test length(SplitIterators.split(x, 3)) == 3\nend",
"@testset \"split range 11 by 3\" begin\n x = 1:11\n\n for (i, part) in enumerate(SplitIterators.split(x, 3))\n if i == 1\n @test part == 1:4\n elseif i == 2\n @test part == 5:8\n elseif i == 3\n # TODO: should yield `9:11`\n @test part == collect(9:11)\n else\n @test false\n end\n end\n\n @test length(SplitIterators.split(x, 3)) == 3\nend",
"@testset \"split 11 by 11\" begin\n x = collect(1:11)\n\n for (i, part) in enumerate(SplitIterators.split(x, 11))\n @test part == [i]\n end\nend",
"@testset \"split 11 by 15\" begin\n x = collect(1:11)\n\n for (i, part) in enumerate(SplitIterators.split(x, 15))\n @test part == [i]\n end\nend",
"@testset \"split 11 by 1\" begin\n x = collect(1:11)\n\n for (i, part) in enumerate(SplitIterators.split(x, 1))\n if i == 1\n @test part == collect(1:11)\n else\n @test false\n end\n end\nend",
"@testset \"split 12 by 2\" begin\n x = collect(1:12)\n\n for (i, part) in enumerate(SplitIterators.split(x, 2))\n if i == 1\n @test part == collect(1:6)\n elseif i == 2\n @test part == collect(7:12)\n else\n @test false\n end\n end\nend",
"@testset \"split empty itr\" begin\n x = []\n @test_throws ArgumentError SplitIterators.split(x, 10)\nend",
"@testset \"eltype\" begin\n x = [1]\n if VERSION < v\"1.4\"\n @test eltype(SplitIterators.split(x, 1)) == Vector{Int}\n else\n @test eltype(SplitIterators.split(x, 1)) == Union{SubArray{Int64, 1, Vector{Int64}, Tuple{UnitRange{Int64}}, true}, Vector{Int64}}\n end\n\n x = 1:2\n if VERSION < v\"1.4\"\n @test eltype(SplitIterators.split(x, 1)) == Vector{Int}\n else\n @test eltype(SplitIterators.split(x, 1)) == Union{UnitRange{Int64}, Vector{Int64}}\n end\nend"
] |
f7065123fa9d24e80182de827c92598269a8c321 | 122 | jl | Julia | test/runtests.jl | Teslos/MyExample.jl | 014079e8dd99a63c1ff8340d1d9ed670ed8e91ad | [
"MIT"
] | null | null | null | test/runtests.jl | Teslos/MyExample.jl | 014079e8dd99a63c1ff8340d1d9ed670ed8e91ad | [
"MIT"
] | null | null | null | test/runtests.jl | Teslos/MyExample.jl | 014079e8dd99a63c1ff8340d1d9ed670ed8e91ad | [
"MIT"
] | null | null | null | using MyExample
using Test
#2x + 3y
@testset "MyExample.jl" begin
@test my_f(2,1) == 7
@test my_f(2,3) == 13
end
| 13.555556 | 29 | 0.622951 | [
"@testset \"MyExample.jl\" begin\n @test my_f(2,1) == 7\n @test my_f(2,3) == 13\nend"
] |
f7071fefba07848f0e49c2ef170c0eb46f03133d | 1,564 | jl | Julia | test/Ocean/SplitExplicit/test_coriolis.jl | ErikQQY/ClimateMachine.jl | ad128d457dd877bf21b5bcd845d6c3fa42de3f8a | [
"Apache-2.0"
] | 256 | 2020-05-06T08:03:16.000Z | 2022-03-22T14:01:20.000Z | test/Ocean/SplitExplicit/test_coriolis.jl | ErikQQY/ClimateMachine.jl | ad128d457dd877bf21b5bcd845d6c3fa42de3f8a | [
"Apache-2.0"
] | 1,174 | 2020-05-06T16:19:51.000Z | 2022-02-25T17:51:13.000Z | test/Ocean/SplitExplicit/test_coriolis.jl | ErikQQY/ClimateMachine.jl | ad128d457dd877bf21b5bcd845d6c3fa42de3f8a | [
"Apache-2.0"
] | 45 | 2020-05-08T02:28:36.000Z | 2022-03-14T22:44:56.000Z | #!/usr/bin/env julia --project
using Test
include("hydrostatic_spindown.jl")
ClimateMachine.init()
const FT = Float64
#################
# RUN THE TESTS #
#################
@testset "$(@__FILE__)" begin
include("../refvals/hydrostatic_spindown_refvals.jl")
# simulation time
timeend = FT(15 * 24 * 3600) # s
tout = FT(24 * 3600) # s
timespan = (tout, timeend)
# DG polynomial order
N = Int(4)
# Domain resolution
NΛ£ = Int(5)
NΚΈ = Int(5)
NαΆ» = Int(8)
resolution = (N, NΛ£, NΚΈ, NαΆ»)
# Domain size
LΛ£ = 1e6 # m
LΚΈ = 1e6 # m
H = 400 # m
dimensions = (LΛ£, LΚΈ, H)
BC = (
OceanBC(Impenetrable(FreeSlip()), Insulating()),
OceanBC(Penetrable(FreeSlip()), Insulating()),
)
config = SplitConfig(
"rotating_bla",
resolution,
dimensions,
Coupled(),
Rotating();
solver = SplitExplicitSolver,
boundary_conditions = BC,
)
#=
BC = (
ClimateMachine.Ocean.SplitExplicit01.OceanFloorFreeSlip(),
ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceNoStressNoForcing(),
)
config = SplitConfig(
"rotating_jmc",
resolution,
dimensions,
Coupled(),
Rotating();
solver = SplitExplicitLSRK2nSolver,
boundary_conditions = BC,
)
=#
run_split_explicit(
config,
timespan,
dt_fast = 300,
dt_slow = 300, # 90 * 60,
# refDat = refVals.ninety_minutes,
analytic_solution = true,
)
end
| 20.578947 | 77 | 0.555627 | [
"@testset \"$(@__FILE__)\" begin\n\n include(\"../refvals/hydrostatic_spindown_refvals.jl\")\n\n # simulation time\n timeend = FT(15 * 24 * 3600) # s\n tout = FT(24 * 3600) # s\n timespan = (tout, timeend)\n\n # DG polynomial order\n N = Int(4)\n\n # Domain resolution\n NΛ£ = Int(5)\n NΚΈ = Int(5)\n NαΆ» = Int(8)\n resolution = (N, NΛ£, NΚΈ, NαΆ»)\n\n # Domain size\n LΛ£ = 1e6 # m\n LΚΈ = 1e6 # m\n H = 400 # m\n dimensions = (LΛ£, LΚΈ, H)\n\n BC = (\n OceanBC(Impenetrable(FreeSlip()), Insulating()),\n OceanBC(Penetrable(FreeSlip()), Insulating()),\n )\n config = SplitConfig(\n \"rotating_bla\",\n resolution,\n dimensions,\n Coupled(),\n Rotating();\n solver = SplitExplicitSolver,\n boundary_conditions = BC,\n )\n\n #=\n BC = (\n ClimateMachine.Ocean.SplitExplicit01.OceanFloorFreeSlip(),\n ClimateMachine.Ocean.SplitExplicit01.OceanSurfaceNoStressNoForcing(),\n )\n\n config = SplitConfig(\n \"rotating_jmc\",\n resolution,\n dimensions,\n Coupled(),\n Rotating();\n solver = SplitExplicitLSRK2nSolver,\n boundary_conditions = BC,\n )\n =#\n\n run_split_explicit(\n config,\n timespan,\n dt_fast = 300,\n dt_slow = 300, # 90 * 60,\n # refDat = refVals.ninety_minutes,\n analytic_solution = true,\n )\nend"
] |
f70bd5fdbd81a3e0a966c69edae9271ec76b4c57 | 396 | jl | Julia | test/runtests.jl | hendri54/CollegeEntry | bcbd6434fdd7f66944075b0b85efbfd8f6e6ac29 | [
"MIT"
] | null | null | null | test/runtests.jl | hendri54/CollegeEntry | bcbd6434fdd7f66944075b0b85efbfd8f6e6ac29 | [
"MIT"
] | null | null | null | test/runtests.jl | hendri54/CollegeEntry | bcbd6434fdd7f66944075b0b85efbfd8f6e6ac29 | [
"MIT"
] | null | null | null | using CollegeEntry, ModelObjectsLH, ModelParams
using Test, TestSetExtensions
include("test_helpers.jl")
@testset "All" begin
include("helpers_test.jl")
include("admissions_test.jl");
include("admission_prob_test.jl");
include("student_rankings_test.jl")
include("entry_test.jl");
include("entry_decisions_test.jl")
include("entry_results_test.jl")
end
# ---------- | 24.75 | 47 | 0.719697 | [
"@testset \"All\" begin\n include(\"helpers_test.jl\")\n include(\"admissions_test.jl\");\n include(\"admission_prob_test.jl\");\n include(\"student_rankings_test.jl\")\n include(\"entry_test.jl\");\n include(\"entry_decisions_test.jl\")\n include(\"entry_results_test.jl\")\nend"
] |
f70c3ff4968c391bb44e7f54b612f4bd73c46365 | 1,192 | jl | Julia | test/GLMesh.jl | cvdlab/ViewerGL.js | ae28d7808699f9c34add4ad265b68a84bfa14842 | [
"MIT"
] | 4 | 2019-07-25T23:07:18.000Z | 2021-09-05T18:38:20.000Z | test/GLMesh.jl | cvdlab/ViewerGL.js | ae28d7808699f9c34add4ad265b68a84bfa14842 | [
"MIT"
] | null | null | null | test/GLMesh.jl | cvdlab/ViewerGL.js | ae28d7808699f9c34add4ad265b68a84bfa14842 | [
"MIT"
] | 31 | 2019-10-09T14:09:51.000Z | 2022-03-31T14:52:35.000Z | using Test
using LinearAlgebraicRepresentation
Lar = LinearAlgebraicRepresentation
using ViewerGL
GL = ViewerGL
@testset "GLMesh.jl" begin
# function GLMesh()
@testset "GLMesh" begin
@test
@test
@test
@test
end
# function GLMesh(primitive)
@testset "GLMesh" begin
@test
@test
@test
@test
end
# function releaseGpuResources(mesh::GLMesh)
@testset "releaseGpuResources" begin
@test
@test
@test
@test
end
# function computeNormal(p1::Point2d, p2::Point2d)
@testset "computeNormal" begin
@test
@test
@test
@test
end
# function computeNormal(p0::Point3d,p1::Point3d,p2::Point3d)
@testset "computeNormal" begin
@test
@test
@test
@test
end
# function getBoundingBox(mesh::GLMesh)
@testset "getBoundingBox" begin
@test
@test
@test
@test
end
# function GLCuboid(box::Box3d)
@testset "GLCuboid" begin
@test
@test
@test
@test
end
# function GLAxis(p0::Point3d,p1::Point3d)
@testset "GLAxis" begin
@test
@test
@test
@test
end
end
| 16.108108 | 64 | 0.589765 | [
"@testset \"GLMesh.jl\" begin\n\n # function GLMesh()\n @testset \"GLMesh\" begin\n @test\n @test\n @test\n @test\n end\n\n # function GLMesh(primitive)\n @testset \"GLMesh\" begin\n @test\n @test\n @test\n @test\n end\n\n # function releaseGpuResources(mesh::GLMesh)\n @testset \"releaseGpuResources\" begin\n @test\n @test\n @test\n @test\n end\n\n # function computeNormal(p1::Point2d, p2::Point2d)\n @testset \"computeNormal\" begin\n @test\n @test\n @test\n @test\n end\n\n # function computeNormal(p0::Point3d,p1::Point3d,p2::Point3d)\n @testset \"computeNormal\" begin\n @test\n @test\n @test\n @test\n end\n\n # function getBoundingBox(mesh::GLMesh)\n @testset \"getBoundingBox\" begin\n @test\n @test\n @test\n @test\n end\n\n # function GLCuboid(box::Box3d)\n @testset \"GLCuboid\" begin\n @test\n @test\n @test\n @test\n end\n\n # function GLAxis(p0::Point3d,p1::Point3d)\n @testset \"GLAxis\" begin\n @test\n @test\n @test\n @test\n end\n\nend"
] |
f70ccf8b21eeac0bbd8a4ce08b4cb71e0206dba4 | 3,529 | jl | Julia | test/knr/testknr.jl | UnofficialJuliaMirrorSnapshots/SimilaritySearch.jl-053f045d-5466-53fd-b400-a066f88fe02a | 70c46490431ca7d0e5cf41052bc36afc4ba3c8fa | [
"Apache-2.0"
] | null | null | null | test/knr/testknr.jl | UnofficialJuliaMirrorSnapshots/SimilaritySearch.jl-053f045d-5466-53fd-b400-a066f88fe02a | 70c46490431ca7d0e5cf41052bc36afc4ba3c8fa | [
"Apache-2.0"
] | null | null | null | test/knr/testknr.jl | UnofficialJuliaMirrorSnapshots/SimilaritySearch.jl-053f045d-5466-53fd-b400-a066f88fe02a | 70c46490431ca7d0e5cf41052bc36afc4ba3c8fa | [
"Apache-2.0"
] | null | null | null | using SimilaritySearch
using SimilaritySearch.SimilarReferences
using Test
function test_vectors(create_index, dist::Function, ksearch, nick)
@testset "indexing vectors with $nick and $dist" begin
n = 1000 # number of items in the dataset
m = 100 # number of queries
dim = 3 # vector's dimension
db = [rand(Float32, dim) |> normalize! for i in 1:n]
queries = [rand(Float32, dim) |> normalize! for i in 1:m]
index = create_index(db)
optimize!(index, dist, recall=0.9, k=10)
perf = Performance(dist, index.db, queries, expected_k=10)
p = probe(perf, index, dist)
@show dist, p
@test p.recall > 0.8
@info "adding more items"
for item in queries
push!(index, dist, item)
end
perf = Performance(dist, index.db, queries, expected_k=1)
p = probe(perf, index, dist)
@show dist, p
@test p.recall > 0.999
return p
end
end
function test_sequences(create_index, dist::Function, ksearch, nick)
@testset "indexing sequences with $nick and $dist" begin
n = 1000 # number of items in the dataset
m = 100 # number of queries
dim = 5 # the length of sequences
V = collect(1:10) # vocabulary of the sequences
function create_item()
s = rand(V, dim)
if dist == jaccard_distance || dist == dice_distance || dist == intersection_distance
sort!(s)
s = unique(s)
end
return s
end
db = [create_item() for i in 1:n]
queries = [create_item() for i in 1:m]
@info "inserting items into the index"
index = create_index(db)
# optimize!(index, recall=0.9, k=10)
perf = Performance(dist, index.db, queries, expected_k=10)
p = probe(perf, index, dist)
@show dist, p
@test p.recall > 0.1 ## Performance object tests object identifiers, but sequence distances have a lot of distance collisions
# for item in queries
# push!(index, dist, item)
# end
# perf = Performance(dist, index.db, queries, expected_k=1)
# p = probe(perf, index, dist)
# @show dist, p
# @test p.recall > 0.999
# return p
end
end
@testset "indexing vectors" begin
# NOTE: The following algorithms are complex enough to say we are testing it doesn't have syntax errors, a more grained test functions are required
ksearch = 10
Ο = 127
ΞΊ = 3
for dist in [
l2_distance, # 1.0 -> metric, < 1.0 if dist is not a metric
l1_distance,
linf_distance,
lp_distance(3),
lp_distance(0.5),
angle_distance
]
p = test_vectors((db) -> fit(Knr, dist, db, numrefs=Ο, k=ΞΊ), dist, ksearch, "KNR")
end
end
@testset "indexing sequences" begin
# NOTE: The following algorithms are complex enough to say we are testing it doesn't have syntax errors, a more grained test functions are required
ksearch = 10
Ο = 127
ΞΊ = 3
# metric distances should achieve recall=1 (perhaps lesser because of numerical inestability)
for dist in [
jaccard_distance,
dice_distance,
intersection_distance,
common_prefix_distance,
levenshtein_distance,
lcs_distance,
hamming_distance,
]
p = test_sequences((db) -> fit(Knr, dist, db, numrefs=Ο, k=ΞΊ), dist, ksearch, "KNR")
end
end
| 32.081818 | 151 | 0.597053 | [
"@testset \"indexing vectors\" begin\n # NOTE: The following algorithms are complex enough to say we are testing it doesn't have syntax errors, a more grained test functions are required\n ksearch = 10\n Ο = 127\n ΞΊ = 3\n\n for dist in [\n l2_distance, # 1.0 -> metric, < 1.0 if dist is not a metric\n l1_distance,\n linf_distance,\n lp_distance(3),\n lp_distance(0.5),\n angle_distance\n ]\n p = test_vectors((db) -> fit(Knr, dist, db, numrefs=Ο, k=ΞΊ), dist, ksearch, \"KNR\")\n end\nend",
"@testset \"indexing sequences\" begin\n # NOTE: The following algorithms are complex enough to say we are testing it doesn't have syntax errors, a more grained test functions are required\n ksearch = 10\n Ο = 127\n ΞΊ = 3\n \n # metric distances should achieve recall=1 (perhaps lesser because of numerical inestability)\n for dist in [\n jaccard_distance,\n dice_distance,\n intersection_distance,\n common_prefix_distance,\n levenshtein_distance,\n lcs_distance,\n hamming_distance,\n ] \n p = test_sequences((db) -> fit(Knr, dist, db, numrefs=Ο, k=ΞΊ), dist, ksearch, \"KNR\")\n end\nend",
"@testset \"indexing vectors with $nick and $dist\" begin\n n = 1000 # number of items in the dataset\n m = 100 # number of queries\n dim = 3 # vector's dimension\n\n db = [rand(Float32, dim) |> normalize! for i in 1:n]\n queries = [rand(Float32, dim) |> normalize! for i in 1:m]\n\n index = create_index(db)\n optimize!(index, dist, recall=0.9, k=10)\n perf = Performance(dist, index.db, queries, expected_k=10)\n p = probe(perf, index, dist)\n @show dist, p\n @test p.recall > 0.8\n\n @info \"adding more items\"\n for item in queries\n push!(index, dist, item)\n end\n perf = Performance(dist, index.db, queries, expected_k=1)\n p = probe(perf, index, dist)\n @show dist, p\n @test p.recall > 0.999\n return p\n end",
"@testset \"indexing sequences with $nick and $dist\" begin\n n = 1000 # number of items in the dataset\n m = 100 # number of queries\n dim = 5 # the length of sequences\n V = collect(1:10) # vocabulary of the sequences\n\n function create_item()\n s = rand(V, dim)\n if dist == jaccard_distance || dist == dice_distance || dist == intersection_distance\n sort!(s)\n s = unique(s)\n end\n\n return s\n end\n \n db = [create_item() for i in 1:n]\n queries = [create_item() for i in 1:m]\n\n @info \"inserting items into the index\"\n index = create_index(db)\n # optimize!(index, recall=0.9, k=10)\n perf = Performance(dist, index.db, queries, expected_k=10)\n p = probe(perf, index, dist)\n @show dist, p\n @test p.recall > 0.1 ## Performance object tests object identifiers, but sequence distances have a lot of distance collisions\n\n # for item in queries\n # push!(index, dist, item)\n # end\n # perf = Performance(dist, index.db, queries, expected_k=1)\n # p = probe(perf, index, dist)\n # @show dist, p\n # @test p.recall > 0.999\n # return p\n end"
] |
f70e234e93c6e69904c72f38c7eaec1a83d715df | 1,759 | jl | Julia | test/rountines.jl | UnofficialJuliaMirror/YaoBlocks.jl-418bc28f-b43b-5e0b-a6e7-61bbc1a2c1df | 703091b543e95e6e4a3d7fe451c29ce0dd423c73 | [
"Apache-2.0"
] | null | null | null | test/rountines.jl | UnofficialJuliaMirror/YaoBlocks.jl-418bc28f-b43b-5e0b-a6e7-61bbc1a2c1df | 703091b543e95e6e4a3d7fe451c29ce0dd423c73 | [
"Apache-2.0"
] | null | null | null | test/rountines.jl | UnofficialJuliaMirror/YaoBlocks.jl-418bc28f-b43b-5e0b-a6e7-61bbc1a2c1df | 703091b543e95e6e4a3d7fe451c29ce0dd423c73 | [
"Apache-2.0"
] | null | null | null | using Test, YaoBlocks, LuxurySparse, YaoBase
using YaoBlocks.ConstGate
import YaoBlocks: u1mat, unmat, cunmat, unij!
@testset "dense-u1mat-unmat" begin
nbit = 4
mmm = Rx(0.5) |> mat
m1 = u1mat(nbit, mmm, 2)
m2 = linop2dense(v -> instruct!(v, mmm, 2), nbit)
m3 = unmat(nbit, mmm, (2,))
@test m1 β m2
@test m1 β m3
# test control not
β = kron
res = mat(I2) β mat(I2) β mat(P1) β mat(I2) + mat(I2) β mat(I2) β mat(P0) β mat(Rx(0.5))
m3 = cunmat(nbit, (2,), (0,), mmm, (1,))
@test m3 β res
end
@testset "sparse-u1mat-unmat" begin
nbit = 4
# test control not
β = kron
res = mat(I2) β mat(I2) β mat(P1) β mat(I2) + mat(I2) β mat(I2) β mat(P0) β mat(P1)
m3 = cunmat(nbit, (2,), (0,), mat(P1), (1,))
@test m3 β res
end
@testset "perm-unij-unmat" begin
perm = PermMatrix([1, 2, 3, 4], [1, 1, 1, 1.0])
pm = unij!(copy(perm), [2, 3, 4], PermMatrix([3, 1, 2], [0.1, 0.2, 0.3]))
@test pm β PermMatrix([1, 4, 2, 3], [1, 0.1, 0.2, 0.3])
pm = unij!(copy(perm), [2, 3, 4], PermMatrix([3, 1, 2], [0.1, 0.2, 0.3]) |> staticize)
@test pm β PermMatrix([1, 4, 2, 3], [1, 0.1, 0.2, 0.3])
nbit = 4
mmm = X |> mat
m1 = unmat(nbit, mmm, (2,))
m2 = linop2dense(v -> instruct!(v, mmm, 2), nbit)
@test m1 β m2
end
@testset "identity-unmat" begin
nbit = 4
mmm = Z |> mat
m1 = unmat(nbit, mmm, (2,))
m2 = linop2dense(v -> instruct!(v, mmm, 2), nbit)
@test m1 β m2
end
@testset "fix-static and adjoint for mat" begin
G1 = matblock(rand_unitary(2))
G6 = matblock(rand_unitary(1 << 6))
@test mat(put(3, 2 => G1')) β mat(put(3, 2 => matblock(G1)))'
@test mat(put(7, (3, 2, 1, 5, 4, 6) => G6')) β mat(put(7, (3, 2, 1, 5, 4, 6) => G6))'
end
| 30.327586 | 92 | 0.529847 | [
"@testset \"dense-u1mat-unmat\" begin\n nbit = 4\n mmm = Rx(0.5) |> mat\n m1 = u1mat(nbit, mmm, 2)\n m2 = linop2dense(v -> instruct!(v, mmm, 2), nbit)\n m3 = unmat(nbit, mmm, (2,))\n @test m1 β m2\n @test m1 β m3\n\n # test control not\n β = kron\n res = mat(I2) β mat(I2) β mat(P1) β mat(I2) + mat(I2) β mat(I2) β mat(P0) β mat(Rx(0.5))\n m3 = cunmat(nbit, (2,), (0,), mmm, (1,))\n @test m3 β res\nend",
"@testset \"sparse-u1mat-unmat\" begin\n nbit = 4\n # test control not\n β = kron\n res = mat(I2) β mat(I2) β mat(P1) β mat(I2) + mat(I2) β mat(I2) β mat(P0) β mat(P1)\n m3 = cunmat(nbit, (2,), (0,), mat(P1), (1,))\n @test m3 β res\nend",
"@testset \"perm-unij-unmat\" begin\n perm = PermMatrix([1, 2, 3, 4], [1, 1, 1, 1.0])\n pm = unij!(copy(perm), [2, 3, 4], PermMatrix([3, 1, 2], [0.1, 0.2, 0.3]))\n @test pm β PermMatrix([1, 4, 2, 3], [1, 0.1, 0.2, 0.3])\n pm = unij!(copy(perm), [2, 3, 4], PermMatrix([3, 1, 2], [0.1, 0.2, 0.3]) |> staticize)\n @test pm β PermMatrix([1, 4, 2, 3], [1, 0.1, 0.2, 0.3])\n\n nbit = 4\n mmm = X |> mat\n m1 = unmat(nbit, mmm, (2,))\n m2 = linop2dense(v -> instruct!(v, mmm, 2), nbit)\n @test m1 β m2\nend",
"@testset \"identity-unmat\" begin\n nbit = 4\n mmm = Z |> mat\n m1 = unmat(nbit, mmm, (2,))\n m2 = linop2dense(v -> instruct!(v, mmm, 2), nbit)\n @test m1 β m2\nend",
"@testset \"fix-static and adjoint for mat\" begin\n G1 = matblock(rand_unitary(2))\n G6 = matblock(rand_unitary(1 << 6))\n @test mat(put(3, 2 => G1')) β mat(put(3, 2 => matblock(G1)))'\n @test mat(put(7, (3, 2, 1, 5, 4, 6) => G6')) β mat(put(7, (3, 2, 1, 5, 4, 6) => G6))'\nend"
] |
f712c2d65d4c6d110cc2b0191f55497c456cc7a7 | 945 | jl | Julia | Projects/Projet_Optinum/test/runtests.jl | faicaltoubali/ENSEEIHT | 6db0aef64d68446b04f17d1eae574591026002b5 | [
"Apache-2.0"
] | null | null | null | Projects/Projet_Optinum/test/runtests.jl | faicaltoubali/ENSEEIHT | 6db0aef64d68446b04f17d1eae574591026002b5 | [
"Apache-2.0"
] | null | null | null | Projects/Projet_Optinum/test/runtests.jl | faicaltoubali/ENSEEIHT | 6db0aef64d68446b04f17d1eae574591026002b5 | [
"Apache-2.0"
] | null | null | null | using Markdown
using Test
using LinearAlgebra
using TestOptinum
using Optinum
include("../src/Algorithme_De_Newton.jl")
include("../src/Gradient_Conjugue_Tronque.jl")
include("../src/Lagrangien_Augmente.jl")
include("../src/Pas_De_Cauchy.jl")
include("../src/Regions_De_Confiance.jl")
#TestOptinum.cacher_stacktrace()
affiche = true
println("affiche = ",affiche)
# Tester l'ensemble des algorithmes
@testset "Test SujetOptinum" begin
# Tester l'algorithme de Newton
tester_algo_newton(affiche,Algorithme_De_Newton)
# Tester l'algorithme du pas de Cauchy
tester_pas_de_cauchy(affiche,Pas_De_Cauchy)
# Tester l'algorithme du gradient conjuguΓ© tronquΓ©
tester_gct(affiche,Gradient_Conjugue_Tronque)
# Tester l'algorithme des RΓ©gions de confiance avec PasdeCauchy | GCT
tester_regions_de_confiance(affiche,Regions_De_Confiance)
# Tester l'algorithme du Lagrangien AugmentΓ©
tester_lagrangien_augmente(affiche,Lagrangien_Augmente)
end
| 27.794118 | 70 | 0.812698 | [
"@testset \"Test SujetOptinum\" begin\n\t# Tester l'algorithme de Newton\n\ttester_algo_newton(affiche,Algorithme_De_Newton)\n\n\t# Tester l'algorithme du pas de Cauchy\n\ttester_pas_de_cauchy(affiche,Pas_De_Cauchy)\n\n\t# Tester l'algorithme du gradient conjuguΓ© tronquΓ©\n\ttester_gct(affiche,Gradient_Conjugue_Tronque)\n\n\t# Tester l'algorithme des RΓ©gions de confiance avec PasdeCauchy | GCT\n\ttester_regions_de_confiance(affiche,Regions_De_Confiance)\n\n\t# Tester l'algorithme du Lagrangien AugmentΓ©\n\ttester_lagrangien_augmente(affiche,Lagrangien_Augmente)\nend"
] |
f71360a2dafb0db950b40e740ced0cc7c4d67b27 | 1,820 | jl | Julia | test/test_returning_original.jl | TheRoniOne/Cleaner | 7279c8e8e92a9763ed72f8614f9a77ddbd40fade | [
"MIT"
] | 16 | 2021-08-20T10:07:04.000Z | 2022-02-07T18:09:40.000Z | test/test_returning_original.jl | TheRoniOne/Cleaner | 7279c8e8e92a9763ed72f8614f9a77ddbd40fade | [
"MIT"
] | 2 | 2021-08-17T06:09:49.000Z | 2022-02-06T01:36:49.000Z | test/test_returning_original.jl | TheRoniOne/Cleaner | 7279c8e8e92a9763ed72f8614f9a77ddbd40fade | [
"MIT"
] | null | null | null | using Test
using Cleaner:
materializer,
compact_table_ROT,
compact_columns_ROT,
compact_rows_ROT,
delete_const_columns_ROT,
polish_names_ROT,
reinfer_schema_ROT,
row_as_names_ROT,
rename_ROT,
drop_missing_ROT,
add_index_ROT
using DataFrames: DataFrame
@testset "ROT functions are working as expected" begin
testRM1 = DataFrame(;
A=[missing, missing, missing], B=[1, missing, 3], C=["x", "", "z"]
)
@test compact_columns_ROT(testRM1) isa DataFrame
@test compact_rows_ROT(testRM1) isa DataFrame
@test compact_table_ROT(testRM1) isa DataFrame
@test materializer(testRM1)((a=[1], b=[2])) isa DataFrame
let testDF = DataFrame(; A=[1, 1, 1], B=[4, 5, 6], C=String["2", "2", "2"])
@test delete_const_columns_ROT(testDF) isa DataFrame
end
let testDF = DataFrame(
" _aName with_loTsOfProblems" => [1, 2, 3],
" _aName with_loTsOfProblems1" => [4, 5, 6],
" _aName with_loTsOfProblems2" => [7, 8, 9],
)
@test polish_names_ROT(testDF) isa DataFrame
end
let testDF = DataFrame(; A=[1, 2, 3], B=Any[4, missing, "z"], C=Any["5", "6", "9"])
@test reinfer_schema_ROT(testDF) isa DataFrame
end
let testDF = DataFrame(; A=[1, 2, "x", 4], B=[5, 6, "y", 7], C=["x", "y", "z", "a"])
@test row_as_names_ROT(testDF, 3) isa DataFrame
end
let testDF = DataFrame(; A=[1, 2, "x", 4], B=[5, 6, "y", 7], C=["x", "y", "z", "a"])
@test rename_ROT(testDF, [:a, :b, :c]) isa DataFrame
end
let testDF = DataFrame(; A=[1, 2, "x", 4], B=[5, 6, "y", 7], C=["x", "y", "z", "a"])
@test drop_missing_ROT(testDF) isa DataFrame
end
let testDF = DataFrame(; A=[4, 5, 6])
@test add_index_ROT(testDF) isa DataFrame
end
end
| 31.37931 | 88 | 0.590659 | [
"@testset \"ROT functions are working as expected\" begin\n testRM1 = DataFrame(;\n A=[missing, missing, missing], B=[1, missing, 3], C=[\"x\", \"\", \"z\"]\n )\n\n @test compact_columns_ROT(testRM1) isa DataFrame\n @test compact_rows_ROT(testRM1) isa DataFrame\n @test compact_table_ROT(testRM1) isa DataFrame\n @test materializer(testRM1)((a=[1], b=[2])) isa DataFrame\n\n let testDF = DataFrame(; A=[1, 1, 1], B=[4, 5, 6], C=String[\"2\", \"2\", \"2\"])\n @test delete_const_columns_ROT(testDF) isa DataFrame\n end\n\n let testDF = DataFrame(\n \" _aName with_loTsOfProblems\" => [1, 2, 3],\n \" _aName with_loTsOfProblems1\" => [4, 5, 6],\n \" _aName with_loTsOfProblems2\" => [7, 8, 9],\n )\n @test polish_names_ROT(testDF) isa DataFrame\n end\n\n let testDF = DataFrame(; A=[1, 2, 3], B=Any[4, missing, \"z\"], C=Any[\"5\", \"6\", \"9\"])\n @test reinfer_schema_ROT(testDF) isa DataFrame\n end\n\n let testDF = DataFrame(; A=[1, 2, \"x\", 4], B=[5, 6, \"y\", 7], C=[\"x\", \"y\", \"z\", \"a\"])\n @test row_as_names_ROT(testDF, 3) isa DataFrame\n end\n\n let testDF = DataFrame(; A=[1, 2, \"x\", 4], B=[5, 6, \"y\", 7], C=[\"x\", \"y\", \"z\", \"a\"])\n @test rename_ROT(testDF, [:a, :b, :c]) isa DataFrame\n end\n\n let testDF = DataFrame(; A=[1, 2, \"x\", 4], B=[5, 6, \"y\", 7], C=[\"x\", \"y\", \"z\", \"a\"])\n @test drop_missing_ROT(testDF) isa DataFrame\n end\n\n let testDF = DataFrame(; A=[4, 5, 6])\n @test add_index_ROT(testDF) isa DataFrame\n end\nend"
] |
f7139c61b9baf05db45b88230b03c8047a37b777 | 2,615 | jl | Julia | test/testProductReproducable.jl | dehann/iSAM.jl | 61869753a76717b1019756d09785a784fdafe3ab | [
"MIT"
] | null | null | null | test/testProductReproducable.jl | dehann/iSAM.jl | 61869753a76717b1019756d09785a784fdafe3ab | [
"MIT"
] | null | null | null | test/testProductReproducable.jl | dehann/iSAM.jl | 61869753a76717b1019756d09785a784fdafe3ab | [
"MIT"
] | null | null | null | # test for conv and product repeatability
using Test
using Statistics
using IncrementalInference
##
@testset "forward backward convolutions and products sequence" begin
fg = initfg()
addVariable!(fg, :a, ContinuousScalar)
addVariable!(fg, :b, ContinuousScalar)
addVariable!(fg, :c, ContinuousScalar)
addVariable!(fg, :d, ContinuousScalar)
addVariable!(fg, :e, ContinuousScalar)
addFactor!(fg, [:a], Prior(Normal()))
addFactor!(fg, [:a;:b], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:b;:c], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:c;:d], LinearRelative(Normal(10, 1)))
addFactor!(fg, [:d;:e], LinearRelative(Normal(10, 1)))
initAll!(fg)
tree = solveTree!(fg)
@test (Statistics.mean(getPoints(getBelief(fg, :a)))- 0 |> abs) < 3
@test (Statistics.mean(getPoints(getBelief(fg, :b)))-10 |> abs) < 4
@test (Statistics.mean(getPoints(getBelief(fg, :c)))-20 |> abs) < 4
@test (Statistics.mean(getPoints(getBelief(fg, :d)))-30 |> abs) < 5
@test (Statistics.mean(getPoints(getBelief(fg, :e)))-40 |> abs) < 5
@test 0.3 < Statistics.std(getPoints(getBelief(fg, :a))) < 2
@test 0.5 < Statistics.std(getPoints(getBelief(fg, :b))) < 4
@test 0.9 < Statistics.std(getPoints(getBelief(fg, :c))) < 6
@test 1.2 < Statistics.std(getPoints(getBelief(fg, :d))) < 7
@test 1.5 < Statistics.std(getPoints(getBelief(fg, :e))) < 8
# drawTree(tree, show=true)
# using RoMEPlotting
# plotKDE(fg, ls(fg))
# spyCliqMat(tree, :b)
end
@testset "Basic back and forth convolution over LinearRelative should spread" begin
fg = initfg()
addVariable!(fg, :a, ContinuousScalar)
addVariable!(fg, :b, ContinuousScalar)
addFactor!(fg, [:a;:b], LinearRelative(Normal(10, 1)), graphinit=false)
initManual!(fg, :a, randn(1,100))
initManual!(fg, :b, 10 .+randn(1,100))
A = getBelief(fg, :a)
B = getBelief(fg, :b)
# plotKDE(fg, [:a; :b])
# repeat many times to ensure the means stay put and covariances spread out
for i in 1:10
pts = approxConv(fg, :abf1, :b)
B_ = manikde!(ContinuousScalar, pts)
# plotKDE([B_; B])
initManual!(fg, :b, B_)
pts = approxConv(fg, :abf1, :a)
A_ = manikde!(ContinuousScalar, pts)
# plotKDE([A_; A])
initManual!(fg, :a, A_)
end
A_ = getBelief(fg, :a)
B_ = getBelief(fg, :b)
# plotKDE([A_; B_; A; B])
@test (Statistics.mean(getPoints(A)) |> abs) < 1
@test (Statistics.mean(getPoints(A_))|> abs) < 2
@test (Statistics.mean(getPoints(B)) -10 |> abs) < 1
@test (Statistics.mean(getPoints(B_))-10 |> abs) < 2
@test Statistics.std(getPoints(A)) < 2
@test 3 < Statistics.std(getPoints(A_))
@test Statistics.std(getPoints(B)) < 2
@test 3 < Statistics.std(getPoints(B_))
##
end
##
| 25.144231 | 83 | 0.676482 | [
"@testset \"forward backward convolutions and products sequence\" begin\n\nfg = initfg()\n\naddVariable!(fg, :a, ContinuousScalar)\naddVariable!(fg, :b, ContinuousScalar)\naddVariable!(fg, :c, ContinuousScalar)\naddVariable!(fg, :d, ContinuousScalar)\naddVariable!(fg, :e, ContinuousScalar)\n\naddFactor!(fg, [:a], Prior(Normal()))\naddFactor!(fg, [:a;:b], LinearRelative(Normal(10, 1)))\naddFactor!(fg, [:b;:c], LinearRelative(Normal(10, 1)))\naddFactor!(fg, [:c;:d], LinearRelative(Normal(10, 1)))\naddFactor!(fg, [:d;:e], LinearRelative(Normal(10, 1)))\n\ninitAll!(fg)\n\ntree = solveTree!(fg)\n\n\n@test (Statistics.mean(getPoints(getBelief(fg, :a)))- 0 |> abs) < 3\n@test (Statistics.mean(getPoints(getBelief(fg, :b)))-10 |> abs) < 4\n@test (Statistics.mean(getPoints(getBelief(fg, :c)))-20 |> abs) < 4\n@test (Statistics.mean(getPoints(getBelief(fg, :d)))-30 |> abs) < 5\n@test (Statistics.mean(getPoints(getBelief(fg, :e)))-40 |> abs) < 5\n\n@test 0.3 < Statistics.std(getPoints(getBelief(fg, :a))) < 2\n@test 0.5 < Statistics.std(getPoints(getBelief(fg, :b))) < 4\n@test 0.9 < Statistics.std(getPoints(getBelief(fg, :c))) < 6\n@test 1.2 < Statistics.std(getPoints(getBelief(fg, :d))) < 7\n@test 1.5 < Statistics.std(getPoints(getBelief(fg, :e))) < 8\n\n\n# drawTree(tree, show=true)\n# using RoMEPlotting\n# plotKDE(fg, ls(fg))\n# spyCliqMat(tree, :b)\n\nend",
"@testset \"Basic back and forth convolution over LinearRelative should spread\" begin\n\nfg = initfg()\n\naddVariable!(fg, :a, ContinuousScalar)\naddVariable!(fg, :b, ContinuousScalar)\n\naddFactor!(fg, [:a;:b], LinearRelative(Normal(10, 1)), graphinit=false)\n\ninitManual!(fg, :a, randn(1,100))\ninitManual!(fg, :b, 10 .+randn(1,100))\n\nA = getBelief(fg, :a)\nB = getBelief(fg, :b)\n# plotKDE(fg, [:a; :b])\n\n# repeat many times to ensure the means stay put and covariances spread out\nfor i in 1:10\n pts = approxConv(fg, :abf1, :b)\n B_ = manikde!(ContinuousScalar, pts)\n # plotKDE([B_; B])\n initManual!(fg, :b, B_)\n\n pts = approxConv(fg, :abf1, :a)\n A_ = manikde!(ContinuousScalar, pts)\n # plotKDE([A_; A])\n initManual!(fg, :a, A_)\nend\n\nA_ = getBelief(fg, :a)\nB_ = getBelief(fg, :b)\n# plotKDE([A_; B_; A; B])\n\n@test (Statistics.mean(getPoints(A)) |> abs) < 1\n@test (Statistics.mean(getPoints(A_))|> abs) < 2\n\n@test (Statistics.mean(getPoints(B)) -10 |> abs) < 1\n@test (Statistics.mean(getPoints(B_))-10 |> abs) < 2\n\n@test Statistics.std(getPoints(A)) < 2\n@test 3 < Statistics.std(getPoints(A_))\n\n@test Statistics.std(getPoints(B)) < 2\n@test 3 < Statistics.std(getPoints(B_))\n\n##\n\nend"
] |
f7153aaee71132dc4b60ff01c3f91af6c17752a3 | 5,750 | jl | Julia | test/runtests.jl | burmecia/OpenAIGym.jl | 087bec95d13ca85216a0eaa7d47f50cda2867367 | [
"MIT"
] | 86 | 2017-02-24T20:25:05.000Z | 2022-03-31T04:50:07.000Z | test/runtests.jl | burmecia/OpenAIGym.jl | 087bec95d13ca85216a0eaa7d47f50cda2867367 | [
"MIT"
] | 31 | 2017-08-06T17:27:08.000Z | 2020-08-05T16:05:07.000Z | test/runtests.jl | burmecia/OpenAIGym.jl | 087bec95d13ca85216a0eaa7d47f50cda2867367 | [
"MIT"
] | 30 | 2017-03-20T22:06:01.000Z | 2021-09-24T04:38:33.000Z | using OpenAIGym
using PyCall
using Test
"""
`function time_steps(env::GymEnv{T}, num_eps::Int) where T`
run through num_eps eps, recording the time taken for each step and
how many steps were made. Doesn't time the `reset!` or the first step of each
episode (since higher chance that it's slower/faster than the rest, and we want
to compare the average time taken for each step as fairly as possible)
"""
function time_steps(env::GymEnv, num_eps::Int)
t = 0.0
steps = 0
for i in 1:num_eps
reset!(env)
# step!(env, rand(env.actions)) # ignore the first step - it might be slow?
t += (@elapsed steps += epstep(env))
end
steps, t
end
"""
Steps through an episode until it's `done`
assumes env has been `reset!`
"""
function epstep(env::GymEnv)
steps = 0
while true
steps += 1
r, sβ² = step!(env, rand(env.actions))
finished(env, sβ²) && break
end
steps
end
@testset "Gym Basics" begin
pong = GymEnv(:Pong, :v4)
pongnf = GymEnv(:PongNoFrameskip, :v4)
pacman = GymEnv(:MsPacman, :v4)
pacmannf = GymEnv(:MsPacmanNoFrameskip, :v4)
cartpole = GymEnv(:CartPole)
bj = GymEnv(:Blackjack)
allenvs = [pong, pongnf, pacman, pacmannf, cartpole, bj]
eps2trial = Dict(pong=>2, pongnf=>1, pacman=>2, pacmannf=>1, cartpole=>400, bj=>30000)
atarienvs = [pong, pongnf, pacman, pacmannf]
envs = allenvs
@testset "string constructor" begin
for name β ("Pong-v4", "PongNoFrameskip-v4", "MsPacman-v4",
"MsPacmanNoFrameskip-v4", "CartPole-v0", "Blackjack-v0")
env = GymEnv(name)
@test !PyCall.ispynull(env.pyenv)
end
end
@testset "envs load" begin
# check they all work - no errors == no worries
println("------------------------------ Check envs load ------------------------------")
for (i, env) in enumerate(envs)
a = rand(env.actions) |> OpenAIGym.pyaction
action_type = a |> PyObject |> pytypeof
println("env.pyenv: $(env.pyenv) action_type: $action_type (e.g. $a)")
time_steps(env, 1)
@test !ispynull(env.pyenv)
println("------------------------------")
end
end
@testset "julia speed test" begin
println("------------------------------ Begin Julia Speed Check ------------------------------")
for env in envs
num_eps = eps2trial[env]
steps, t = time_steps(env, num_eps)
println("env.pyenv: $(env.pyenv) num_eps: $num_eps t: $t steps: $steps")
println("microsecs/step (lower is better): ", t*1e6/steps)
close(env)
println("------------------------------")
end
println("------------------------------ End Julia Speed Check ------------------------------\n")
end
@testset "python speed test" begin
println("------------------------------ Begin Python Speed Check ------------------------------")
py"""
import gym
import numpy as np
pong = gym.make("Pong-v4")
pongnf = gym.make("PongNoFrameskip-v4")
pacman = gym.make("MsPacman-v4");
pacmannf = gym.make("MsPacmanNoFrameskip-v4");
cartpole = gym.make("CartPole-v0")
bj = gym.make("Blackjack-v0")
allenvs = [pong, pongnf, pacman, pacmannf, cartpole, bj]
eps2trial = {pong: 2, pongnf: 1, pacman: 2, pacmannf: 1, cartpole: 400, bj: 30000}
atarienvs = [pong, pongnf, pacman, pacmannf];
envs = allenvs
import time
class Timer(object):
elapsed = 0.0
def __init__(self, name=None):
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
Timer.elapsed = time.time() - self.tstart
def time_steps(env, num_eps):
t = 0.0
steps = 0
for i in range(num_eps):
env.reset()
with Timer():
steps += epstep(env)
t += Timer.elapsed
return steps, t
def epstep(env):
steps = 0
while True:
steps += 1
action = env.action_space.sample()
state, reward, done, info = env.step(action)
if done == True:
break
return steps
for env in envs:
num_eps = eps2trial[env]
with Timer():
steps, s = time_steps(env, num_eps)
t = Timer.elapsed
print("{env} num_eps: {num_eps} t: {t} steps: {steps} \n"
"microsecs/step (lower is better): {time}".format(
env=env, num_eps=num_eps, t=t, steps=steps,
time=t*1e6/steps))
print("------------------------------")
"""
println("------------------------------ End Python Speed Check ------------------------------")
end # @testset "python speed test"
@testset "Base.show" begin
let
io = IOBuffer()
env = GymEnv(:MsPacman, :v4)
show(io, env)
@test String(take!(io)) == "GymEnv MsPacman-v4\n" *
" TimeLimit\n" *
" r = 0.0\n" *
" βr = 0.0"
end
let
io = IOBuffer()
env = GymEnv(:Blackjack)
show(io, env)
@test String(take!(io)) == "GymEnv Blackjack-v0\n" *
" r = 0.0\n" *
" βr = 0.0"
end
end # @testset "Base.show"
end
| 33.430233 | 105 | 0.488348 | [
"@testset \"Gym Basics\" begin\n\n pong = GymEnv(:Pong, :v4)\n pongnf = GymEnv(:PongNoFrameskip, :v4)\n pacman = GymEnv(:MsPacman, :v4)\n pacmannf = GymEnv(:MsPacmanNoFrameskip, :v4)\n cartpole = GymEnv(:CartPole)\n bj = GymEnv(:Blackjack)\n\n allenvs = [pong, pongnf, pacman, pacmannf, cartpole, bj]\n eps2trial = Dict(pong=>2, pongnf=>1, pacman=>2, pacmannf=>1, cartpole=>400, bj=>30000)\n atarienvs = [pong, pongnf, pacman, pacmannf]\n envs = allenvs\n\n @testset \"string constructor\" begin\n for name β (\"Pong-v4\", \"PongNoFrameskip-v4\", \"MsPacman-v4\",\n \"MsPacmanNoFrameskip-v4\", \"CartPole-v0\", \"Blackjack-v0\")\n env = GymEnv(name)\n @test !PyCall.ispynull(env.pyenv)\n end\n end\n\n @testset \"envs load\" begin\n # check they all work - no errors == no worries\n println(\"------------------------------ Check envs load ------------------------------\")\n for (i, env) in enumerate(envs)\n a = rand(env.actions) |> OpenAIGym.pyaction\n action_type = a |> PyObject |> pytypeof\n println(\"env.pyenv: $(env.pyenv) action_type: $action_type (e.g. $a)\")\n time_steps(env, 1)\n @test !ispynull(env.pyenv)\n println(\"------------------------------\")\n end\n end\n\n @testset \"julia speed test\" begin\n println(\"------------------------------ Begin Julia Speed Check ------------------------------\")\n for env in envs\n num_eps = eps2trial[env]\n steps, t = time_steps(env, num_eps)\n println(\"env.pyenv: $(env.pyenv) num_eps: $num_eps t: $t steps: $steps\")\n println(\"microsecs/step (lower is better): \", t*1e6/steps)\n close(env)\n println(\"------------------------------\")\n end\n println(\"------------------------------ End Julia Speed Check ------------------------------\\n\")\n end\n\n @testset \"python speed test\" begin\n println(\"------------------------------ Begin Python Speed Check ------------------------------\")\n py\"\"\"\n import gym\n import numpy as np\n\n pong = gym.make(\"Pong-v4\")\n pongnf = gym.make(\"PongNoFrameskip-v4\")\n pacman = gym.make(\"MsPacman-v4\");\n pacmannf = gym.make(\"MsPacmanNoFrameskip-v4\");\n cartpole = gym.make(\"CartPole-v0\")\n bj = gym.make(\"Blackjack-v0\")\n\n allenvs = [pong, pongnf, pacman, pacmannf, cartpole, bj]\n eps2trial = {pong: 2, pongnf: 1, pacman: 2, pacmannf: 1, cartpole: 400, bj: 30000}\n atarienvs = [pong, pongnf, pacman, pacmannf];\n\n envs = allenvs\n\n import time\n class Timer(object):\n elapsed = 0.0\n def __init__(self, name=None):\n self.name = name\n\n def __enter__(self):\n self.tstart = time.time()\n\n def __exit__(self, type, value, traceback):\n Timer.elapsed = time.time() - self.tstart\n\n def time_steps(env, num_eps):\n t = 0.0\n steps = 0\n for i in range(num_eps):\n env.reset()\n with Timer():\n steps += epstep(env)\n t += Timer.elapsed\n return steps, t\n\n def epstep(env):\n steps = 0\n while True:\n steps += 1\n action = env.action_space.sample()\n state, reward, done, info = env.step(action)\n if done == True:\n break\n return steps\n\n for env in envs:\n num_eps = eps2trial[env]\n with Timer():\n steps, s = time_steps(env, num_eps)\n t = Timer.elapsed\n print(\"{env} num_eps: {num_eps} t: {t} steps: {steps} \\n\"\n \"microsecs/step (lower is better): {time}\".format(\n env=env, num_eps=num_eps, t=t, steps=steps,\n time=t*1e6/steps))\n print(\"------------------------------\")\n \"\"\"\n println(\"------------------------------ End Python Speed Check ------------------------------\")\n end # @testset \"python speed test\"\n\n @testset \"Base.show\" begin\n let\n io = IOBuffer()\n env = GymEnv(:MsPacman, :v4)\n show(io, env)\n @test String(take!(io)) == \"GymEnv MsPacman-v4\\n\" *\n \" TimeLimit\\n\" *\n \" r = 0.0\\n\" *\n \" βr = 0.0\"\n end\n\n let\n io = IOBuffer()\n env = GymEnv(:Blackjack)\n show(io, env)\n @test String(take!(io)) == \"GymEnv Blackjack-v0\\n\" *\n \" r = 0.0\\n\" *\n \" βr = 0.0\"\n end\n end # @testset \"Base.show\"\nend"
] |
f7183b3be5ea2b30e86fc2f42f90233e708e517b | 2,245 | jl | Julia | test/runtests.jl | maarten-keijzer/AdaptiveWindow.jl | 5bd90a475110ac5f6dd88226286455da0f8d87bf | [
"MIT"
] | 1 | 2022-01-04T13:50:24.000Z | 2022-01-04T13:50:24.000Z | test/runtests.jl | maarten-keijzer/AdaptiveWindow.jl | 5bd90a475110ac5f6dd88226286455da0f8d87bf | [
"MIT"
] | null | null | null | test/runtests.jl | maarten-keijzer/AdaptiveWindow.jl | 5bd90a475110ac5f6dd88226286455da0f8d87bf | [
"MIT"
] | null | null | null |
using AdaptiveWindows
using Test
@testset verbose=true "Adaptive Mean" begin
@testset "Mean Computation " begin
m = AdaptiveMean(Ξ΄ = 1e-9)
r = randn(1000)
fit!(m, r)
m1 = sum(r) / length(r)
m2 = value(m)
@test m1 β m2
ad = AdaptiveMean()
# This should not trigger a truncated window
fit!(ad, randn(10_000))
@test stats(ad).n == 10_000
# Changing the distribution should trigger a truncated window
fit!(ad, 1 .+ randn(10_000))
@test 9_900 < stats(ad).n < 20_000
# check truncation of shifting using the callback function
shifted = false
m = AdaptiveMean(onshiftdetected = ad -> shifted = true)
for i in 1:1_000
r = randn()
if i > 500
r += 1
end
fit!(m, r)
end
@test shifted
end
function consistent(ad)
total = sum(nobs(v) for v in ad.window)
total == nobs(ad.stats)
end
@testset "Memory Management" begin
m = AdaptiveMean()
fit!(m, 1)
@test nobs(m.window[1]) == 0
@test nobs(m.window[2]) == 1
@test nobs(m.window[3]) == 0
fit!(m, 1)
@test nobs(m.window[1]) == 0
@test nobs(m.window[2]) == 1
@test nobs(m.window[3]) == 1
fit!(m, 1)
fit!(m, 1)
fit!(m, 1)
fit!(m, 1)
fit!(m, 1)
@test consistent(m)
m = AdaptiveMean()
n = AdaptiveWindows.M * ( 1 + 2 + 4)
fit!(m, ones(n))
@test length(m.window) <= AdaptiveWindows.M * log2(n)
@test nobs(m) == n
@test consistent(m)
mn = AdaptiveMean()
n = 1<<12
# withoutdropping for speed
fit!(withoutdropping(mn), ones(n))
m = AdaptiveWindows.M
expected = m * ceil(log2(n) - log2(m))
@test length(mn.window) == expected
@test nobs(mn) == n
@test consistent(mn)
# Maximum amount of memory
mn = withmaxlength(AdaptiveMean(), 3)
fit!(mn, rand(10000))
@test length(mn.ad.window) == AdaptiveWindows.M * 3
@test consistent(mn.ad)
end
end
| 23.385417 | 69 | 0.50245 | [
"@testset verbose=true \"Adaptive Mean\" begin\n\n @testset \"Mean Computation \" begin\n m = AdaptiveMean(Ξ΄ = 1e-9)\n\n r = randn(1000)\n\n fit!(m, r)\n\n m1 = sum(r) / length(r)\n m2 = value(m)\n\n @test m1 β m2\n ad = AdaptiveMean()\n\n # This should not trigger a truncated window\n fit!(ad, randn(10_000))\n @test stats(ad).n == 10_000\n\n # Changing the distribution should trigger a truncated window\n fit!(ad, 1 .+ randn(10_000))\n @test 9_900 < stats(ad).n < 20_000\n\n # check truncation of shifting using the callback function\n shifted = false\n\n m = AdaptiveMean(onshiftdetected = ad -> shifted = true)\n\n for i in 1:1_000\n r = randn()\n if i > 500\n r += 1\n end\n fit!(m, r)\n end\n\n @test shifted\n end\n\n function consistent(ad)\n total = sum(nobs(v) for v in ad.window)\n total == nobs(ad.stats)\n end\n\n @testset \"Memory Management\" begin\n\n m = AdaptiveMean()\n fit!(m, 1)\n @test nobs(m.window[1]) == 0\n @test nobs(m.window[2]) == 1\n @test nobs(m.window[3]) == 0\n fit!(m, 1)\n @test nobs(m.window[1]) == 0\n @test nobs(m.window[2]) == 1\n @test nobs(m.window[3]) == 1\n fit!(m, 1)\n fit!(m, 1)\n fit!(m, 1)\n fit!(m, 1)\n fit!(m, 1)\n @test consistent(m)\n \n m = AdaptiveMean()\n n = AdaptiveWindows.M * ( 1 + 2 + 4)\n fit!(m, ones(n))\n\n @test length(m.window) <= AdaptiveWindows.M * log2(n)\n @test nobs(m) == n \n @test consistent(m)\n\n mn = AdaptiveMean()\n n = 1<<12\n\n # withoutdropping for speed\n fit!(withoutdropping(mn), ones(n))\n m = AdaptiveWindows.M \n expected = m * ceil(log2(n) - log2(m))\n @test length(mn.window) == expected\n @test nobs(mn) == n \n @test consistent(mn)\n \n # Maximum amount of memory\n mn = withmaxlength(AdaptiveMean(), 3)\n fit!(mn, rand(10000))\n @test length(mn.ad.window) == AdaptiveWindows.M * 3\n @test consistent(mn.ad)\n \n\n end\nend"
] |
f71fdd500cffb77f7512f74f826a1a508b234e8f | 34,448 | jl | Julia | test/runtests.jl | bkamins/Statistics.jl | 81a1cdd6c2105d3e50f76375630bbed4744e67c1 | [
"MIT"
] | null | null | null | test/runtests.jl | bkamins/Statistics.jl | 81a1cdd6c2105d3e50f76375630bbed4744e67c1 | [
"MIT"
] | null | null | null | test/runtests.jl | bkamins/Statistics.jl | 81a1cdd6c2105d3e50f76375630bbed4744e67c1 | [
"MIT"
] | null | null | null | # This file is a part of Julia. License is MIT: https://julialang.org/license
using Statistics, Test, Random, LinearAlgebra, SparseArrays
using Test: guardseed
Random.seed!(123)
@testset "middle" begin
@test middle(3) === 3.0
@test middle(2, 3) === 2.5
let x = ((floatmax(1.0)/4)*3)
@test middle(x, x) === x
end
@test middle(1:8) === 4.5
@test middle([1:8;]) === 4.5
# ensure type-correctness
for T in [Bool,Int8,Int16,Int32,Int64,Int128,UInt8,UInt16,UInt32,UInt64,UInt128,Float16,Float32,Float64]
@test middle(one(T)) === middle(one(T), one(T))
end
end
@testset "median" begin
@test median([1.]) === 1.
@test median([1.,3]) === 2.
@test median([1.,3,2]) === 2.
@test median([1,3,2]) === 2.0
@test median([1,3,2,4]) === 2.5
@test median([0.0,Inf]) == Inf
@test median([0.0,-Inf]) == -Inf
@test median([0.,Inf,-Inf]) == 0.0
@test median([1.,-1.,Inf,-Inf]) == 0.0
@test isnan(median([-Inf,Inf]))
X = [2 3 1 -1; 7 4 5 -4]
@test all(median(X, dims=2) .== [1.5, 4.5])
@test all(median(X, dims=1) .== [4.5 3.5 3.0 -2.5])
@test X == [2 3 1 -1; 7 4 5 -4] # issue #17153
@test_throws ArgumentError median([])
@test isnan(median([NaN]))
@test isnan(median([0.0,NaN]))
@test isnan(median([NaN,0.0]))
@test isnan(median([NaN,0.0,1.0]))
@test isnan(median(Any[NaN,0.0,1.0]))
@test isequal(median([NaN 0.0; 1.2 4.5], dims=2), reshape([NaN; 2.85], 2, 1))
@test ismissing(median([1, missing]))
@test ismissing(median([1, 2, missing]))
@test ismissing(median([NaN, 2.0, missing]))
@test ismissing(median([NaN, missing]))
@test ismissing(median([missing, NaN]))
@test ismissing(median(Any[missing, 2.0, 3.0, 4.0, NaN]))
@test median(skipmissing([1, missing, 2])) === 1.5
@test median!([1 2 3 4]) == 2.5
@test median!([1 2; 3 4]) == 2.5
@test invoke(median, Tuple{AbstractVector}, 1:10) == median(1:10) == 5.5
@test @inferred(median(Float16[1, 2, NaN])) === Float16(NaN)
@test @inferred(median(Float16[1, 2, 3])) === Float16(2)
@test @inferred(median(Float32[1, 2, NaN])) === NaN32
@test @inferred(median(Float32[1, 2, 3])) === 2.0f0
end
@testset "mean" begin
@test mean((1,2,3)) === 2.
@test mean([0]) === 0.
@test mean([1.]) === 1.
@test mean([1.,3]) == 2.
@test mean([1,2,3]) == 2.
@test mean([0 1 2; 4 5 6], dims=1) == [2. 3. 4.]
@test mean([1 2 3; 4 5 6], dims=1) == [2.5 3.5 4.5]
@test mean(-, [1 2 3 ; 4 5 6], dims=1) == [-2.5 -3.5 -4.5]
@test mean(-, [1 2 3 ; 4 5 6], dims=2) == transpose([-2.0 -5.0])
@test mean(-, [1 2 3 ; 4 5 6], dims=(1, 2)) == -3.5 .* ones(1, 1)
@test mean(-, [1 2 3 ; 4 5 6], dims=(1, 1)) == [-2.5 -3.5 -4.5]
@test mean(-, [1 2 3 ; 4 5 6], dims=()) == Float64[-1 -2 -3 ; -4 -5 -6]
@test mean(i->i+1, 0:2) === 2.
@test mean(isodd, [3]) === 1.
@test mean(x->3x, (1,1)) === 3.
# mean of iterables:
n = 10; a = randn(n); b = randn(n)
@test mean(Tuple(a)) β mean(a)
@test mean(Tuple(a + b*im)) β mean(a + b*im)
@test mean(cos, Tuple(a)) β mean(cos, a)
@test mean(x->x/2, a + b*im) β mean(a + b*im) / 2.
@test ismissing(mean(Tuple((1, 2, missing, 4, 5))))
@test isnan(mean([NaN]))
@test isnan(mean([0.0,NaN]))
@test isnan(mean([NaN,0.0]))
@test isnan(mean([0.,Inf,-Inf]))
@test isnan(mean([1.,-1.,Inf,-Inf]))
@test isnan(mean([-Inf,Inf]))
@test isequal(mean([NaN 0.0; 1.2 4.5], dims=2), reshape([NaN; 2.85], 2, 1))
@test ismissing(mean([1, missing]))
@test ismissing(mean([NaN, missing]))
@test ismissing(mean([missing, NaN]))
@test isequal(mean([missing 1.0; 2.0 3.0], dims=1), [missing 2.0])
@test mean(skipmissing([1, missing, 2])) === 1.5
@test isequal(mean(Complex{Float64}[]), NaN+NaN*im)
@test mean(Complex{Float64}[]) isa Complex{Float64}
@test isequal(mean(skipmissing(Complex{Float64}[])), NaN+NaN*im)
@test mean(skipmissing(Complex{Float64}[])) isa Complex{Float64}
@test isequal(mean(abs, Complex{Float64}[]), NaN)
@test mean(abs, Complex{Float64}[]) isa Float64
@test isequal(mean(abs, skipmissing(Complex{Float64}[])), NaN)
@test mean(abs, skipmissing(Complex{Float64}[])) isa Float64
@test isequal(mean(Int[]), NaN)
@test mean(Int[]) isa Float64
@test isequal(mean(skipmissing(Int[])), NaN)
@test mean(skipmissing(Int[])) isa Float64
@test_throws MethodError mean([])
@test_throws MethodError mean(skipmissing([]))
@test_throws ArgumentError mean((1 for i in 2:1))
if VERSION >= v"1.6.0-DEV.83"
@test_throws ArgumentError mean(())
@test_throws ArgumentError mean(Union{}[])
end
# Check that small types are accumulated using wider type
for T in (Int8, UInt8)
x = [typemax(T) typemax(T)]
g = (v for v in x)
@test mean(x) == mean(g) == typemax(T)
@test mean(identity, x) == mean(identity, g) == typemax(T)
@test mean(x, dims=2) == [typemax(T)]'
end
# Check that mean avoids integer overflow (#22)
let x = fill(typemax(Int), 10), a = tuple(x...)
@test (mean(x) == mean(x, dims=1)[] == mean(float, x)
== mean(a) == mean(v for v in x) == mean(v for v in a)
β float(typemax(Int)))
end
let x = rand(10000) # mean should use sum's accurate pairwise algorithm
@test mean(x) == sum(x) / length(x)
end
@test mean(Number[1, 1.5, 2+3im]) === 1.5+1im # mixed-type array
@test mean(v for v in Number[1, 1.5, 2+3im]) === 1.5+1im
@test (@inferred mean(Int[])) === 0/0
@test (@inferred mean(Float32[])) === 0.f0/0
@test (@inferred mean(Float64[])) === 0/0
@test (@inferred mean(Iterators.filter(x -> true, Int[]))) === 0/0
@test (@inferred mean(Iterators.filter(x -> true, Float32[]))) === 0.f0/0
@test (@inferred mean(Iterators.filter(x -> true, Float64[]))) === 0/0
end
@testset "mean/median for ranges" begin
for f in (mean, median)
for n = 2:5
@test f(2:n) == f([2:n;])
@test f(2:0.1:n) β f([2:0.1:n;])
end
end
@test mean(2:1) === NaN
@test mean(big(2):1) isa BigFloat
end
@testset "var & std" begin
# edge case: empty vector
# iterable; this has to throw for type stability
@test_throws MethodError var(())
@test_throws MethodError var((); corrected=false)
@test_throws MethodError var((); mean=2)
@test_throws MethodError var((); mean=2, corrected=false)
# reduction
@test isnan(var(Int[]))
@test isnan(var(Int[]; corrected=false))
@test isnan(var(Int[]; mean=2))
@test isnan(var(Int[]; mean=2, corrected=false))
# reduction across dimensions
@test isequal(var(Int[], dims=1), [NaN])
@test isequal(var(Int[], dims=1; corrected=false), [NaN])
@test isequal(var(Int[], dims=1; mean=[2]), [NaN])
@test isequal(var(Int[], dims=1; mean=[2], corrected=false), [NaN])
# edge case: one-element vector
# iterable
@test isnan(@inferred(var((1,))))
@test var((1,); corrected=false) === 0.0
@test var((1,); mean=2) === Inf
@test var((1,); mean=2, corrected=false) === 1.0
# reduction
@test isnan(@inferred(var([1])))
@test var([1]; corrected=false) === 0.0
@test var([1]; mean=2) === Inf
@test var([1]; mean=2, corrected=false) === 1.0
# reduction across dimensions
@test isequal(@inferred(var([1], dims=1)), [NaN])
@test var([1], dims=1; corrected=false) β [0.0]
@test var([1], dims=1; mean=[2]) β [Inf]
@test var([1], dims=1; mean=[2], corrected=false) β [1.0]
@test var(1:8) == 6.
@test varm(1:8,1) == varm(Vector(1:8),1)
@test isnan(varm(1:1,1))
@test isnan(var(1:1))
@test isnan(var(1:-1))
@test @inferred(var(1.0:8.0)) == 6.
@test varm(1.0:8.0,1.0) == varm(Vector(1.0:8.0),1)
@test isnan(varm(1.0:1.0,1.0))
@test isnan(var(1.0:1.0))
@test isnan(var(1.0:-1.0))
@test @inferred(var(1.0f0:8.0f0)) === 6.f0
@test varm(1.0f0:8.0f0,1.0f0) == varm(Vector(1.0f0:8.0f0),1)
@test isnan(varm(1.0f0:1.0f0,1.0f0))
@test isnan(var(1.0f0:1.0f0))
@test isnan(var(1.0f0:-1.0f0))
@test varm([1,2,3], 2) β 1.
@test var([1,2,3]) β 1.
@test var([1,2,3]; corrected=false) β 2.0/3
@test var([1,2,3]; mean=0) β 7.
@test var([1,2,3]; mean=0, corrected=false) β 14.0/3
@test varm((1,2,3), 2) β 1.
@test var((1,2,3)) β 1.
@test var((1,2,3); corrected=false) β 2.0/3
@test var((1,2,3); mean=0) β 7.
@test var((1,2,3); mean=0, corrected=false) β 14.0/3
@test_throws ArgumentError var((1,2,3); mean=())
@test var([1 2 3 4 5; 6 7 8 9 10], dims=2) β [2.5 2.5]'
@test var([1 2 3 4 5; 6 7 8 9 10], dims=2; corrected=false) β [2.0 2.0]'
@test var(collect(1:99), dims=1) β [825]
@test var(Matrix(transpose(collect(1:99))), dims=2) β [825]
@test stdm([1,2,3], 2) β 1.
@test std([1,2,3]) β 1.
@test std([1,2,3]; corrected=false) β sqrt(2.0/3)
@test std([1,2,3]; mean=0) β sqrt(7.0)
@test std([1,2,3]; mean=0, corrected=false) β sqrt(14.0/3)
@test stdm([1.0,2,3], 2) β 1.
@test std([1.0,2,3]) β 1.
@test std([1.0,2,3]; corrected=false) β sqrt(2.0/3)
@test std([1.0,2,3]; mean=0) β sqrt(7.0)
@test std([1.0,2,3]; mean=0, corrected=false) β sqrt(14.0/3)
@test std([1.0,2,3]; dims=1)[] β 1.
@test std([1.0,2,3]; dims=1, corrected=false)[] β sqrt(2.0/3)
@test std([1.0,2,3]; dims=1, mean=[0])[] β sqrt(7.0)
@test std([1.0,2,3]; dims=1, mean=[0], corrected=false)[] β sqrt(14.0/3)
@test stdm((1,2,3), 2) β 1.
@test std((1,2,3)) β 1.
@test std((1,2,3); corrected=false) β sqrt(2.0/3)
@test std((1,2,3); mean=0) β sqrt(7.0)
@test std((1,2,3); mean=0, corrected=false) β sqrt(14.0/3)
@test std([1 2 3 4 5; 6 7 8 9 10], dims=2) β sqrt.([2.5 2.5]')
@test std([1 2 3 4 5; 6 7 8 9 10], dims=2; corrected=false) β sqrt.([2.0 2.0]')
let A = ComplexF64[exp(i*im) for i in 1:10^4]
@test varm(A, 0.) β sum(map(abs2, A)) / (length(A) - 1)
@test varm(A, mean(A)) β var(A)
end
@test var([1//1, 2//1]) isa Rational{Int}
@test var([1//1, 2//1], dims=1) isa Vector{Rational{Int}}
@test std([1//1, 2//1]) isa Float64
@test std([1//1, 2//1], dims=1) isa Vector{Float64}
@testset "var: empty cases" begin
A = Matrix{Int}(undef, 0,1)
@test var(A) === NaN
@test isequal(var(A, dims=1), fill(NaN, 1, 1))
@test isequal(var(A, dims=2), fill(NaN, 0, 1))
@test isequal(var(A, dims=(1, 2)), fill(NaN, 1, 1))
@test isequal(var(A, dims=3), fill(NaN, 0, 1))
end
# issue #6672
@test std(AbstractFloat[1,2,3], dims=1) == [1.0]
for f in (var, std)
@test ismissing(f([1, missing]))
@test ismissing(f([NaN, missing]))
@test ismissing(f([missing, NaN]))
@test isequal(f([missing 1.0; 2.0 3.0], dims=1), [missing f([1.0, 3.0])])
@test f(skipmissing([1, missing, 2])) === f([1, 2])
end
for f in (varm, stdm)
@test ismissing(f([1, missing], 0))
@test ismissing(f([1, 2], missing))
@test ismissing(f([1, NaN], missing))
@test ismissing(f([NaN, missing], 0))
@test ismissing(f([missing, NaN], 0))
@test ismissing(f([NaN, missing], missing))
@test ismissing(f([missing, NaN], missing))
@test f(skipmissing([1, missing, 2]), 0) === f([1, 2], 0)
end
@test isequal(var(Complex{Float64}[]), NaN)
@test var(Complex{Float64}[]) isa Float64
@test isequal(var(skipmissing(Complex{Float64}[])), NaN)
@test var(skipmissing(Complex{Float64}[])) isa Float64
@test_throws MethodError var([])
@test_throws MethodError var(skipmissing([]))
@test_throws MethodError var((1 for i in 2:1))
@test isequal(var(Int[]), NaN)
@test var(Int[]) isa Float64
@test isequal(var(skipmissing(Int[])), NaN)
@test var(skipmissing(Int[])) isa Float64
# over dimensions with provided means
for x in ([1 2 3; 4 5 6], sparse([1 2 3; 4 5 6]))
@test var(x, dims=1, mean=mean(x, dims=1)) == var(x, dims=1)
@test var(x, dims=1, mean=reshape(mean(x, dims=1), 1, :, 1)) == var(x, dims=1)
@test var(x, dims=2, mean=mean(x, dims=2)) == var(x, dims=2)
@test var(x, dims=2, mean=reshape(mean(x, dims=2), :)) == var(x, dims=2)
@test var(x, dims=2, mean=reshape(mean(x, dims=2), :, 1, 1)) == var(x, dims=2)
@test_throws DimensionMismatch var(x, dims=1, mean=ones(size(x, 1)))
@test_throws DimensionMismatch var(x, dims=1, mean=ones(size(x, 1), 1))
@test_throws DimensionMismatch var(x, dims=2, mean=ones(1, size(x, 2)))
@test_throws DimensionMismatch var(x, dims=1, mean=ones(1, 1, size(x, 2)))
@test_throws DimensionMismatch var(x, dims=2, mean=ones(1, size(x, 2), 1))
@test_throws DimensionMismatch var(x, dims=2, mean=ones(size(x, 1), 1, 5))
@test_throws DimensionMismatch var(x, dims=1, mean=ones(1, size(x, 2), 5))
end
end
function safe_cov(x, y, zm::Bool, cr::Bool)
n = length(x)
if !zm
x = x .- mean(x)
y = y .- mean(y)
end
dot(vec(x), vec(y)) / (n - Int(cr))
end
X = [1.0 5.0;
2.0 4.0;
3.0 6.0;
4.0 2.0;
5.0 1.0]
Y = [6.0 2.0;
1.0 7.0;
5.0 8.0;
3.0 4.0;
2.0 3.0]
@testset "covariance" begin
for vd in [1, 2], zm in [true, false], cr in [true, false]
# println("vd = $vd: zm = $zm, cr = $cr")
if vd == 1
k = size(X, 2)
Cxx = zeros(k, k)
Cxy = zeros(k, k)
for i = 1:k, j = 1:k
Cxx[i,j] = safe_cov(X[:,i], X[:,j], zm, cr)
Cxy[i,j] = safe_cov(X[:,i], Y[:,j], zm, cr)
end
x1 = vec(X[:,1])
y1 = vec(Y[:,1])
else
k = size(X, 1)
Cxx = zeros(k, k)
Cxy = zeros(k, k)
for i = 1:k, j = 1:k
Cxx[i,j] = safe_cov(X[i,:], X[j,:], zm, cr)
Cxy[i,j] = safe_cov(X[i,:], Y[j,:], zm, cr)
end
x1 = vec(X[1,:])
y1 = vec(Y[1,:])
end
c = zm ? Statistics.covm(x1, 0, corrected=cr) :
cov(x1, corrected=cr)
@test isa(c, Float64)
@test c β Cxx[1,1]
@inferred cov(x1, corrected=cr)
@test cov(X) == Statistics.covm(X, mean(X, dims=1))
C = zm ? Statistics.covm(X, 0, vd, corrected=cr) :
cov(X, dims=vd, corrected=cr)
@test size(C) == (k, k)
@test C β Cxx
@inferred cov(X, dims=vd, corrected=cr)
@test cov(x1, y1) == Statistics.covm(x1, mean(x1), y1, mean(y1))
c = zm ? Statistics.covm(x1, 0, y1, 0, corrected=cr) :
cov(x1, y1, corrected=cr)
@test isa(c, Float64)
@test c β Cxy[1,1]
@inferred cov(x1, y1, corrected=cr)
if vd == 1
@test cov(x1, Y) == Statistics.covm(x1, mean(x1), Y, mean(Y, dims=1))
end
C = zm ? Statistics.covm(x1, 0, Y, 0, vd, corrected=cr) :
cov(x1, Y, dims=vd, corrected=cr)
@test size(C) == (1, k)
@test vec(C) β Cxy[1,:]
@inferred cov(x1, Y, dims=vd, corrected=cr)
if vd == 1
@test cov(X, y1) == Statistics.covm(X, mean(X, dims=1), y1, mean(y1))
end
C = zm ? Statistics.covm(X, 0, y1, 0, vd, corrected=cr) :
cov(X, y1, dims=vd, corrected=cr)
@test size(C) == (k, 1)
@test vec(C) β Cxy[:,1]
@inferred cov(X, y1, dims=vd, corrected=cr)
@test cov(X, Y) == Statistics.covm(X, mean(X, dims=1), Y, mean(Y, dims=1))
C = zm ? Statistics.covm(X, 0, Y, 0, vd, corrected=cr) :
cov(X, Y, dims=vd, corrected=cr)
@test size(C) == (k, k)
@test C β Cxy
@inferred cov(X, Y, dims=vd, corrected=cr)
end
@testset "floating point accuracy for `cov` of large numbers" begin
A = [4.0, 7.0, 13.0, 16.0]
C = A .+ 1.0e10
@test cov(A, A) β cov(C, C)
end
end
function safe_cor(x, y, zm::Bool)
if !zm
x = x .- mean(x)
y = y .- mean(y)
end
x = vec(x)
y = vec(y)
dot(x, y) / (sqrt(dot(x, x)) * sqrt(dot(y, y)))
end
@testset "correlation" begin
for vd in [1, 2], zm in [true, false]
# println("vd = $vd: zm = $zm")
if vd == 1
k = size(X, 2)
Cxx = zeros(k, k)
Cxy = zeros(k, k)
for i = 1:k, j = 1:k
Cxx[i,j] = safe_cor(X[:,i], X[:,j], zm)
Cxy[i,j] = safe_cor(X[:,i], Y[:,j], zm)
end
x1 = vec(X[:,1])
y1 = vec(Y[:,1])
else
k = size(X, 1)
Cxx = zeros(k, k)
Cxy = zeros(k, k)
for i = 1:k, j = 1:k
Cxx[i,j] = safe_cor(X[i,:], X[j,:], zm)
Cxy[i,j] = safe_cor(X[i,:], Y[j,:], zm)
end
x1 = vec(X[1,:])
y1 = vec(Y[1,:])
end
c = zm ? Statistics.corm(x1, 0) : cor(x1)
@test isa(c, Float64)
@test c β Cxx[1,1]
@inferred cor(x1)
@test cor(X) == Statistics.corm(X, mean(X, dims=1))
C = zm ? Statistics.corm(X, 0, vd) : cor(X, dims=vd)
@test size(C) == (k, k)
@test C β Cxx
@inferred cor(X, dims=vd)
@test cor(x1, y1) == Statistics.corm(x1, mean(x1), y1, mean(y1))
c = zm ? Statistics.corm(x1, 0, y1, 0) : cor(x1, y1)
@test isa(c, Float64)
@test c β Cxy[1,1]
@inferred cor(x1, y1)
if vd == 1
@test cor(x1, Y) == Statistics.corm(x1, mean(x1), Y, mean(Y, dims=1))
end
C = zm ? Statistics.corm(x1, 0, Y, 0, vd) : cor(x1, Y, dims=vd)
@test size(C) == (1, k)
@test vec(C) β Cxy[1,:]
@inferred cor(x1, Y, dims=vd)
if vd == 1
@test cor(X, y1) == Statistics.corm(X, mean(X, dims=1), y1, mean(y1))
end
C = zm ? Statistics.corm(X, 0, y1, 0, vd) : cor(X, y1, dims=vd)
@test size(C) == (k, 1)
@test vec(C) β Cxy[:,1]
@inferred cor(X, y1, dims=vd)
@test cor(X, Y) == Statistics.corm(X, mean(X, dims=1), Y, mean(Y, dims=1))
C = zm ? Statistics.corm(X, 0, Y, 0, vd) : cor(X, Y, dims=vd)
@test size(C) == (k, k)
@test C β Cxy
@inferred cor(X, Y, dims=vd)
end
@test cor(repeat(1:17, 1, 17))[2] <= 1.0
@test cor(1:17, 1:17) <= 1.0
@test cor(1:17, 18:34) <= 1.0
@test cor(Any[1, 2], Any[1, 2]) == 1.0
@test isnan(cor([0], Int8[81]))
let tmp = range(1, stop=85, length=100)
tmp2 = Vector(tmp)
@test cor(tmp, tmp) <= 1.0
@test cor(tmp, tmp2) <= 1.0
end
end
@testset "quantile" begin
@test quantile([1,2,3,4],0.5) β 2.5
@test quantile([1,2,3,4],[0.5]) β [2.5]
@test quantile([1., 3],[.25,.5,.75])[2] β median([1., 3])
@test quantile(100.0:-1.0:0.0, 0.0:0.1:1.0) β 0.0:10.0:100.0
@test quantile(0.0:100.0, 0.0:0.1:1.0, sorted=true) β 0.0:10.0:100.0
@test quantile(100f0:-1f0:0.0, 0.0:0.1:1.0) β 0f0:10f0:100f0
@test quantile([Inf,Inf],0.5) == Inf
@test quantile([-Inf,1],0.5) == -Inf
# here it is required to introduce an absolute tolerance because the calculated value is 0
@test quantile([0,1],1e-18) β 1e-18 atol=1e-18
@test quantile([1, 2, 3, 4],[]) == []
@test quantile([1, 2, 3, 4], (0.5,)) == (2.5,)
@test quantile([4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],
(0.1, 0.2, 0.4, 0.9)) == (2.0, 3.0, 5.0, 11.0)
@test quantile(Union{Int, Missing}[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],
[0.1, 0.2, 0.4, 0.9]) β [2.0, 3.0, 5.0, 11.0]
@test quantile(Any[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],
[0.1, 0.2, 0.4, 0.9]) β [2.0, 3.0, 5.0, 11.0]
@test quantile([4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],
Any[0.1, 0.2, 0.4, 0.9]) β [2.0, 3.0, 5.0, 11.0]
@test quantile([4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],
Any[0.1, 0.2, 0.4, 0.9]) isa Vector{Float64}
@test quantile(Any[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],
Any[0.1, 0.2, 0.4, 0.9]) β [2, 3, 5, 11]
@test quantile(Any[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],
Any[0.1, 0.2, 0.4, 0.9]) isa Vector{Float64}
@test quantile([1, 2, 3, 4], ()) == ()
@test isempty(quantile([1, 2, 3, 4], Float64[]))
@test quantile([1, 2, 3, 4], Float64[]) isa Vector{Float64}
@test quantile([1, 2, 3, 4], []) isa Vector{Any}
@test quantile([1, 2, 3, 4], [0, 1]) isa Vector{Int}
@test quantile(Any[1, 2, 3], 0.5) isa Float64
@test quantile(Any[1, big(2), 3], 0.5) isa BigFloat
@test quantile(Any[1, 2, 3], Float16(0.5)) isa Float16
@test quantile(Any[1, Float16(2), 3], Float16(0.5)) isa Float16
@test quantile(Any[1, big(2), 3], Float16(0.5)) isa BigFloat
@test_throws ArgumentError quantile([1, missing], 0.5)
@test_throws ArgumentError quantile([1, NaN], 0.5)
@test quantile(skipmissing([1, missing, 2]), 0.5) === 1.5
# make sure that type inference works correctly in normal cases
for T in [Int, BigInt, Float64, Float16, BigFloat, Rational{Int}, Rational{BigInt}]
for S in [Float64, Float16, BigFloat, Rational{Int}, Rational{BigInt}]
@inferred quantile(T[1, 2, 3], S(0.5))
@inferred quantile(T[1, 2, 3], S(0.6))
@inferred quantile(T[1, 2, 3], S[0.5, 0.6])
@inferred quantile(T[1, 2, 3], (S(0.5), S(0.6)))
end
end
x = [3; 2; 1]
y = zeros(3)
@test quantile!(y, x, [0.1, 0.5, 0.9]) === y
@test y β [1.2, 2.0, 2.8]
#tests for quantile calculation with configurable alpha and beta parameters
v = [2, 3, 4, 6, 9, 2, 6, 2, 21, 17]
# tests against scipy.stats.mstats.mquantiles method
@test quantile(v, 0.0, alpha=0.0, beta=0.0) β 2.0
@test quantile(v, 0.2, alpha=1.0, beta=1.0) β 2.0
@test quantile(v, 0.4, alpha=0.0, beta=0.0) β 3.4
@test quantile(v, 0.4, alpha=0.0, beta=0.2) β 3.32
@test quantile(v, 0.4, alpha=0.0, beta=0.4) β 3.24
@test quantile(v, 0.4, alpha=0.0, beta=0.6) β 3.16
@test quantile(v, 0.4, alpha=0.0, beta=0.8) β 3.08
@test quantile(v, 0.4, alpha=0.0, beta=1.0) β 3.0
@test quantile(v, 0.4, alpha=0.2, beta=0.0) β 3.52
@test quantile(v, 0.4, alpha=0.2, beta=0.2) β 3.44
@test quantile(v, 0.4, alpha=0.2, beta=0.4) β 3.36
@test quantile(v, 0.4, alpha=0.2, beta=0.6) β 3.28
@test quantile(v, 0.4, alpha=0.2, beta=0.8) β 3.2
@test quantile(v, 0.4, alpha=0.2, beta=1.0) β 3.12
@test quantile(v, 0.4, alpha=0.4, beta=0.0) β 3.64
@test quantile(v, 0.4, alpha=0.4, beta=0.2) β 3.56
@test quantile(v, 0.4, alpha=0.4, beta=0.4) β 3.48
@test quantile(v, 0.4, alpha=0.4, beta=0.6) β 3.4
@test quantile(v, 0.4, alpha=0.4, beta=0.8) β 3.32
@test quantile(v, 0.4, alpha=0.4, beta=1.0) β 3.24
@test quantile(v, 0.4, alpha=0.6, beta=0.0) β 3.76
@test quantile(v, 0.4, alpha=0.6, beta=0.2) β 3.68
@test quantile(v, 0.4, alpha=0.6, beta=0.4) β 3.6
@test quantile(v, 0.4, alpha=0.6, beta=0.6) β 3.52
@test quantile(v, 0.4, alpha=0.6, beta=0.8) β 3.44
@test quantile(v, 0.4, alpha=0.6, beta=1.0) β 3.36
@test quantile(v, 0.4, alpha=0.8, beta=0.0) β 3.88
@test quantile(v, 0.4, alpha=0.8, beta=0.2) β 3.8
@test quantile(v, 0.4, alpha=0.8, beta=0.4) β 3.72
@test quantile(v, 0.4, alpha=0.8, beta=0.6) β 3.64
@test quantile(v, 0.4, alpha=0.8, beta=0.8) β 3.56
@test quantile(v, 0.4, alpha=0.8, beta=1.0) β 3.48
@test quantile(v, 0.4, alpha=1.0, beta=0.0) β 4.0
@test quantile(v, 0.4, alpha=1.0, beta=0.2) β 3.92
@test quantile(v, 0.4, alpha=1.0, beta=0.4) β 3.84
@test quantile(v, 0.4, alpha=1.0, beta=0.6) β 3.76
@test quantile(v, 0.4, alpha=1.0, beta=0.8) β 3.68
@test quantile(v, 0.4, alpha=1.0, beta=1.0) β 3.6
@test quantile(v, 0.6, alpha=0.0, beta=0.0) β 6.0
@test quantile(v, 0.6, alpha=1.0, beta=1.0) β 6.0
@test quantile(v, 0.8, alpha=0.0, beta=0.0) β 15.4
@test quantile(v, 0.8, alpha=0.0, beta=0.2) β 14.12
@test quantile(v, 0.8, alpha=0.0, beta=0.4) β 12.84
@test quantile(v, 0.8, alpha=0.0, beta=0.6) β 11.56
@test quantile(v, 0.8, alpha=0.0, beta=0.8) β 10.28
@test quantile(v, 0.8, alpha=0.0, beta=1.0) β 9.0
@test quantile(v, 0.8, alpha=0.2, beta=0.0) β 15.72
@test quantile(v, 0.8, alpha=0.2, beta=0.2) β 14.44
@test quantile(v, 0.8, alpha=0.2, beta=0.4) β 13.16
@test quantile(v, 0.8, alpha=0.2, beta=0.6) β 11.88
@test quantile(v, 0.8, alpha=0.2, beta=0.8) β 10.6
@test quantile(v, 0.8, alpha=0.2, beta=1.0) β 9.32
@test quantile(v, 0.8, alpha=0.4, beta=0.0) β 16.04
@test quantile(v, 0.8, alpha=0.4, beta=0.2) β 14.76
@test quantile(v, 0.8, alpha=0.4, beta=0.4) β 13.48
@test quantile(v, 0.8, alpha=0.4, beta=0.6) β 12.2
@test quantile(v, 0.8, alpha=0.4, beta=0.8) β 10.92
@test quantile(v, 0.8, alpha=0.4, beta=1.0) β 9.64
@test quantile(v, 0.8, alpha=0.6, beta=0.0) β 16.36
@test quantile(v, 0.8, alpha=0.6, beta=0.2) β 15.08
@test quantile(v, 0.8, alpha=0.6, beta=0.4) β 13.8
@test quantile(v, 0.8, alpha=0.6, beta=0.6) β 12.52
@test quantile(v, 0.8, alpha=0.6, beta=0.8) β 11.24
@test quantile(v, 0.8, alpha=0.6, beta=1.0) β 9.96
@test quantile(v, 0.8, alpha=0.8, beta=0.0) β 16.68
@test quantile(v, 0.8, alpha=0.8, beta=0.2) β 15.4
@test quantile(v, 0.8, alpha=0.8, beta=0.4) β 14.12
@test quantile(v, 0.8, alpha=0.8, beta=0.6) β 12.84
@test quantile(v, 0.8, alpha=0.8, beta=0.8) β 11.56
@test quantile(v, 0.8, alpha=0.8, beta=1.0) β 10.28
@test quantile(v, 0.8, alpha=1.0, beta=0.0) β 17.0
@test quantile(v, 0.8, alpha=1.0, beta=0.2) β 15.72
@test quantile(v, 0.8, alpha=1.0, beta=0.4) β 14.44
@test quantile(v, 0.8, alpha=1.0, beta=0.6) β 13.16
@test quantile(v, 0.8, alpha=1.0, beta=0.8) β 11.88
@test quantile(v, 0.8, alpha=1.0, beta=1.0) β 10.6
@test quantile(v, 1.0, alpha=0.0, beta=0.0) β 21.0
@test quantile(v, 1.0, alpha=1.0, beta=1.0) β 21.0
end
# StatsBase issue 164
let y = [0.40003674665581906, 0.4085630862624367, 0.41662034698690303, 0.41662034698690303, 0.42189053966652057, 0.42189053966652057, 0.42553514344518345, 0.43985732442991354]
@test issorted(quantile(y, range(0.01, stop=0.99, length=17)))
end
@testset "variance of complex arrays (#13309)" begin
z = rand(ComplexF64, 10)
@test var(z) β invoke(var, Tuple{Any}, z) β cov(z) β var(z,dims=1)[1] β sum(abs2, z .- mean(z))/9
@test isa(var(z), Float64)
@test isa(invoke(var, Tuple{Any}, z), Float64)
@test isa(cov(z), Float64)
@test isa(var(z,dims=1), Vector{Float64})
@test varm(z, 0.0) β invoke(varm, Tuple{Any,Float64}, z, 0.0) β sum(abs2, z)/9
@test isa(varm(z, 0.0), Float64)
@test isa(invoke(varm, Tuple{Any,Float64}, z, 0.0), Float64)
@test cor(z) === 1.0
v = varm([1.0+2.0im], 0; corrected = false)
@test v β 5
@test isa(v, Float64)
end
@testset "cov and cor of complex arrays (issue #21093)" begin
x = [2.7 - 3.3im, 0.9 + 5.4im, 0.1 + 0.2im, -1.7 - 5.8im, 1.1 + 1.9im]
y = [-1.7 - 1.6im, -0.2 + 6.5im, 0.8 - 10.0im, 9.1 - 3.4im, 2.7 - 5.5im]
@test cov(x, y) β 4.8365 - 12.119im
@test cov(y, x) β 4.8365 + 12.119im
@test cov(x, reshape(y, :, 1)) β reshape([4.8365 - 12.119im], 1, 1)
@test cov(reshape(x, :, 1), y) β reshape([4.8365 - 12.119im], 1, 1)
@test cov(reshape(x, :, 1), reshape(y, :, 1)) β reshape([4.8365 - 12.119im], 1, 1)
@test cov([x y]) β [21.779 4.8365-12.119im;
4.8365+12.119im 54.548]
@test cor(x, y) β 0.14032104449218274 - 0.35160772008699703im
@test cor(y, x) β 0.14032104449218274 + 0.35160772008699703im
@test cor(x, reshape(y, :, 1)) β reshape([0.14032104449218274 - 0.35160772008699703im], 1, 1)
@test cor(reshape(x, :, 1), y) β reshape([0.14032104449218274 - 0.35160772008699703im], 1, 1)
@test cor(reshape(x, :, 1), reshape(y, :, 1)) β reshape([0.14032104449218274 - 0.35160772008699703im], 1, 1)
@test cor([x y]) β [1.0 0.14032104449218274-0.35160772008699703im
0.14032104449218274+0.35160772008699703im 1.0]
end
@testset "Issue #17153 and PR #17154" begin
a = rand(10,10)
b = copy(a)
x = median(a, dims=1)
@test b == a
x = median(a, dims=2)
@test b == a
x = mean(a, dims=1)
@test b == a
x = mean(a, dims=2)
@test b == a
x = var(a, dims=1)
@test b == a
x = var(a, dims=2)
@test b == a
x = std(a, dims=1)
@test b == a
x = std(a, dims=2)
@test b == a
end
# dimensional correctness
const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test")
isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl"))
using .Main.Furlongs
Statistics.middle(x::Furlong{p}) where {p} = Furlong{p}(middle(x.val))
Statistics.middle(x::Furlong{p}, y::Furlong{p}) where {p} = Furlong{p}(middle(x.val, y.val))
@testset "Unitful elements" begin
r = Furlong(1):Furlong(1):Furlong(2)
a = Vector(r)
@test sum(r) == sum(a) == Furlong(3)
@test cumsum(r) == Furlong.([1,3])
@test mean(r) == mean(a) == median(a) == median(r) == Furlong(1.5)
@test var(r) == var(a) == Furlong{2}(0.5)
@test std(r) == std(a) == Furlong{1}(sqrt(0.5))
# Issue #21786
A = [Furlong{1}(rand(-5:5)) for i in 1:2, j in 1:2]
@test mean(mean(A, dims=1), dims=2)[1] === mean(A)
@test var(A, dims=1)[1] === var(A[:, 1])
@test std(A, dims=1)[1] === std(A[:, 1])
end
# Issue #22901
@testset "var and quantile of Any arrays" begin
x = Any[1, 2, 4, 10]
y = Any[1, 2, 4, 10//1]
@test var(x) === 16.25
@test var(y) === 16.25
@test std(x) === sqrt(16.25)
@test quantile(x, 0.5) === 3.0
@test quantile(x, 1//2) === 3//1
end
@testset "Promotion in covzm. Issue #8080" begin
A = [1 -1 -1; -1 1 1; -1 1 -1; 1 -1 -1; 1 -1 1]
@test Statistics.covzm(A) - mean(A, dims=1)'*mean(A, dims=1)*size(A, 1)/(size(A, 1) - 1) β cov(A)
A = [1//1 -1 -1; -1 1 1; -1 1 -1; 1 -1 -1; 1 -1 1]
@test (A'A - size(A, 1)*mean(A, dims=1)'*mean(A, dims=1))/4 == cov(A)
end
@testset "Mean along dimension of empty array" begin
a0 = zeros(0)
a00 = zeros(0, 0)
a01 = zeros(0, 1)
a10 = zeros(1, 0)
@test isequal(mean(a0, dims=1) , fill(NaN, 1))
@test isequal(mean(a00, dims=(1, 2)), fill(NaN, 1, 1))
@test isequal(mean(a01, dims=1) , fill(NaN, 1, 1))
@test isequal(mean(a10, dims=2) , fill(NaN, 1, 1))
end
@testset "cov/var/std of Vector{Vector}" begin
x = [[2,4,6],[4,6,8]]
@test var(x) β vec(var([x[1] x[2]], dims=2))
@test std(x) β vec(std([x[1] x[2]], dims=2))
@test cov(x) β cov([x[1] x[2]], dims=2)
end
@testset "var of sparse array" begin
se33 = SparseMatrixCSC{Float64}(I, 3, 3)
sA = sprandn(3, 7, 0.5)
pA = sparse(rand(3, 7))
for arr in (se33, sA, pA)
farr = Array(arr)
@test var(arr) β var(farr)
@test var(arr, dims=1) β var(farr, dims=1)
@test var(arr, dims=2) β var(farr, dims=2)
@test var(arr, dims=(1, 2)) β [var(farr)]
@test isequal(var(arr, dims=3), var(farr, dims=3))
end
@testset "empty cases" begin
@test var(sparse(Int[])) === NaN
@test isequal(var(spzeros(0, 1), dims=1), var(Matrix{Int}(I, 0, 1), dims=1))
@test isequal(var(spzeros(0, 1), dims=2), var(Matrix{Int}(I, 0, 1), dims=2))
@test isequal(var(spzeros(0, 1), dims=(1, 2)), var(Matrix{Int}(I, 0, 1), dims=(1, 2)))
@test isequal(var(spzeros(0, 1), dims=3), var(Matrix{Int}(I, 0, 1), dims=3))
end
end
# Faster covariance function for sparse matrices
# Prevents densifying the input matrix when subtracting the mean
# Test against dense implementation
# PR https://github.com/JuliaLang/julia/pull/22735
# Part of this test needed to be hacked due to the treatment
# of Inf in sparse matrix algebra
# https://github.com/JuliaLang/julia/issues/22921
# The issue will be resolved in
# https://github.com/JuliaLang/julia/issues/22733
@testset "optimizing sparse $elty covariance" for elty in (Float64, Complex{Float64})
n = 10
p = 5
np2 = div(n*p, 2)
nzvals, x_sparse = guardseed(1) do
if elty <: Real
nzvals = randn(np2)
else
nzvals = complex.(randn(np2), randn(np2))
end
nzvals, sparse(rand(1:n, np2), rand(1:p, np2), nzvals, n, p)
end
x_dense = convert(Matrix{elty}, x_sparse)
@testset "Test with no Infs and NaNs, vardim=$vardim, corrected=$corrected" for vardim in (1, 2),
corrected in (true, false)
@test cov(x_sparse, dims=vardim, corrected=corrected) β
cov(x_dense , dims=vardim, corrected=corrected)
end
@testset "Test with $x11, vardim=$vardim, corrected=$corrected" for x11 in (NaN, Inf),
vardim in (1, 2),
corrected in (true, false)
x_sparse[1,1] = x11
x_dense[1 ,1] = x11
cov_sparse = cov(x_sparse, dims=vardim, corrected=corrected)
cov_dense = cov(x_dense , dims=vardim, corrected=corrected)
@test cov_sparse[2:end, 2:end] β cov_dense[2:end, 2:end]
@test isfinite.(cov_sparse) == isfinite.(cov_dense)
@test isfinite.(cov_sparse) == isfinite.(cov_dense)
end
@testset "Test with NaN and Inf, vardim=$vardim, corrected=$corrected" for vardim in (1, 2),
corrected in (true, false)
x_sparse[1,1] = Inf
x_dense[1 ,1] = Inf
x_sparse[2,1] = NaN
x_dense[2 ,1] = NaN
cov_sparse = cov(x_sparse, dims=vardim, corrected=corrected)
cov_dense = cov(x_dense , dims=vardim, corrected=corrected)
@test cov_sparse[(1 + vardim):end, (1 + vardim):end] β
cov_dense[ (1 + vardim):end, (1 + vardim):end]
@test isfinite.(cov_sparse) == isfinite.(cov_dense)
@test isfinite.(cov_sparse) == isfinite.(cov_dense)
end
end
| 40.102445 | 175 | 0.536054 | [
"@testset \"middle\" begin\n @test middle(3) === 3.0\n @test middle(2, 3) === 2.5\n let x = ((floatmax(1.0)/4)*3)\n @test middle(x, x) === x\n end\n @test middle(1:8) === 4.5\n @test middle([1:8;]) === 4.5\n\n # ensure type-correctness\n for T in [Bool,Int8,Int16,Int32,Int64,Int128,UInt8,UInt16,UInt32,UInt64,UInt128,Float16,Float32,Float64]\n @test middle(one(T)) === middle(one(T), one(T))\n end\nend",
"@testset \"median\" begin\n @test median([1.]) === 1.\n @test median([1.,3]) === 2.\n @test median([1.,3,2]) === 2.\n\n @test median([1,3,2]) === 2.0\n @test median([1,3,2,4]) === 2.5\n\n @test median([0.0,Inf]) == Inf\n @test median([0.0,-Inf]) == -Inf\n @test median([0.,Inf,-Inf]) == 0.0\n @test median([1.,-1.,Inf,-Inf]) == 0.0\n @test isnan(median([-Inf,Inf]))\n\n X = [2 3 1 -1; 7 4 5 -4]\n @test all(median(X, dims=2) .== [1.5, 4.5])\n @test all(median(X, dims=1) .== [4.5 3.5 3.0 -2.5])\n @test X == [2 3 1 -1; 7 4 5 -4] # issue #17153\n\n @test_throws ArgumentError median([])\n @test isnan(median([NaN]))\n @test isnan(median([0.0,NaN]))\n @test isnan(median([NaN,0.0]))\n @test isnan(median([NaN,0.0,1.0]))\n @test isnan(median(Any[NaN,0.0,1.0]))\n @test isequal(median([NaN 0.0; 1.2 4.5], dims=2), reshape([NaN; 2.85], 2, 1))\n\n @test ismissing(median([1, missing]))\n @test ismissing(median([1, 2, missing]))\n @test ismissing(median([NaN, 2.0, missing]))\n @test ismissing(median([NaN, missing]))\n @test ismissing(median([missing, NaN]))\n @test ismissing(median(Any[missing, 2.0, 3.0, 4.0, NaN]))\n @test median(skipmissing([1, missing, 2])) === 1.5\n\n @test median!([1 2 3 4]) == 2.5\n @test median!([1 2; 3 4]) == 2.5\n\n @test invoke(median, Tuple{AbstractVector}, 1:10) == median(1:10) == 5.5\n\n @test @inferred(median(Float16[1, 2, NaN])) === Float16(NaN)\n @test @inferred(median(Float16[1, 2, 3])) === Float16(2)\n @test @inferred(median(Float32[1, 2, NaN])) === NaN32\n @test @inferred(median(Float32[1, 2, 3])) === 2.0f0\nend",
"@testset \"mean\" begin\n @test mean((1,2,3)) === 2.\n @test mean([0]) === 0.\n @test mean([1.]) === 1.\n @test mean([1.,3]) == 2.\n @test mean([1,2,3]) == 2.\n @test mean([0 1 2; 4 5 6], dims=1) == [2. 3. 4.]\n @test mean([1 2 3; 4 5 6], dims=1) == [2.5 3.5 4.5]\n @test mean(-, [1 2 3 ; 4 5 6], dims=1) == [-2.5 -3.5 -4.5]\n @test mean(-, [1 2 3 ; 4 5 6], dims=2) == transpose([-2.0 -5.0])\n @test mean(-, [1 2 3 ; 4 5 6], dims=(1, 2)) == -3.5 .* ones(1, 1)\n @test mean(-, [1 2 3 ; 4 5 6], dims=(1, 1)) == [-2.5 -3.5 -4.5]\n @test mean(-, [1 2 3 ; 4 5 6], dims=()) == Float64[-1 -2 -3 ; -4 -5 -6]\n @test mean(i->i+1, 0:2) === 2.\n @test mean(isodd, [3]) === 1.\n @test mean(x->3x, (1,1)) === 3.\n\n # mean of iterables:\n n = 10; a = randn(n); b = randn(n)\n @test mean(Tuple(a)) β mean(a)\n @test mean(Tuple(a + b*im)) β mean(a + b*im)\n @test mean(cos, Tuple(a)) β mean(cos, a)\n @test mean(x->x/2, a + b*im) β mean(a + b*im) / 2.\n @test ismissing(mean(Tuple((1, 2, missing, 4, 5))))\n\n @test isnan(mean([NaN]))\n @test isnan(mean([0.0,NaN]))\n @test isnan(mean([NaN,0.0]))\n\n @test isnan(mean([0.,Inf,-Inf]))\n @test isnan(mean([1.,-1.,Inf,-Inf]))\n @test isnan(mean([-Inf,Inf]))\n @test isequal(mean([NaN 0.0; 1.2 4.5], dims=2), reshape([NaN; 2.85], 2, 1))\n\n @test ismissing(mean([1, missing]))\n @test ismissing(mean([NaN, missing]))\n @test ismissing(mean([missing, NaN]))\n @test isequal(mean([missing 1.0; 2.0 3.0], dims=1), [missing 2.0])\n @test mean(skipmissing([1, missing, 2])) === 1.5\n @test isequal(mean(Complex{Float64}[]), NaN+NaN*im)\n @test mean(Complex{Float64}[]) isa Complex{Float64}\n @test isequal(mean(skipmissing(Complex{Float64}[])), NaN+NaN*im)\n @test mean(skipmissing(Complex{Float64}[])) isa Complex{Float64}\n @test isequal(mean(abs, Complex{Float64}[]), NaN)\n @test mean(abs, Complex{Float64}[]) isa Float64\n @test isequal(mean(abs, skipmissing(Complex{Float64}[])), NaN)\n @test mean(abs, skipmissing(Complex{Float64}[])) isa Float64\n @test isequal(mean(Int[]), NaN)\n @test mean(Int[]) isa Float64\n @test isequal(mean(skipmissing(Int[])), NaN)\n @test mean(skipmissing(Int[])) isa Float64\n @test_throws MethodError mean([])\n @test_throws MethodError mean(skipmissing([]))\n @test_throws ArgumentError mean((1 for i in 2:1))\n if VERSION >= v\"1.6.0-DEV.83\"\n @test_throws ArgumentError mean(())\n @test_throws ArgumentError mean(Union{}[])\n end\n\n # Check that small types are accumulated using wider type\n for T in (Int8, UInt8)\n x = [typemax(T) typemax(T)]\n g = (v for v in x)\n @test mean(x) == mean(g) == typemax(T)\n @test mean(identity, x) == mean(identity, g) == typemax(T)\n @test mean(x, dims=2) == [typemax(T)]'\n end\n # Check that mean avoids integer overflow (#22)\n let x = fill(typemax(Int), 10), a = tuple(x...)\n @test (mean(x) == mean(x, dims=1)[] == mean(float, x)\n == mean(a) == mean(v for v in x) == mean(v for v in a)\n β float(typemax(Int)))\n end\n let x = rand(10000) # mean should use sum's accurate pairwise algorithm\n @test mean(x) == sum(x) / length(x)\n end\n @test mean(Number[1, 1.5, 2+3im]) === 1.5+1im # mixed-type array\n @test mean(v for v in Number[1, 1.5, 2+3im]) === 1.5+1im\n @test (@inferred mean(Int[])) === 0/0\n @test (@inferred mean(Float32[])) === 0.f0/0 \n @test (@inferred mean(Float64[])) === 0/0\n @test (@inferred mean(Iterators.filter(x -> true, Int[]))) === 0/0\n @test (@inferred mean(Iterators.filter(x -> true, Float32[]))) === 0.f0/0\n @test (@inferred mean(Iterators.filter(x -> true, Float64[]))) === 0/0\nend",
"@testset \"mean/median for ranges\" begin\n for f in (mean, median)\n for n = 2:5\n @test f(2:n) == f([2:n;])\n @test f(2:0.1:n) β f([2:0.1:n;])\n end\n end\n @test mean(2:1) === NaN\n @test mean(big(2):1) isa BigFloat\nend",
"@testset \"var & std\" begin\n # edge case: empty vector\n # iterable; this has to throw for type stability\n @test_throws MethodError var(())\n @test_throws MethodError var((); corrected=false)\n @test_throws MethodError var((); mean=2)\n @test_throws MethodError var((); mean=2, corrected=false)\n # reduction\n @test isnan(var(Int[]))\n @test isnan(var(Int[]; corrected=false))\n @test isnan(var(Int[]; mean=2))\n @test isnan(var(Int[]; mean=2, corrected=false))\n # reduction across dimensions\n @test isequal(var(Int[], dims=1), [NaN])\n @test isequal(var(Int[], dims=1; corrected=false), [NaN])\n @test isequal(var(Int[], dims=1; mean=[2]), [NaN])\n @test isequal(var(Int[], dims=1; mean=[2], corrected=false), [NaN])\n\n # edge case: one-element vector\n # iterable\n @test isnan(@inferred(var((1,))))\n @test var((1,); corrected=false) === 0.0\n @test var((1,); mean=2) === Inf\n @test var((1,); mean=2, corrected=false) === 1.0\n # reduction\n @test isnan(@inferred(var([1])))\n @test var([1]; corrected=false) === 0.0\n @test var([1]; mean=2) === Inf\n @test var([1]; mean=2, corrected=false) === 1.0\n # reduction across dimensions\n @test isequal(@inferred(var([1], dims=1)), [NaN])\n @test var([1], dims=1; corrected=false) β [0.0]\n @test var([1], dims=1; mean=[2]) β [Inf]\n @test var([1], dims=1; mean=[2], corrected=false) β [1.0]\n\n @test var(1:8) == 6.\n @test varm(1:8,1) == varm(Vector(1:8),1)\n @test isnan(varm(1:1,1))\n @test isnan(var(1:1))\n @test isnan(var(1:-1))\n\n @test @inferred(var(1.0:8.0)) == 6.\n @test varm(1.0:8.0,1.0) == varm(Vector(1.0:8.0),1)\n @test isnan(varm(1.0:1.0,1.0))\n @test isnan(var(1.0:1.0))\n @test isnan(var(1.0:-1.0))\n\n @test @inferred(var(1.0f0:8.0f0)) === 6.f0\n @test varm(1.0f0:8.0f0,1.0f0) == varm(Vector(1.0f0:8.0f0),1)\n @test isnan(varm(1.0f0:1.0f0,1.0f0))\n @test isnan(var(1.0f0:1.0f0))\n @test isnan(var(1.0f0:-1.0f0))\n\n @test varm([1,2,3], 2) β 1.\n @test var([1,2,3]) β 1.\n @test var([1,2,3]; corrected=false) β 2.0/3\n @test var([1,2,3]; mean=0) β 7.\n @test var([1,2,3]; mean=0, corrected=false) β 14.0/3\n\n @test varm((1,2,3), 2) β 1.\n @test var((1,2,3)) β 1.\n @test var((1,2,3); corrected=false) β 2.0/3\n @test var((1,2,3); mean=0) β 7.\n @test var((1,2,3); mean=0, corrected=false) β 14.0/3\n @test_throws ArgumentError var((1,2,3); mean=())\n\n @test var([1 2 3 4 5; 6 7 8 9 10], dims=2) β [2.5 2.5]'\n @test var([1 2 3 4 5; 6 7 8 9 10], dims=2; corrected=false) β [2.0 2.0]'\n\n @test var(collect(1:99), dims=1) β [825]\n @test var(Matrix(transpose(collect(1:99))), dims=2) β [825]\n\n @test stdm([1,2,3], 2) β 1.\n @test std([1,2,3]) β 1.\n @test std([1,2,3]; corrected=false) β sqrt(2.0/3)\n @test std([1,2,3]; mean=0) β sqrt(7.0)\n @test std([1,2,3]; mean=0, corrected=false) β sqrt(14.0/3)\n\n @test stdm([1.0,2,3], 2) β 1.\n @test std([1.0,2,3]) β 1.\n @test std([1.0,2,3]; corrected=false) β sqrt(2.0/3)\n @test std([1.0,2,3]; mean=0) β sqrt(7.0)\n @test std([1.0,2,3]; mean=0, corrected=false) β sqrt(14.0/3)\n\n @test std([1.0,2,3]; dims=1)[] β 1.\n @test std([1.0,2,3]; dims=1, corrected=false)[] β sqrt(2.0/3)\n @test std([1.0,2,3]; dims=1, mean=[0])[] β sqrt(7.0)\n @test std([1.0,2,3]; dims=1, mean=[0], corrected=false)[] β sqrt(14.0/3)\n\n @test stdm((1,2,3), 2) β 1.\n @test std((1,2,3)) β 1.\n @test std((1,2,3); corrected=false) β sqrt(2.0/3)\n @test std((1,2,3); mean=0) β sqrt(7.0)\n @test std((1,2,3); mean=0, corrected=false) β sqrt(14.0/3)\n\n @test std([1 2 3 4 5; 6 7 8 9 10], dims=2) β sqrt.([2.5 2.5]')\n @test std([1 2 3 4 5; 6 7 8 9 10], dims=2; corrected=false) β sqrt.([2.0 2.0]')\n\n let A = ComplexF64[exp(i*im) for i in 1:10^4]\n @test varm(A, 0.) β sum(map(abs2, A)) / (length(A) - 1)\n @test varm(A, mean(A)) β var(A)\n end\n\n @test var([1//1, 2//1]) isa Rational{Int}\n @test var([1//1, 2//1], dims=1) isa Vector{Rational{Int}}\n\n @test std([1//1, 2//1]) isa Float64\n @test std([1//1, 2//1], dims=1) isa Vector{Float64}\n\n @testset \"var: empty cases\" begin\n A = Matrix{Int}(undef, 0,1)\n @test var(A) === NaN\n\n @test isequal(var(A, dims=1), fill(NaN, 1, 1))\n @test isequal(var(A, dims=2), fill(NaN, 0, 1))\n @test isequal(var(A, dims=(1, 2)), fill(NaN, 1, 1))\n @test isequal(var(A, dims=3), fill(NaN, 0, 1))\n end\n\n # issue #6672\n @test std(AbstractFloat[1,2,3], dims=1) == [1.0]\n\n for f in (var, std)\n @test ismissing(f([1, missing]))\n @test ismissing(f([NaN, missing]))\n @test ismissing(f([missing, NaN]))\n @test isequal(f([missing 1.0; 2.0 3.0], dims=1), [missing f([1.0, 3.0])])\n @test f(skipmissing([1, missing, 2])) === f([1, 2])\n end\n for f in (varm, stdm)\n @test ismissing(f([1, missing], 0))\n @test ismissing(f([1, 2], missing))\n @test ismissing(f([1, NaN], missing))\n @test ismissing(f([NaN, missing], 0))\n @test ismissing(f([missing, NaN], 0))\n @test ismissing(f([NaN, missing], missing))\n @test ismissing(f([missing, NaN], missing))\n @test f(skipmissing([1, missing, 2]), 0) === f([1, 2], 0)\n end\n\n @test isequal(var(Complex{Float64}[]), NaN)\n @test var(Complex{Float64}[]) isa Float64\n @test isequal(var(skipmissing(Complex{Float64}[])), NaN)\n @test var(skipmissing(Complex{Float64}[])) isa Float64\n @test_throws MethodError var([])\n @test_throws MethodError var(skipmissing([]))\n @test_throws MethodError var((1 for i in 2:1))\n @test isequal(var(Int[]), NaN)\n @test var(Int[]) isa Float64\n @test isequal(var(skipmissing(Int[])), NaN)\n @test var(skipmissing(Int[])) isa Float64\n\n # over dimensions with provided means\n for x in ([1 2 3; 4 5 6], sparse([1 2 3; 4 5 6]))\n @test var(x, dims=1, mean=mean(x, dims=1)) == var(x, dims=1)\n @test var(x, dims=1, mean=reshape(mean(x, dims=1), 1, :, 1)) == var(x, dims=1)\n @test var(x, dims=2, mean=mean(x, dims=2)) == var(x, dims=2)\n @test var(x, dims=2, mean=reshape(mean(x, dims=2), :)) == var(x, dims=2)\n @test var(x, dims=2, mean=reshape(mean(x, dims=2), :, 1, 1)) == var(x, dims=2)\n @test_throws DimensionMismatch var(x, dims=1, mean=ones(size(x, 1)))\n @test_throws DimensionMismatch var(x, dims=1, mean=ones(size(x, 1), 1))\n @test_throws DimensionMismatch var(x, dims=2, mean=ones(1, size(x, 2)))\n @test_throws DimensionMismatch var(x, dims=1, mean=ones(1, 1, size(x, 2)))\n @test_throws DimensionMismatch var(x, dims=2, mean=ones(1, size(x, 2), 1))\n @test_throws DimensionMismatch var(x, dims=2, mean=ones(size(x, 1), 1, 5))\n @test_throws DimensionMismatch var(x, dims=1, mean=ones(1, size(x, 2), 5))\n end\nend",
"@testset \"covariance\" begin\n for vd in [1, 2], zm in [true, false], cr in [true, false]\n # println(\"vd = $vd: zm = $zm, cr = $cr\")\n if vd == 1\n k = size(X, 2)\n Cxx = zeros(k, k)\n Cxy = zeros(k, k)\n for i = 1:k, j = 1:k\n Cxx[i,j] = safe_cov(X[:,i], X[:,j], zm, cr)\n Cxy[i,j] = safe_cov(X[:,i], Y[:,j], zm, cr)\n end\n x1 = vec(X[:,1])\n y1 = vec(Y[:,1])\n else\n k = size(X, 1)\n Cxx = zeros(k, k)\n Cxy = zeros(k, k)\n for i = 1:k, j = 1:k\n Cxx[i,j] = safe_cov(X[i,:], X[j,:], zm, cr)\n Cxy[i,j] = safe_cov(X[i,:], Y[j,:], zm, cr)\n end\n x1 = vec(X[1,:])\n y1 = vec(Y[1,:])\n end\n\n c = zm ? Statistics.covm(x1, 0, corrected=cr) :\n cov(x1, corrected=cr)\n @test isa(c, Float64)\n @test c β Cxx[1,1]\n @inferred cov(x1, corrected=cr)\n\n @test cov(X) == Statistics.covm(X, mean(X, dims=1))\n C = zm ? Statistics.covm(X, 0, vd, corrected=cr) :\n cov(X, dims=vd, corrected=cr)\n @test size(C) == (k, k)\n @test C β Cxx\n @inferred cov(X, dims=vd, corrected=cr)\n\n @test cov(x1, y1) == Statistics.covm(x1, mean(x1), y1, mean(y1))\n c = zm ? Statistics.covm(x1, 0, y1, 0, corrected=cr) :\n cov(x1, y1, corrected=cr)\n @test isa(c, Float64)\n @test c β Cxy[1,1]\n @inferred cov(x1, y1, corrected=cr)\n\n if vd == 1\n @test cov(x1, Y) == Statistics.covm(x1, mean(x1), Y, mean(Y, dims=1))\n end\n C = zm ? Statistics.covm(x1, 0, Y, 0, vd, corrected=cr) :\n cov(x1, Y, dims=vd, corrected=cr)\n @test size(C) == (1, k)\n @test vec(C) β Cxy[1,:]\n @inferred cov(x1, Y, dims=vd, corrected=cr)\n\n if vd == 1\n @test cov(X, y1) == Statistics.covm(X, mean(X, dims=1), y1, mean(y1))\n end\n C = zm ? Statistics.covm(X, 0, y1, 0, vd, corrected=cr) :\n cov(X, y1, dims=vd, corrected=cr)\n @test size(C) == (k, 1)\n @test vec(C) β Cxy[:,1]\n @inferred cov(X, y1, dims=vd, corrected=cr)\n\n @test cov(X, Y) == Statistics.covm(X, mean(X, dims=1), Y, mean(Y, dims=1))\n C = zm ? Statistics.covm(X, 0, Y, 0, vd, corrected=cr) :\n cov(X, Y, dims=vd, corrected=cr)\n @test size(C) == (k, k)\n @test C β Cxy\n @inferred cov(X, Y, dims=vd, corrected=cr)\n end\n\n @testset \"floating point accuracy for `cov` of large numbers\" begin\n A = [4.0, 7.0, 13.0, 16.0]\n C = A .+ 1.0e10\n @test cov(A, A) β cov(C, C)\n end\nend",
"@testset \"correlation\" begin\n for vd in [1, 2], zm in [true, false]\n # println(\"vd = $vd: zm = $zm\")\n if vd == 1\n k = size(X, 2)\n Cxx = zeros(k, k)\n Cxy = zeros(k, k)\n for i = 1:k, j = 1:k\n Cxx[i,j] = safe_cor(X[:,i], X[:,j], zm)\n Cxy[i,j] = safe_cor(X[:,i], Y[:,j], zm)\n end\n x1 = vec(X[:,1])\n y1 = vec(Y[:,1])\n else\n k = size(X, 1)\n Cxx = zeros(k, k)\n Cxy = zeros(k, k)\n for i = 1:k, j = 1:k\n Cxx[i,j] = safe_cor(X[i,:], X[j,:], zm)\n Cxy[i,j] = safe_cor(X[i,:], Y[j,:], zm)\n end\n x1 = vec(X[1,:])\n y1 = vec(Y[1,:])\n end\n\n c = zm ? Statistics.corm(x1, 0) : cor(x1)\n @test isa(c, Float64)\n @test c β Cxx[1,1]\n @inferred cor(x1)\n\n @test cor(X) == Statistics.corm(X, mean(X, dims=1))\n C = zm ? Statistics.corm(X, 0, vd) : cor(X, dims=vd)\n @test size(C) == (k, k)\n @test C β Cxx\n @inferred cor(X, dims=vd)\n\n @test cor(x1, y1) == Statistics.corm(x1, mean(x1), y1, mean(y1))\n c = zm ? Statistics.corm(x1, 0, y1, 0) : cor(x1, y1)\n @test isa(c, Float64)\n @test c β Cxy[1,1]\n @inferred cor(x1, y1)\n\n if vd == 1\n @test cor(x1, Y) == Statistics.corm(x1, mean(x1), Y, mean(Y, dims=1))\n end\n C = zm ? Statistics.corm(x1, 0, Y, 0, vd) : cor(x1, Y, dims=vd)\n @test size(C) == (1, k)\n @test vec(C) β Cxy[1,:]\n @inferred cor(x1, Y, dims=vd)\n\n if vd == 1\n @test cor(X, y1) == Statistics.corm(X, mean(X, dims=1), y1, mean(y1))\n end\n C = zm ? Statistics.corm(X, 0, y1, 0, vd) : cor(X, y1, dims=vd)\n @test size(C) == (k, 1)\n @test vec(C) β Cxy[:,1]\n @inferred cor(X, y1, dims=vd)\n\n @test cor(X, Y) == Statistics.corm(X, mean(X, dims=1), Y, mean(Y, dims=1))\n C = zm ? Statistics.corm(X, 0, Y, 0, vd) : cor(X, Y, dims=vd)\n @test size(C) == (k, k)\n @test C β Cxy\n @inferred cor(X, Y, dims=vd)\n end\n\n @test cor(repeat(1:17, 1, 17))[2] <= 1.0\n @test cor(1:17, 1:17) <= 1.0\n @test cor(1:17, 18:34) <= 1.0\n @test cor(Any[1, 2], Any[1, 2]) == 1.0\n @test isnan(cor([0], Int8[81]))\n let tmp = range(1, stop=85, length=100)\n tmp2 = Vector(tmp)\n @test cor(tmp, tmp) <= 1.0\n @test cor(tmp, tmp2) <= 1.0\n end\nend",
"@testset \"quantile\" begin\n @test quantile([1,2,3,4],0.5) β 2.5\n @test quantile([1,2,3,4],[0.5]) β [2.5]\n @test quantile([1., 3],[.25,.5,.75])[2] β median([1., 3])\n @test quantile(100.0:-1.0:0.0, 0.0:0.1:1.0) β 0.0:10.0:100.0\n @test quantile(0.0:100.0, 0.0:0.1:1.0, sorted=true) β 0.0:10.0:100.0\n @test quantile(100f0:-1f0:0.0, 0.0:0.1:1.0) β 0f0:10f0:100f0\n @test quantile([Inf,Inf],0.5) == Inf\n @test quantile([-Inf,1],0.5) == -Inf\n # here it is required to introduce an absolute tolerance because the calculated value is 0\n @test quantile([0,1],1e-18) β 1e-18 atol=1e-18\n @test quantile([1, 2, 3, 4],[]) == []\n @test quantile([1, 2, 3, 4], (0.5,)) == (2.5,)\n @test quantile([4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],\n (0.1, 0.2, 0.4, 0.9)) == (2.0, 3.0, 5.0, 11.0)\n @test quantile(Union{Int, Missing}[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],\n [0.1, 0.2, 0.4, 0.9]) β [2.0, 3.0, 5.0, 11.0]\n @test quantile(Any[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],\n [0.1, 0.2, 0.4, 0.9]) β [2.0, 3.0, 5.0, 11.0]\n @test quantile([4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],\n Any[0.1, 0.2, 0.4, 0.9]) β [2.0, 3.0, 5.0, 11.0]\n @test quantile([4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],\n Any[0.1, 0.2, 0.4, 0.9]) isa Vector{Float64}\n @test quantile(Any[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],\n Any[0.1, 0.2, 0.4, 0.9]) β [2, 3, 5, 11]\n @test quantile(Any[4, 9, 1, 5, 7, 8, 2, 3, 5, 17, 11],\n Any[0.1, 0.2, 0.4, 0.9]) isa Vector{Float64}\n @test quantile([1, 2, 3, 4], ()) == ()\n @test isempty(quantile([1, 2, 3, 4], Float64[]))\n @test quantile([1, 2, 3, 4], Float64[]) isa Vector{Float64}\n @test quantile([1, 2, 3, 4], []) isa Vector{Any}\n @test quantile([1, 2, 3, 4], [0, 1]) isa Vector{Int}\n\n @test quantile(Any[1, 2, 3], 0.5) isa Float64\n @test quantile(Any[1, big(2), 3], 0.5) isa BigFloat\n @test quantile(Any[1, 2, 3], Float16(0.5)) isa Float16\n @test quantile(Any[1, Float16(2), 3], Float16(0.5)) isa Float16\n @test quantile(Any[1, big(2), 3], Float16(0.5)) isa BigFloat\n\n @test_throws ArgumentError quantile([1, missing], 0.5)\n @test_throws ArgumentError quantile([1, NaN], 0.5)\n @test quantile(skipmissing([1, missing, 2]), 0.5) === 1.5\n\n # make sure that type inference works correctly in normal cases\n for T in [Int, BigInt, Float64, Float16, BigFloat, Rational{Int}, Rational{BigInt}]\n for S in [Float64, Float16, BigFloat, Rational{Int}, Rational{BigInt}]\n @inferred quantile(T[1, 2, 3], S(0.5))\n @inferred quantile(T[1, 2, 3], S(0.6))\n @inferred quantile(T[1, 2, 3], S[0.5, 0.6])\n @inferred quantile(T[1, 2, 3], (S(0.5), S(0.6)))\n end\n end\n x = [3; 2; 1]\n y = zeros(3)\n @test quantile!(y, x, [0.1, 0.5, 0.9]) === y\n @test y β [1.2, 2.0, 2.8]\n\n #tests for quantile calculation with configurable alpha and beta parameters\n v = [2, 3, 4, 6, 9, 2, 6, 2, 21, 17]\n\n # tests against scipy.stats.mstats.mquantiles method\n @test quantile(v, 0.0, alpha=0.0, beta=0.0) β 2.0\n @test quantile(v, 0.2, alpha=1.0, beta=1.0) β 2.0\n @test quantile(v, 0.4, alpha=0.0, beta=0.0) β 3.4\n @test quantile(v, 0.4, alpha=0.0, beta=0.2) β 3.32\n @test quantile(v, 0.4, alpha=0.0, beta=0.4) β 3.24\n @test quantile(v, 0.4, alpha=0.0, beta=0.6) β 3.16\n @test quantile(v, 0.4, alpha=0.0, beta=0.8) β 3.08\n @test quantile(v, 0.4, alpha=0.0, beta=1.0) β 3.0\n @test quantile(v, 0.4, alpha=0.2, beta=0.0) β 3.52\n @test quantile(v, 0.4, alpha=0.2, beta=0.2) β 3.44\n @test quantile(v, 0.4, alpha=0.2, beta=0.4) β 3.36\n @test quantile(v, 0.4, alpha=0.2, beta=0.6) β 3.28\n @test quantile(v, 0.4, alpha=0.2, beta=0.8) β 3.2\n @test quantile(v, 0.4, alpha=0.2, beta=1.0) β 3.12\n @test quantile(v, 0.4, alpha=0.4, beta=0.0) β 3.64\n @test quantile(v, 0.4, alpha=0.4, beta=0.2) β 3.56\n @test quantile(v, 0.4, alpha=0.4, beta=0.4) β 3.48\n @test quantile(v, 0.4, alpha=0.4, beta=0.6) β 3.4\n @test quantile(v, 0.4, alpha=0.4, beta=0.8) β 3.32\n @test quantile(v, 0.4, alpha=0.4, beta=1.0) β 3.24\n @test quantile(v, 0.4, alpha=0.6, beta=0.0) β 3.76\n @test quantile(v, 0.4, alpha=0.6, beta=0.2) β 3.68\n @test quantile(v, 0.4, alpha=0.6, beta=0.4) β 3.6\n @test quantile(v, 0.4, alpha=0.6, beta=0.6) β 3.52\n @test quantile(v, 0.4, alpha=0.6, beta=0.8) β 3.44\n @test quantile(v, 0.4, alpha=0.6, beta=1.0) β 3.36\n @test quantile(v, 0.4, alpha=0.8, beta=0.0) β 3.88\n @test quantile(v, 0.4, alpha=0.8, beta=0.2) β 3.8\n @test quantile(v, 0.4, alpha=0.8, beta=0.4) β 3.72\n @test quantile(v, 0.4, alpha=0.8, beta=0.6) β 3.64\n @test quantile(v, 0.4, alpha=0.8, beta=0.8) β 3.56\n @test quantile(v, 0.4, alpha=0.8, beta=1.0) β 3.48\n @test quantile(v, 0.4, alpha=1.0, beta=0.0) β 4.0\n @test quantile(v, 0.4, alpha=1.0, beta=0.2) β 3.92\n @test quantile(v, 0.4, alpha=1.0, beta=0.4) β 3.84\n @test quantile(v, 0.4, alpha=1.0, beta=0.6) β 3.76\n @test quantile(v, 0.4, alpha=1.0, beta=0.8) β 3.68\n @test quantile(v, 0.4, alpha=1.0, beta=1.0) β 3.6\n @test quantile(v, 0.6, alpha=0.0, beta=0.0) β 6.0\n @test quantile(v, 0.6, alpha=1.0, beta=1.0) β 6.0\n @test quantile(v, 0.8, alpha=0.0, beta=0.0) β 15.4\n @test quantile(v, 0.8, alpha=0.0, beta=0.2) β 14.12\n @test quantile(v, 0.8, alpha=0.0, beta=0.4) β 12.84\n @test quantile(v, 0.8, alpha=0.0, beta=0.6) β 11.56\n @test quantile(v, 0.8, alpha=0.0, beta=0.8) β 10.28\n @test quantile(v, 0.8, alpha=0.0, beta=1.0) β 9.0\n @test quantile(v, 0.8, alpha=0.2, beta=0.0) β 15.72\n @test quantile(v, 0.8, alpha=0.2, beta=0.2) β 14.44\n @test quantile(v, 0.8, alpha=0.2, beta=0.4) β 13.16\n @test quantile(v, 0.8, alpha=0.2, beta=0.6) β 11.88\n @test quantile(v, 0.8, alpha=0.2, beta=0.8) β 10.6\n @test quantile(v, 0.8, alpha=0.2, beta=1.0) β 9.32\n @test quantile(v, 0.8, alpha=0.4, beta=0.0) β 16.04\n @test quantile(v, 0.8, alpha=0.4, beta=0.2) β 14.76\n @test quantile(v, 0.8, alpha=0.4, beta=0.4) β 13.48\n @test quantile(v, 0.8, alpha=0.4, beta=0.6) β 12.2\n @test quantile(v, 0.8, alpha=0.4, beta=0.8) β 10.92\n @test quantile(v, 0.8, alpha=0.4, beta=1.0) β 9.64\n @test quantile(v, 0.8, alpha=0.6, beta=0.0) β 16.36\n @test quantile(v, 0.8, alpha=0.6, beta=0.2) β 15.08\n @test quantile(v, 0.8, alpha=0.6, beta=0.4) β 13.8\n @test quantile(v, 0.8, alpha=0.6, beta=0.6) β 12.52\n @test quantile(v, 0.8, alpha=0.6, beta=0.8) β 11.24\n @test quantile(v, 0.8, alpha=0.6, beta=1.0) β 9.96\n @test quantile(v, 0.8, alpha=0.8, beta=0.0) β 16.68\n @test quantile(v, 0.8, alpha=0.8, beta=0.2) β 15.4\n @test quantile(v, 0.8, alpha=0.8, beta=0.4) β 14.12\n @test quantile(v, 0.8, alpha=0.8, beta=0.6) β 12.84\n @test quantile(v, 0.8, alpha=0.8, beta=0.8) β 11.56\n @test quantile(v, 0.8, alpha=0.8, beta=1.0) β 10.28\n @test quantile(v, 0.8, alpha=1.0, beta=0.0) β 17.0\n @test quantile(v, 0.8, alpha=1.0, beta=0.2) β 15.72\n @test quantile(v, 0.8, alpha=1.0, beta=0.4) β 14.44\n @test quantile(v, 0.8, alpha=1.0, beta=0.6) β 13.16\n @test quantile(v, 0.8, alpha=1.0, beta=0.8) β 11.88\n @test quantile(v, 0.8, alpha=1.0, beta=1.0) β 10.6\n @test quantile(v, 1.0, alpha=0.0, beta=0.0) β 21.0\n @test quantile(v, 1.0, alpha=1.0, beta=1.0) β 21.0\nend",
"@testset \"variance of complex arrays (#13309)\" begin\n z = rand(ComplexF64, 10)\n @test var(z) β invoke(var, Tuple{Any}, z) β cov(z) β var(z,dims=1)[1] β sum(abs2, z .- mean(z))/9\n @test isa(var(z), Float64)\n @test isa(invoke(var, Tuple{Any}, z), Float64)\n @test isa(cov(z), Float64)\n @test isa(var(z,dims=1), Vector{Float64})\n @test varm(z, 0.0) β invoke(varm, Tuple{Any,Float64}, z, 0.0) β sum(abs2, z)/9\n @test isa(varm(z, 0.0), Float64)\n @test isa(invoke(varm, Tuple{Any,Float64}, z, 0.0), Float64)\n @test cor(z) === 1.0\n v = varm([1.0+2.0im], 0; corrected = false)\n @test v β 5\n @test isa(v, Float64)\nend",
"@testset \"cov and cor of complex arrays (issue #21093)\" begin\n x = [2.7 - 3.3im, 0.9 + 5.4im, 0.1 + 0.2im, -1.7 - 5.8im, 1.1 + 1.9im]\n y = [-1.7 - 1.6im, -0.2 + 6.5im, 0.8 - 10.0im, 9.1 - 3.4im, 2.7 - 5.5im]\n @test cov(x, y) β 4.8365 - 12.119im\n @test cov(y, x) β 4.8365 + 12.119im\n @test cov(x, reshape(y, :, 1)) β reshape([4.8365 - 12.119im], 1, 1)\n @test cov(reshape(x, :, 1), y) β reshape([4.8365 - 12.119im], 1, 1)\n @test cov(reshape(x, :, 1), reshape(y, :, 1)) β reshape([4.8365 - 12.119im], 1, 1)\n @test cov([x y]) β [21.779 4.8365-12.119im;\n 4.8365+12.119im 54.548]\n @test cor(x, y) β 0.14032104449218274 - 0.35160772008699703im\n @test cor(y, x) β 0.14032104449218274 + 0.35160772008699703im\n @test cor(x, reshape(y, :, 1)) β reshape([0.14032104449218274 - 0.35160772008699703im], 1, 1)\n @test cor(reshape(x, :, 1), y) β reshape([0.14032104449218274 - 0.35160772008699703im], 1, 1)\n @test cor(reshape(x, :, 1), reshape(y, :, 1)) β reshape([0.14032104449218274 - 0.35160772008699703im], 1, 1)\n @test cor([x y]) β [1.0 0.14032104449218274-0.35160772008699703im\n 0.14032104449218274+0.35160772008699703im 1.0]\nend",
"@testset \"Issue #17153 and PR #17154\" begin\n a = rand(10,10)\n b = copy(a)\n x = median(a, dims=1)\n @test b == a\n x = median(a, dims=2)\n @test b == a\n x = mean(a, dims=1)\n @test b == a\n x = mean(a, dims=2)\n @test b == a\n x = var(a, dims=1)\n @test b == a\n x = var(a, dims=2)\n @test b == a\n x = std(a, dims=1)\n @test b == a\n x = std(a, dims=2)\n @test b == a\nend",
"@testset \"Unitful elements\" begin\n r = Furlong(1):Furlong(1):Furlong(2)\n a = Vector(r)\n @test sum(r) == sum(a) == Furlong(3)\n @test cumsum(r) == Furlong.([1,3])\n @test mean(r) == mean(a) == median(a) == median(r) == Furlong(1.5)\n @test var(r) == var(a) == Furlong{2}(0.5)\n @test std(r) == std(a) == Furlong{1}(sqrt(0.5))\n\n # Issue #21786\n A = [Furlong{1}(rand(-5:5)) for i in 1:2, j in 1:2]\n @test mean(mean(A, dims=1), dims=2)[1] === mean(A)\n @test var(A, dims=1)[1] === var(A[:, 1])\n @test std(A, dims=1)[1] === std(A[:, 1])\nend",
"@testset \"var and quantile of Any arrays\" begin\n x = Any[1, 2, 4, 10]\n y = Any[1, 2, 4, 10//1]\n @test var(x) === 16.25\n @test var(y) === 16.25\n @test std(x) === sqrt(16.25)\n @test quantile(x, 0.5) === 3.0\n @test quantile(x, 1//2) === 3//1\nend",
"@testset \"Promotion in covzm. Issue #8080\" begin\n A = [1 -1 -1; -1 1 1; -1 1 -1; 1 -1 -1; 1 -1 1]\n @test Statistics.covzm(A) - mean(A, dims=1)'*mean(A, dims=1)*size(A, 1)/(size(A, 1) - 1) β cov(A)\n A = [1//1 -1 -1; -1 1 1; -1 1 -1; 1 -1 -1; 1 -1 1]\n @test (A'A - size(A, 1)*mean(A, dims=1)'*mean(A, dims=1))/4 == cov(A)\nend",
"@testset \"Mean along dimension of empty array\" begin\n a0 = zeros(0)\n a00 = zeros(0, 0)\n a01 = zeros(0, 1)\n a10 = zeros(1, 0)\n @test isequal(mean(a0, dims=1) , fill(NaN, 1))\n @test isequal(mean(a00, dims=(1, 2)), fill(NaN, 1, 1))\n @test isequal(mean(a01, dims=1) , fill(NaN, 1, 1))\n @test isequal(mean(a10, dims=2) , fill(NaN, 1, 1))\nend",
"@testset \"cov/var/std of Vector{Vector}\" begin\n x = [[2,4,6],[4,6,8]]\n @test var(x) β vec(var([x[1] x[2]], dims=2))\n @test std(x) β vec(std([x[1] x[2]], dims=2))\n @test cov(x) β cov([x[1] x[2]], dims=2)\nend",
"@testset \"var of sparse array\" begin\n se33 = SparseMatrixCSC{Float64}(I, 3, 3)\n sA = sprandn(3, 7, 0.5)\n pA = sparse(rand(3, 7))\n\n for arr in (se33, sA, pA)\n farr = Array(arr)\n @test var(arr) β var(farr)\n @test var(arr, dims=1) β var(farr, dims=1)\n @test var(arr, dims=2) β var(farr, dims=2)\n @test var(arr, dims=(1, 2)) β [var(farr)]\n @test isequal(var(arr, dims=3), var(farr, dims=3))\n end\n\n @testset \"empty cases\" begin\n @test var(sparse(Int[])) === NaN\n @test isequal(var(spzeros(0, 1), dims=1), var(Matrix{Int}(I, 0, 1), dims=1))\n @test isequal(var(spzeros(0, 1), dims=2), var(Matrix{Int}(I, 0, 1), dims=2))\n @test isequal(var(spzeros(0, 1), dims=(1, 2)), var(Matrix{Int}(I, 0, 1), dims=(1, 2)))\n @test isequal(var(spzeros(0, 1), dims=3), var(Matrix{Int}(I, 0, 1), dims=3))\n end\nend",
"@testset \"optimizing sparse $elty covariance\" for elty in (Float64, Complex{Float64})\n n = 10\n p = 5\n np2 = div(n*p, 2)\n nzvals, x_sparse = guardseed(1) do\n if elty <: Real\n nzvals = randn(np2)\n else\n nzvals = complex.(randn(np2), randn(np2))\n end\n nzvals, sparse(rand(1:n, np2), rand(1:p, np2), nzvals, n, p)\n end\n x_dense = convert(Matrix{elty}, x_sparse)\n @testset \"Test with no Infs and NaNs, vardim=$vardim, corrected=$corrected\" for vardim in (1, 2),\n corrected in (true, false)\n @test cov(x_sparse, dims=vardim, corrected=corrected) β\n cov(x_dense , dims=vardim, corrected=corrected)\n end\n\n @testset \"Test with $x11, vardim=$vardim, corrected=$corrected\" for x11 in (NaN, Inf),\n vardim in (1, 2),\n corrected in (true, false)\n x_sparse[1,1] = x11\n x_dense[1 ,1] = x11\n\n cov_sparse = cov(x_sparse, dims=vardim, corrected=corrected)\n cov_dense = cov(x_dense , dims=vardim, corrected=corrected)\n @test cov_sparse[2:end, 2:end] β cov_dense[2:end, 2:end]\n @test isfinite.(cov_sparse) == isfinite.(cov_dense)\n @test isfinite.(cov_sparse) == isfinite.(cov_dense)\n end\n\n @testset \"Test with NaN and Inf, vardim=$vardim, corrected=$corrected\" for vardim in (1, 2),\n corrected in (true, false)\n x_sparse[1,1] = Inf\n x_dense[1 ,1] = Inf\n x_sparse[2,1] = NaN\n x_dense[2 ,1] = NaN\n\n cov_sparse = cov(x_sparse, dims=vardim, corrected=corrected)\n cov_dense = cov(x_dense , dims=vardim, corrected=corrected)\n @test cov_sparse[(1 + vardim):end, (1 + vardim):end] β\n cov_dense[ (1 + vardim):end, (1 + vardim):end]\n @test isfinite.(cov_sparse) == isfinite.(cov_dense)\n @test isfinite.(cov_sparse) == isfinite.(cov_dense)\n end\nend"
] |
f720458e4c141a29498437c4c4276797a74a93c1 | 1,913 | jl | Julia | test/runtests.jl | JuliaGeo/GDAL.jl | 3838e938642712cf8a98c52df5937dcfdb19221e | [
"MIT"
] | 61 | 2018-07-30T12:45:24.000Z | 2022-03-31T20:23:46.000Z | test/runtests.jl | JuliaGeo/GDAL.jl | 3838e938642712cf8a98c52df5937dcfdb19221e | [
"MIT"
] | 67 | 2018-06-11T15:59:17.000Z | 2022-03-02T21:42:54.000Z | test/runtests.jl | JuliaGeo/GDAL.jl | 3838e938642712cf8a98c52df5937dcfdb19221e | [
"MIT"
] | 14 | 2018-12-03T22:05:51.000Z | 2021-09-30T10:58:04.000Z | using GDAL
using Test
@testset "GDAL" begin
# drivers
# before being able to use any drivers, they must be registered first
GDAL.gdalallregister()
version = GDAL.gdalversioninfo("--version")
n_gdal_driver = GDAL.gdalgetdrivercount()
n_ogr_driver = GDAL.ogrgetdrivercount()
@info """$version
$n_gdal_driver GDAL drivers found
$n_ogr_driver OGR drivers found
"""
@test n_gdal_driver > 0
@test n_ogr_driver > 0
srs = GDAL.osrnewspatialreference(C_NULL)
GDAL.osrimportfromepsg(srs, 4326) # fails if GDAL_DATA is not set correctly
xmlnode_pointer = GDAL.cplparsexmlstring("<a><b>hi</b></a>")
@test GDAL.cplgetxmlvalue(xmlnode_pointer, "b", "") == "hi"
# load into Julia struct, mutate, and put back as Ref into GDAL
xmlnode = unsafe_load(xmlnode_pointer)
@test GDAL.cplserializexmltree(Ref(xmlnode)) == "<a>\n <b>hi</b>\n</a>\n"
GDAL.cpldestroyxmlnode(xmlnode_pointer)
# ref https://github.com/JuliaGeo/GDAL.jl/pull/41#discussion_r143345433
gfld = GDAL.ogr_gfld_create("name-a", GDAL.wkbPoint)
@test gfld isa GDAL.OGRGeomFieldDefnH
@test GDAL.ogr_gfld_getnameref(gfld) == "name-a"
@test GDAL.ogr_gfld_gettype(gfld) == GDAL.wkbPoint
# same as above but for the lower level C API
gfld = GDAL.ogr_gfld_create("name-b", GDAL.wkbPolygon)
@test gfld isa Ptr{GDAL.OGRGeomFieldDefnHS}
@test GDAL.ogr_gfld_getnameref(gfld) == "name-b"
@test GDAL.ogr_gfld_gettype(gfld) == GDAL.wkbPolygon
cd(dirname(@__FILE__)) do
rm("tmp", recursive = true, force = true)
mkpath("tmp") # ensure it exists
include("tutorial_raster.jl")
include("tutorial_vector.jl")
include("tutorial_vrt.jl")
include("gdal_utils.jl")
include("gdal_jll_utils.jl")
include("drivers.jl")
include("error.jl")
end
GDAL.gdaldestroydrivermanager()
end
| 33.561404 | 79 | 0.679038 | [
"@testset \"GDAL\" begin\n\n # drivers\n # before being able to use any drivers, they must be registered first\n GDAL.gdalallregister()\n\n version = GDAL.gdalversioninfo(\"--version\")\n n_gdal_driver = GDAL.gdalgetdrivercount()\n n_ogr_driver = GDAL.ogrgetdrivercount()\n @info \"\"\"$version\n $n_gdal_driver GDAL drivers found\n $n_ogr_driver OGR drivers found\n \"\"\"\n\n @test n_gdal_driver > 0\n @test n_ogr_driver > 0\n\n srs = GDAL.osrnewspatialreference(C_NULL)\n GDAL.osrimportfromepsg(srs, 4326) # fails if GDAL_DATA is not set correctly\n\n xmlnode_pointer = GDAL.cplparsexmlstring(\"<a><b>hi</b></a>\")\n @test GDAL.cplgetxmlvalue(xmlnode_pointer, \"b\", \"\") == \"hi\"\n # load into Julia struct, mutate, and put back as Ref into GDAL\n xmlnode = unsafe_load(xmlnode_pointer)\n @test GDAL.cplserializexmltree(Ref(xmlnode)) == \"<a>\\n <b>hi</b>\\n</a>\\n\"\n GDAL.cpldestroyxmlnode(xmlnode_pointer)\n\n # ref https://github.com/JuliaGeo/GDAL.jl/pull/41#discussion_r143345433\n gfld = GDAL.ogr_gfld_create(\"name-a\", GDAL.wkbPoint)\n @test gfld isa GDAL.OGRGeomFieldDefnH\n @test GDAL.ogr_gfld_getnameref(gfld) == \"name-a\"\n @test GDAL.ogr_gfld_gettype(gfld) == GDAL.wkbPoint\n # same as above but for the lower level C API\n gfld = GDAL.ogr_gfld_create(\"name-b\", GDAL.wkbPolygon)\n @test gfld isa Ptr{GDAL.OGRGeomFieldDefnHS}\n @test GDAL.ogr_gfld_getnameref(gfld) == \"name-b\"\n @test GDAL.ogr_gfld_gettype(gfld) == GDAL.wkbPolygon\n\n cd(dirname(@__FILE__)) do\n rm(\"tmp\", recursive = true, force = true)\n mkpath(\"tmp\") # ensure it exists\n include(\"tutorial_raster.jl\")\n include(\"tutorial_vector.jl\")\n include(\"tutorial_vrt.jl\")\n include(\"gdal_utils.jl\")\n include(\"gdal_jll_utils.jl\")\n include(\"drivers.jl\")\n include(\"error.jl\")\n end\n\n GDAL.gdaldestroydrivermanager()\n\nend"
] |
f7215512da0154c3cb3b231e83d4d4e0ca40097a | 2,666 | jl | Julia | test/test_fileio.jl | chenspc/OWEN.jl | 842c8672dbc001180d980430e20652101929f32f | [
"MIT"
] | null | null | null | test/test_fileio.jl | chenspc/OWEN.jl | 842c8672dbc001180d980430e20652101929f32f | [
"MIT"
] | 2 | 2019-11-13T23:18:11.000Z | 2020-02-08T16:40:57.000Z | test/test_fileio.jl | chenspc/OWEN.jl | 842c8672dbc001180d980430e20652101929f32f | [
"MIT"
] | 1 | 2020-02-08T10:46:07.000Z | 2020-02-08T10:46:07.000Z | using Kahuna
using Test
@testset "kahuna_read" begin
@testset ".dm3 files" begin
@test 2 + 2 == 4
end
@testset ".dm4 files" begin
@test 2 + 2 == 4
end
@testset ".hdf5/.h5 files" begin
@test 2 + 2 == 4
end
@testset ".mat files" begin
# matfile = "test/sample_files/test_fileio_mat.mat";
matfile = "sample_files/test_fileio_mat.mat";
@test typeof(kahuna_read(matfile, "mat0d")) == Float64
@test typeof(kahuna_read(matfile, "mat1d")) == Array{Float64,2} && size(kahuna_read(matfile, "mat1d")) == (1,10)
@test typeof(kahuna_read(matfile, "mat2d")) == Array{Float64,2} && size(kahuna_read(matfile, "mat2d")) == (10,10)
@test typeof(kahuna_read(matfile, "mat3d")) == Array{Float64,3} && size(kahuna_read(matfile, "mat3d")) == (10,10,10)
@test typeof(kahuna_read(matfile, "mat4d")) == Array{Float64,4} && size(kahuna_read(matfile, "mat4d")) == (10,10,10,10)
@test kahuna_read(matfile; mode="list") == Set(["mat0d", "mat1d", "mat2d", "mat4d", "mat3d"])
@test kahuna_read(matfile) == Dict(map(x -> x => kahuna_read(matfile, x), collect(kahuna_read(matfile; mode="list"))))
end
@testset ".mib files" begin
mibfile512_12bit = "sample_files/test_512_12bit_single.mib";
# mibfiles = [mibfile256_1bit, mibfile256_6bit, mibfile256_12bit,
# mibfile256_1bit_raw, mibfile256_6bit_raw, mibfile256_12bit_raw,
# mibfile512_1bit, mibfile512_6bit, mibfile512_12bit,
# mibfile512_1bit_raw, mibfile512_6bit_raw, mibfile512_12bit_raw];
mibfiles = [mibfile512_12bit]
for mibfile in mibfiles
mib_images, mib_headers = kahuna_read(mibfile)
@test typeof(mib_images) == Array{Array{UInt16,2},1}
@test typeof(mib_headers) == Array{MIBHeader,1}
# @test typeof(kahuna_read(mibfile, [1, 10])) == Array{Float64,2} && size(kahuna_read(mibfile, [1, 10])) == (1,10)
# @test typeof(kahuna_read(mibfile, [1, 10])) == Array{Float64,2} && size(kahuna_read(mibfile, [1, 10])) == (10,10)
# @test typeof(kahuna_read(mibfile, [1, 10])) == Array{Float64,2} && size(kahuna_read(mibfile, [1, 10])) == (10,10)
end
end
@testset ".toml files" begin
@test 2 + 2 == 4
end
@testset ".jld files" begin
@test 2 + 2 == 4
end
end
@testset "kahuna_write" begin
@testset ".hdf5/.h5 files" begin
@test 2 + 2 == 4
end
@testset ".toml files" begin
@test 2 + 2 == 4
end
@testset ".jld files" begin
@test 2 + 2 == 4
end
end
| 33.746835 | 127 | 0.594524 | [
"@testset \"kahuna_read\" begin\n\n @testset \".dm3 files\" begin\n @test 2 + 2 == 4\n end\n\n @testset \".dm4 files\" begin\n @test 2 + 2 == 4\n end\n\n @testset \".hdf5/.h5 files\" begin\n @test 2 + 2 == 4\n end\n\n @testset \".mat files\" begin\n # matfile = \"test/sample_files/test_fileio_mat.mat\";\n matfile = \"sample_files/test_fileio_mat.mat\";\n @test typeof(kahuna_read(matfile, \"mat0d\")) == Float64\n @test typeof(kahuna_read(matfile, \"mat1d\")) == Array{Float64,2} && size(kahuna_read(matfile, \"mat1d\")) == (1,10)\n @test typeof(kahuna_read(matfile, \"mat2d\")) == Array{Float64,2} && size(kahuna_read(matfile, \"mat2d\")) == (10,10)\n @test typeof(kahuna_read(matfile, \"mat3d\")) == Array{Float64,3} && size(kahuna_read(matfile, \"mat3d\")) == (10,10,10)\n @test typeof(kahuna_read(matfile, \"mat4d\")) == Array{Float64,4} && size(kahuna_read(matfile, \"mat4d\")) == (10,10,10,10)\n\n @test kahuna_read(matfile; mode=\"list\") == Set([\"mat0d\", \"mat1d\", \"mat2d\", \"mat4d\", \"mat3d\"])\n\n @test kahuna_read(matfile) == Dict(map(x -> x => kahuna_read(matfile, x), collect(kahuna_read(matfile; mode=\"list\"))))\n end\n\n @testset \".mib files\" begin\n\n mibfile512_12bit = \"sample_files/test_512_12bit_single.mib\";\n\n # mibfiles = [mibfile256_1bit, mibfile256_6bit, mibfile256_12bit,\n # mibfile256_1bit_raw, mibfile256_6bit_raw, mibfile256_12bit_raw,\n # mibfile512_1bit, mibfile512_6bit, mibfile512_12bit,\n # mibfile512_1bit_raw, mibfile512_6bit_raw, mibfile512_12bit_raw];\n mibfiles = [mibfile512_12bit]\n\n for mibfile in mibfiles\n mib_images, mib_headers = kahuna_read(mibfile)\n @test typeof(mib_images) == Array{Array{UInt16,2},1}\n @test typeof(mib_headers) == Array{MIBHeader,1}\n # @test typeof(kahuna_read(mibfile, [1, 10])) == Array{Float64,2} && size(kahuna_read(mibfile, [1, 10])) == (1,10)\n # @test typeof(kahuna_read(mibfile, [1, 10])) == Array{Float64,2} && size(kahuna_read(mibfile, [1, 10])) == (10,10)\n # @test typeof(kahuna_read(mibfile, [1, 10])) == Array{Float64,2} && size(kahuna_read(mibfile, [1, 10])) == (10,10)\n end\n\n end\n\n @testset \".toml files\" begin\n @test 2 + 2 == 4\n end\n\n @testset \".jld files\" begin\n @test 2 + 2 == 4\n end\n\n\nend",
"@testset \"kahuna_write\" begin\n\n @testset \".hdf5/.h5 files\" begin\n @test 2 + 2 == 4\n end\n\n @testset \".toml files\" begin\n @test 2 + 2 == 4\n end\n\n @testset \".jld files\" begin\n @test 2 + 2 == 4\n end\n\nend"
] |
f7262dc1d65caed99f539aa39adc09adecee3524 | 1,713 | jl | Julia | test/simple_runner_tests.jl | grahamstark/ScottishTaxBenefitModel.jl | 42ca32a100c862c58bbcd98f6264f08d78453b5c | [
"MIT"
] | null | null | null | test/simple_runner_tests.jl | grahamstark/ScottishTaxBenefitModel.jl | 42ca32a100c862c58bbcd98f6264f08d78453b5c | [
"MIT"
] | null | null | null | test/simple_runner_tests.jl | grahamstark/ScottishTaxBenefitModel.jl | 42ca32a100c862c58bbcd98f6264f08d78453b5c | [
"MIT"
] | null | null | null | using Test
using CSV
using DataFrames
using StatsBase
using BenchmarkTools
using ScottishTaxBenefitModel
using ScottishTaxBenefitModel.GeneralTaxComponents
using ScottishTaxBenefitModel.STBParameters
using ScottishTaxBenefitModel.Runner: do_one_run!
using ScottishTaxBenefitModel.RunSettings: Settings, MT_Routing
using .Utils
using .ExampleHelpers
settings = Settings()
BenchmarkTools.DEFAULT_PARAMETERS.seconds = 120
BenchmarkTools.DEFAULT_PARAMETERS.samples = 2
function basic_run( ; print_test :: Bool, mtrouting :: MT_Routing )
settings.means_tested_routing = mtrouting
settings.run_name="run-$(mtrouting)-$(date_string())"
sys = [get_system(scotland=false), get_system( scotland=true )]
results = do_one_run!( settings, sys )
end
@testset "basic run timing" begin
for mt in instances( MT_Routing )
println( "starting run using $mt routing")
@time basic_run( print_test=true, mtrouting = mt )
end
# @benchmark frames =
# print(t)
end
#=
if print_test
summary_output = summarise_results!( results=results, base_results=base_results )
print( " deciles = $( summary_output.deciles)\n\n" )
print( " poverty_line = $(summary_output.poverty_line)\n\n" )
print( " inequality = $(summary_output.inequality)\n\n" )
print( " poverty = $(summary_output.poverty)\n\n" )
print( " gainlose_by_sex = $(summary_output.gainlose_by_sex)\n\n" )
print( " gainlose_by_thing = $(summary_output.gainlose_by_thing)\n\n" )
print( " metr_histogram= $(summary_output.metr_histogram)\n\n")
println( "SUMMARY OUTPUT")
println( summary_output )
println( "as JSON")
println( JSON.json( summary_output ))
end
=# | 32.320755 | 85 | 0.725044 | [
"@testset \"basic run timing\" begin\n for mt in instances( MT_Routing )\n println( \"starting run using $mt routing\")\n @time basic_run( print_test=true, mtrouting = mt )\n end\n # @benchmark frames = \n # print(t)\nend"
] |
f727f80483dbefe80bf5db5ac82e2786aea040ee | 1,036 | jl | Julia | test/runtests.jl | mauro3/course-101-0250-00-L6Testing.jl | c7d47e770d5eabbf7f28784f9a9bd279a3042af8 | [
"MIT"
] | 1 | 2022-03-01T09:48:55.000Z | 2022-03-01T09:48:55.000Z | test/runtests.jl | mauro3/course-101-0250-00-L6Testing.jl | c7d47e770d5eabbf7f28784f9a9bd279a3042af8 | [
"MIT"
] | null | null | null | test/runtests.jl | mauro3/course-101-0250-00-L6Testing.jl | c7d47e770d5eabbf7f28784f9a9bd279a3042af8 | [
"MIT"
] | 1 | 2021-11-02T10:16:55.000Z | 2021-11-02T10:16:55.000Z | using Test, ReferenceTests, BSON
include("../scripts/car_travels.jl")
## Unit tests
@testset "update_position" begin
@test update_position(0.0, 10, 1, 1, 200)[1] β 10.0
@test update_position(0.0, 10, 1, 1, 200)[2] == 1
@test update_position(0.0, 10, -1, 1, 200)[1] β -10.0
@test update_position(0.0, 10, -1, 1, 200)[2] == 1
@test update_position(0.0, 10, -1, 1, 200)[1] β -10.0
@test update_position(0.0, 10, -1, 1, 200)[2] == 1
end
## Reference Tests with ReferenceTests.jl
# We put both arrays X and T into a BSON.jl and then compare them
"Compare all dict entries"
comp(d1, d2) = keys(d1) == keys(d2) &&
all([ v1βv2 for (v1,v2) in zip(values(d1), values(d2))])
# run the model
T, X = car_travel_1D()
# Test just at some random indices. As for larger models,
# storing the full output array would create really large files!
inds = [18, 27, 45, 68, 71, 71, 102, 110, 123, 144]
d = Dict(:X=> X[inds], :T=>T[inds])
@testset "Ref-tests" begin
@test_reference "reftest-files/X.bson" d by=comp
end
| 28.777778 | 65 | 0.642857 | [
"@testset \"update_position\" begin\n @test update_position(0.0, 10, 1, 1, 200)[1] β 10.0\n @test update_position(0.0, 10, 1, 1, 200)[2] == 1\n\n @test update_position(0.0, 10, -1, 1, 200)[1] β -10.0\n @test update_position(0.0, 10, -1, 1, 200)[2] == 1\n\n @test update_position(0.0, 10, -1, 1, 200)[1] β -10.0\n @test update_position(0.0, 10, -1, 1, 200)[2] == 1\nend",
"@testset \"Ref-tests\" begin\n @test_reference \"reftest-files/X.bson\" d by=comp\nend"
] |
f72e3f7fe6055a37c495d6361bfec1323eaa14a6 | 89 | jl | Julia | test/runtests.jl | Shoram444/MPThemes.jl | 86a6699f70a3b7f77d6ae6a248b285cb46f26852 | [
"MIT"
] | null | null | null | test/runtests.jl | Shoram444/MPThemes.jl | 86a6699f70a3b7f77d6ae6a248b285cb46f26852 | [
"MIT"
] | null | null | null | test/runtests.jl | Shoram444/MPThemes.jl | 86a6699f70a3b7f77d6ae6a248b285cb46f26852 | [
"MIT"
] | null | null | null | using MPThemes
using Test
@testset "MPThemes.jl" begin
# Write your tests here.
end
| 12.714286 | 28 | 0.730337 | [
"@testset \"MPThemes.jl\" begin\n # Write your tests here.\nend"
] |
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 79