content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
"""
    InterfaceOperators(operators, celltooperator)

Table mapping cut cells to their interface operators.

- `operators`: 4 × numoperators matrix; the row for a sign couple `(s1, s2)`
  is given by `cell_couple_sign_to_row(s1, s2)`.
- `celltooperator`: per-cell column index into `operators`; `0` means the
  cell has no interface operator.
- `ncells`, `numoperators`: cached sizes.
"""
struct InterfaceOperators
    operators::Any
    celltooperator::Any
    ncells::Any
    numoperators::Any
    function InterfaceOperators(operators, celltooperator)
        ncells = length(celltooperator)
        numcouples, numoperators = size(operators)
        # Exactly one row per sign couple (+/- on either side of the interface).
        @assert numcouples == 4
        # Every cell entry must be 0 (no operator) or a valid column index.
        @assert all(celltooperator .>= 0)
        @assert all(celltooperator .<= numoperators)
        new(operators, celltooperator, ncells, numoperators)
    end
end
# Look up the operator stored for cell `cellid` and the sign couple (s1, s2).
# Errors when the cell carries no interface operator.
function Base.getindex(interfaceoperator::InterfaceOperators, s1, s2, cellid)
    oprow = cell_couple_sign_to_row(s1, s2)
    opcol = interfaceoperator.celltooperator[cellid]
    opcol > 0 || error("Cell $cellid does not have an operator")
    return interfaceoperator.operators[oprow, opcol]
end
# Compact REPL summary of the operator table.
function Base.show(io::IO, interfaceoperator::InterfaceOperators)
    ncells = number_of_cells(interfaceoperator)
    nops = number_of_operators(interfaceoperator)
    print(io, "InterfaceOperator\n\tNum. Cells: $ncells\n\tNum. Operators: $nops")
end
# Total number of cells represented in the operator table.
number_of_cells(iop::InterfaceOperators) = iop.ncells

# Number of stored operator columns (one per interface cell).
number_of_operators(iop::InterfaceOperators) = iop.numoperators
"""
    interface_mass_operators(basis, interfacequads, cellmap, cellsign, penalty)

Assemble penalty-scaled interface mass operators for every cut cell
(`cellsign == 0`). Returns an [`InterfaceOperators`](@ref) with one column
per interface cell and one row per sign couple.
"""
function interface_mass_operators(basis, interfacequads, cellmap, cellsign, penalty)
    # Fix: removed the unused local `dim = dimension(basis)` from the original.
    ncells = length(cellsign)
    hasinterface = cellsign .== 0
    numinterfaces = count(hasinterface)
    operators = Matrix{Any}(undef, 4, numinterfaces)
    celltooperator = zeros(Int, ncells)
    cellids = findall(hasinterface)
    for (idx, cellid) in enumerate(cellids)
        normal = interface_normals(interfacequads, cellid)
        facescale = scale_area(cellmap, normal)
        for s1 in [+1, -1]
            quad1 = interfacequads[s1, cellid]
            for s2 in [+1, -1]
                row = cell_couple_sign_to_row(s1, s2)
                quad2 = interfacequads[s2, cellid]
                mass = penalty * interface_mass_matrix(basis, quad1, quad2, facescale)
                operators[row, idx] = mass
            end
        end
        celltooperator[cellid] = idx
    end
    return InterfaceOperators(operators, celltooperator)
end
# Convenience overload: extract the cell map and cell signs from `cutmesh`.
function interface_mass_operators(basis, interfacequads, cutmesh, penalty)
    return interface_mass_operators(
        basis,
        interfacequads,
        cell_map(cutmesh, 1),
        cell_sign(cutmesh),
        penalty,
    )
end
"""
    interface_incoherent_mass_operators(basis, interfacequads, cellmap, cellsign, penalty)

Assemble penalty-scaled incoherent (normal-projected) interface mass
operators for every cut cell (`cellsign == 0`). The interface normals are
used as the projection components.
"""
function interface_incoherent_mass_operators(
    basis,
    interfacequads,
    cellmap,
    cellsign,
    penalty,
)
    # Fixes vs. original: removed the unused local `dim = dimension(basis)`
    # and the redundant alias `components = normal`.
    ncells = length(cellsign)
    hasinterface = cellsign .== 0
    numinterfaces = count(hasinterface)
    operators = Matrix{Any}(undef, 4, numinterfaces)
    celltooperator = zeros(Int, ncells)
    cellids = findall(hasinterface)
    for (idx, cellid) in enumerate(cellids)
        normal = interface_normals(interfacequads, cellid)
        facescale = scale_area(cellmap, normal)
        for s1 in [+1, -1]
            quad1 = interfacequads[s1, cellid]
            for s2 in [+1, -1]
                row = cell_couple_sign_to_row(s1, s2)
                quad2 = interfacequads[s2, cellid]
                mass =
                    penalty * interface_component_mass_matrix(
                        basis,
                        quad1,
                        quad2,
                        normal,
                        facescale,
                    )
                operators[row, idx] = mass
            end
        end
        celltooperator[cellid] = idx
    end
    return InterfaceOperators(operators, celltooperator)
end
# Convenience overload: extract the cell map and cell signs from `cutmesh`.
function interface_incoherent_mass_operators(basis, interfacequads, cutmesh, penalty)
    return interface_incoherent_mass_operators(
        basis,
        interfacequads,
        cell_map(cutmesh, 1),
        cell_sign(cutmesh),
        penalty,
    )
end
# Assemble coherent traction operators for every cut cell (`cellsign == 0`):
# one operator column per interface cell, one row per sign couple.
function interface_traction_operators(basis, interfacequads, stiffness, cellmap, cellsign)
    totalcells = length(cellsign)
    iscut = cellsign .== 0
    operators = Matrix{Any}(undef, 4, count(iscut))
    celltooperator = zeros(Int, totalcells)
    for (col, cellid) in enumerate(findall(iscut))
        normal = interface_normals(interfacequads, cellid)
        for s1 in [+1, -1]
            quad1 = interfacequads[s1, cellid]
            for s2 in [+1, -1]
                quad2 = interfacequads[s2, cellid]
                operators[cell_couple_sign_to_row(s1, s2), col] =
                    coherent_traction_operator(
                        basis,
                        quad1,
                        quad2,
                        normal,
                        stiffness[s2],
                        cellmap,
                    )
            end
        end
        celltooperator[cellid] = col
    end
    return InterfaceOperators(operators, celltooperator)
end
# Convenience overload: extract the cell map and cell signs from `cutmesh`.
function interface_traction_operators(basis, interfacequads, stiffness, cutmesh)
    return interface_traction_operators(
        basis,
        interfacequads,
        stiffness,
        cell_map(cutmesh, 1),
        cell_sign(cutmesh),
    )
end
# Assemble incoherent (normal-projected) traction operators for every cut
# cell of `cutmesh` (`cellsign == 0`).
function interface_incoherent_traction_operators(basis, interfacequads, stiffness, cutmesh)
    cellsign = cell_sign(cutmesh)
    cellmap = cell_map(cutmesh, 1)
    totalcells = length(cellsign)
    iscut = cellsign .== 0
    operators = Matrix{Any}(undef, 4, count(iscut))
    celltooperator = zeros(Int, totalcells)
    for (col, cellid) in enumerate(findall(iscut))
        normals = interface_normals(interfacequads, cellid)
        for s1 in [+1, -1]
            quad1 = interfacequads[s1, cellid]
            for s2 in [+1, -1]
                quad2 = interfacequads[s2, cellid]
                operators[cell_couple_sign_to_row(s1, s2), col] =
                    incoherent_traction_operator(
                        basis,
                        quad1,
                        quad2,
                        normals,
                        stiffness[s2],
                        cellmap,
                    )
            end
        end
        celltooperator[cellid] = col
    end
    return InterfaceOperators(operators, celltooperator)
end
"""
    InterfaceCondition(tractionoperator, massoperator, penalty)

Pair of interface operator tables — traction operators plus penalty-scaled
displacement mass operators — enforcing the interface condition on cut cells.
"""
struct InterfaceCondition
    tractionoperator::Any
    massoperator::Any
    penalty::Any
    ncells::Any
    function InterfaceCondition(
        tractionoperator::InterfaceOperators,
        massoperator::InterfaceOperators,
        penalty,
    )
        ncells = number_of_cells(massoperator)
        # Both operator tables must describe the same mesh.
        @assert number_of_cells(tractionoperator) == ncells
        new(tractionoperator, massoperator, penalty, ncells)
    end
end
# Build the coherent interface condition: coherent traction operators plus
# penalty-scaled displacement mass operators.
function coherent_interface_condition(basis, interfacequads, stiffness, cutmesh, penalty)
    return InterfaceCondition(
        interface_traction_operators(basis, interfacequads, stiffness, cutmesh),
        interface_mass_operators(basis, interfacequads, cutmesh, penalty),
        penalty,
    )
end
# Build the incoherent interface condition: normal-projected traction and
# mass operators.
function incoherent_interface_condition(basis, interfacequads, stiffness, cutmesh, penalty)
    return InterfaceCondition(
        interface_incoherent_traction_operators(basis, interfacequads, stiffness, cutmesh),
        interface_incoherent_mass_operators(basis, interfacequads, cutmesh, penalty),
        penalty,
    )
end
# Traction operator for cell `cellid` and sign couple (s1, s2).
traction_operator(interfacecondition::InterfaceCondition, s1, s2, cellid) =
    interfacecondition.tractionoperator[s1, s2, cellid]

# Mass operator for cell `cellid` and sign couple (s1, s2).
mass_operator(interfacecondition::InterfaceCondition, s1, s2, cellid) =
    interfacecondition.massoperator[s1, s2, cellid]
# Compact REPL summary of the interface condition.
function Base.show(io::IO, interfacecondition::InterfaceCondition)
    print(
        io,
        "InterfaceCondition\n\tNum. Cells: $(interfacecondition.ncells)" *
        "\n\tDisplacement Penalty: $(interfacecondition.penalty)",
    )
end
"""
    coherent_traction_operator(basis, quad1, quad2, normals, stiffness, cellmap)

Assemble the interface coupling matrix `∑ NI' * N * stiffness * NK * scale * w`
over matched quadrature points: `quad1` supplies the test-function points,
`quad2` the trial-function points, and `normals` one normal per point. The
two rules must carry identical weights.
"""
function coherent_traction_operator(basis, quad1, quad2, normals, stiffness, cellmap)
    numqp = length(quad1)
    @assert length(quad2) == size(normals)[2] == numqp
    dim = dimension(basis)
    nf = number_of_basis_functions(basis)
    ndofs = dim * nf
    matrix = zeros(ndofs, ndofs)
    # Matrices mapping vector components to symmetric-tensor rows —
    # presumably a Voigt-style map; confirm in vector_to_symmetric_matrix_converter.
    vectosymmconverter = vector_to_symmetric_matrix_converter()
    jac = jacobian(cellmap)
    scalearea = scale_area(cellmap, normals)
    for qpidx = 1:numqp
        p1, w1 = quad1[qpidx]
        p2, w2 = quad2[qpidx]
        # Matched rules: the weights must agree pointwise.
        @assert w1 ≈ w2
        vals = basis(p1)
        grad = transform_gradient(gradient(basis, p2), jac)
        normal = normals[:, qpidx]
        # NK: symmetric-gradient operator of the trial functions.
        NK = sum([make_row_matrix(vectosymmconverter[k], grad[:, k]) for k = 1:dim])
        # N: contraction with the interface normal.
        N = sum([normal[k] * vectosymmconverter[k]' for k = 1:dim])
        NI = interpolation_matrix(vals, dim)
        matrix .+= NI' * N * stiffness * NK * scalearea[qpidx] * w1
    end
    return matrix
end
"""
    component_traction_operator(basis, quad1, quad2, components, normals, stiffness, cellmap)

Like [`coherent_traction_operator`](@ref), but at each quadrature point the
traction is projected onto the direction `components[:, qpidx]` via the
rank-one projector `component * component'`.
"""
function component_traction_operator(
    basis,
    quad1,
    quad2,
    components,
    normals,
    stiffness,
    cellmap,
)
    numqp = length(quad1)
    @assert length(quad2) == size(normals)[2] == size(components)[2] == numqp
    dim = dimension(basis)
    nf = number_of_basis_functions(basis)
    ndofs = dim * nf
    matrix = zeros(ndofs, ndofs)
    vectosymmconverter = vector_to_symmetric_matrix_converter()
    jac = jacobian(cellmap)
    scalearea = scale_area(cellmap, normals)
    for qpidx = 1:numqp
        p1, w1 = quad1[qpidx]
        p2, w2 = quad2[qpidx]
        # Matched rules: the weights must agree pointwise.
        @assert w1 ≈ w2
        vals = basis(p1)
        grad = transform_gradient(gradient(basis, p2), jac)
        normal = normals[:, qpidx]
        component = components[:, qpidx]
        # Rank-one projector onto the component direction.
        projector = component * component'
        NK = sum([make_row_matrix(vectosymmconverter[k], grad[:, k]) for k = 1:dim])
        N = sum([normal[k] * vectosymmconverter[k]' for k = 1:dim])
        NI = interpolation_matrix(vals, dim)
        matrix .+= NI' * projector * N * stiffness * NK * scalearea[qpidx] * w1
    end
    return matrix
end
# Incoherent traction operator: the component traction operator with the
# interface normals reused as the projection components.
function incoherent_traction_operator(basis, quad1, quad2, normals, stiffness, cellmap)
    return component_traction_operator(
        basis,
        quad1,
        quad2,
        normals,
        normals,
        stiffness,
        cellmap,
    )
end
"""
    interface_mass_matrix(basis, quad1, quad2, scale)

Mass matrix `∑ NI1' * NI2 * scale * w` coupling basis values at matched
quadrature points of two rules over the same interface; `scale` carries one
area-scaling factor per point and the weights of both rules must agree.
"""
function interface_mass_matrix(basis, quad1, quad2, scale)
    numqp = length(quad1)
    @assert length(quad2) == length(scale) == numqp
    nf = number_of_basis_functions(basis)
    dim = dimension(basis)
    totaldofs = dim * nf
    matrix = zeros(totaldofs, totaldofs)
    for qpidx = 1:numqp
        p1, w1 = quad1[qpidx]
        p2, w2 = quad2[qpidx]
        # Matched rules: the weights must agree pointwise.
        @assert w1 ≈ w2
        vals1 = basis(p1)
        vals2 = basis(p2)
        NI1 = interpolation_matrix(vals1, dim)
        NI2 = interpolation_matrix(vals2, dim)
        matrix .+= NI1' * NI2 * scale[qpidx] * w1
    end
    return matrix
end
"""
    interface_component_mass_matrix(basis, quad1, quad2, components, scale)

Like [`interface_mass_matrix`](@ref), but at each point the coupling is
projected through the rank-one projector `component * component'`.
"""
function interface_component_mass_matrix(basis, quad1, quad2, components, scale)
    numqp = length(quad1)
    @assert length(quad2) == length(scale) == size(components)[2] == numqp
    nf = number_of_basis_functions(basis)
    dim = dimension(basis)
    totaldofs = dim * nf
    matrix = zeros(totaldofs, totaldofs)
    for qpidx = 1:numqp
        p1, w1 = quad1[qpidx]
        p2, w2 = quad2[qpidx]
        # Matched rules: the weights must agree pointwise.
        @assert w1 ≈ w2
        component = components[:, qpidx]
        projector = component * component'
        vals1 = basis(p1)
        vals2 = basis(p2)
        NI1 = interpolation_matrix(vals1, dim)
        # NOTE(review): the second factor uses make_row_matrix(projector, vals2)
        # rather than projecting an interpolation matrix as elsewhere — confirm
        # this is the intended projected interpolation.
        NI2 = make_row_matrix(projector, vals2)
        matrix .+= NI1' * NI2 * scale[qpidx] * w1
    end
    return matrix
end
| [
7249,
26491,
18843,
2024,
198,
220,
220,
220,
12879,
3712,
7149,
198,
220,
220,
220,
2685,
1462,
46616,
3712,
7149,
198,
220,
220,
220,
299,
46342,
3712,
7149,
198,
220,
220,
220,
997,
3575,
2024,
3712,
7149,
198,
220,
220,
220,
2163,... | 2.233685 | 5,302 |
# Script
# Demo: draw drift functions from the posterior of an SDE with a
# Faber-Schauder series prior (BayesianNonparametricStatistics.jl).
using Distributions, PyPlot, BayesianNonparametricStatistics
# Decay rate of the true Faber-Schauder coefficients.
β=0.5
# True drift θ: Faber-Schauder expansion up to level 4, coefficients (-1)^(j*k)*2^(-β*j).
θ = sumoffunctions(vcat([faberschauderone],[faberschauder(j,k) for j in 0:4 for k in 1:2^j]),vcat([1.0],[(-1)^(j*k)*2^(-β*j) for j in 0:4 for k in 1:2^j]))
# Evaluation grid on [0, 1].
x = 0.0:0.001:1.0
y = θ.(x)
# Uncomment the following lines to plot θ.
# clf()
# plot(x,y)
# Simulate a sample path — presumably (drift, variance, x0, tend, dt); confirm
# the argument order against the SDEWithConstantVariance docs.
sde = SDEWithConstantVariance(θ, 1.0, 0.0, 1000.0, 0.01)
X = rand(sde)
# Uncomment the following lines to plot a sample from sde.
# clf()
# plot(X)
M = SDEModel(1.0, 0.0)
# Faber-Schauder prior with Gaussian coefficients scaled by 2^(β*j) per level.
Π = FaberSchauderExpansionWithGaussianCoefficients([2^(β*j) for j in 0:4])
postΠ = calculateposterior(Π, X, M )
# Plot 100 posterior draws of the drift on the grid x.
for k in 1:100
    f = rand(postΠ)
    y = f.(x)
    plot(x,y)
end
| [
2,
12327,
198,
198,
3500,
46567,
507,
11,
9485,
43328,
11,
4696,
35610,
15419,
17143,
19482,
48346,
198,
198,
26638,
28,
15,
13,
20,
198,
138,
116,
796,
2160,
2364,
46797,
7,
85,
9246,
26933,
36434,
364,
354,
29233,
505,
38430,
36434,... | 2.063253 | 332 |
<reponame>JuliaPackageMirrors/Seismic.jl<filename>src/Wavelets/Berlage.jl<gh_stars>0
"""
Berlage(; <keyword arguments>)
Create a Berlage wavelet.
# Arguments
**Keyword arguments**
* `dt::Real=0.002`: sampling interval in secs.
* `f0::Real=20.0`: central frequency in Hz.
* `m::Real=2`: exponential parameter of Berlage wavelet.
* `alpha::Real=180.0`: alpha parameter of Berlage wavelet in rad/secs.
* `phi0::Real`: phase rotation in radians.
# Example
```julia
julia> w = Berlage(); plot(w);
```
**Reference**
* Aldridge, <NAME>., 1990, The berlage wavelet: GEOPHYSICS, 55, 1508--1511.
"""
function Berlage(; dt::Real=0.002, f0::Real=20.0, m::Real=2, alpha::Real=180.0,
phi0::Real=0.0)
nw = floor(Int, 2.2/(f0*dt))
t = dt*collect(0:1:nw-1)
w = (t.^m).*exp(-alpha*t).*cos(2*pi*f0*t + phi0);
w = w/maximum(w)
end
| [
27,
7856,
261,
480,
29,
16980,
544,
27813,
27453,
5965,
14,
4653,
1042,
291,
13,
20362,
27,
34345,
29,
10677,
14,
39709,
5289,
14,
24814,
75,
496,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
37811,
198,
220,
220,
220,
4312,
75,
49... | 2.171285 | 397 |
# Model tag for the Lemmon et al. (2000) reference equation of state for air.
# NOTE(review): "Lennnon" (triple n) looks like a typo for "Lemmon", but the
# name is part of the public interface and is kept as-is.
struct Lennnon2000Air <: ThermoState.ThermoModel end
# Above this tau, log(exp(87.31279*tau) + 2/3) equals 87.31279*tau to machine
# precision, so `_f0` switches to the linear form.
const TAU_MAX_EXP_87 = 0.4207493606569795
const lemmon2000_air_R = 8.314510               # gas constant [J/(mol K)]
const lemmon2000_air_T_reducing = 132.6312      # reducing temperature [K]
const lemmon2000_air_P_reducing = 3.78502E6     # reducing pressure [Pa]
const lemmon2000_air_rho_reducing = 10447.7     # reducing molar density
const lemmon2000_air_rho_reducing_inv = 1.0/lemmon2000_air_rho_reducing
const lemmon2000_air_MW = 28.9586               # molar mass of air [g/mol]
const lemmon2000_air_P_max = 2000E6             # pressure validity limit
const lemmon2000_air_T_max = 2000.              # temperature validity limit
# Molar mass accessor for the ThermoState interface.
molecular_weight(::Lennnon2000Air) = lemmon2000_air_MW
"""
    _f0(::Lennnon2000Air, delta, tau)

Ideal-gas dimensionless Helmholtz energy of the Lemmon (2000) air model at
reduced density `delta` and inverse reduced temperature `tau`.
"""
function _f0(::Lennnon2000Air, delta, tau)
    tau_inv = one(tau)/tau
    A0 = (-0.00019536342*tau*sqrt(tau) + 17.275266575*tau + tau_inv*(tau_inv*(6.057194e-8*tau_inv
    - 0.0000210274769) - 0.000158860716) + log(delta) + 2.490888032*log(tau)
    # These two logs both fail for tau < 1e-18, can be truncated but should not be necessary.
    + 0.791309509*log(1.0 - exp(-25.36365*tau)) + 0.212236768*log(1.0 - exp(-16.90741*tau))
    - 13.841928076)
    # For tau >= TAU_MAX_EXP_87 the 2/3 term is below machine precision, so
    # the log collapses to its linear limit 87.31279*0.197938904*tau.
    if tau < TAU_MAX_EXP_87
        A0 -= 0.197938904*log(exp(87.31279*tau) + (2.0/3.0))
    else
        A0 -= 17.282597957782162*tau # 17.282... = 87.31279*0.197938904
    end
    return A0
end
"""
    _fr(::Lennnon2000Air, delta, tau)

Residual dimensionless Helmholtz energy of the Lemmon (2000) air model at
reduced density `delta` and inverse reduced temperature `tau`. Fractional
powers of `tau` are precomputed by repeated multiplication of `tau^0.01` to
avoid many `^` calls.
"""
function _fr(::Lennnon2000Air,delta,tau)
    delta2 = delta*delta
    delta3 = delta*delta2
    delta4 = delta2*delta2
    delta5 = delta*delta4
    delta6 = delta2*delta4
    taurt2 = sqrt(tau)
    taurt4 = sqrt(taurt2)
    tau2 = tau*tau
    tau3 = tau*tau2
    tau6 = tau3*tau3
    tau12 = tau6*tau6
    tau_100 = tau^0.01
    tau2_100 = tau_100*tau_100
    tau4_100 = tau2_100*tau2_100
    tau5_100 = tau_100*tau4_100
    tau10_100 = tau5_100*tau5_100
    tau15_100 = tau5_100*tau10_100
    tau8_100 = tau4_100*tau4_100
    tau16_100 = tau8_100*tau8_100
    tau20_100 = tau4_100*tau16_100
    tau32_100 = tau16_100*tau16_100
    tau33_100 = tau_100*tau32_100
    tau64_100 = tau32_100*tau32_100
    tau80_100 = tau16_100*tau64_100
    tau40_100 = tau20_100*tau20_100
    tau97_100 = tau33_100*tau64_100
    tau45_100 = tau5_100*tau40_100
    tau90_100 = tau45_100*tau45_100
    tau160_100 = tau80_100*tau80_100
    tau320_100 = tau160_100*tau160_100
    # Exponential density damping factors of the residual terms.
    x0 = exp(-delta)
    x1 = exp(-delta2)
    x2 = tau3*exp(-delta3)
    return (-0.101365037911999994*delta*tau160_100*x0 + 0.713116392079000017*delta*tau33_100
    - 0.146629609712999986*delta*tau40_100*x1*tau320_100
    - 1.61824192067000006*delta*tau4_100*tau97_100 + 0.0148287891978000005*delta*taurt2*x2
    + 0.118160747228999996*delta + 0.0714140178971000017*delta2 + 0.134211176704000013*delta3*tau15_100
    - 0.031605587982100003*delta3*tau6*x1 - 0.17381369096999999*delta3*tau80_100*x0
    - 0.00938782884667000057*delta3*x2*tau12 - 0.0865421396646000041*delta3 - 0.042053322884200002*delta4*tau20_100
    + 0.0349008431981999989*delta4*tau2_100*tau33_100 + 0.0112626704218000001*delta4
    - 0.0472103183731000034*delta5*tau15_100*x0*tau80_100 + 0.000233594806141999996*delta5*tau3*taurt4*x1*delta6
    - 0.0122523554252999996*delta6*tau*taurt4*x0 + 0.000164957183186000006*delta6*tau45_100*tau90_100)
end
"""
    αR_impl(mt::SingleVT, model::Lennnon2000Air, _rho, T)

Residual reduced Helmholtz energy at molar density `_rho` (same units as the
reducing density) and temperature `T` [K].
"""
function αR_impl(mt::SingleVT, model::Lennnon2000Air, _rho, T)
    # Bug fixes: the original referenced undefined `rho` (the argument is
    # `_rho`) and undefined `model` (the model argument was unnamed), and
    # assigned an unused `R`.
    delta = _rho*lemmon2000_air_rho_reducing_inv
    tau = lemmon2000_air_T_reducing/T
    return _fr(model, delta, tau)
end
"""
    α0_impl(mt::SingleVT, model::Lennnon2000Air, _rho, T)

Ideal-gas reduced Helmholtz energy at molar density `_rho` (same units as the
reducing density) and temperature `T` [K].
"""
function α0_impl(mt::SingleVT, model::Lennnon2000Air, _rho, T)
    # Bug fixes: undefined `rho`/`model` in the original; unused `R` removed.
    delta = _rho*lemmon2000_air_rho_reducing_inv
    tau = lemmon2000_air_T_reducing/T
    return _f0(model, delta, tau)
end
"""
    mol_helmholtzR_impl(mt::SingleVT, model::Lennnon2000Air, v, t)

Residual molar Helmholtz energy `R*t*αR` at molar volume `v` and
temperature `t` [K].
"""
function mol_helmholtzR_impl(mt::SingleVT, model::Lennnon2000Air, v, t)
    # Bug fixes vs. original: `model` and `T` were undefined (the temperature
    # argument is lowercase `t`), and the computed `rho` was ignored when
    # forming `delta`, dropping the 1.0e-3 factor.
    rho = 1.0e-3 / v  # molar density from molar volume — TODO confirm unit convention
    delta = rho * lemmon2000_air_rho_reducing_inv
    tau = lemmon2000_air_T_reducing / t
    return lemmon2000_air_R * t * _fr(model, delta, tau)
end
"""
    mol_helmholtz0_impl(mt::SingleVT, model::Lennnon2000Air, v, t)

Ideal-gas molar Helmholtz energy `R*t*α0` at molar volume `v` and
temperature `t` [K].
"""
function mol_helmholtz0_impl(mt::SingleVT, model::Lennnon2000Air, v, t)
    # Bug fixes vs. original: undefined `model`/`T` (argument is lowercase
    # `t`) and the computed `rho` was ignored when forming `delta`.
    rho = 1.0e-3 / v  # molar density from molar volume — TODO confirm unit convention
    delta = rho * lemmon2000_air_rho_reducing_inv
    tau = lemmon2000_air_T_reducing / t
    return lemmon2000_air_R * t * _f0(model, delta, tau)
end
"""
    mol_helmholtz_impl(mt::SingleVT, model::Lennnon2000Air, v, t)

Total molar Helmholtz energy `R*t*(α0 + αR)` at molar volume `v` and
temperature `t` [K].
"""
function mol_helmholtz_impl(mt::SingleVT, model::Lennnon2000Air, v, t)
    # Bug fixes vs. original: undefined `model`/`T` (argument is lowercase
    # `t`) and the computed `rho` was ignored when forming `delta`.
    rho = 1.0e-3 / v  # molar density from molar volume — TODO confirm unit convention
    delta = rho * lemmon2000_air_rho_reducing_inv
    tau = lemmon2000_air_T_reducing / t
    return lemmon2000_air_R * t * (_f0(model, delta, tau) + _fr(model, delta, tau))
end
| [
7249,
28423,
13159,
11024,
16170,
1279,
25,
12634,
5908,
9012,
13,
35048,
5908,
17633,
886,
198,
9979,
21664,
52,
62,
22921,
62,
49864,
62,
5774,
796,
657,
13,
19,
22745,
2920,
15277,
2996,
3388,
41544,
198,
9979,
443,
76,
2144,
11024,
... | 1.861965 | 2,311 |
<gh_stars>1-10
export SingleLayer
"""
singleLayer
σ(K*s+b)
where K,b are trainable weights
"""
struct SingleLayer
end
# Smoothed absolute value σ(x) = |x| + log(1 + exp(-2|x|)) (= log(2cosh(x))),
# written in an overflow-safe form; applied elementwise.
mσ(x::AbstractArray{R}) where R<:Real = abs.(x) .+ log.(one(R) .+ exp.(R(-2) .* abs.(x)))
# First derivative of mσ, elementwise: σ'(x) = tanh(x).
mdσ(x::AbstractArray{R}) where R<:Real = tanh.(x)
# Second derivative of mσ, elementwise: σ''(x) = 1 - tanh(x)^2.
md2σ(x::AbstractArray{R}) where R<:Real = one(R) .- tanh.(x) .^ 2
"""
evaluate layer for current weights Θ=(K,b)
"""
function (N::SingleLayer)(S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(K,b) = Θ
return mσ(K*S .+ b)
end
"""
compute matvec J_S N(S,Θ)'*Z
"""
function getJSTmv(N::SingleLayer,Z::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(K,b) = Θ
return K'*(mdσ(K*S .+ b) .* Z)
end
function getJSTmv(N::SingleLayer,Z::AbstractArray{R,3},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(d,nex) = size(S)
(K,b) = Θ
t1 = reshape(mdσ(K*S .+ b),size(K,1),1,nex)
t2 = K'*reshape(t1 .* Z, size(K,1),:)
return reshape(t2,size(K,2),size(Z,2),nex)
end
"""
compute hessian matvec
"""
function getTraceHessAndGrad(N::SingleLayer, w::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(d,nex) = size(S)
(K,b) = Θ
t1 = K*S .+ b
Jac = reshape(mdσ(t1),size(K,1),1,nex) .* K
return vec(sum(reshape(md2σ(t1) .* w,size(K,1),:,nex).*(K.^2),dims=(1,2))), Jac
end
function getTraceHessAndGrad(N::SingleLayer, w::AbstractArray{R},Jac::AbstractArray{R,2},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(d,nex) = size(S)
(K,b) = Θ
t1 = K*S .+ b;
Jac = K * Jac
trH = vec(sum(reshape(md2σ(t1) .* w,size(K,1),:,nex).*(Jac).^2,dims=(1,2)))
Jac = reshape(mdσ(t1),size(K,1),1,nex) .* Jac
return trH,Jac
end
function getTraceHessAndGrad(N::SingleLayer, w::AbstractArray{R},Jac::AbstractArray{R,3},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(d,nex) = size(S)
(K,b) = Θ
t1 = K*S .+ b;
Jac = reshape(K* reshape(Jac,size(K,2),:), size(K,1),:,nex)
trH = vec(sum(reshape(md2σ(t1) .* w,size(K,1),:,nex).*(Jac).^2,dims=(1,2)))
Jac = reshape(mdσ(t1),size(K,1),1,nex) .* Jac
return trH,Jac
end
# Same computations as getTraceHessAndGrad, returning only the trace term
# (the updated Jacobian is not handed back to the caller).
function getTraceHess(N::SingleLayer, w::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    t1 = K*S .+ b
    return vec(sum(reshape(md2σ(t1) .* w,size(K,1),:,nex).*(K.^2),dims=(1,2)))
end
# Variant chaining a 2D Jacobian shared across examples.
function getTraceHess(N::SingleLayer, w::AbstractArray{R},Jac::AbstractArray{R,2},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    t1 = K*S .+ b;
    Jac = K * Jac
    trH = vec(sum(reshape(md2σ(t1) .* w,size(K,1),:,nex).*(Jac).^2,dims=(1,2)))
    return trH
end
# Variant chaining a batched (3D) Jacobian, one slice per example.
function getTraceHess(N::SingleLayer, w::AbstractArray{R},Jac::AbstractArray{R,3},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    t1 = K*S .+ b;
    Jac = reshape(K* reshape(Jac,size(K,2),:), size(K,1),:,nex)
    trH = vec(sum(reshape(md2σ(t1) .* w,size(K,1),:,nex).*(Jac).^2,dims=(1,2)))
    return trH
end
# Diagonal Hessian term along directions `Z`: column sums of
# σ''(K*S+b) .* w .* (K*Z).^2, one value per example.
function getDiagHess(N::SingleLayer, w::AbstractArray{R}, Z::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    return sum((md2σ(K*S .+ b) .* w).*(K*Z).^2,dims=1)
end
# Hessian-vector product of the weighted layer: K' * (σ''(K*S+b) .* w .* (K*Z)).
function getHessmv(N::SingleLayer, w::AbstractArray{R}, Z::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    return K'*(md2σ(K*S .+ b) .* w .* (K*Z))
end
# Batched (3D) variant of the Hessian-vector product, one slice of `Z` per example.
function getHessmv(N::SingleLayer, w::AbstractArray{R}, Z::AbstractArray{R,3},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    t1 = reshape(md2σ(K*S .+ b).*w,size(K,1),1,nex)
    t2 = t1 .* reshape(K*reshape(Z,size(K,2),:),size(K,1),:,nex)
    t2 = K'* reshape(t2,size(K,1),:)
    return reshape(t2,size(K,2),size(Z,2),nex)
end
"""
compute matvec J_S N(S,Θ)*Z
"""
function getJSmv(N::SingleLayer,Z::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(K,b) = Θ
return mdσ(K*S .+ b) .* (K * Z)
end
"""
compute matvec J_S N(S,Θ)*Z
"""
function getJSmv(N::SingleLayer,Z::AbstractArray{R,3},S::AbstractArray{R,2},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(d,nex) = size(S)
(K,b) = Θ
# K * Z
KZ = K*reshape(Z,size(K,2),:)
return reshape(mdσ(K*S .+ b),size(K,1),1,nex) .* reshape(KZ,size(K,1),size(Z,2),nex)
end
"""
compute matvec J_S(J_S N(S,Θ)'*Z(S))
here we use product rule
J_S N(S,Θ)'*dZ + J_S(N(S,Θ)'*Zfix)
"""
function getJSJSTmv(N::SingleLayer,dz::AbstractVector{R},d2z::AbstractArray{R},s::AbstractVector{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(K,b) = Θ
t1 = K*s + b
ndσ = mdσ(t1)
return K'* ( Diagonal(md2σ(t1) .* dz) +
ndσ .* d2z .* ndσ') *K
end
function getJSJSTmv(N::SingleLayer,dZ::AbstractArray{R},d2Z::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
(K,b) = Θ
t1 = K* S .+ b
(d,nex) = size(t1)
H1 = getJSJSTmv(N,dZ,S,Θ)
t2 = mdσ(t1)
H2 = reshape(t2,d,1,:) .* d2Z .* reshape(t2,1,d,:)
s1 = K' * reshape(H2,size(K,1),:)
s1 = permutedims(reshape(s1,size(K,2),size(K,1),nex),(2,1,3))
s2 = K'*reshape(s1,size(K,1),:)
return H1 + permutedims(reshape(s2,size(K,2),size(K,2),nex),(2,1,3))
end
function getJSJSTmv(N::SingleLayer,dZ::AbstractArray{R},S::AbstractArray{R,2},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R<:Real
(K,b) = Θ
t1 = K* S .+ b
(d,nex) = size(t1)
dZ2 = reshape(dZ .* md2σ(t1),size(dZ,1),1,nex)
KtdZK = K'*reshape(dZ2.*K,size(K,1),:)
return reshape(KtdZK,size(K,2),size(K,2),nex)
end
# Gradient K'*(σ'(t1).*dZ) and batched Hessian of the layer; no d2Z is
# given here, so the chained second-order term is taken to be zero.
function getGradAndHessian(N::SingleLayer,dZ::AbstractArray{R},S::AbstractArray{R,2},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R<:Real
    # Here, no d2Z is give, so we assume it is zero
    (K,b) = Θ
    t1 = K* S .+ b
    (d,nex) = size(t1)
    t2 = reshape(dZ .* md2σ(t1),size(dZ,1),1,nex)
    KtdZK = K'*reshape(t2.*K,size(K,1),:)
    H = reshape(KtdZK,size(K,2),size(K,2),nex)
    return K'*(mdσ(t1) .* dZ),H
end
# Variant with a chained second-order term d2Z: the Hessian adds the
# sandwich σ' .* d2Z .* σ' mapped through K on both sides.
function getGradAndHessian(N::SingleLayer,dZ::AbstractArray{R},d2Z::AbstractArray{R},S::AbstractArray{R},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (K,b) = Θ
    t1 = K * S .+ b
    (d,nex) = size(t1)
    t2 = reshape(dZ .* md2σ(t1),size(dZ,1),1,nex)
    KtdZK = K'*reshape(t2.*K,size(K,1),:)
    H1 = reshape(KtdZK,size(K,2),size(K,2),nex)
    dσt = mdσ(t1)
    t3 = reshape(dσt,d,1,:) .* d2Z .* reshape(dσt,1,d,:)
    s1 = K' * reshape(t3,size(K,1),:)
    s1 = permutedims(reshape(s1,size(K,2),size(K,1),nex),(2,1,3))
    s2 = K'*reshape(s1,size(K,1),:)
    H2 = permutedims(reshape(s2,size(K,2),size(K,2),nex),(2,1,3))
    return K'*(dσt .* dZ), H1+H2
end
# Batched second-order sandwich: apply diag(σ')*K' to d2Z, transpose the
# slices, and apply it again — i.e. J' d2Z J per example.
function getJSJSTmv(N::SingleLayer,d2Z::AbstractArray{R,3},S::AbstractArray{R,2},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}}) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    # K * Z
    t1 = reshape(mdσ(K*S .+ b),size(K,1),1,nex)
    t1 = K'*reshape(t1 .* d2Z, size(K,1),:)
    t1 = reshape(t1,size(K,2),size(d2Z,2),nex)
    permutedims!(t1,t1,(2,1,3))
    t1 = K'*reshape(t1 .* d2Z, size(K,1),:)
    t1 = reshape(t1,size(K,2),size(d2Z,2),nex)
    return t1
end
# Variant with step size `hk`: accumulates d2Z + hk * (sandwich term) at each
# of the two applications.
function getJSJSTmv(N::SingleLayer,d2Z::AbstractArray{R,3},S::AbstractArray{R,2},Θ::Tuple{AbstractArray{R,2},AbstractArray{R,1}},hk::R) where R <: Real
    (d,nex) = size(S)
    (K,b) = Θ
    # K * Z
    t1 = reshape(mdσ(K*S .+ b),size(K,1),1,nex)
    t1 = K'*reshape(t1 .* d2Z, size(K,1),:)
    t1 = d2Z + hk .* reshape(t1,size(K,2),size(d2Z,2),nex)
    permutedims!(t1,t1,(2,1,3))
    t1 = K'*reshape(t1 .* d2Z, size(K,1),:)
    t1 = d2Z + hk .* reshape(t1,size(K,2),size(d2Z,2),nex)
    return t1
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
39344,
14206,
49925,
198,
198,
37811,
198,
29762,
49925,
198,
198,
38392,
7,
42,
9,
82,
10,
65,
8,
198,
198,
3003,
509,
11,
65,
389,
4512,
540,
19590,
198,
198,
37811,
198,
7249,
14206,
... | 1.906438 | 4,318 |
"""
parseFunctionNode(nodeDict::Dict)
Parses a [`FunctionNode`](@ref) from a node set configuration file.
"""
function parseFunctionNode(nodeDict::Dict)
func = get(nodeDict, "function", false)
if func == false
error("function field is not set in FunctionNode")
else
aux = 0
try
aux = eval(Meta.parse(func))
catch e
error("The following function: '$func' in FunctionNode is not defined")
end
if typeof(aux) <: Function
func = aux
else
error("The following function: '$func' in FunctionNode is not a function")
end
end
arity = get(nodeDict, "arity", false)
if arity == false
error("arity field is not set in FunctionNode: '$func'")
else
if !(typeof(arity) <: Integer)
error("arity field ($arity) in FunctionNode '$func' must be an integer number")
elseif arity <= 0
error("Arity must be an integer greater than 0 in FunctionNode: '$func'")
end
end
returnType = get(nodeDict, "returnType", "")
if !(typeof(returnType) <: String)
error("returnType field must be a string in FunctionNode: '$func'")
end
if returnType != ""
aux = 0
try
aux = eval(Meta.parse(returnType))
catch e
error("The following type: '$returnType' in TerminalNode: '$func' is not defined")
end
if typeof(aux) <: DataType || typeof(aux) <: Union
returnType = aux
else
error("The following type: '$returnType' in TerminalNode: '$func' is not a type")
end
else
error("Function nodes must have a return type in FunctionNode: '$func'")
end
argTypes = get(nodeDict, "argumentTypes", false)
if argTypes == false
error("Function nodes must have a list of argument types in FunctionNode: '$func'")
end
if arity == 1 && !(typeof(argTypes) <: Array)
argTypes = [argTypes]
elseif arity > 1 && !(typeof(argTypes) <: Array)
error("argumentTypes field must be an array of string when arity is greater than 1 in FunctionNode: '$func'")
elseif arity > 1 && !(typeof(argTypes) <: Array)
error("argumentTypes field must be an array of string when arity is greater than 1 in FunctionNode: '$func'")
end
if length(argTypes) != arity
error("Number of elements in argumentTypes field must match arity field in FunctionNode: '$func'")
end
for i=1:length(argTypes)
if !(typeof(argTypes[i]) <: String)
error("The elements of argumentTypes field must be strings in FunctionNode: '$func'")
end
try
aux = eval(Meta.parse(argTypes[i]))
catch e
error("The following type: '$(argTypes[i])' in TerminalNode: '$func' is not defined")
end
if typeof(aux) <: DataType || typeof(aux) <: Union
argTypes[i] = aux
else
error("The following type: '$(argTypes[i])' in TerminalNode: '$func' is not a type")
end
end
argTypes = Array{Union{DataType, Union}}(argTypes)
return FunctionNode(func, arity, returnType, argTypes)
end # function
"""
parseTerminalNode(nodeDict::Dict)
Parses a [`TerminalNode`](@ref) from a node set configuration file.
"""
function parseTerminalNode(nodeDict::Dict)
terminalNode = 0
kind = get(nodeDict, "kind", false)
if kind == false
error("kind field not specified in TerminalNode")
elseif !(typeof(kind) <: String)
error("kind field in TerminalNode must be one of these strings: \"variable\", \"constant\", \"ephemeralConstant\" or \"terminalFunction\"")
end
if kind == "variable"
name = get(nodeDict, "name", false)
if name == false
error("name field is not set in TerminalNode of kind variable")
else
if !(typeof(name) <: String)
error("name field ($name) in TerminalNode of kind variable '$name' must be a string")
end
end
type = get(nodeDict, "type", false)
if type == false
error("TerminalNode of kind variable '$name' has no type specified")
end
aux = 0
try
aux = eval(Meta.parse(type))
catch e
error("The following type: '$type' in TerminalNode: '$name' is not defined")
end
if typeof(aux) <: DataType
type = aux
else
error("The following type: '$type' in TerminalNode: '$name' is not a type")
end
terminalNode = VariableNode(name, type)
elseif kind == "constant"
value = get(nodeDict, "value", nothing)
if value == nothing
error("TerminalNode of kind constant has no value")
end
try
aux = Meta.parse(value)
value = eval(aux)
catch e
# Empty
end
terminalNode = ConstantNode(value)
elseif kind == "ephemeralConstant"
func = get(nodeDict, "function", false)
if func == false
error("TerminalNode of kind ephemeralConstant has no function")
else
aux = 0
try
aux = eval(Meta.parse(func))
catch e
error("The following function: '$func' in TerminalNode of kind ephemeralConstant is not defined")
end
if typeof(aux) <: Function
func = aux
else
error("The following function: '$func' TerminalNode of kind ephemeralConstant is not a function")
end
end
varArgs = get(nodeDict, "arguments", [])
if !(typeof(varArgs) <: Array)
aux = Array{Any}(undef, 1)
aux[1] = varArgs
varArgs = aux
end
for i=1:length(varArgs)
try
arg = Meta.parse(varArgs[i])
varArgs[i] = eval(arg)
catch e
# Empty
end
end
terminalNode = ConstantNode(func, varArgs)
elseif kind == "terminalFunction"
func = get(nodeDict, "function", false)
if func == false
error("TerminalNode of kind terminalFunction '$func' has no function")
else
aux = 0
try
aux = eval(Meta.parse(func))
catch e
error("The following function: '$func' in TerminalNode of kind terminalFunction is not defined")
end
if typeof(aux) <: Function
func = aux
else
error("The following function: '$func' TerminalNode of kind terminalFunction is not a function")
end
end
terminalNode = NoArgsFunctionNode(func)
else
error("kind field of TerminalNode not supported: '$kind'")
end
return terminalNode
end # function
"""
createNodes(jsonFile::String, verbose::Bool=true)
Parses a node set configuration file that contains the information about the
nodes of a Genetic Programming problem.
"""
function createNodes(jsonFile::String)
if !isfile(jsonFile)
error("File $jsonFile does not exist in working directory")
end
file=open(jsonFile)
dictionary = JSON.parse(file)
close(file)
if get(dictionary, "FunctionNodes", false) == false
error("Nodes configuration file '$jsonFile' must have function nodes")
end
if get(dictionary, "TerminalNodes", false) == false
error("Nodes configuration file '$jsonFile' must have terminal nodes")
end
nFunctions = length(dictionary["FunctionNodes"])
nTerminals = length(dictionary["TerminalNodes"])
if nFunctions == 0
error("Nodes configuration file '$jsonFile' must have function nodes")
end
if nTerminals == 0
error("Nodes configuration file '$jsonFile' must have terminal nodes")
end
functionSet = Array{FunctionNode}(undef, nFunctions)
terminalSet = Array{TerminalNode}(undef, nTerminals)
for i=1:nFunctions
functionSet[i] = parseFunctionNode(dictionary["FunctionNodes"][i]["Node"])
end
for i=1:nTerminals
terminalSet[i] = parseTerminalNode(dictionary["TerminalNodes"][i]["Node"])
end
return functionSet, terminalSet
end # function
| [
37811,
198,
220,
220,
220,
21136,
22203,
19667,
7,
17440,
35,
713,
3712,
35,
713,
8,
198,
198,
47,
945,
274,
257,
685,
63,
22203,
19667,
63,
16151,
31,
5420,
8,
422,
257,
10139,
900,
8398,
2393,
13,
198,
37811,
198,
8818,
21136,
2... | 2.331475 | 3,587 |
export DepthMap
import ImageView
# NOTE(review): `type` is Julia 0.x syntax for a mutable struct; this file
# predates Julia 1.0 and is kept as-is.
type DepthMap
  camera :: M34                 # presumably a 3x4 camera projection matrix — confirm M34
  depth :: Array{Float32, 2}    # per-pixel estimated depth
  nxcorr :: Array{Float32, 2}   # per-pixel best normalized cross-correlation score
end
function DepthMap(view, nbrs, voi, w = 3)
cam = view.camera
im = view.image
(nc, nx, ny) = size(im)
mn = LibAminda.mean_and_inverse_deviation(im, w)
# determine depth range, resolution
bnds = bounds(cam, voi)
near = bnds[1][3]
far = bnds[2][3]
dz = near * camera_resolution(cam)
nz = ceil(Int, (far - near) / dz)
dz = (far - near) / nz
max_nxcorr = LibAminda.fill(nx, ny, -1.0)
depth = LibAminda.fill(nx, ny, near)
for k in 1:nz
z = near + dz*(k-0.5)
nxcorr = LibAminda.fill(nx, ny, -1.0)
for nbr in nbrs
cam2 = nbr.camera
hom = Array(homography(cam, cam2, z))
im2 = LibAminda.map_to_plane(nbr.image, hom, nx, ny)
mn2 = LibAminda.mean_and_inverse_deviation(im2, w)
nxc = LibAminda.normalized_cross_correlation(im, mn, im2, mn2, w)
nxcorr = LibAminda.maximum(nxcorr, nxc)
end
LibAminda.update_depth(nxcorr, max_nxcorr, depth, z)
end
return DepthMap(cam, depth, max_nxcorr)
end
| [
39344,
36350,
13912,
198,
198,
11748,
7412,
7680,
198,
198,
4906,
36350,
13912,
198,
220,
4676,
7904,
337,
2682,
198,
220,
6795,
7904,
15690,
90,
43879,
2624,
11,
362,
92,
198,
220,
299,
87,
10215,
81,
7904,
15690,
90,
43879,
2624,
11... | 2.091429 | 525 |
# This file is a part of Julia. License is MIT: https://julialang.org/license
module REPLCompletions
export completions, shell_completions, bslash_completions, completion_text
using Base.Meta
using Base: propertynames, something
abstract type Completion end
struct KeywordCompletion <: Completion
keyword::String
end
struct PathCompletion <: Completion
path::String
end
struct ModuleCompletion <: Completion
parent::Module
mod::String
end
struct PackageCompletion <: Completion
package::String
end
struct PropertyCompletion <: Completion
value
property::Symbol
end
struct FieldCompletion <: Completion
typ::DataType
field::Symbol
end
struct MethodCompletion <: Completion
func
input_types::Type
method::Method
end
struct BslashCompletion <: Completion
bslash::String
end
struct ShellCompletion <: Completion
text::String
end
struct DictCompletion <: Completion
dict::AbstractDict
key::String
end
# interface definition
function Base.getproperty(c::Completion, name::Symbol)
if name === :keyword
return getfield(c, :keyword)::String
elseif name === :path
return getfield(c, :path)::String
elseif name === :parent
return getfield(c, :parent)::Module
elseif name === :mod
return getfield(c, :mod)::String
elseif name === :package
return getfield(c, :package)::String
elseif name === :property
return getfield(c, :property)::Symbol
elseif name === :field
return getfield(c, :field)::Symbol
elseif name === :method
return getfield(c, :method)::Method
elseif name === :bslash
return getfield(c, :bslash)::String
elseif name === :text
return getfield(c, :text)::String
elseif name === :key
return getfield(c, :key)::String
end
return getfield(c, name)
end
_completion_text(c::KeywordCompletion) = c.keyword
_completion_text(c::PathCompletion) = c.path
_completion_text(c::ModuleCompletion) = c.mod
_completion_text(c::PackageCompletion) = c.package
_completion_text(c::PropertyCompletion) = string(c.property)
_completion_text(c::FieldCompletion) = string(c.field)
_completion_text(c::MethodCompletion) = sprint(io -> show(io, c.method))
_completion_text(c::BslashCompletion) = c.bslash
_completion_text(c::ShellCompletion) = c.text
_completion_text(c::DictCompletion) = c.key
completion_text(c) = _completion_text(c)::String
const Completions = Tuple{Vector{Completion}, UnitRange{Int}, Bool}
function completes_global(x, name)
return startswith(x, name) && !('#' in x)
end
function appendmacro!(syms, macros, needle, endchar)
for s in macros
if endswith(s, needle)
from = nextind(s, firstindex(s))
to = prevind(s, sizeof(s)-sizeof(needle)+1)
push!(syms, s[from:to]*endchar)
end
end
end
function filtered_mod_names(ffunc::Function, mod::Module, name::AbstractString, all::Bool = false, imported::Bool = false)
ssyms = names(mod, all = all, imported = imported)
filter!(ffunc, ssyms)
syms = String[string(s) for s in ssyms]
macros = filter(x -> startswith(x, "@" * name), syms)
appendmacro!(syms, macros, "_str", "\"")
appendmacro!(syms, macros, "_cmd", "`")
filter!(x->completes_global(x, name), syms)
return [ModuleCompletion(mod, sym) for sym in syms]
end
# REPL Symbol Completions
function complete_symbol(sym::String, ffunc, context_module::Module=Main)
mod = context_module
name = sym
lookup_module = true
t = Union{}
val = nothing
if something(findlast(in(non_identifier_chars), sym), 0) < something(findlast(isequal('.'), sym), 0)
# Find module
lookup_name, name = rsplit(sym, ".", limit=2)
ex = Meta.parse(lookup_name, raise=false, depwarn=false)
b, found = get_value(ex, context_module)
if found
val = b
if isa(b, Module)
mod = b
lookup_module = true
elseif Base.isstructtype(typeof(b))
lookup_module = false
t = typeof(b)
end
else # If the value is not found using get_value, the expression contain an advanced expression
lookup_module = false
t, found = get_type(ex, context_module)
end
found || return Completion[]
end
suggestions = Completion[]
if lookup_module
# We will exclude the results that the user does not want, as well
# as excluding Main.Main.Main, etc., because that's most likely not what
# the user wants
p = let mod=mod, modname=nameof(mod)
s->(!Base.isdeprecated(mod, s) && s != modname && ffunc(mod, s)::Bool)
end
# Looking for a binding in a module
if mod == context_module
# Also look in modules we got through `using`
mods = ccall(:jl_module_usings, Any, (Any,), context_module)::Vector
for m in mods
append!(suggestions, filtered_mod_names(p, m::Module, name))
end
append!(suggestions, filtered_mod_names(p, mod, name, true, true))
else
append!(suggestions, filtered_mod_names(p, mod, name, true, false))
end
elseif val !== nothing # looking for a property of an instance
for property in propertynames(val, false)
# TODO: support integer arguments (#36872)
if property isa Symbol && startswith(string(property), name)
push!(suggestions, PropertyCompletion(val, property))
end
end
else
# Looking for a member of a type
if t isa DataType && t != Any
# Check for cases like Type{typeof(+)}
if t isa DataType && t.name === Base._TYPE_NAME
t = typeof(t.parameters[1])
end
# Only look for fields if this is a concrete type
if isconcretetype(t)
fields = fieldnames(t)
for field in fields
s = string(field)
if startswith(s, name)
push!(suggestions, FieldCompletion(t, field))
end
end
end
end
end
suggestions
end
const sorted_keywords = [
"abstract type", "baremodule", "begin", "break", "catch", "ccall",
"const", "continue", "do", "else", "elseif", "end", "export", "false",
"finally", "for", "function", "global", "if", "import",
"let", "local", "macro", "module", "mutable struct",
"primitive type", "quote", "return", "struct",
"true", "try", "using", "while"]
function complete_keyword(s::Union{String,SubString{String}})
r = searchsorted(sorted_keywords, s)
i = first(r)
n = length(sorted_keywords)
while i <= n && startswith(sorted_keywords[i],s)
r = first(r):i
i += 1
end
Completion[KeywordCompletion(kw) for kw in sorted_keywords[r]]
end
function complete_path(path::AbstractString, pos::Int; use_envpath=false, shell_escape=false)
if Base.Sys.isunix() && occursin(r"^~(?:/|$)", path)
# if the path is just "~", don't consider the expanded username as a prefix
if path == "~"
dir, prefix = homedir(), ""
else
dir, prefix = splitdir(homedir() * path[2:end])
end
else
dir, prefix = splitdir(path)
end
local files
try
if isempty(dir)
files = readdir()
elseif isdir(dir)
files = readdir(dir)
else
return Completion[], 0:-1, false
end
catch
return Completion[], 0:-1, false
end
matches = Set{String}()
for file in files
if startswith(file, prefix)
id = try isdir(joinpath(dir, file)) catch; false end
# joinpath is not used because windows needs to complete with double-backslash
push!(matches, id ? file * (@static Sys.iswindows() ? "\\\\" : "/") : file)
end
end
if use_envpath && length(dir) == 0
# Look for files in PATH as well
local pathdirs = split(ENV["PATH"], @static Sys.iswindows() ? ";" : ":")
for pathdir in pathdirs
local actualpath
try
actualpath = realpath(pathdir)
catch
# Bash doesn't expect every folder in PATH to exist, so neither shall we
continue
end
if actualpath != pathdir && in(actualpath,pathdirs)
# Remove paths which (after resolving links) are in the env path twice.
# Many distros eg. point /bin to /usr/bin but have both in the env path.
continue
end
local filesinpath
try
filesinpath = readdir(pathdir)
catch e
# Bash allows dirs in PATH that can't be read, so we should as well.
if isa(e, Base.IOError) || isa(e, Base.ArgumentError)
continue
else
# We only handle IOError and ArgumentError here
rethrow()
end
end
for file in filesinpath
# In a perfect world, we would filter on whether the file is executable
# here, or even on whether the current user can execute the file in question.
if startswith(file, prefix) && isfile(joinpath(pathdir, file))
push!(matches, file)
end
end
end
end
matchList = Completion[PathCompletion(shell_escape ? replace(s, r"\s" => s"\\\0") : s) for s in matches]
startpos = pos - lastindex(prefix) + 1 - count(isequal(' '), prefix)
# The pos - lastindex(prefix) + 1 is correct due to `lastindex(prefix)-lastindex(prefix)==0`,
# hence we need to add one to get the first index. This is also correct when considering
# pos, because pos is the `lastindex` a larger string which `endswith(path)==true`.
return matchList, startpos:pos, !isempty(matchList)
end
function complete_expanduser(path::AbstractString, r)
expanded = expanduser(path)
return Completion[PathCompletion(expanded)], r, path != expanded
end
# Determines whether method_complete should be tried. It should only be done if
# the string endswiths ',' or '(' when disregarding whitespace_chars
function should_method_complete(s::AbstractString)
method_complete = false
for c in reverse(s)
if c in [',', '(']
method_complete = true
break
elseif !(c in whitespace_chars)
method_complete = false
break
end
end
method_complete
end
# Returns a range that includes the method name in front of the first non
# closed start brace from the end of the string.
function find_start_brace(s::AbstractString; c_start='(', c_end=')')
braces = 0
r = reverse(s)
i = firstindex(r)
in_single_quotes = false
in_double_quotes = false
in_back_ticks = false
while i <= ncodeunits(r)
c, i = iterate(r, i)
if !in_single_quotes && !in_double_quotes && !in_back_ticks
if c == c_start
braces += 1
elseif c == c_end
braces -= 1
elseif c == '\''
in_single_quotes = true
elseif c == '"'
in_double_quotes = true
elseif c == '`'
in_back_ticks = true
end
else
if !in_back_ticks && !in_double_quotes &&
c == '\'' && i <= ncodeunits(r) && iterate(r, i)[1] != '\\'
in_single_quotes = !in_single_quotes
elseif !in_back_ticks && !in_single_quotes &&
c == '"' && i <= ncodeunits(r) && iterate(r, i)[1] != '\\'
in_double_quotes = !in_double_quotes
elseif !in_single_quotes && !in_double_quotes &&
c == '`' && i <= ncodeunits(r) && iterate(r, i)[1] != '\\'
in_back_ticks = !in_back_ticks
end
end
braces == 1 && break
end
braces != 1 && return 0:-1, -1
method_name_end = reverseind(s, i)
startind = nextind(s, something(findprev(in(non_identifier_chars), s, method_name_end), 0))::Int
return (startind:lastindex(s), method_name_end)
end
# Returns the value in a expression if sym is defined in current namespace fn.
# This method is used to iterate to the value of a expression like:
# :(REPL.REPLCompletions.whitespace_chars) a `dump` of this expression
# will show it consist of Expr, QuoteNode's and Symbol's which all needs to
# be handled differently to iterate down to get the value of whitespace_chars.
function get_value(sym::Expr, fn)
sym.head !== :. && return (nothing, false)
for ex in sym.args
fn, found = get_value(ex, fn)
!found && return (nothing, false)
end
return (fn, true)
end
get_value(sym::Symbol, fn) = isdefined(fn, sym) ? (getfield(fn, sym), true) : (nothing, false)
get_value(sym::QuoteNode, fn) = isdefined(fn, sym.value) ? (getfield(fn, sym.value), true) : (nothing, false)
get_value(sym, fn) = (sym, true)
# Return the value of a getfield call expression
function get_value_getfield(ex::Expr, fn)
# Example :((top(getfield))(Base,:max))
val, found = get_value_getfield(ex.args[2],fn) #Look up Base in Main and returns the module
(found && length(ex.args) >= 3) || return (nothing, false)
return get_value_getfield(ex.args[3], val) #Look up max in Base and returns the function if found.
end
get_value_getfield(sym, fn) = get_value(sym, fn)
# Determines the return type with Base.return_types of a function call using the type information of the arguments.
function get_type_call(expr::Expr)
f_name = expr.args[1]
# The if statement should find the f function. How f is found depends on how f is referenced
if isa(f_name, GlobalRef) && isconst(f_name.mod,f_name.name) && isdefined(f_name.mod,f_name.name)
ft = typeof(eval(f_name))
found = true
else
ft, found = get_type(f_name, Main)
end
found || return (Any, false) # If the function f is not found return Any.
args = Any[]
for ex in expr.args[2:end] # Find the type of the function arguments
typ, found = get_type(ex, Main)
found ? push!(args, typ) : push!(args, Any)
end
# use _methods_by_ftype as the function is supplied as a type
world = Base.get_world_counter()
matches = Base._methods_by_ftype(Tuple{ft, args...}, -1, world)
length(matches) == 1 || return (Any, false)
match = first(matches)
# Typeinference
interp = Core.Compiler.NativeInterpreter()
return_type = Core.Compiler.typeinf_type(interp, match.method, match.spec_types, match.sparams)
return_type === nothing && return (Any, false)
return (return_type, true)
end
# Returns the return type. example: get_type(:(Base.strip("", ' ')), Main) returns (String, true)
function try_get_type(sym::Expr, fn::Module)
val, found = get_value(sym, fn)
found && return Core.Typeof(val), found
if sym.head === :call
# getfield call is special cased as the evaluation of getfield provides good type information,
# is inexpensive and it is also performed in the complete_symbol function.
a1 = sym.args[1]
if isa(a1,GlobalRef) && isconst(a1.mod,a1.name) && isdefined(a1.mod,a1.name) &&
eval(a1) === Core.getfield
val, found = get_value_getfield(sym, Main)
return found ? Core.Typeof(val) : Any, found
end
return get_type_call(sym)
elseif sym.head === :thunk
thk = sym.args[1]
rt = ccall(:jl_infer_thunk, Any, (Any, Any), thk::Core.CodeInfo, fn)
rt !== Any && return (rt, true)
elseif sym.head === :ref
# some simple cases of `expand`
return try_get_type(Expr(:call, GlobalRef(Base, :getindex), sym.args...), fn)
elseif sym.head === :. && sym.args[2] isa QuoteNode # second check catches broadcasting
return try_get_type(Expr(:call, GlobalRef(Core, :getfield), sym.args...), fn)
end
return (Any, false)
end
try_get_type(other, fn::Module) = get_type(other, fn)
function get_type(sym::Expr, fn::Module)
# try to analyze nests of calls. if this fails, try using the expanded form.
val, found = try_get_type(sym, fn)
found && return val, found
return try_get_type(Meta.lower(fn, sym), fn)
end
function get_type(sym, fn::Module)
val, found = get_value(sym, fn)
return found ? Core.Typeof(val) : Any, found
end
# Method completion on function call expression that look like :(max(1))
function complete_methods(ex_org::Expr, context_module::Module=Main)
args_ex = Any[]
func, found = get_value(ex_org.args[1], context_module)::Tuple{Any,Bool}
!found && return Completion[]
funargs = ex_org.args[2:end]
# handle broadcasting, but only handle number of arguments instead of
# argument types
if ex_org.head === :. && ex_org.args[2] isa Expr
for _ in (ex_org.args[2]::Expr).args
push!(args_ex, Any)
end
else
for ex in funargs
val, found = get_type(ex, context_module)
push!(args_ex, val)
end
end
out = Completion[]
t_in = Tuple{Core.Typeof(func), args_ex...} # Input types
na = length(args_ex)+1
ml = methods(func)
for method in ml
ms = method.sig
# Check if the method's type signature intersects the input types
if typeintersect(Base.rewrap_unionall(Tuple{(Base.unwrap_unionall(ms)::DataType).parameters[1 : min(na, end)]...}, ms), t_in) !== Union{}
push!(out, MethodCompletion(func, t_in, method))
end
end
return out
end
include("latex_symbols.jl")
include("emoji_symbols.jl")
const non_identifier_chars = [" \t\n\r\"\\'`\$><=:;|&{}()[],+-*/?%^~"...]
const whitespace_chars = [" \t\n\r"...]
# "\"'`"... is added to whitespace_chars as non of the bslash_completions
# characters contain any of these characters. It prohibits the
# bslash_completions function to try and complete on escaped characters in strings
const bslash_separators = [whitespace_chars..., "\"'`"...]
# Aux function to detect whether we're right after a
# using or import keyword
function afterusing(string::String, startpos::Int)
(isempty(string) || startpos == 0) && return false
str = string[1:prevind(string,startpos)]
isempty(str) && return false
rstr = reverse(str)
r = findfirst(r"\s(gnisu|tropmi)\b", rstr)
r === nothing && return false
fr = reverseind(str, last(r))
return occursin(r"^\b(using|import)\s*((\w+[.])*\w+\s*,\s*)*$", str[fr:end])
end
function bslash_completions(string::String, pos::Int)
slashpos = something(findprev(isequal('\\'), string, pos), 0)
if (something(findprev(in(bslash_separators), string, pos), 0) < slashpos &&
!(1 < slashpos && (string[prevind(string, slashpos)]=='\\')))
# latex / emoji symbol substitution
s = string[slashpos:pos]
latex = get(latex_symbols, s, "")
if !isempty(latex) # complete an exact match
return (true, (Completion[BslashCompletion(latex)], slashpos:pos, true))
end
emoji = get(emoji_symbols, s, "")
if !isempty(emoji)
return (true, (Completion[BslashCompletion(emoji)], slashpos:pos, true))
end
# return possible matches; these cannot be mixed with regular
# Julian completions as only latex / emoji symbols contain the leading \
if startswith(s, "\\:") # emoji
namelist = Iterators.filter(k -> startswith(k, s), keys(emoji_symbols))
else # latex
namelist = Iterators.filter(k -> startswith(k, s), keys(latex_symbols))
end
return (true, (Completion[BslashCompletion(name) for name in sort!(collect(namelist))], slashpos:pos, true))
end
return (false, (Completion[], 0:-1, false))
end
function dict_identifier_key(str::String, tag::Symbol, context_module::Module = Main)
if tag === :string
str_close = str*"\""
elseif tag === :cmd
str_close = str*"`"
else
str_close = str
end
frange, end_of_identifier = find_start_brace(str_close, c_start='[', c_end=']')
isempty(frange) && return (nothing, nothing, nothing)
obj = context_module
for name in split(str[frange[1]:end_of_identifier], '.')
Base.isidentifier(name) || return (nothing, nothing, nothing)
sym = Symbol(name)
isdefined(obj, sym) || return (nothing, nothing, nothing)
obj = getfield(obj, sym)
end
(isa(obj, AbstractDict) && length(obj)::Int < 1_000_000) || return (nothing, nothing, nothing)
begin_of_key = something(findnext(!isspace, str, nextind(str, end_of_identifier) + 1), # +1 for [
lastindex(str)+1)
return (obj::AbstractDict, str[begin_of_key:end], begin_of_key)
end
# This needs to be a separate non-inlined function, see #19441
@noinline function find_dict_matches(identifier::AbstractDict, partial_key)
matches = String[]
for key in keys(identifier)
rkey = repr(key)
startswith(rkey,partial_key) && push!(matches,rkey)
end
return matches
end
function project_deps_get_completion_candidates(pkgstarts::String, project_file::String)
loading_candidates = String[]
d = Base.parsed_toml(project_file)
pkg = get(d, "name", nothing)::Union{String, Nothing}
if pkg !== nothing && startswith(pkg, pkgstarts)
push!(loading_candidates, pkg)
end
deps = get(d, "deps", nothing)::Union{Dict{String, Any}, Nothing}
if deps !== nothing
for (pkg, _) in deps
startswith(pkg, pkgstarts) && push!(loading_candidates, pkg)
end
end
return Completion[PackageCompletion(name) for name in loading_candidates]
end
function completions(string::String, pos::Int, context_module::Module=Main)
# First parse everything up to the current position
partial = string[1:pos]
inc_tag = Base.incomplete_tag(Meta.parse(partial, raise=false, depwarn=false))
# if completing a key in a Dict
identifier, partial_key, loc = dict_identifier_key(partial, inc_tag, context_module)
if identifier !== nothing
matches = find_dict_matches(identifier, partial_key)
length(matches)==1 && (lastindex(string) <= pos || string[nextind(string,pos)] != ']') && (matches[1]*=']')
length(matches)>0 && return Completion[DictCompletion(identifier, match) for match in sort!(matches)], loc::Int:pos, true
end
# otherwise...
if inc_tag in [:cmd, :string]
m = match(r"[\t\n\r\"`><=*?|]| (?!\\)", reverse(partial))
startpos = nextind(partial, reverseind(partial, m.offset))
r = startpos:pos
expanded = complete_expanduser(replace(string[r], r"\\ " => " "), r)
expanded[3] && return expanded # If user expansion available, return it
paths, r, success = complete_path(replace(string[r], r"\\ " => " "), pos)
if inc_tag === :string &&
length(paths) == 1 && # Only close if there's a single choice,
!isdir(expanduser(replace(string[startpos:prevind(string, first(r))] * paths[1].path,
r"\\ " => " "))) && # except if it's a directory
(lastindex(string) <= pos ||
string[nextind(string,pos)] != '"') # or there's already a " at the cursor.
paths[1] = PathCompletion(paths[1].path * "\"")
end
#Latex symbols can be completed for strings
(success || inc_tag==:cmd) && return sort!(paths, by=p->p.path), r, success
end
ok, ret = bslash_completions(string, pos)
ok && return ret
# Make sure that only bslash_completions is working on strings
inc_tag==:string && return Completion[], 0:-1, false
if inc_tag === :other && should_method_complete(partial)
frange, method_name_end = find_start_brace(partial)
# strip preceding ! operator
s = replace(partial[frange], r"\!+([^=\(]+)" => s"\1")
ex = Meta.parse(s * ")", raise=false, depwarn=false)
if isa(ex, Expr)
if ex.head === :call
return complete_methods(ex, context_module), first(frange):method_name_end, false
elseif ex.head === :. && ex.args[2] isa Expr && (ex.args[2]::Expr).head === :tuple
return complete_methods(ex, context_module), first(frange):(method_name_end - 1), false
end
end
elseif inc_tag === :comment
return Completion[], 0:-1, false
end
dotpos = something(findprev(isequal('.'), string, pos), 0)
startpos = nextind(string, something(findprev(in(non_identifier_chars), string, pos), 0))
# strip preceding ! operator
if (m = match(r"^\!+", string[startpos:pos])) !== nothing
startpos += length(m.match)
end
ffunc = (mod,x)->true
suggestions = Completion[]
comp_keywords = true
if afterusing(string, startpos)
# We're right after using or import. Let's look only for packages
# and modules we can reach from here
# If there's no dot, we're in toplevel, so we should
# also search for packages
s = string[startpos:pos]
if dotpos <= startpos
for dir in Base.load_path()
if basename(dir) in Base.project_names && isfile(dir)
append!(suggestions, project_deps_get_completion_candidates(s, dir))
end
isdir(dir) || continue
for pname in readdir(dir)
if pname[1] != '.' && pname != "METADATA" &&
pname != "REQUIRE" && startswith(pname, s)
# Valid file paths are
# <Mod>.jl
# <Mod>/src/<Mod>.jl
# <Mod>.jl/src/<Mod>.jl
if isfile(joinpath(dir, pname))
endswith(pname, ".jl") && push!(suggestions,
PackageCompletion(pname[1:prevind(pname, end-2)]))
else
mod_name = if endswith(pname, ".jl")
pname[1:prevind(pname, end-2)]
else
pname
end
if isfile(joinpath(dir, pname, "src",
"$mod_name.jl"))
push!(suggestions, PackageCompletion(mod_name))
end
end
end
end
end
end
ffunc = (mod,x)->(Base.isbindingresolved(mod, x) && isdefined(mod, x) && isa(getfield(mod, x), Module))
comp_keywords = false
end
startpos == 0 && (pos = -1)
dotpos < startpos && (dotpos = startpos - 1)
s = string[startpos:pos]
comp_keywords && append!(suggestions, complete_keyword(s))
# The case where dot and start pos is equal could look like: "(""*"").d","". or CompletionFoo.test_y_array[1].y
# This case can be handled by finding the beginning of the expression. This is done below.
if dotpos == startpos
i = prevind(string, startpos)
while 0 < i
c = string[i]
if c in [')', ']']
if c==')'
c_start='('; c_end=')'
elseif c==']'
c_start='['; c_end=']'
end
frange, end_of_identifier = find_start_brace(string[1:prevind(string, i)], c_start=c_start, c_end=c_end)
startpos = first(frange)
i = prevind(string, startpos)
elseif c in ('\'', '\"', '\`')
s = "$c$c"*string[startpos:pos]
break
else
break
end
s = string[startpos:pos]
end
end
append!(suggestions, complete_symbol(s, ffunc, context_module))
return sort!(unique(suggestions), by=completion_text), (dotpos+1):pos, true
end
function shell_completions(string, pos)
# First parse everything up to the current position
scs = string[1:pos]
local args, last_parse
try
args, last_parse = Base.shell_parse(scs, true)::Tuple{Expr,UnitRange{Int}}
catch
return Completion[], 0:-1, false
end
ex = args.args[end]::Expr
# Now look at the last thing we parsed
isempty(ex.args) && return Completion[], 0:-1, false
arg = ex.args[end]
if all(s -> isa(s, AbstractString), ex.args)
arg = arg::AbstractString
# Treat this as a path
# As Base.shell_parse throws away trailing spaces (unless they are escaped),
# we need to special case here.
# If the last char was a space, but shell_parse ignored it search on "".
ignore_last_word = arg != " " && scs[end] == ' '
prefix = ignore_last_word ? "" : join(ex.args)
# Also try looking into the env path if the user wants to complete the first argument
use_envpath = !ignore_last_word && length(args.args) < 2
return complete_path(prefix, pos, use_envpath=use_envpath, shell_escape=true)
elseif isexpr(arg, :incomplete) || isexpr(arg, :error)
partial = scs[last_parse]
ret, range = completions(partial, lastindex(partial))
range = range .+ (first(last_parse) - 1)
return ret, range, true
end
return Completion[], 0:-1, false
end
end # module
| [
2,
770,
2393,
318,
257,
636,
286,
22300,
13,
13789,
318,
17168,
25,
3740,
1378,
73,
377,
498,
648,
13,
2398,
14,
43085,
198,
198,
21412,
45285,
5377,
37069,
507,
198,
198,
39344,
1224,
45240,
11,
7582,
62,
785,
37069,
507,
11,
275,
... | 2.280064 | 13,047 |
# This file is a part of JuliaFEM.
# License is MIT: see https://github.com/JuliaFEM/JuliaFEM.jl/blob/master/LICENSE.md
using JuliaFEM
using JuliaFEM.Preprocess
using JuliaFEM.Testing
function JuliaFEM.get_mesh(::Type{Val{Symbol("two elements 1.0x0.5 with 0.1 gap in y direction")}})
mesh = Mesh()
add_node!(mesh, 1, [0.0, 0.0])
add_node!(mesh, 2, [1.0, 0.0])
add_node!(mesh, 3, [1.0, 0.5])
add_node!(mesh, 4, [0.0, 0.5])
add_node!(mesh, 5, [0.0, 0.6])
add_node!(mesh, 6, [1.0, 0.6])
add_node!(mesh, 7, [1.0, 1.1])
add_node!(mesh, 8, [0.0, 1.1])
add_element!(mesh, 1, :Quad4, [1, 2, 3, 4])
add_element!(mesh, 2, :Quad4, [5, 6, 7, 8])
add_element!(mesh, 3, :Seg2, [1, 2])
add_element!(mesh, 4, :Seg2, [7, 8])
add_element!(mesh, 5, :Seg2, [4, 3])
add_element!(mesh, 6, :Seg2, [6, 5])
add_element_to_element_set!(mesh, :LOWER, 1)
add_element_to_element_set!(mesh, :UPPER, 2)
add_element_to_element_set!(mesh, :LOWER_BOTTOM, 3)
add_element_to_element_set!(mesh, :UPPER_TOP, 4)
add_element_to_element_set!(mesh, :LOWER_TOP, 5)
add_element_to_element_set!(mesh, :UPPER_BOTTOM, 6)
return mesh
end
function JuliaFEM.get_model(::Type{Val{Symbol("two element contact")}})
mesh = get_mesh("two elements 1.0x0.5 with 0.1 gap in y direction")
upper = Problem(Elasticity, "UPPER", 2)
upper.properties.formulation = :plane_stress
upper.elements = create_elements(mesh, "UPPER")
update!(upper.elements, "youngs modulus", 288.0)
update!(upper.elements, "poissons ratio", 1/3)
lower = Problem(Elasticity, "LOWER", 2)
lower.properties.formulation = :plane_stress
lower.elements = create_elements(mesh, "LOWER")
update!(lower.elements, "youngs modulus", 288.0)
update!(lower.elements, "poissons ratio", 1/3)
bc_upper = Problem(Dirichlet, "UPPER_TOP", 2, "displacement")
bc_upper.elements = create_elements(mesh, "UPPER_TOP")
#update!(bc_upper.elements, "displacement 1", -17/90)
#update!(bc_upper.elements, "displacement 1", -17/90)
update!(bc_upper.elements, "displacement 1", -0.2)
update!(bc_upper.elements, "displacement 2", -0.2)
bc_lower = Problem(Dirichlet, "LOWER_BOTTOM", 2, "displacement")
bc_lower.elements = create_elements(mesh, "LOWER_BOTTOM")
update!(bc_lower.elements, "displacement 1", 0.0)
update!(bc_lower.elements, "displacement 2", 0.0)
interface = Problem(Contact, "LOWER_TO_UPPER", 2, "displacement")
interface.properties.dimension = 1
interface_slave_elements = create_elements(mesh, "LOWER_TOP")
interface_master_elements = create_elements(mesh, "UPPER_BOTTOM")
update!(interface_slave_elements, "master elements", interface_master_elements)
interface.elements = [interface_master_elements; interface_slave_elements]
solver = Solver(Nonlinear)
push!(solver, upper, lower, bc_upper, bc_lower, interface)
return solver
end
@testset "test simple two element contact" begin
solver = get_model("two element contact")
solver()
contact = solver["LOWER_TO_UPPER"]
master = first(contact.elements)
slave = last(contact.elements)
u = master("displacement", [0.0], 0.0)
la = slave("reaction force", [0.0], 0.0)
info("u = $u, la = $la")
@test isapprox(u, [-0.2, -0.15])
@test isapprox(la, [0.0, -30.375])
# FIXME
end
| [
2,
770,
2393,
318,
257,
636,
286,
22300,
37,
3620,
13,
198,
2,
13789,
318,
17168,
25,
766,
3740,
1378,
12567,
13,
785,
14,
16980,
544,
37,
3620,
14,
16980,
544,
37,
3620,
13,
20362,
14,
2436,
672,
14,
9866,
14,
43,
2149,
24290,
... | 2.258367 | 1,494 |
using StatsBase
# Support some of the weighted statistics function in StatsBase
# NOTES:
# - Ambiguity errors are still possible for weights with overly specific methods (e.g., UnitWeights)
# - Ideally, when the weighted statistics is moved to Statistics.jl we can remove this entire file.
# https://github.com/JuliaLang/Statistics.jl/pull/2
function Statistics.mean(A::KeyedArray, wv::AbstractWeights; dims=:, kwargs...)
dims === Colon() && return mean(parent(A), wv; kwargs...)
numerical_dims = NamedDims.dim(A, dims)
data = mean(parent(A), wv; dims=numerical_dims, kwargs...)
new_keys = ntuple(d -> d in numerical_dims ? Base.OneTo(1) : axiskeys(A,d), ndims(A))
return KeyedArray(data, map(copy, new_keys))#, copy(A.meta))
end
# var and std are separate cause they don't use the dims keyword and we need to set corrected=true
for fun in [:var, :std]
@eval function Statistics.$fun(A::KeyedArray, wv::AbstractWeights; dims=:, corrected=true, kwargs...)
dims === Colon() && return $fun(parent(A), wv; kwargs...)
numerical_dims = NamedDims.dim(A, dims)
data = $fun(parent(A), wv, numerical_dims; corrected=corrected, kwargs...)
new_keys = ntuple(d -> d in numerical_dims ? Base.OneTo(1) : axiskeys(A,d), ndims(A))
return KeyedArray(data, map(copy, new_keys))#, copy(A.meta))
end
end
for fun in [:cov, :cor]
@eval function Statistics.$fun(A::KeyedMatrix, wv::AbstractWeights; dims=1, kwargs...)
d = NamedDims.dim(A, dims)
data = $fun(keyless_unname(A), wv, d; kwargs...)
L1 = dimnames(A, 3 - d)
data2 = hasnames(A) ? NamedDimsArray(data, (L1, L1)) : data
K1 = axiskeys(A, 3 - d)
KeyedArray(data2, (copy(K1), copy(K1)))
end
end
# scattermat is a StatsBase function and takes dims as a kwarg
function StatsBase.scattermat(A::KeyedMatrix, wv::AbstractWeights; dims=1, kwargs...)
d = NamedDims.dim(A, dims)
data = scattermat(keyless_unname(A), wv; dims=d, kwargs...)
L1 = dimnames(A, 3 - d)
data2 = hasnames(A) ? NamedDimsArray(data, (L1, L1)) : data
K1 = axiskeys(A, 3 - d)
KeyedArray(data2, (copy(K1), copy(K1)))
end
for fun in (:std, :var, :cov)
full_name = Symbol("mean_and_$fun")
@eval StatsBase.$full_name(A::KeyedMatrix, wv::Vararg{<:AbstractWeights}; dims=:, corrected::Bool=true, kwargs...) =
(
mean(A, wv...; dims=dims, kwargs...),
$fun(A, wv...; dims=dims, corrected=corrected, kwargs...)
)
end
# Since we get ambiguity errors with specific implementations we need to wrap each supported method
# A better approach might be to add `NamedDims` support to CovarianceEstimators.jl in the future.
using CovarianceEstimation

# Every concrete estimator type from CovarianceEstimation that needs a
# KeyedMatrix method for `cov(estimator, A, [weights]; dims)`.
estimators = [
    :SimpleCovariance,
    :LinearShrinkage,
    :DiagonalUnitVariance,
    :DiagonalCommonVariance,
    :DiagonalUnequalVariance,
    :CommonCovariance,
    :PerfectPositiveCorrelation,
    :ConstantCorrelation,
    :AnalyticalNonlinearShrinkage,
]
for estimator in estimators
    # Result is a square KeyedArray indexed on both axes by the keys of the
    # dimension that is NOT reduced (3 - d maps 1 <-> 2).
    @eval function Statistics.cov(ce::$estimator, A::KeyedMatrix, wv::Vararg{<:AbstractWeights}; dims=1, kwargs...)
        d = NamedDims.dim(A, dims)
        data = cov(ce, keyless_unname(A), wv...; dims=d, kwargs...)
        L1 = dimnames(A, 3 - d)
        data2 = hasnames(A) ? NamedDimsArray(data, (L1, L1)) : data
        K1 = axiskeys(A, 3 - d)
        KeyedArray(data2, (copy(K1), copy(K1)))
    end
end
| [
198,
3500,
20595,
14881,
198,
198,
2,
7929,
617,
286,
262,
26356,
7869,
2163,
287,
20595,
14881,
198,
2,
5626,
1546,
25,
198,
2,
532,
12457,
328,
14834,
8563,
389,
991,
1744,
329,
19590,
351,
17698,
2176,
5050,
357,
68,
13,
70,
1539... | 2.387769 | 1,439 |
# Compute tracks from entity edge data.
using JLD, PyPlot
include("findtracks.jl")
include("findtrackgraph.jl")
# Load edge incidence matrix.
# Load the data file (path is relative to this script's directory).
file_dir = joinpath(Base.source_dir(),"../1EntityAnalysis/Entity.jld")
E = load(file_dir)["E"]
#E = loadassoc(file_dir)
# Convert entries to 0/1 incidence values.
E = logical(E)
# Set prefixes used to select entity-type rows/columns.
p = StartsWith("PERSON/,")
t = StartsWith("TIME/,")
x = StartsWith("LOCATION/,")
# Build entity tracks with routine.
A = findtracks(E,t,p,x)
# Track Graph
# NOTE(review): presumably G counts co-occurrences of entities along tracks —
# confirm against findtrackgraph.jl.
G = findtrackgraph(A)
# Show only entries with count above 5.
print(G > 5)
figure()
spy(G)
# Track graph pattern: restrict to persons that co-occur with the organization.
o = "ORGANIZATION/international monetary fund,"
p = StartsWith("PERSON/,")
Go = findtrackgraph(A[:,getcol(E[getrow(E[:,o]),p])]);
print((Go > 2) & ((Go ./ G) > 0.2)) | [
2,
3082,
1133,
8339,
422,
9312,
5743,
1366,
13,
198,
198,
3500,
449,
11163,
11,
9485,
43328,
198,
17256,
7203,
19796,
46074,
13,
20362,
4943,
198,
17256,
7203,
19796,
11659,
34960,
13,
20362,
4943,
198,
198,
2,
8778,
5743,
18349,
17593,... | 2.640741 | 270 |
<reponame>pourya-b/CIAOAlgorithms.jl
# Immutable problem description consumed by the SVRG iteration.
struct SVRG_basic_iterable{R<:Real,C<:RealOrComplex{R},Tx<:AbstractArray{C},Tf,Tg}
    F::Array{Tf}                # smooth terms f_i of the finite sum
    g::Tg                       # nonsmooth (proximable) term
    x0::Tx                      # initial point
    N::Int                      # of data points in the finite sum problem
    L::Maybe{Union{Array{R},R}} # Lipschitz moduli of nabla f_i
    μ::Maybe{Union{Array{R},R}} # convexity moduli of the gradients
    γ::Maybe{R}                 # stepsize (nothing -> derived from L and μ)
    m::Maybe{Int}               # number of inner loop updates (nothing -> 2N)
    plus::Bool                  # for SVRG++ variant
end
# Mutable algorithm state carried between iterations.
mutable struct SVRG_basic_state{R<:Real,Tx}
    γ::R            # stepsize
    m::Int          # number of inner loop updates
    av::Tx          # the running average (full gradient at the snapshot)
    z::Tx           # accumulator for the sum of the inner iterates w
    z_full::Tx      # the outer loop argument
    w::Tx           # the inner loop variable
    ind::Array{Int} # running idx set
    # some extra placeholders
    ∇f_temp::Tx     # placeholder for gradients
    temp::Tx        # scratch buffer for the proximal-gradient step
end
# Convenience constructor: the two scratch buffers (∇f_temp, temp) are
# initialized as independent copies of the running average `av`.
SVRG_basic_state(γ::R, m, av::Tx, z::Tx, z_full::Tx, w::Tx, ind) where {R,Tx} =
    SVRG_basic_state{R,Tx}(γ, m, av, z, z_full, w, ind, copy(av), copy(av))
"""
First SVRG iteration: determine (or validate) the stepsize, compute the full
gradient at `x0`, and build the initial `SVRG_basic_state`.

Returns `nothing` — ending the iteration — when no valid stepsize can be
determined (SVRG++ without a user stepsize, or missing L/μ moduli).
"""
function Base.iterate(iter::SVRG_basic_iterable{R}) where {R}
    N = iter.N
    ind = collect(1:N)                       # indices sampled in the inner loop
    # Default inner-loop length is 2N (fix: the old ternary nested redundant
    # assignments, `m = cond ? m = ... : m = ...`).
    m = iter.m === nothing ? 2 * N : iter.m
    # updating the stepsize
    if iter.γ === nothing
        if iter.plus
            # SVRG++ has no default stepsize rule here.
            @warn "provide a stepsize γ"
            return nothing
        else
            if iter.L === nothing || iter.μ === nothing
                @warn "smoothness or convexity parameter absent"
                return nothing
            else
                L_M = maximum(iter.L)        # worst-case Lipschitz modulus
                μ_M = maximum(iter.μ)        # worst-case convexity modulus
                γ = 1 / (10 * L_M)
                # condition Theorem 3.1: convergence requires rho < 1
                rho = (1 + 4 * L_M * γ^2 * μ_M * (N + 1)) / (μ_M * γ * N * (1 - 4L_M * γ))
                if rho >= 1
                    @warn "convergence condition violated...provide a stepsize!"
                end
            end
        end
    else
        γ = iter.γ # provided γ
    end
    # Full gradient at the initial point: av = (1/N) Σ_i ∇f_i(x0).
    av = zero(iter.x0)
    for i = 1:N
        # `_` is the idiomatic throwaway (the old `~` silently shadowed
        # Base's bitwise-not in this scope).
        ∇f, _ = gradient(iter.F[i], iter.x0)
        ∇f ./= N
        av .+= ∇f
    end
    z_full = copy(iter.x0)   # outer-loop snapshot point
    z = zero(av)             # accumulator for inner iterates
    w = copy(iter.x0)        # inner-loop variable
    state = SVRG_basic_state(γ, m, av, z, z_full, w, ind)
    # (removed a leftover no-op debug `print()` here)
    return state, state
end
# One outer SVRG iteration: m stochastic proximal-gradient inner steps,
# then a snapshot update followed by a fresh full-gradient computation.
function Base.iterate(iter::SVRG_basic_iterable{R}, state::SVRG_basic_state{R}) where {R}
    # The inner cycle: sample m indices uniformly with replacement.
    for i in rand(state.ind, state.m)
        # Builds temp = w - γ(∇f_i(w) - ∇f_i(z_full) + av), the
        # variance-reduced gradient step, entirely in-place.
        gradient!(state.temp, iter.F[i], state.z_full)
        gradient!(state.∇f_temp, iter.F[i], state.w)
        state.temp .-= state.∇f_temp
        state.temp .-= state.av
        state.temp .*= state.γ
        state.temp .+= state.w
        CIAOAlgorithms.prox!(state.w, iter.g, state.temp, state.γ)
        state.z .+= state.w # keeping track of the sum of w's
    end
    # full update: the new snapshot is the last inner iterate
    state.z_full .= state.w #state.z ./ state.m
    # iter.plus || (state.w .= state.z_full) # only for basic SVRG
    state.z = zero(state.z) # for next iterate
    state.av .= state.z
    # Recompute the full gradient at the new snapshot: av = (1/N) Σ_i ∇f_i(z_full).
    for i = 1:iter.N
        gradient!(state.∇f_temp, iter.F[i], state.z_full)
        state.∇f_temp ./= iter.N
        state.av .+= state.∇f_temp
    end
    # iter.plus && (state.m *= 2) # only for SVRG++
    return state, state
end
# Current solution estimate: the latest outer-loop snapshot point.
function solution(state::SVRG_basic_state)
    return state.z_full
end
| [
27,
7856,
261,
480,
29,
48681,
3972,
12,
65,
14,
49732,
46,
2348,
7727,
907,
13,
20362,
198,
7249,
311,
13024,
38,
62,
35487,
62,
2676,
540,
90,
49,
27,
25,
15633,
11,
34,
27,
25,
15633,
5574,
5377,
11141,
90,
49,
5512,
46047,
2... | 1.915807 | 1,841 |
<reponame>ozmaden/GNSSBenchmarks.jl
# CuArray{ComplexF32}: single-antenna (vector) signal.
function gpu_downconvert!(
    downconverted_signal::CuVector{ComplexF32},
    carrier::CuVector{ComplexF32},
    signal::CuVector{ComplexF32},
    start_sample::Integer,
    num_samples_left::Integer
)
    # Multiply by the conjugate carrier over the active window only.
    # @views avoids slice copies; @. fuses everything into one broadcast.
    @. @views downconverted_signal[start_sample:num_samples_left + start_sample - 1] =
        signal[start_sample:num_samples_left + start_sample - 1] *
        conj(carrier[start_sample:num_samples_left + start_sample - 1])
end
# CuArray{ComplexF32} Matrix (multi-antenna) variant.
# NOTE(review): unlike the vector method above, this broadcasts over the whole
# arrays and ignores start_sample/num_samples_left — confirm this is intended.
function gpu_downconvert!(
    downconverted_signal::CuMatrix{ComplexF32},
    carrier::CuVector{ComplexF32},
    signal::CuMatrix{ComplexF32},
    start_sample::Integer,
    num_samples_left::Integer
)
    # The carrier vector broadcasts across the matrix columns.
    @. downconverted_signal = signal * conj(carrier)
end
# StructArray{CuArray} Vector: real/imag parts stored in separate arrays.
# The complex multiply by conj(carrier) is expanded into real arithmetic:
# (sr + i·si)(cr - i·ci) = (sr·cr + si·ci) + i(si·cr - sr·ci).
# NOTE(review): processes the whole arrays, ignoring start_sample and
# num_samples_left — the ComplexF32 vector method honors the window; confirm.
function gpu_downconvert!(
    downconverted_signal_re::CuVector{Float32},
    downconverted_signal_im::CuVector{Float32},
    carrier_re::CuVector{Float32},
    carrier_im::CuVector{Float32},
    signal_re::CuVector{Float32},
    signal_im::CuVector{Float32},
    start_sample::Integer,
    num_samples_left::Integer
)
    @. downconverted_signal_re = signal_re * carrier_re + signal_im * carrier_im
    @. downconverted_signal_im = signal_im * carrier_re - signal_re * carrier_im
end
# StructArray{CuArray} Matrix: real/imag parts in separate matrices; the
# carrier vectors broadcast across columns (one column per antenna).
function gpu_downconvert!(
    downconverted_signal_re::CuMatrix{Float32},
    downconverted_signal_im::CuMatrix{Float32},
    carrier_re::CuVector{Float32},
    carrier_im::CuVector{Float32},
    signal_re::CuMatrix{Float32},
    signal_im::CuMatrix{Float32},
    start_sample::Integer,
    num_samples_left::Integer
)
    # Expanded complex multiply by conj(carrier), see the vector variant.
    @. downconverted_signal_re = signal_re * carrier_re + signal_im * carrier_im
    @. downconverted_signal_im = signal_im * carrier_re - signal_re * carrier_im
end
# Float32 implementation of the orig. CPU function.
# Same conj-carrier multiply as the GPU path, expanded into real arithmetic
# and vectorized with LoopVectorization's @avx.
function cpu_downconvert!(
    downconverted_signal_re::Vector{Float32},
    downconverted_signal_im::Vector{Float32},
    carrier_re::Vector{Float32},
    carrier_im::Vector{Float32},
    signal_re::Vector{Float32},
    signal_im::Vector{Float32},
    start_sample::Integer,
    num_samples_left::Integer
)
    # Only the active window [start_sample, start_sample + num_samples_left - 1].
    @avx unroll = 3 for i = start_sample:num_samples_left + start_sample - 1
        downconverted_signal_re[i] = signal_re[i] * carrier_re[i] +
            signal_im[i] * carrier_im[i]
        downconverted_signal_im[i] = signal_im[i] * carrier_re[i] -
            signal_re[i] * carrier_im[i]
    end
end
# Float32 implementation of the orig. CPU function for matrices
# (one column per antenna; the carrier is shared across columns).
function cpu_downconvert!(
    downconverted_signal_re::Matrix{Float32},
    downconverted_signal_im::Matrix{Float32},
    carrier_re::Vector{Float32},
    carrier_im::Vector{Float32},
    signal_re::Matrix{Float32},
    signal_im::Matrix{Float32},
    start_sample::Integer,
    num_samples_left::Integer
)
    # i ranges over the active sample window, j over antennas/columns.
    @avx unroll = 3 for i = start_sample:num_samples_left + start_sample - 1, j = 1:size(signal_re, 2)
        # Calculate signal * carrier'
        downconverted_signal_re[i, j] = signal_re[i, j] * carrier_re[i] +
            signal_im[i, j] * carrier_im[i]
        downconverted_signal_im[i, j] = signal_im[i, j] * carrier_re[i] -
            signal_re[i, j] * carrier_im[i]
    end
end
| [
27,
7856,
261,
480,
29,
8590,
9937,
268,
14,
16630,
5432,
44199,
14306,
13,
20362,
198,
2,
14496,
19182,
90,
5377,
11141,
37,
2624,
92,
198,
8818,
308,
19944,
62,
2902,
1102,
1851,
0,
7,
198,
220,
220,
220,
866,
1102,
13658,
62,
1... | 2.412417 | 1,353 |
module ComradeDynesty

using Comrade

using AbstractMCMC
using TupleVectors
using Reexport

@reexport using Dynesty

# Both dynesty samplers consume parameters on the unit hypercube.
Comrade.samplertype(::Type{<:NestedSampler}) = Comrade.IsCube()
Comrade.samplertype(::Type{<:DynamicNestedSampler}) = Comrade.IsCube()

"""
    sample(post::Comrade.TransformedPosterior, sampler, args...; kwargs...)

Run dynesty nested sampling on `post`. Returns `(chain, stats)` where `chain`
is a `TupleVector` of draws transformed back to parameter space and `stats`
holds the per-draw log-likelihoods, the final evidence `logz`, its error
`logzerr`, and the importance `weights` of the draws.
"""
function AbstractMCMC.sample(post::Comrade.TransformedPosterior,
                             sampler::Union{NestedSampler, DynamicNestedSampler},
                             args...;
                             kwargs...)
    ℓ = logdensityof(post)
    # dynesty does not accept init_params; drop it if the caller passed one.
    kw = delete!(Dict(kwargs), :init_params)
    res = sample(ℓ, identity, sampler, args...; kw...)
    # Importance weights: exp(logwt - logz), using the final evidence estimate.
    samples, weights = res["samples"].T, exp.(res["logwt"].T .- res["logz"][end])
    chain = transform.(Ref(post), eachcol(samples)) |> TupleVector
    stats = (logl = res["logl"].T,
             logz = res["logz"][end],
             # BUG FIX: previously reported res["logz"][end] again here;
             # the evidence *error* lives under the "logzerr" key.
             logzerr = res["logzerr"][end],
             weights = weights,
            )

    return TupleVector(chain), stats
end

end
| [
21412,
955,
27585,
35,
2047,
9673,
198,
198,
3500,
955,
27585,
198,
198,
3500,
27741,
9655,
9655,
198,
3500,
309,
29291,
53,
478,
669,
198,
3500,
797,
39344,
198,
198,
31,
631,
87,
634,
1262,
39530,
9673,
628,
198,
5377,
27585,
13,
... | 2.137778 | 450 |
using Test
using POMDPs
using Random
# Exercise POMDPs.jl's generative-interface fallbacks: each generate_* works
# exactly once the required explicit-model functions have been defined.
let
    struct M <: POMDP{Int, Int, Char} end

    # No transition defined yet -> sampling a next state must fail.
    @test_throws MethodError generate_s(M(), 1, 1, MersenneTwister(4))
    POMDPs.transition(::M, ::Int, ::Int) = [1]
    @test generate_s(M(), 1, 1, MersenneTwister(4)) == 1
    # transition alone is not enough for the (s, o, r) / (s, r) generators.
    @test_throws MethodError generate_sor(M(), 1, 1, MersenneTwister(4))
    @test_throws MethodError generate_sr(M(), 1, 1, MersenneTwister(4))
    POMDPs.reward(::M, ::Int, ::Int, ::Int) = 0.0
    @test generate_sr(M(), 1, 1, MersenneTwister(4)) == (1, 0.0)
    @test_throws MethodError generate_sor(M(), 1, 1, MersenneTwister(4))
    # NOTE(review): `a` is a Cmd literal, not the Char 'a' declared as M's
    # observation type — presumably only identity of the value matters for
    # this fallback test; confirm intent.
    POMDPs.generate_o(::M, ::Int, ::Int, ::Int, ::AbstractRNG) = `a`
    @test generate_sor(M(), 1, 1, MersenneTwister(4)) == (1, `a`, 0.0)
end
| [
3500,
6208,
198,
3500,
350,
2662,
6322,
82,
198,
3500,
14534,
198,
198,
1616,
198,
220,
220,
220,
2878,
337,
1279,
25,
350,
2662,
6322,
90,
5317,
11,
2558,
11,
3178,
92,
886,
198,
220,
220,
220,
2488,
9288,
62,
400,
8516,
11789,
1... | 2.190476 | 336 |
<gh_stars>0
# Round-trip test: parsing, printing, and re-parsing an HTML document must
# reproduce an equal tree.
let
    doc = open("$testdir/example.html") do example
        example |> readstring |> parsehtml
    end
    io = IOBuffer()
    print(io, doc)
    seek(io, 0)   # rewind so the buffer can be read back
    newdoc = io |> readstring |> parsehtml
    @test newdoc == doc
end
| [
27,
456,
62,
30783,
29,
15,
198,
198,
1616,
198,
220,
220,
220,
2205,
796,
1280,
7203,
3,
9288,
15908,
14,
20688,
13,
6494,
4943,
466,
1672,
198,
220,
220,
220,
220,
220,
220,
220,
1672,
930,
29,
1100,
8841,
930,
29,
21136,
6494,
... | 2.268519 | 108 |
#=##############################################################################
# DESCRIPTION
Utilities.
# AUTHORSHIP
* Author : <NAME>
* Email : <EMAIL>
* Created : Sep 2018
* License : MIT License
=###############################################################################
"""
`simplewing(b, ar, tr, twist_root, twist_tip, lambda, gamma;
bodytype=RigidWakeBody,
span_NDIVS="automatic", rfl_NDIVS="automatic",
airfoil_root="naca6412.dat", airfoil_tip="naca6412.dat",
airfoil_path=def_rfl_path)`
Generates a symmetric single-section wing.
**ARGUMENTS**
* `b::Real` : Span.
* `ar::Real` : Aspect ratio defined as b/c_tip.
* `tr::Real` : Taper ratio defined as c_tip/c_root.
* `twist_root::Real`: (deg) twist of the root.
* `twist_tip::Real` : (deg) twist of the tip.
* `lambda::Real` : (deg) sweep.
* `gamma::Real` : (deg) dihedral.
**OPTIONAL ARGUMENTS**
* `bodytype::Type{LBodyTypes}`: Type of lifting body to generate.
* `span_NDIVS::ndivstype` : Spanwise divisions.
* `rfl_NDIVS::ndivstype` : Chordwise divisions.
* `airfoil_root::String` : File to root airfoil contour.
* `airfoil_tip::String` : File to tip airfoil contour.
* `airfoil_path::String` : Path to airfoil files.
NOTE: See gt.multidscretize for a description of arguments of type `ndivstype`.
NOTE2: In the current implementation, sweep and dihedral are done about the LE.
"""
function simplewing(b::RType, ar::RType, tr::RType, twist_root::RType,
twist_tip::RType, lambda::RType, gamma::RType;
bodytype::Type{LBodyTypes}=RigidWakeBody,
span_NDIVS::ndivstype=nothing,
rfl_NDIVS::ndivstype=nothing,
airfoil_root::String="naca6412.dat",
airfoil_tip::String="naca6412.dat",
airfoil_path::String=def_rfl_path,
spl_s::Real=0.0000001,
rflspl_s::Real=0.00000001,
verify_spline::Bool=true,
verify_rflspline::Bool=true,
opt_args...
)
# ----------------- GEOMETRY DESCRIPTION -------------------------------------
c_tip = b/ar # Tip chord
c_root = c_tip/tr # Root chord
semispan = b/2 # (m) semi-span length
y_tip = b/2
x_tip = y_tip*tan(lambda*pi/180)
z_tip = y_tip*tan(gamma*pi/180)
chords = [0.00 c_root/semispan; # (semi-span position, chord c/semib)
1.00 c_tip/semispan]
twists = [0.0 twist_root; # (semi-span position, twist (deg))
1.0 twist_tip]
x_pos = [0.00 0; # (semi-span position, LE x-position x/semib)
1.00 x_tip/semispan]
z_pos = [0.00 0; # (semi-span position, LE z-position x/semib)
1.00 z_tip/semispan]
airfoil_files = [(0.0, airfoil_root), # (semi-span position, airfoil file)
(1.0, airfoil_tip)]
# ----------------- DISCRETIZATION 0000----------------------------------------
# Defines divisions
if span_NDIVS==nothing
b_NDIVS = [(1.0, 35, 20.0, true)] # Span cell sections
else
b_NDIVS = span_NDIVS
end
if rfl_NDIVS==nothing
urfl_NDIVS = [(0.25, 7, 10.0, false), # Cells on upper side of airfoils
(0.50, 5, 1.0, true),
(0.25, 6, 1/10.0, false)]
else
urfl_NDIVS = rfl_NDIVS
end
lrfl_NDIVS = urfl_NDIVS # Cells on lower side of airfoils
# ----------------- LOFTING PARAMETERS ---------------------------------------
b_low = -1.0 # Lower bound of span lofting
b_up = 1.0 # Upper bound of span lofting
symmetric = true # Lofting symmetric about b=0
spl_k = 1 # Spline order of distributions along span
# spl_s = 0.0000001 # Spline smoothing of distribution along span
# rflspl_s = 0.00000001 # Spline smoothing of airfoil cross sections.
# verify_spline = false # Plots the splined distributions
# verify_rflspline = true # Plots the splined airfoil cross sections
return generate_loft_liftbody(bodytype, airfoil_files, airfoil_path,
urfl_NDIVS, lrfl_NDIVS,
semispan, b_low, b_up, b_NDIVS,
chords, twists, x_pos, z_pos;
dimsplit=1,
symmetric=symmetric,
spl_k=spl_k, spl_s=spl_s,
verify_spline=verify_spline,
verify_rflspline=verify_rflspline,
rflspl_s=rflspl_s,
opt_args...
)
end
| [
2,
28,
29113,
29113,
7804,
4242,
2235,
198,
2,
22196,
40165,
198,
220,
220,
220,
41086,
13,
198,
2,
44746,
49423,
198,
220,
1635,
6434,
220,
220,
220,
1058,
1279,
20608,
29,
198,
220,
1635,
9570,
220,
220,
220,
220,
1058,
1279,
2763... | 1.921382 | 2,633 |
# math.jl is only loaded on LLVM >= 7.0 (presumably it relies on intrinsics
# unavailable in older LLVM — confirm against gcn_intrinsics/math.jl).
if Base.libllvm_version >= v"7.0"
    include(joinpath("gcn_intrinsics", "math.jl"))
end
include(joinpath("gcn_intrinsics", "indexing.jl"))
include(joinpath("gcn_intrinsics", "assertion.jl"))
include(joinpath("gcn_intrinsics", "synchronization.jl"))
include(joinpath("gcn_intrinsics", "extras.jl"))
| [
361,
7308,
13,
8019,
297,
14761,
62,
9641,
18189,
410,
1,
22,
13,
15,
1,
198,
220,
220,
220,
2291,
7,
22179,
6978,
7203,
70,
31522,
62,
600,
81,
1040,
873,
1600,
366,
11018,
13,
20362,
48774,
198,
437,
198,
17256,
7,
22179,
6978,
... | 2.471074 | 121 |
using LinearAlgebra
export transform,
backtransform
"""
backtransform(Rsets::ReachSolution, options::Options)
Undo a coordinate transformation.
### Input
- `Rsets` -- flowpipe
- `option` -- problem options containing an `:transformation_matrix` entry
### Output
A new flowpipe where each reach set has been transformed.
### Notes
The transformation is implemented with a lazy `LinearMap`.
"""
function backtransform(Rsets, options::Options)
transformation_matrix = options[:transformation_matrix]
if transformation_matrix == nothing
return Rsets
end
return project(Rsets, transformation_matrix)
end
"""
transform(problem::InitialValueProblem, options::Options)
Interface function that calls the respective transformation function.
### Input
- `problem` -- discrete or continuous initial-value problem
- `option` -- problem options
### Output
A tuple containing the transformed problem and the transformed options.
### Notes
The functions that are called in the background should return a the transformed
system components `A`, `X0`, and `U`, and also an inverse transformation matrix `M`.
If the system has an invariant, it is transformed as well.
"""
function transform(problem::InitialValueProblem, options::Options)
method = options[:coordinate_transformation]
if method == ""
nothing # no-op
elseif method == "schur"
problem, T_inverse = schur_transform(problem)
options[:transformation_matrix] = T_inverse
else
error("the transformation method $method is undefined")
end
return (problem, options)
end
"""
schur_transform(problem::InitialValueProblem)
Applies a Schur transformation to a discrete or continuous initial-value problem.
### Input
- `problem` -- discrete or continuous initial-value problem
### Output
Transformed problem.
### Algorithm
We use Julia's default `schurfact` function to compute a
[Schur decomposition](https://en.wikipedia.org/wiki/Schur_decomposition)
of the coefficients matrix ``A``.
"""
function schur_transform(problem::InitialValueProblem{PT, ST}
) where {PT <: Union{ConstrainedLinearControlDiscreteSystem, ConstrainedLinearControlContinuousSystem}, ST<:LazySet}
n = size(problem.s.A, 1)
# full (dense) matrix is required by schur
# result S is a struct such that
# - S.Schur is in Schur form and
# - A == S.vectors * S.Schur * S.vectors'
S = schur(Matrix(problem.s.A))
# recall that for Schur matrices, inv(T) == T'
Z_inverse = copy(transpose(S.vectors))
# obtain transformed system matrix
A_new = S.Schur
# apply transformation to the initial states
X0_new = Z_inverse * problem.x0
# apply transformation to the inputs
B_new = Z_inverse * problem.s.B
U_new = problem.s.U
# matrix for reverting the transformation again
T_inverse = S.vectors
# apply transformation to the invariant
if hasmethod(stateset, Tuple{typeof(problem.s)})
invariant_new = T_inverse * problem.s.X
else
invariant_new = Universe(n)
end
system_new = _wrap_system(PT, A_new, B_new, invariant_new, U_new)
problem_new = InitialValueProblem(system_new, X0_new)
return problem_new, T_inverse
end
# Reassemble a constrained linear control system of the flavor selected by the
# (otherwise unused) type argument.
_wrap_system(::Type{<:ConstrainedLinearControlDiscreteSystem},
             A, B, invariant, U) =
    ConstrainedLinearControlDiscreteSystem(A, B, invariant, U)

_wrap_system(::Type{<:ConstrainedLinearControlContinuousSystem},
             A, B, invariant, U) =
    ConstrainedLinearControlContinuousSystem(A, B, invariant, U)
| [
3500,
44800,
2348,
29230,
198,
198,
39344,
6121,
11,
198,
220,
220,
220,
220,
220,
220,
736,
35636,
198,
198,
37811,
198,
220,
220,
220,
736,
35636,
7,
49,
28709,
3712,
3041,
620,
46344,
11,
3689,
3712,
29046,
8,
198,
198,
31319,
78... | 2.955574 | 1,238 |
# Main render loop: poll GLFW events, render one frame, and throttle to
# `framerate`. `prerender` runs each frame before the context is made current.
# The screen is destroyed both on error (then rethrown) and on normal exit.
function renderloop(screen::Screen; framerate = 1/30, prerender = () -> nothing)
    try
        while isopen(screen)
            t = time()
            GLFW.PollEvents() # GLFW poll
            prerender()
            make_context_current(screen)
            render_frame(screen)
            GLFW.SwapBuffers(to_native(screen))
            diff = framerate - (time() - t)
            if diff > 0
                sleep(diff)
            else # if we don't sleep, we need to yield explicitly
                yield()
            end
        end
    catch e
        destroy!(screen)
        rethrow(e)
    end
    destroy!(screen)
    return
end
# Clear the whole window to white, then clear each visible sub-screen's
# scissored rectangle with its own background color (if it is marked `clear`).
function setup!(screen)
    glEnable(GL_SCISSOR_TEST)
    if isopen(screen)
        glScissor(0, 0, widths(screen)...)
        glClearColor(1, 1, 1, 1)
        glClear(GL_COLOR_BUFFER_BIT)
        # screen.screens holds (id, rect, clear, visible, color) tuples.
        for (id, rect, clear, visible, color) in screen.screens
            if visible[]
                a = rect[]
                rt = (minimum(a)..., widths(a)...)
                glViewport(rt...)
                if clear[]
                    c = color[]
                    glScissor(rt...)
                    glClearColor(red(c), green(c), blue(c), alpha(c))
                    glClear(GL_COLOR_BUFFER_BIT)
                end
            end
        end
    end
    glDisable(GL_SCISSOR_TEST)
    return
end
# Callbacks invoked as f(objectid_buffer, w, h) after the render passes of each
# frame, to read back selection (picking) information (see render_frame).
const selection_queries = Function[]
"""
Renders a single frame of a `window`
"""
function render_frame(screen::Screen)
nw = to_native(screen)
GLAbstraction.is_context_active(nw) || return
fb = screen.framebuffer
wh = Int.(framebuffer_size(nw))
resize!(fb, wh)
w, h = wh
glDisable(GL_STENCIL_TEST)
#prepare for geometry in need of anti aliasing
glBindFramebuffer(GL_FRAMEBUFFER, fb.id[1]) # color framebuffer
glDrawBuffers(2, [GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1])
glClearColor(0,0,0,0)
glClear(GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT | GL_COLOR_BUFFER_BIT)
setup!(screen)
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE)
GLAbstraction.render(screen, true)
# transfer color to luma buffer and apply fxaa
glBindFramebuffer(GL_FRAMEBUFFER, fb.id[2]) # luma framebuffer
glDrawBuffer(GL_COLOR_ATTACHMENT0)
glViewport(0, 0, w, h)
glClearColor(0,0,0,0)
glClear(GL_COLOR_BUFFER_BIT)
GLAbstraction.render(fb.postprocess[1]) # add luma and preprocess
glBindFramebuffer(GL_FRAMEBUFFER, fb.id[1]) # transfer to non fxaa framebuffer
glViewport(0, 0, w, h)
glDrawBuffer(GL_COLOR_ATTACHMENT0)
GLAbstraction.render(fb.postprocess[2]) # copy with fxaa postprocess
#prepare for non anti aliased pass
glDrawBuffers(2, [GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1])
GLAbstraction.render(screen, false)
#Read all the selection queries
glReadBuffer(GL_COLOR_ATTACHMENT1)
for query_func in selection_queries
query_func(fb.objectid, w, h)
end
glBindFramebuffer(GL_FRAMEBUFFER, 0) # transfer back to window
glViewport(0, 0, w, h)
glClearColor(0, 0, 0, 0)
glClear(GL_COLOR_BUFFER_BIT)
GLAbstraction.render(fb.postprocess[3]) # copy postprocess
return
end
# Look up a sub-screen's rect observable by its id.
# Returns (true, rect) when found, otherwise (false, IRect(0, 0, 0, 0)).
function id2rect(screen, id1)
    # TODO maybe we should use a different data structure
    # screen.screens holds (id, rect, clear, visible, color) tuples; the
    # previous 4-name destructuring silently bound `visible` to a variable
    # named `color`. Only id and rect are needed, so take just those.
    for (id2, rect) in screen.screens
        id1 == id2 && return true, rect
    end
    false, IRect(0,0,0,0)
end
# Render every element whose fxaa flag matches this pass (the fxaa=true pass
# draws anti-aliased elements, the fxaa=false pass the rest), restricted to
# the viewport of the sub-screen each element belongs to.
function GLAbstraction.render(screen::Screen, fxaa::Bool)
    for (zindex, screenid, elem) in screen.renderlist
        found, rect = id2rect(screen, screenid)
        found || continue
        a = rect[]
        glViewport(minimum(a)..., widths(a)...)
        # Single equality check replaces the former pair of complementary
        # `if` statements and reads the elem[:fxaa] observable only once.
        if fxaa == elem[:fxaa][]
            render(elem)
        end
    end
    return
end
| [
8818,
8543,
26268,
7,
9612,
3712,
23901,
26,
5346,
21620,
796,
352,
14,
1270,
11,
662,
13287,
796,
7499,
4613,
2147,
8,
198,
220,
220,
220,
1949,
198,
220,
220,
220,
220,
220,
220,
220,
981,
318,
9654,
7,
9612,
8,
198,
220,
220,
... | 2.090209 | 1,818 |
"""
module that holds functions needed to react to scrolling
Generally first we need to pass the GLFW callback to the Rocket obeservable
code adapted from https://discourse.julialang.org/t/custom-subject-in-rocket-jl-for-mouse-events-from-glfw/65133/3
"""
module ReactToScroll
using ModernGL, ..DisplayWords,Rocket, GLFW, ..ForDisplayStructs, ..TextureManag,Logging, ..DataStructs, ..StructsManag
export reactToScroll
export registerMouseScrollFunctions
"""
configuting Rocket on Subscribe so we get custom handler of input as we see we still need to define actor
"""
function Rocket.on_subscribe!(handler::ScrollCallbackSubscribable, actor::SyncActor{Any, ActorWithOpenGlObjects})
return subscribe!(handler.subject, actor)
end
"""
we define how handler should act on the subject - observable so it will pass event onto subject
If we will scroll fast number will change much and we will skip some slices
"""
function (handler::ScrollCallbackSubscribable)(_, xoff, yoff)
if(!handler.isBusy[]) # if program is ready to repsond
handler.numberToSend=0
next!(handler.subject,Int64(handler.numberToSend+yoff))#true if we scroll up
else
handler.numberToSend+=yoff
end
end
"""
uploading data to given texture; of given types associated
returns subscription in order to enable unsubscribing in the end
window - GLFW window
stopListening - atomic boolean able to stop the event listening cycle
return scrollback - that holds boolean subject (observable) to which we can react by subscribing appropriate actor
"""
function registerMouseScrollFunctions(window::GLFW.Window
,stopListening::Base.Threads.Atomic{Bool}
,isBusy::Base.Threads.Atomic{Bool} )
stopListening[]=true # stoping event listening loop to free the GLFW context
scrollback = ScrollCallbackSubscribable( isBusy,0 ,Subject(Int64, scheduler = AsyncScheduler()))
GLFW.SetScrollCallback(window, (a, xoff, yoff) -> scrollback(a, xoff, yoff))
stopListening[]=false # reactivate event listening loop
return scrollback
end #registerMouseScrollFunctions
"""
captures information send from handler that scroll was executed by the
"""
"""
in case of the scroll p true will be send in case of down - false
in response to it it sets new screen int variable and changes displayed screen
toBeSavedForBack - just marks weather we wat to save the info how to undo latest action
- false if we invoke it from undoing
"""
function reactToScroll(scrollNumb::Int64
,actor::SyncActor{Any, ActorWithOpenGlObjects}
,toBeSavedForBack::Bool = true)
actor.actor.mainForDisplayObjects.stopListening[]=true
current = actor.actor.currentDisplayedSlice
old = current
#when shift is pressed scrolling is 10 times faster
if(!actor.actor.mainForDisplayObjects.isFastScroll)
current+=scrollNumb
else
current+=scrollNumb*10
end
#isScrollUp ? current+=1 : current-=1
# we do not want to move outside of possible range of slices
lastSlice = actor.actor.onScrollData.slicesNumber
if(lastSlice>1)
actor.actor.isSliceChanged = true
actor.actor.isBusy[] = true
if(current<1) current=1 end
if(lastSlice<1) lastSlice=1 end
if(current>=lastSlice) current=lastSlice end
#logic to change displayed screen
#we select slice that we are intrested in
singleSlDat= actor.actor.onScrollData.dataToScroll|>
(scrDat)-> map(threeDimDat->threeToTwoDimm(threeDimDat.type,Int64(current),actor.actor.onScrollData.dimensionToScroll,threeDimDat ),scrDat) |>
(twoDimList)-> SingleSliceDat(listOfDataAndImageNames=twoDimList
,sliceNumber=current
,textToDisp = getTextForCurrentSlice(actor.actor.onScrollData, Int32(current)) )
updateImagesDisplayed(singleSlDat
,actor.actor.mainForDisplayObjects
,actor.actor.textDispObj
,actor.actor.calcDimsStruct
,actor.actor.valueForMasToSet )
actor.actor.currentlyDispDat=singleSlDat
# updating the last mouse position so when we will change plane it will better show actual position
currentDim =Int64(actor.actor.onScrollData.dataToScrollDims.dimensionToScroll)
lastMouse = actor.actor.lastRecordedMousePosition
locArr = [lastMouse[1],lastMouse[2],lastMouse[3]]
locArr[currentDim]= current
actor.actor.lastRecordedMousePosition=CartesianIndex(locArr[1],locArr[2],locArr[3])
#saving information about current slice for future reference
actor.actor.currentDisplayedSlice = current
#enable undoing the action
if(toBeSavedForBack)
func = ()-> reactToScroll(old-=scrollNumb, actor,false )
addToforUndoVector(actor,func )
end
end#if
actor.actor.isBusy[] = false
actor.actor.mainForDisplayObjects.stopListening[]=false
end#reactToScroll
end #ReactToScroll | [
198,
37811,
198,
21412,
326,
6622,
5499,
2622,
284,
220,
6324,
284,
28659,
198,
37058,
717,
356,
761,
284,
1208,
262,
10188,
24160,
23838,
284,
262,
16920,
909,
274,
712,
540,
220,
198,
8189,
16573,
422,
3740,
1378,
15410,
9047,
13,
7... | 2.610805 | 1,999 |
"""
Julia-side representations of SMR (Spike2) file channels, mirroring the
C-layout structs returned by the native reader and deep-copying their data
into owned Julia arrays.
"""
module SMRTypes

import Base: show

export cSMRWMrkChannel, SMRWMrkChannel, cSMRContChannel, SMRContChannel,
    cSMREventChannel, SMREventChannel, cSMRMarkerChannel, SMRMarkerChannel,
    cSMRChannelInfo, cSMRChannelInfoArray, SMRChannelInfo, show,
    channel_string

# Number of marker bytes per event.
const MARKER_SIZE = UInt8(4)

# Supertype of the C-layout structs (raw pointers into native memory).
abstract type SMRCType end
# Supertype of the owning Julia-side channel types.
abstract type SMRType end
# =========================================================================== #
struct cSMRWMrkChannel <: SMRCType
    length::UInt64
    npt::UInt64
    timestamps::Ptr{Float64}
    markers::Ptr{UInt8}
    wavemarks::Ptr{Int16}
end
mutable struct SMRWMrkChannel <: SMRType
    timestamps::Vector{Float64}
    markers::Matrix{UInt8}
    wavemarks::Matrix{Int16}

    # Deep-copies the native buffers (unsafe_wrap with own=false, then
    # copyto!), so the result stays valid after the C memory is freed.
    function SMRWMrkChannel(x::cSMRWMrkChannel)
        self = new()
        self.timestamps = Vector{Float64}(undef, x.length)
        copyto!(self.timestamps, unsafe_wrap(Vector{Float64}, x.timestamps, x.length, own=false))

        self.markers = Matrix{UInt8}(undef, MARKER_SIZE, x.length)
        copyto!(self.markers, unsafe_wrap(Vector{UInt8}, x.markers, x.length * MARKER_SIZE, own=false))

        self.wavemarks = Matrix{Int16}(undef, x.npt, x.length)
        copyto!(self.wavemarks, unsafe_wrap(Vector{Int16}, x.wavemarks, x.length * x.npt, own=false))
        return self
    end
end
# =========================================================================== #
struct cSMRContChannel <: SMRCType
    length::UInt64
    sampling_rate::Float64
    data::Ptr{Int16}
end
mutable struct SMRContChannel <: SMRType
    data::Vector{Int16}
    sampling_rate::Float64

    # Copies the native sample buffer into an owned vector.
    function SMRContChannel(x::cSMRContChannel)
        self = new()
        self.data = Vector{Int16}(undef, x.length)
        copyto!(self.data, unsafe_wrap(Vector{Int16}, x.data, x.length, own=false))
        self.sampling_rate = x.sampling_rate
        return self
    end
end
# =========================================================================== #
struct cSMREventChannel <: SMRCType
    length::UInt64
    data::Ptr{Float64}
end
mutable struct SMREventChannel <: SMRType
    data::Vector{Float64}

    # Copies the native event-timestamp buffer into an owned vector.
    function SMREventChannel(x::cSMREventChannel)
        self = new()
        self.data = Vector{Float64}(undef, x.length)
        copyto!(self.data, unsafe_wrap(Vector{Float64}, x.data, x.length, own=false))
        return self
    end
end
# =========================================================================== #
struct cSMRMarkerChannel <: SMRCType
    length::UInt64
    npt::UInt64
    timestamps::Ptr{Float64}
    markers::Ptr{UInt8}
    text::Ptr{UInt8}
end
mutable struct SMRMarkerChannel <: SMRType
    timestamps::Vector{Float64}
    markers::Matrix{UInt8}
    text::Vector{String}

    # Copies timestamps and marker bytes, and decodes each npt-byte text
    # record into a NUL-stripped String (all-NUL records become "").
    function SMRMarkerChannel(mrk::cSMRMarkerChannel)
        self = new()
        self.timestamps = Vector{Float64}(undef, mrk.length)
        copyto!(self.timestamps, unsafe_wrap(Vector{Float64}, mrk.timestamps, mrk.length, own=false))

        self.markers = Matrix{UInt8}(undef, MARKER_SIZE, mrk.length)
        copyto!(self.markers, unsafe_wrap(Vector{UInt8}, mrk.markers, mrk.length * MARKER_SIZE, own=false))

        self.text = Vector{String}(undef, mrk.length)
        ary = unsafe_wrap(Vector{UInt8}, mrk.text, mrk.length * mrk.npt, own=false)

        for k = 1:mrk.length
            isrt = ((k-1)*mrk.npt)+1   # start of the k-th fixed-width record
            iend = k*mrk.npt
            if all(x->x=='\0', ary[isrt:iend])
                self.text[k] = ""
            else
                self.text[k] = strip(join(map(Char, ary[isrt:iend])), '\0')
            end
        end

        return self
    end
end
# =========================================================================== #
struct cSMRChannelInfo <: SMRCType
    title::Cstring
    index::Int32
    kind::UInt8
    phy_chan::Int16
end

struct cSMRChannelInfoArray <: SMRCType
    length::UInt32
    ifo::Ptr{Ptr{cSMRChannelInfo}}
end

mutable struct SMRChannelInfo <: SMRType
    title::String
    index::Int
    kind::Int
    phy_chan::Int

    # Copies the C channel-info record, converting the title Cstring.
    function SMRChannelInfo(x::cSMRChannelInfo)
        self = new()
        self.title = unsafe_string(x.title)
        self.index = Int(x.index)
        self.kind = Int(x.kind)
        self.phy_chan = Int(x.phy_chan)
        return self
    end
end
# =========================================================================== #
# Compact display: `"title": kind-name, index`.
function show(io::IO, ifo::SMRChannelInfo)
    str = "\"" * ifo.title *
        "\": " * channel_string(ifo.kind) *
        ", " * string(ifo.index)
    print(io, str)
end
# =========================================================================== #
"""
    channel_string(x::Integer)

Map an SMR channel-kind code to its name; warns and returns "INVALID" for
codes outside 1:9. Codes 2-4 are all event flavors.
"""
function channel_string(x::T) where T<:Integer
    sx = string(x)
    if x == 1
        str = "continuous"
    elseif x == 2
        str = "event"
    elseif x == 3
        str = "event"
    elseif x == 4
        str = "event"
    elseif x == 5
        str = "marker"
    elseif x == 6
        str = "adc_marker"
    elseif x == 7
        str = "real_marker"
    elseif x == 8
        str = "text_marker"
    elseif x == 9
        str = "real_wave"
    else
        # Fix: reuse the `sx` computed above (it was previously an unused
        # local while `string(x)` was recomputed here).
        @warn(sx * " is not a valid channel type")
        str = "INVALID"
    end
    return str
end
# =========================================================================== #
end #END MODULE
| [
21412,
9447,
14181,
9497,
198,
198,
11748,
7308,
25,
905,
198,
198,
39344,
269,
12310,
46747,
5246,
74,
29239,
11,
9447,
46747,
5246,
74,
29239,
11,
269,
12310,
49,
4264,
29239,
11,
9447,
49,
4264,
29239,
11,
198,
220,
220,
220,
220,
... | 2.379638 | 2,210 |
<reponame>Dictino/julia
# This file is a part of Julia. License is MIT: https://julialang.org/license
using Random
using LinearAlgebra
# True iff `x` is simultaneously of type `T` and a NaN value; used to verify
# that NaN-producing operations preserve the floating-point type.
isnan_type(::Type{T}, x) where {T} = isa(x, T) && isnan(x)
@testset "clamp" begin
    # integer input, integer bounds
    @test clamp(0, 1, 3) == 1
    @test clamp(1, 1, 3) == 1
    @test clamp(2, 1, 3) == 2
    @test clamp(3, 1, 3) == 3
    @test clamp(4, 1, 3) == 3
    # float input keeps its type
    @test clamp(0.0, 1, 3) == 1.0
    @test clamp(1.0, 1, 3) == 1.0
    @test clamp(2.0, 1, 3) == 2.0
    @test clamp(3.0, 1, 3) == 3.0
    @test clamp(4.0, 1, 3) == 3.0
    # broadcast over vectors and matrices
    @test clamp.([0, 1, 2, 3, 4], 1.0, 3.0) == [1.0, 1.0, 2.0, 3.0, 3.0]
    @test clamp.([0 1; 2 3], 1.0, 3.0) == [1.0 1.0; 2.0 3.0]
    # clamp(x, T) saturates to the target type's representable range
    @test clamp(-200, Int8) === typemin(Int8)
    @test clamp(100, Int8) === Int8(100)
    @test clamp(200, Int8) === typemax(Int8)
    # in-place variant
    begin
        x = [0.0, 1.0, 2.0, 3.0, 4.0]
        clamp!(x, 1, 3)
        @test x == [1.0, 1.0, 2.0, 3.0, 3.0]
    end
end
@testset "constants" begin
@test pi != ℯ
@test ℯ != 1//2
@test 1//2 <= ℯ
@test ℯ <= 15//3
@test big(1//2) < ℯ
@test ℯ < big(20//6)
@test ℯ^pi == exp(pi)
@test ℯ^2 == exp(2)
@test ℯ^2.4 == exp(2.4)
@test ℯ^(2//3) == exp(2//3)
@test Float16(3.0) < pi
@test pi < Float16(4.0)
@test widen(pi) === pi
@test occursin("3.14159", sprint(show, MIME"text/plain"(), π))
@test repr(Any[pi ℯ; ℯ pi]) == "Any[π ℯ; ℯ π]"
@test string(pi) == "π"
end
@testset "frexp,ldexp,significand,exponent" begin
    @testset "$T" for T in (Float16,Float32,Float64)
        for z in (zero(T),-zero(T))
            # NOTE(review): these two checks previously lacked `@test`, so the
            # expressions were evaluated and silently discarded; they are now
            # actually asserted.
            @test frexp(z) === (z,0)
            @test significand(z) === z
            @test_throws DomainError exponent(z)
        end
        # Pairs (a, b) with a == b * 2^n for some integer n; a duplicated
        # `(prevfloat(floatmin(T)), prevfloat(one(T), 2))` entry was removed.
        for (a,b) in [(T(12.8),T(0.8)),
                      (prevfloat(floatmin(T)), prevfloat(one(T), 2)),
                      (prevfloat(floatmin(T)), nextfloat(one(T), -2)),
                      (nextfloat(zero(T), 3), T(0.75)),
                      (prevfloat(zero(T), -3), T(0.75)),
                      (nextfloat(zero(T)), T(0.5))]
            n = Int(log2(a/b))
            @test frexp(a) == (b,n)
            @test ldexp(b,n) == a
            @test ldexp(a,-n) == b
            @test significand(a) == 2b
            @test exponent(a) == n-1
            # Same identities must hold for the negated values.
            @test frexp(-a) == (-b,n)
            @test ldexp(-b,n) == -a
            @test ldexp(-a,-n) == -b
            @test significand(-a) == -2b
            @test exponent(-a) == n-1
        end
        @test_throws DomainError exponent(convert(T,NaN))
        @test isnan_type(T, significand(convert(T,NaN)))
        x,y = frexp(convert(T,NaN))
        @test isnan_type(T, x)
        @test y == 0
        @testset "ldexp function" begin
            @test ldexp(T(0.0), 0) === T(0.0)
            @test ldexp(T(-0.0), 0) === T(-0.0)
            @test ldexp(T(Inf), 1) === T(Inf)
            @test ldexp(T(Inf), 10000) === T(Inf)
            @test ldexp(T(-Inf), 1) === T(-Inf)
            @test isnan_type(T, ldexp(T(NaN), 10))
            @test ldexp(T(1.0), 0) === T(1.0)
            @test ldexp(T(0.8), 4) === T(12.8)
            @test ldexp(T(-0.854375), 5) === T(-27.34)
            # Extreme exponents must saturate to Inf / underflow to zero.
            @test ldexp(T(1.0), typemax(Int)) === T(Inf)
            @test ldexp(T(1.0), typemin(Int)) === T(0.0)
            @test ldexp(prevfloat(floatmin(T)), typemax(Int)) === T(Inf)
            @test ldexp(prevfloat(floatmin(T)), typemin(Int)) === T(0.0)
            # Same behavior with Int128 exponents.
            @test ldexp(T(0.0), Int128(0)) === T(0.0)
            @test ldexp(T(-0.0), Int128(0)) === T(-0.0)
            @test ldexp(T(1.0), Int128(0)) === T(1.0)
            @test ldexp(T(0.8), Int128(4)) === T(12.8)
            @test ldexp(T(-0.854375), Int128(5)) === T(-27.34)
            @test ldexp(T(1.0), typemax(Int128)) === T(Inf)
            @test ldexp(T(1.0), typemin(Int128)) === T(0.0)
            @test ldexp(prevfloat(floatmin(T)), typemax(Int128)) === T(Inf)
            @test ldexp(prevfloat(floatmin(T)), typemin(Int128)) === T(0.0)
            # Same behavior with BigInt exponents.
            @test ldexp(T(0.0), BigInt(0)) === T(0.0)
            @test ldexp(T(-0.0), BigInt(0)) === T(-0.0)
            @test ldexp(T(1.0), BigInt(0)) === T(1.0)
            @test ldexp(T(0.8), BigInt(4)) === T(12.8)
            @test ldexp(T(-0.854375), BigInt(5)) === T(-27.34)
            @test ldexp(T(1.0), BigInt(typemax(Int128))) === T(Inf)
            @test ldexp(T(1.0), BigInt(typemin(Int128))) === T(0.0)
            @test ldexp(prevfloat(floatmin(T)), BigInt(typemax(Int128))) === T(Inf)
            @test ldexp(prevfloat(floatmin(T)), BigInt(typemin(Int128))) === T(0.0)
            # Test also against BigFloat reference. Needs to be exactly rounded.
            @test ldexp(floatmin(T), -1) == T(ldexp(big(floatmin(T)), -1))
            @test ldexp(floatmin(T), -2) == T(ldexp(big(floatmin(T)), -2))
            @test ldexp(floatmin(T)/2, 0) == T(ldexp(big(floatmin(T)/2), 0))
            @test ldexp(floatmin(T)/3, 0) == T(ldexp(big(floatmin(T)/3), 0))
            @test ldexp(floatmin(T)/3, -1) == T(ldexp(big(floatmin(T)/3), -1))
            @test ldexp(floatmin(T)/3, 11) == T(ldexp(big(floatmin(T)/3), 11))
            @test ldexp(floatmin(T)/11, -10) == T(ldexp(big(floatmin(T)/11), -10))
            @test ldexp(-floatmin(T)/11, -10) == T(ldexp(big(-floatmin(T)/11), -10))
        end
    end
end
# We compare to BigFloat instead of hard-coding
# values, assuming that BigFloat has an independently tested implementation.
@testset "basic math functions" begin
@testset "$T" for T in (Float32, Float64)
x = T(1//3)
y = T(1//2)
yi = 4
@testset "Random values" begin
@test x^y ≈ big(x)^big(y)
@test x^1 === x
@test x^yi ≈ big(x)^yi
@test acos(x) ≈ acos(big(x))
@test acosh(1+x) ≈ acosh(big(1+x))
@test asin(x) ≈ asin(big(x))
@test asinh(x) ≈ asinh(big(x))
@test atan(x) ≈ atan(big(x))
@test atan(x,y) ≈ atan(big(x),big(y))
@test atanh(x) ≈ atanh(big(x))
@test cbrt(x) ≈ cbrt(big(x))
@test cos(x) ≈ cos(big(x))
@test cosh(x) ≈ cosh(big(x))
@test exp(x) ≈ exp(big(x))
@test exp10(x) ≈ exp10(big(x))
@test exp2(x) ≈ exp2(big(x))
@test expm1(x) ≈ expm1(big(x))
@test hypot(x,y) ≈ hypot(big(x),big(y))
@test hypot(x,x,y) ≈ hypot(hypot(big(x),big(x)),big(y))
@test hypot(x,x,y,y) ≈ hypot(hypot(big(x),big(x)),hypot(big(y),big(y)))
@test log(x) ≈ log(big(x))
@test log10(x) ≈ log10(big(x))
@test log1p(x) ≈ log1p(big(x))
@test log2(x) ≈ log2(big(x))
@test sin(x) ≈ sin(big(x))
@test sinh(x) ≈ sinh(big(x))
@test sqrt(x) ≈ sqrt(big(x))
@test tan(x) ≈ tan(big(x))
@test tanh(x) ≈ tanh(big(x))
@test sec(x) ≈ sec(big(x))
@test csc(x) ≈ csc(big(x))
@test secd(x) ≈ secd(big(x))
@test cscd(x) ≈ cscd(big(x))
@test sech(x) ≈ sech(big(x))
@test csch(x) ≈ csch(big(x))
end
@testset "Special values" begin
@test isequal(T(1//4)^T(1//2), T(1//2))
@test isequal(T(1//4)^2, T(1//16))
@test isequal(acos(T(1)), T(0))
@test isequal(acosh(T(1)), T(0))
@test asin(T(1)) ≈ T(pi)/2 atol=eps(T)
@test atan(T(1)) ≈ T(pi)/4 atol=eps(T)
@test atan(T(1),T(1)) ≈ T(pi)/4 atol=eps(T)
@test isequal(cbrt(T(0)), T(0))
@test isequal(cbrt(T(1)), T(1))
@test isequal(cbrt(T(1000000000)), T(1000))
@test isequal(cos(T(0)), T(1))
@test cos(T(pi)/2) ≈ T(0) atol=eps(T)
@test isequal(cos(T(pi)), T(-1))
@test exp(T(1)) ≈ T(ℯ) atol=10*eps(T)
@test isequal(exp10(T(1)), T(10))
@test isequal(exp2(T(1)), T(2))
@test isequal(expm1(T(0)), T(0))
@test expm1(T(1)) ≈ T(ℯ)-1 atol=10*eps(T)
@test isequal(hypot(T(3),T(4)), T(5))
@test isequal(hypot(floatmax(T),T(1)),floatmax(T))
@test isequal(hypot(floatmin(T)*sqrt(eps(T)),T(0)),floatmin(T)*sqrt(eps(T)))
@test isequal(floatmin(T)*hypot(1.368423059742933,1.3510496552495361),hypot(floatmin(T)*1.368423059742933,floatmin(T)*1.3510496552495361))
@test isequal(log(T(1)), T(0))
@test isequal(log(ℯ,T(1)), T(0))
@test log(T(ℯ)) ≈ T(1) atol=eps(T)
@test isequal(log10(T(1)), T(0))
@test isequal(log10(T(10)), T(1))
@test isequal(log1p(T(0)), T(0))
@test log1p(T(ℯ)-1) ≈ T(1) atol=eps(T)
@test isequal(log2(T(1)), T(0))
@test isequal(log2(T(2)), T(1))
@test isequal(sin(T(0)), T(0))
@test isequal(sin(T(pi)/2), T(1))
@test sin(T(pi)) ≈ T(0) atol=eps(T)
@test isequal(sqrt(T(0)), T(0))
@test isequal(sqrt(T(1)), T(1))
@test isequal(sqrt(T(100000000)), T(10000))
@test isequal(tan(T(0)), T(0))
@test tan(T(pi)/4) ≈ T(1) atol=eps(T)
@test isequal(sec(T(pi)), -one(T))
@test isequal(csc(T(pi)/2), one(T))
@test isequal(secd(T(180)), -one(T))
@test isequal(cscd(T(90)), one(T))
@test isequal(sech(log(one(T))), one(T))
@test isequal(csch(zero(T)), T(Inf))
end
@testset "Inverses" begin
@test acos(cos(x)) ≈ x
@test acosh(cosh(x)) ≈ x
@test asin(sin(x)) ≈ x
@test cbrt(x)^3 ≈ x
@test cbrt(x^3) ≈ x
@test asinh(sinh(x)) ≈ x
@test atan(tan(x)) ≈ x
@test atan(x,y) ≈ atan(x/y)
@test atanh(tanh(x)) ≈ x
@test cos(acos(x)) ≈ x
@test cosh(acosh(1+x)) ≈ 1+x
@test exp(log(x)) ≈ x
@test exp10(log10(x)) ≈ x
@test exp2(log2(x)) ≈ x
@test expm1(log1p(x)) ≈ x
@test log(exp(x)) ≈ x
@test log10(exp10(x)) ≈ x
@test log1p(expm1(x)) ≈ x
@test log2(exp2(x)) ≈ x
@test sin(asin(x)) ≈ x
@test sinh(asinh(x)) ≈ x
@test sqrt(x)^2 ≈ x
@test sqrt(x^2) ≈ x
@test tan(atan(x)) ≈ x
@test tanh(atanh(x)) ≈ x
end
@testset "Relations between functions" begin
@test cosh(x) ≈ (exp(x)+exp(-x))/2
@test cosh(x)^2-sinh(x)^2 ≈ 1
@test hypot(x,y) ≈ sqrt(x^2+y^2)
@test sin(x)^2+cos(x)^2 ≈ 1
@test sinh(x) ≈ (exp(x)-exp(-x))/2
@test tan(x) ≈ sin(x)/cos(x)
@test tanh(x) ≈ sinh(x)/cosh(x)
@test sec(x) ≈ inv(cos(x))
@test csc(x) ≈ inv(sin(x))
@test secd(x) ≈ inv(cosd(x))
@test cscd(x) ≈ inv(sind(x))
@test sech(x) ≈ inv(cosh(x))
@test csch(x) ≈ inv(sinh(x))
end
@testset "Edge cases" begin
@test isinf(log(zero(T)))
@test isnan_type(T, log(convert(T,NaN)))
@test_throws DomainError log(-one(T))
@test isinf(log1p(-one(T)))
@test isnan_type(T, log1p(convert(T,NaN)))
@test_throws DomainError log1p(convert(T,-2.0))
@test hypot(T(0), T(0)) === T(0)
@test hypot(T(Inf), T(Inf)) === T(Inf)
@test hypot(T(Inf), T(x)) === T(Inf)
@test hypot(T(Inf), T(NaN)) === T(Inf)
@test isnan_type(T, hypot(T(x), T(NaN)))
end
end
end
@testset "exp function" for T in (Float64, Float32)
@testset "$T accuracy" begin
X = map(T, vcat(-10:0.0002:10, -80:0.001:80, 2.0^-27, 2.0^-28, 2.0^-14, 2.0^-13))
for x in X
y, yb = exp(x), exp(big(x))
@test abs(y-yb) <= 1.0*eps(T(yb))
end
end
@testset "$T edge cases" begin
@test isnan_type(T, exp(T(NaN)))
@test exp(T(-Inf)) === T(0.0)
@test exp(T(Inf)) === T(Inf)
@test exp(T(0.0)) === T(1.0) # exact
@test exp(T(5000.0)) === T(Inf)
@test exp(T(-5000.0)) === T(0.0)
end
end
@testset "exp10 function" begin
@testset "accuracy" begin
X = map(Float64, vcat(-10:0.00021:10, -35:0.0023:100, -300:0.001:300))
for x in X
y, yb = exp10(x), exp10(big(x))
@test abs(y-yb) <= 1.2*eps(Float64(yb))
end
X = map(Float32, vcat(-10:0.00021:10, -35:0.0023:35, -35:0.001:35))
for x in X
y, yb = exp10(x), exp10(big(x))
@test abs(y-yb) <= 1.2*eps(Float32(yb))
end
end
@testset "$T edge cases" for T in (Float64, Float32)
@test isnan_type(T, exp10(T(NaN)))
@test exp10(T(-Inf)) === T(0.0)
@test exp10(T(Inf)) === T(Inf)
@test exp10(T(0.0)) === T(1.0) # exact
@test exp10(T(1.0)) === T(10.0)
@test exp10(T(3.0)) === T(1000.0)
@test exp10(T(5000.0)) === T(Inf)
@test exp10(T(-5000.0)) === T(0.0)
end
end
@testset "test abstractarray trig functions" begin
TAA = rand(2,2)
TAA = (TAA + TAA')/2.
STAA = Symmetric(TAA)
@test Array(atanh.(STAA)) == atanh.(TAA)
@test Array(asinh.(STAA)) == asinh.(TAA)
TAA .+= 1
@test Array(acosh.(STAA)) == acosh.(TAA)
@test Array(acsch.(STAA)) == acsch.(TAA)
@test Array(acoth.(STAA)) == acoth.(TAA)
end
@testset "check exp2(::Integer) matches exp2(::Float)" begin
for ii in -2048:2048
expected = exp2(float(ii))
@test exp2(Int16(ii)) == expected
@test exp2(Int32(ii)) == expected
@test exp2(Int64(ii)) == expected
@test exp2(Int128(ii)) == expected
if ii >= 0
@test exp2(UInt16(ii)) == expected
@test exp2(UInt32(ii)) == expected
@test exp2(UInt64(ii)) == expected
@test exp2(UInt128(ii)) == expected
end
end
end
@testset "deg2rad/rad2deg" begin
@testset "$T" for T in (Int, Float64, BigFloat)
@test deg2rad(T(180)) ≈ 1pi
@test deg2rad.(T[45, 60]) ≈ [pi/T(4), pi/T(3)]
@test rad2deg.([pi/T(4), pi/T(3)]) ≈ [45, 60]
@test rad2deg(T(1)*pi) ≈ 180
@test rad2deg(T(1)) ≈ rad2deg(true)
@test deg2rad(T(1)) ≈ deg2rad(true)
end
@test deg2rad(180 + 60im) ≈ pi + (pi/3)*im
@test rad2deg(pi + (pi/3)*im) ≈ 180 + 60im
end
@testset "degree-based trig functions" begin
@testset "$T" for T = (Float32,Float64,Rational{Int})
fT = typeof(float(one(T)))
fTsc = typeof( (float(one(T)), float(one(T))) )
for x = -400:40:400
@test sind(convert(T,x))::fT ≈ convert(fT,sin(pi/180*x)) atol=eps(deg2rad(convert(fT,x)))
@test cosd(convert(T,x))::fT ≈ convert(fT,cos(pi/180*x)) atol=eps(deg2rad(convert(fT,x)))
s,c = sincosd(convert(T,x))
@test s::fT ≈ convert(fT,sin(pi/180*x)) atol=eps(deg2rad(convert(fT,x)))
@test c::fT ≈ convert(fT,cos(pi/180*x)) atol=eps(deg2rad(convert(fT,x)))
end
@testset "sind" begin
@test sind(convert(T,0.0))::fT === zero(fT)
@test sind(convert(T,180.0))::fT === zero(fT)
@test sind(convert(T,360.0))::fT === zero(fT)
T != Rational{Int} && @test sind(convert(T,-0.0))::fT === -zero(fT)
@test sind(convert(T,-180.0))::fT === -zero(fT)
@test sind(convert(T,-360.0))::fT === -zero(fT)
end
@testset "cosd" begin
@test cosd(convert(T,90))::fT === zero(fT)
@test cosd(convert(T,270))::fT === zero(fT)
@test cosd(convert(T,-90))::fT === zero(fT)
@test cosd(convert(T,-270))::fT === zero(fT)
end
@testset "sincosd" begin
@test sincosd(convert(T,-360))::fTsc === ( -zero(fT), one(fT) )
@test sincosd(convert(T,-270))::fTsc === ( one(fT), zero(fT) )
@test sincosd(convert(T,-180))::fTsc === ( -zero(fT), -one(fT) )
@test sincosd(convert(T, -90))::fTsc === ( -one(fT), zero(fT) )
@test sincosd(convert(T, 0))::fTsc === ( zero(fT), one(fT) )
@test sincosd(convert(T, 90))::fTsc === ( one(fT), zero(fT) )
@test sincosd(convert(T, 180))::fTsc === ( zero(fT), -one(fT) )
@test sincosd(convert(T, 270))::fTsc === ( -one(fT), zero(fT) )
end
@testset "sinpi and cospi" begin
for x = -3:0.3:3
@test sinpi(convert(T,x))::fT ≈ convert(fT,sin(pi*x)) atol=eps(pi*convert(fT,x))
@test cospi(convert(T,x))::fT ≈ convert(fT,cos(pi*x)) atol=eps(pi*convert(fT,x))
end
@test sinpi(convert(T,0.0))::fT === zero(fT)
@test sinpi(convert(T,1.0))::fT === zero(fT)
@test sinpi(convert(T,2.0))::fT === zero(fT)
T != Rational{Int} && @test sinpi(convert(T,-0.0))::fT === -zero(fT)
@test sinpi(convert(T,-1.0))::fT === -zero(fT)
@test sinpi(convert(T,-2.0))::fT === -zero(fT)
@test_throws DomainError sinpi(convert(T,Inf))
@test cospi(convert(T,0.5))::fT === zero(fT)
@test cospi(convert(T,1.5))::fT === zero(fT)
@test cospi(convert(T,-0.5))::fT === zero(fT)
@test cospi(convert(T,-1.5))::fT === zero(fT)
@test_throws DomainError cospi(convert(T,Inf))
end
@testset "Check exact values" begin
@test sind(convert(T,30)) == 0.5
@test cosd(convert(T,60)) == 0.5
@test sind(convert(T,150)) == 0.5
@test sinpi(one(T)/convert(T,6)) == 0.5
@test_throws DomainError sind(convert(T,Inf))
@test_throws DomainError cosd(convert(T,Inf))
T != Float32 && @test cospi(one(T)/convert(T,3)) == 0.5
T == Rational{Int} && @test sinpi(5//6) == 0.5
end
end
end
@testset "Integer args to sinpi/cospi/sinc/cosc" begin
@test sinpi(1) == 0
@test sinpi(-1) == -0
@test cospi(1) == -1
@test cospi(2) == 1
@test sinc(1) == 0
@test sinc(complex(1,0)) == 0
@test sinc(0) == 1
@test sinc(Inf) == 0
@test cosc(1) == -1
@test cosc(0) == 0
@test cosc(complex(1,0)) == -1
@test cosc(Inf) == 0
end
@testset "Irrational args to sinpi/cospi/sinc/cosc" begin
for x in (pi, ℯ, Base.MathConstants.golden)
@test sinpi(x) ≈ Float64(sinpi(big(x)))
@test cospi(x) ≈ Float64(cospi(big(x)))
@test sinc(x) ≈ Float64(sinc(big(x)))
@test cosc(x) ≈ Float64(cosc(big(x)))
@test sinpi(complex(x, x)) ≈ Complex{Float64}(sinpi(complex(big(x), big(x))))
@test cospi(complex(x, x)) ≈ Complex{Float64}(cospi(complex(big(x), big(x))))
@test sinc(complex(x, x)) ≈ Complex{Float64}(sinc(complex(big(x), big(x))))
@test cosc(complex(x, x)) ≈ Complex{Float64}(cosc(complex(big(x), big(x))))
end
end
@testset "trig function type stability" begin
@testset "$T $f" for T = (Float32,Float64,BigFloat), f = (sind,cosd,sinpi,cospi)
@test Base.return_types(f,Tuple{T}) == [T]
end
end
# useful test functions for relative error, which differ from isapprox (≈)
# in that relerrc separately looks at the real and imaginary parts
# Relative error of `z` against the reference `x`; exactly 0.0 when equal
# (avoids 0/0 for a zero reference that matches exactly).
function relerr(z, x)
    return z == x ? 0.0 : abs(z - x) / abs(x)
end
# Componentwise relative error: the worse of the real and imaginary parts.
function relerrc(z, x)
    rerr = relerr(real(z), real(x))
    ierr = relerr(imag(z), imag(x))
    return max(rerr, ierr)
end
# Approximate equality up to 1e-13 componentwise relative error.
≅(a, b) = relerrc(a, b) ≤ 1e-13
@testset "subnormal flags" begin
# Ensure subnormal flags functions don't segfault
@test any(set_zero_subnormals(true) .== [false,true])
@test any(get_zero_subnormals() .== [false,true])
@test set_zero_subnormals(false)
@test !get_zero_subnormals()
end
@testset "evalpoly" begin
@test @evalpoly(2,3,4,5,6) == 3+2*(4+2*(5+2*6)) == @evalpoly(2+0im,3,4,5,6)
a0 = 1
a1 = 2
c = 3
@test @evalpoly(c, a0, a1) == 7
@test @evalpoly(1, 2) == 2
end
@testset "evalpoly real" begin
for x in -1.0:2.0, p1 in -3.0:3.0, p2 in -3.0:3.0, p3 in -3.0:3.0
evpm = @evalpoly(x, p1, p2, p3)
@test evalpoly(x, (p1, p2, p3)) == evpm
@test evalpoly(x, [p1, p2, p3]) == evpm
end
end
@testset "evalpoly complex" begin
for x in -1.0:2.0, y in -1.0:2.0, p1 in -3.0:3.0, p2 in -3.0:3.0, p3 in -3.0:3.0
z = x + im * y
evpm = @evalpoly(z, p1, p2, p3)
@test evalpoly(z, (p1, p2, p3)) == evpm
@test evalpoly(z, [p1, p2, p3]) == evpm
end
@test evalpoly(1+im, (2,)) == 2
@test evalpoly(1+im, [2,]) == 2
end
@testset "cis" begin
for z in (1.234, 1.234 + 5.678im)
@test cis(z) ≈ exp(im*z)
end
let z = [1.234, 5.678]
@test cis.(z) ≈ exp.(im*z)
end
end
@testset "modf" begin
@testset "$elty" for elty in (Float16, Float32, Float64)
@test modf( convert(elty,1.2) )[1] ≈ convert(elty,0.2)
@test modf( convert(elty,1.2) )[2] ≈ convert(elty,1.0)
@test modf( convert(elty,1.0) )[1] ≈ convert(elty,0.0)
@test modf( convert(elty,1.0) )[2] ≈ convert(elty,1.0)
end
end
@testset "frexp" begin
@testset "$elty" for elty in (Float16, Float32, Float64)
@test frexp( convert(elty,0.5) ) == (0.5, 0)
@test frexp( convert(elty,4.0) ) == (0.5, 3)
@test frexp( convert(elty,10.5) ) == (0.65625, 4)
end
end
@testset "log/log1p" begin
# using Tang's algorithm, should be accurate to within 0.56 ulps
X = rand(100)
for x in X
for n = -5:5
xn = ldexp(x,n)
for T in (Float32,Float64)
xt = T(x)
y = log(xt)
yb = log(big(xt))
@test abs(y-yb) <= 0.56*eps(T(yb))
y = log1p(xt)
yb = log1p(big(xt))
@test abs(y-yb) <= 0.56*eps(T(yb))
if n <= 0
y = log1p(-xt)
yb = log1p(big(-xt))
@test abs(y-yb) <= 0.56*eps(T(yb))
end
end
end
end
for n = 0:28
@test log(2,2^n) == n
end
setprecision(10_000) do
@test log(2,big(2)^100) == 100
@test log(2,big(2)^200) == 200
@test log(2,big(2)^300) == 300
@test log(2,big(2)^400) == 400
end
for T in (Float32,Float64)
@test log(zero(T)) == -Inf
@test isnan_type(T, log(T(NaN)))
@test_throws DomainError log(-one(T))
@test log1p(-one(T)) == -Inf
@test isnan_type(T, log1p(T(NaN)))
@test_throws DomainError log1p(-2*one(T))
end
end
@testset "vectorization of 2-arg functions" begin
binary_math_functions = [
copysign, flipsign, log, atan, hypot, max, min,
]
@testset "$f" for f in binary_math_functions
x = y = 2
v = [f(x,y)]
@test f.([x],y) == v
@test f.(x,[y]) == v
@test f.([x],[y]) == v
end
end
@testset "issues #3024, #12822, #24240" begin
p2 = -2
p3 = -3
@test_throws DomainError 2 ^ p2
@test 2 ^ -2 == 0.25 == (2^-1)^2
@test_throws DomainError (-2)^(2.2)
@test_throws DomainError (-2.0)^(2.2)
@test_throws DomainError false ^ p2
@test false ^ -2 == Inf
@test 1 ^ -2 === (-1) ^ -2 == 1 ^ p2 === (-1) ^ p2 === 1
@test (-1) ^ -1 === (-1) ^ -3 == (-1) ^ p3 === -1
@test true ^ -2 == true ^ p2 === true
end
@testset "issue #13748" begin
let A = [1 2; 3 4]; B = [5 6; 7 8]; C = [9 10; 11 12]
@test muladd(A,B,C) == A*B + C
end
end
@testset "issue #19872" begin
f19872a(x) = x ^ 5
f19872b(x) = x ^ (-1024)
@test 0 < f19872b(2.0) < 1e-300
@test issubnormal(2.0 ^ (-1024))
@test issubnormal(f19872b(2.0))
@test !issubnormal(f19872b(0.0))
@test f19872a(2.0) === 32.0
@test !issubnormal(f19872a(2.0))
@test !issubnormal(0.0)
end
# no domain error is thrown for negative values
@test invoke(cbrt, Tuple{AbstractFloat}, -1.0) == -1.0
@testset "promote Float16 irrational #15359" begin
@test typeof(Float16(.5) * pi) == Float16
end
@testset "sincos" begin
@test sincos(1.0) === (sin(1.0), cos(1.0))
@test sincos(1f0) === (sin(1f0), cos(1f0))
@test sincos(Float16(1)) === (sin(Float16(1)), cos(Float16(1)))
@test sincos(1) === (sin(1), cos(1))
@test sincos(big(1)) == (sin(big(1)), cos(big(1)))
@test sincos(big(1.0)) == (sin(big(1.0)), cos(big(1.0)))
@test sincos(NaN) === (NaN, NaN)
@test sincos(NaN32) === (NaN32, NaN32)
end
@testset "test fallback definitions" begin
@test exp10(5) ≈ exp10(5.0)
@test exp10(50//10) ≈ exp10(5.0)
@test log10(exp10(ℯ)) ≈ ℯ
@test log(ℯ) === 1
@test exp2(Float16(2.0)) ≈ exp2(2.0)
@test exp2(Float16(1.0)) === Float16(exp2(1.0))
@test exp10(Float16(1.0)) === Float16(exp10(1.0))
end
# #22742: updated isapprox semantics
@test !isapprox(1.0, 1.0+1e-12, atol=1e-14)
@test isapprox(1.0, 1.0+0.5*sqrt(eps(1.0)))
@test !isapprox(1.0, 1.0+1.5*sqrt(eps(1.0)), atol=sqrt(eps(1.0)))
# test AbstractFloat fallback pr22716
# Minimal AbstractFloat wrapper used to verify that the `^` fallback for
# AbstractFloat exponents (pr #22716) dispatches through the wrapped value.
struct Float22716{T<:AbstractFloat} <: AbstractFloat
    x::T  # wrapped exponent value
end
# `x ^ Float22716(y)` unwraps and delegates to `x ^ y`.
Base.:^(x::Number, y::Float22716) = x^(y.x)
let x = 2.0
    @test exp2(Float22716(x)) === 2^x
    @test exp10(Float22716(x)) === 10^x
end
@testset "asin #23088" begin
for T in (Float32, Float64)
@test asin(zero(T)) === zero(T)
@test asin(-zero(T)) === -zero(T)
@test asin(nextfloat(zero(T))) === nextfloat(zero(T))
@test asin(prevfloat(zero(T))) === prevfloat(zero(T))
@test asin(one(T)) === T(pi)/2
@test asin(-one(T)) === -T(pi)/2
for x in (0.45, 0.6, 0.98)
by = asin(big(T(x)))
@test T(abs(asin(T(x)) - by))/eps(T(abs(by))) <= 1
bym = asin(big(T(-x)))
@test T(abs(asin(T(-x)) - bym))/eps(T(abs(bym))) <= 1
end
@test_throws DomainError asin(-T(Inf))
@test_throws DomainError asin(T(Inf))
@test isnan_type(T, asin(T(NaN)))
end
end
@testset "sin, cos, sincos, tan #23088" begin
for T in (Float32, Float64)
@test sin(zero(T)) === zero(T)
@test sin(-zero(T)) === -zero(T)
@test cos(zero(T)) === T(1.0)
@test cos(-zero(T)) === T(1.0)
@test sin(nextfloat(zero(T))) === nextfloat(zero(T))
@test sin(prevfloat(zero(T))) === prevfloat(zero(T))
@test cos(nextfloat(zero(T))) === T(1.0)
@test cos(prevfloat(zero(T))) === T(1.0)
for x in (0.1, 0.45, 0.6, 0.75, 0.79, 0.98)
for op in (sin, cos, tan)
by = T(op(big(x)))
@test abs(op(T(x)) - by)/eps(by) <= one(T)
bym = T(op(big(-x)))
@test abs(op(T(-x)) - bym)/eps(bym) <= one(T)
end
end
@test_throws DomainError sin(-T(Inf))
@test_throws DomainError sin(T(Inf))
@test_throws DomainError cos(-T(Inf))
@test_throws DomainError cos(T(Inf))
@test_throws DomainError tan(-T(Inf))
@test_throws DomainError tan(T(Inf))
@test sin(T(NaN)) === T(NaN)
@test cos(T(NaN)) === T(NaN)
@test tan(T(NaN)) === T(NaN)
end
end
@testset "rem_pio2 #23088" begin
vals = (2.356194490192345f0, 3.9269908169872414f0, 7.0685834705770345f0,
5.497787143782138f0, 4.216574282663131f8, 4.216574282663131f12)
for (i, x) in enumerate(vals)
for op in (prevfloat, nextfloat)
Ty = Float32(Base.Math.rem_pio2_kernel(op(vals[i]))[2].hi)
By = Float32(rem(big(op(x)), pi/2))
@test Ty ≈ By || Ty ≈ By-Float32(pi)/2
end
end
end
@testset "atan #23383" begin
for T in (Float32, Float64)
@test atan(T(NaN)) === T(NaN)
@test atan(-T(Inf)) === -T(pi)/2
@test atan(T(Inf)) === T(pi)/2
# no reduction needed |x| < 7/16
@test atan(zero(T)) === zero(T)
@test atan(prevfloat(zero(T))) === prevfloat(zero(T))
@test atan(nextfloat(zero(T))) === nextfloat(zero(T))
for x in (T(7/16), (T(7/16)+T(11/16))/2, T(11/16),
(T(11/16)+T(19/16))/2, T(19/16),
(T(19/16)+T(39/16))/2, T(39/16),
(T(39/16)+T(2)^23)/2, T(2)^23)
x = T(7/16)
by = T(atan(big(x)))
@test abs(atan(x) - by)/eps(by) <= one(T)
x = prevfloat(T(7/16))
by = T(atan(big(x)))
@test abs(atan(x) - by)/eps(by) <= one(T)
x = nextfloat(T(7/16))
by = T(atan(big(x)))
@test abs(atan(x) - by)/eps(by) <= one(T)
end
# This case was used to find a bug, but it isn't special in itself
@test atan(1.7581305072934137) ≈ 1.053644580517088
end
end
@testset "atan" begin
for T in (Float32, Float64)
@test isnan_type(T, atan(T(NaN), T(NaN)))
@test isnan_type(T, atan(T(NaN), T(0.1)))
@test isnan_type(T, atan(T(0.1), T(NaN)))
r = T(randn())
absr = abs(r)
# y zero
@test atan(T(r), one(T)) === atan(T(r))
@test atan(zero(T), absr) === zero(T)
@test atan(-zero(T), absr) === -zero(T)
@test atan(zero(T), -absr) === T(pi)
@test atan(-zero(T), -absr) === -T(pi)
# x zero and y not zero
@test atan(one(T), zero(T)) === T(pi)/2
@test atan(-one(T), zero(T)) === -T(pi)/2
# isinf(x) == true && isinf(y) == true
@test atan(T(Inf), T(Inf)) === T(pi)/4 # m == 0 (see atan code)
@test atan(-T(Inf), T(Inf)) === -T(pi)/4 # m == 1
@test atan(T(Inf), -T(Inf)) === 3*T(pi)/4 # m == 2
@test atan(-T(Inf), -T(Inf)) === -3*T(pi)/4 # m == 3
# isinf(x) == true && isinf(y) == false
@test atan(absr, T(Inf)) === zero(T) # m == 0
@test atan(-absr, T(Inf)) === -zero(T) # m == 1
@test atan(absr, -T(Inf)) === T(pi) # m == 2
@test atan(-absr, -T(Inf)) === -T(pi) # m == 3
# isinf(y) == true && isinf(x) == false
@test atan(T(Inf), absr) === T(pi)/2
@test atan(-T(Inf), absr) === -T(pi)/2
@test atan(T(Inf), -absr) === T(pi)/2
@test atan(-T(Inf), -absr) === -T(pi)/2
# |y/x| above high threshold
atanpi = T(1.5707963267948966)
@test atan(T(2.0^61), T(1.0)) === atanpi # m==0
@test atan(-T(2.0^61), T(1.0)) === -atanpi # m==1
@test atan(T(2.0^61), -T(1.0)) === atanpi # m==2
@test atan(-T(2.0^61), -T(1.0)) === -atanpi # m==3
@test atan(-T(Inf), -absr) === -T(pi)/2
# |y|/x between 0 and low threshold
@test atan(T(2.0^-61), -T(1.0)) === T(pi) # m==2
@test atan(-T(2.0^-61), -T(1.0)) === -T(pi) # m==3
# y/x is "safe" ("arbitrary values", just need to hit the branch)
_ATAN_PI_LO(::Type{Float32}) = -8.7422776573f-08
_ATAN_PI_LO(::Type{Float64}) = 1.2246467991473531772E-16
@test atan(T(5.0), T(2.5)) === atan(abs(T(5.0)/T(2.5)))
@test atan(-T(5.0), T(2.5)) === -atan(abs(-T(5.0)/T(2.5)))
@test atan(T(5.0), -T(2.5)) === T(pi)-(atan(abs(T(5.0)/-T(2.5)))-_ATAN_PI_LO(T))
@test atan(-T(5.0), -T(2.5)) === -(T(pi)-atan(abs(-T(5.0)/-T(2.5)))-_ATAN_PI_LO(T))
@test atan(T(1235.2341234), T(2.5)) === atan(abs(T(1235.2341234)/T(2.5)))
@test atan(-T(1235.2341234), T(2.5)) === -atan(abs(-T(1235.2341234)/T(2.5)))
@test atan(T(1235.2341234), -T(2.5)) === T(pi)-(atan(abs(T(1235.2341234)/-T(2.5)))-_ATAN_PI_LO(T))
@test atan(-T(1235.2341234), -T(2.5)) === -(T(pi)-(atan(abs(-T(1235.2341234)/T(2.5)))-_ATAN_PI_LO(T)))
end
end
@testset "atand" begin
for T in (Float32, Float64)
r = T(randn())
absr = abs(r)
# Tests related to the 1-argument version of `atan`.
# ==================================================
@test atand(T(Inf)) === T(90.0)
@test atand(-T(Inf)) === -T(90.0)
@test atand(zero(T)) === T(0.0)
@test atand(one(T)) === T(45.0)
@test atand(-one(T)) === -T(45.0)
# Tests related to the 2-argument version of `atan`.
# ==================================================
# If `x` is one, then `atand(y,x)` must be equal to `atand(y)`.
@test atand(T(r), one(T)) === atand(T(r))
# `y` zero.
@test atand(zero(T), absr) === zero(T)
@test atand(-zero(T), absr) === -zero(T)
@test atand(zero(T), -absr) === T(180.0)
@test atand(-zero(T), -absr) === -T(180.0)
# `x` zero and `y` not zero.
@test atand(one(T), zero(T)) === T(90.0)
@test atand(-one(T), zero(T)) === -T(90.0)
# `x` and `y` equal for each quadrant.
@test atand(+absr, +absr) === T(45.0)
@test atand(-absr, +absr) === -T(45.0)
@test atand(+absr, -absr) === T(135.0)
@test atand(-absr, -absr) === -T(135.0)
end
end
@testset "acos #23283" begin
for T in (Float32, Float64)
@test acos(zero(T)) === T(pi)/2
@test acos(-zero(T)) === T(pi)/2
@test acos(nextfloat(zero(T))) === T(pi)/2
@test acos(prevfloat(zero(T))) === T(pi)/2
@test acos(one(T)) === T(0.0)
@test acos(-one(T)) === T(pi)
for x in (0.45, 0.6, 0.98)
by = acos(big(T(x)))
@test T((acos(T(x)) - by))/eps(abs(T(by))) <= 1
bym = acos(big(T(-x)))
@test T(abs(acos(T(-x)) - bym))/eps(abs(T(bym))) <= 1
end
@test_throws DomainError acos(-T(Inf))
@test_throws DomainError acos(T(Inf))
@test isnan_type(T, acos(T(NaN)))
end
end
# Neighborhood of a float: the previous, current, and next representable
# values, as a 3-tuple.
function pcnfloat(x)
    return (prevfloat(x), x, nextfloat(x))
end
import Base.Math: COSH_SMALL_X, H_SMALL_X, H_MEDIUM_X, H_LARGE_X
@testset "sinh" begin
for T in (Float32, Float64)
@test sinh(zero(T)) === zero(T)
@test sinh(-zero(T)) === -zero(T)
@test sinh(nextfloat(zero(T))) === nextfloat(zero(T))
@test sinh(prevfloat(zero(T))) === prevfloat(zero(T))
@test sinh(T(1000)) === T(Inf)
@test sinh(-T(1000)) === -T(Inf)
@test isnan_type(T, sinh(T(NaN)))
for x in Iterators.flatten(pcnfloat.([H_SMALL_X(T), H_MEDIUM_X(T), H_LARGE_X(T)]))
@test sinh(x) ≈ sinh(big(x)) rtol=eps(T)
@test sinh(-x) ≈ sinh(big(-x)) rtol=eps(T)
end
end
end
@testset "cosh" begin
for T in (Float32, Float64)
@test cosh(zero(T)) === one(T)
@test cosh(-zero(T)) === one(T)
@test cosh(nextfloat(zero(T))) === one(T)
@test cosh(prevfloat(zero(T))) === one(T)
@test cosh(T(1000)) === T(Inf)
@test cosh(-T(1000)) === T(Inf)
@test isnan_type(T, cosh(T(NaN)))
for x in Iterators.flatten(pcnfloat.([COSH_SMALL_X(T), H_MEDIUM_X(T), H_LARGE_X(T)]))
@test cosh(x) ≈ cosh(big(x)) rtol=eps(T)
@test cosh(-x) ≈ cosh(big(-x)) rtol=eps(T)
end
end
end
@testset "tanh" begin
for T in (Float32, Float64)
@test tanh(zero(T)) === zero(T)
@test tanh(-zero(T)) === -zero(T)
@test tanh(nextfloat(zero(T))) === nextfloat(zero(T))
@test tanh(prevfloat(zero(T))) === prevfloat(zero(T))
@test tanh(T(1000)) === one(T)
@test tanh(-T(1000)) === -one(T)
@test isnan_type(T, tanh(T(NaN)))
for x in Iterators.flatten(pcnfloat.([H_SMALL_X(T), T(1.0), H_MEDIUM_X(T)]))
@test tanh(x) ≈ tanh(big(x)) rtol=eps(T)
@test tanh(-x) ≈ tanh(big(-x)) rtol=eps(T)
end
end
end
@testset "asinh" begin
for T in (Float32, Float64)
@test asinh(zero(T)) === zero(T)
@test asinh(-zero(T)) === -zero(T)
@test asinh(nextfloat(zero(T))) === nextfloat(zero(T))
@test asinh(prevfloat(zero(T))) === prevfloat(zero(T))
@test isnan_type(T, asinh(T(NaN)))
for x in Iterators.flatten(pcnfloat.([T(2)^-28,T(2),T(2)^28]))
@test asinh(x) ≈ asinh(big(x)) rtol=eps(T)
@test asinh(-x) ≈ asinh(big(-x)) rtol=eps(T)
end
end
end
@testset "acosh" begin
for T in (Float32, Float64)
@test_throws DomainError acosh(T(0.1))
@test acosh(one(T)) === zero(T)
@test isnan_type(T, acosh(T(NaN)))
for x in Iterators.flatten(pcnfloat.([nextfloat(T(1.0)), T(2), T(2)^28]))
@test acosh(x) ≈ acosh(big(x)) rtol=eps(T)
end
end
end
@testset "atanh" begin
for T in (Float32, Float64)
@test_throws DomainError atanh(T(1.1))
@test atanh(zero(T)) === zero(T)
@test atanh(-zero(T)) === -zero(T)
@test atanh(one(T)) === T(Inf)
@test atanh(-one(T)) === -T(Inf)
@test atanh(nextfloat(zero(T))) === nextfloat(zero(T))
@test atanh(prevfloat(zero(T))) === prevfloat(zero(T))
@test isnan_type(T, atanh(T(NaN)))
for x in Iterators.flatten(pcnfloat.([T(2.0)^-28, T(0.5)]))
@test atanh(x) ≈ atanh(big(x)) rtol=eps(T)
@test atanh(-x) ≈ atanh(big(-x)) rtol=eps(T)
end
end
end
# Define simple wrapper of a Float type:
# Minimal `Real` subtype wrapping a Float64, used to exercise math functions
# (exp/sin/cos on Complex, sincos, etc.) on a non-standard real type (#25292).
struct FloatWrapper <: Real
    x::Float64  # the wrapped value
end
import Base: +, -, *, /, ^, sin, cos, exp, sinh, cosh, convert, isfinite, float, promote_rule
# Binary arithmetic on FloatWrapper: unwrap both operands, apply the Float64
# operation, and re-wrap the result.
for op in (:+, :-, :*, :/, :^)
    @eval $op(x::FloatWrapper, y::FloatWrapper) = FloatWrapper($op(x.x, y.x))
end
# Unary math functions (and unary minus): unwrap, apply, re-wrap.
for op in (:sin, :cos, :exp, :sinh, :cosh, :-)
    @eval $op(x::FloatWrapper) = FloatWrapper($op(x.x))
end
# Predicates return a plain Bool, not a wrapped value.
for op in (:isfinite,)
    @eval $op(x::FloatWrapper) = $op(x.x)
end
# Allow mixing FloatWrapper with Int in arithmetic via conversion/promotion.
convert(::Type{FloatWrapper}, x::Int) = FloatWrapper(float(x))
promote_rule(::Type{FloatWrapper}, ::Type{Int}) = FloatWrapper
# A FloatWrapper already behaves as a float, so `float` is the identity.
float(x::FloatWrapper) = x
@testset "exp(Complex(a, b)) for a and b of non-standard real type #25292" begin
x = FloatWrapper(3.1)
y = FloatWrapper(4.1)
@test sincos(x) == (sin(x), cos(x))
z = Complex(x, y)
@test isa(exp(z), Complex)
@test isa(sin(z), Complex)
@test isa(cos(z), Complex)
end
@testset "cbrt" begin
for T in (Float32, Float64)
@test cbrt(zero(T)) === zero(T)
@test cbrt(-zero(T)) === -zero(T)
@test cbrt(one(T)) === one(T)
@test cbrt(-one(T)) === -one(T)
@test cbrt(T(Inf)) === T(Inf)
@test cbrt(-T(Inf)) === -T(Inf)
@test isnan_type(T, cbrt(T(NaN)))
for x in (pcnfloat(nextfloat(nextfloat(zero(T))))...,
pcnfloat(prevfloat(prevfloat(zero(T))))...,
0.45, 0.6, 0.98,
map(x->x^3, 1.0:1.0:1024.0)...,
nextfloat(-T(Inf)), prevfloat(T(Inf)))
by = cbrt(big(T(x)))
@test cbrt(T(x)) ≈ by rtol=eps(T)
bym = cbrt(big(T(-x)))
@test cbrt(T(-x)) ≈ bym rtol=eps(T)
end
end
end
# Load the Furlongs test-helper type (a unitful quantity) unless a previous
# test file already defined it in Main.
isdefined(Main, :Furlongs) || @eval Main include("testhelpers/Furlongs.jl")
using .Main.Furlongs
# hypot must preserve units and follow the IEEE special-value rules
# (Inf dominates NaN) for a non-standard numeric type.
@test hypot(Furlong(0), Furlong(0)) == Furlong(0.0)
@test hypot(Furlong(3), Furlong(4)) == Furlong(5.0)
@test hypot(Furlong(NaN), Furlong(Inf)) == Furlong(Inf)
@test hypot(Furlong(Inf), Furlong(NaN)) == Furlong(Inf)
@test hypot(Furlong(Inf), Furlong(Inf)) == Furlong(Inf)
| [
27,
7856,
261,
480,
29,
35,
713,
2879,
14,
73,
43640,
198,
2,
770,
2393,
318,
257,
636,
286,
22300,
13,
13789,
318,
17168,
25,
3740,
1378,
73,
377,
498,
648,
13,
2398,
14,
43085,
198,
198,
3500,
14534,
198,
3500,
44800,
2348,
2923... | 1.758359 | 22,252 |
@testset "zygote_adjoints" begin
    rng = MersenneTwister(123456)
    x = rand(rng, 5)
    y = rand(rng, 5)
    r = rand(rng, 5)
    Q = Matrix(Cholesky(rand(rng, 5, 5), 'U', 0))
    @assert isposdef(Q)

    # Each case pairs the arguments to differentiate with a closure that
    # evaluates the corresponding (pre-)metric on those arguments.
    cases = [
        ([x, y], xy -> evaluate(Euclidean(), xy[1], xy[2])),
        ([x, y], xy -> evaluate(SqEuclidean(), xy[1], xy[2])),
        ([x, y], xy -> evaluate(KernelFunctions.DotProduct(), xy[1], xy[2])),
        ([x, y], xy -> evaluate(KernelFunctions.Delta(), xy[1], xy[2])),
        ([x, y], xy -> evaluate(KernelFunctions.Sinus(r), xy[1], xy[2])),
        ([Q, x, y], xy -> evaluate(SqMahalanobis(xy[1]), xy[2], xy[3])),
    ]

    # The Zygote adjoint of each metric must agree with finite differences.
    for (args, metric) in cases
        gzygote = gradient(metric, :Zygote, args)
        gfinite = gradient(metric, :FiniteDiff, args)
        @test all(gzygote .≈ gfinite)
    end
end
| [
31,
9288,
2617,
366,
7357,
70,
1258,
62,
41255,
1563,
82,
1,
2221,
198,
220,
220,
220,
374,
782,
796,
337,
364,
29727,
5080,
1694,
7,
10163,
29228,
8,
198,
220,
220,
220,
2124,
796,
43720,
7,
81,
782,
11,
642,
8,
198,
220,
220,
... | 1.894336 | 918 |
<reponame>mdneuzerling/advent_of_code
module TestDay05

import AOC2021.Day05.part1, AOC2021.Day05.part2
using Test

# Worked example from the Advent of Code day 5 puzzle description.
test_input_raw = """
0,9 -> 5,9
8,0 -> 0,8
9,4 -> 3,4
2,2 -> 2,1
7,0 -> 7,4
6,4 -> 2,0
0,9 -> 2,9
3,4 -> 1,4
0,0 -> 8,8
5,5 -> 8,2"""

# The solvers expect a Vector{String}, one line per segment.
test_input = map(string, split(test_input_raw, "\n"))

@testset "Day 05" begin
    @testset "part 1" begin
        @test part1(test_input) == 5
    end
    @testset "part 2" begin
        @test part2(test_input) == 12
    end
end

end # module
| [
27,
7856,
261,
480,
29,
9132,
710,
84,
9107,
1359,
14,
324,
1151,
62,
1659,
62,
8189,
198,
21412,
6208,
12393,
2713,
198,
198,
11748,
317,
4503,
1238,
2481,
13,
12393,
2713,
13,
3911,
16,
11,
317,
4503,
1238,
2481,
13,
12393,
2713,
... | 1.972549 | 255 |
<gh_stars>0
@doc raw"""
    BrownianMotionTorus(n::Int)

Returns a hidden state model corresponding to a Brownian motion on an `n`-dimensional torus, with initial condition drawn uniformly at random.
"""
struct BrownianMotionTorus <: HiddenStateModel{Vector{Float64}, ContinuousTime}
    n::Int # dimension of the torus (number of angular coordinates)
end
#####################
### BASIC METHODS ###
#####################

# Each coordinate starts uniformly distributed on the angle range [0, 2π).
initial_condition(::BrownianMotionTorus) = Uniform(0,2π)

# Dimension of the state vector (the `n` of the n-torus).
state_dim(model::BrownianMotionTorus) = model.n

"""
    noise_dim(model)

Returns the dimension of the Brownian motion ``W_t`` in the diffusion model ``dX_t = f(X_t)dt + g(X_t)dW_t``.
"""
noise_dim(model::BrownianMotionTorus) = model.n
# Draw an initial state: one independent sample of the initial-condition
# distribution per torus coordinate.
function initialize(model::BrownianMotionTorus)
    return rand(initial_condition(model), state_dim(model))
end
# Verbose multi-line REPL display. The summary text is one multi-line string
# literal, so its embedded newlines/indentation are part of the output.
function Base.show(io::IO, ::MIME"text/plain", model::BrownianMotionTorus)
    print(io, "Brownian motion on the ", state_dim(model), "-torus for the hidden state
    type of hidden state: ", state_dim(model),"-dimensional vector
    number of independent Brownian motions: ", noise_dim(model),"
    initial condition: uniform")
end
# Compact one-line display used in containers and plain `show`.
function Base.show(io::IO, model::BrownianMotionTorus)
    print(io, "Brownian motion on the $(state_dim(model))-torus")
end
######################
### TIME EVOLUTION ###
######################

# One Euler–Maruyama step on the torus: add Gaussian noise with standard
# deviation sqrt(dt) and wrap every coordinate back into [0, 2π).
function (model::BrownianMotionTorus)(x::AbstractVector{T}, dt) where T
    noise = sqrt(dt) .* randn(T, noise_dim(model))
    return mod2pi.(x .+ noise)
end
# Ensemble version: each column of `x` is one particle; a fresh output matrix
# is filled with the stepped-and-wrapped coordinates.
function (model::BrownianMotionTorus)(x::AbstractMatrix, dt)
    ncols = size(x, 2)
    T = eltype(x)
    out = zeros(T, state_dim(model), ncols)
    rootdt = sqrt(dt)
    # Column-outer, row-inner order matches a per-particle update.
    @inbounds for j in 1:ncols, i in 1:state_dim(model)
        out[i, j] += mod2pi(x[i, j] + rootdt * randn(T))
    end
    return out
end
"""
    propagate!(x::AbstractVector, model::BrownianMotionTorus, dt)

Advance the state `x` in place by one step of length `dt`: add Gaussian noise
with standard deviation `sqrt(dt)` to each coordinate and wrap the result back
onto the torus with `mod2pi`. Returns `x`.
"""
function propagate!(x::AbstractVector, model::BrownianMotionTorus, dt)
    # (The unused `N = size(x, 2)` of the original — always 1 for a Vector —
    # has been removed.)
    sqr = sqrt(dt)
    @inbounds for a in 1:state_dim(model)
        x[a] += sqr * randn(eltype(x))
        x[a] = mod2pi(x[a])
    end
    return x
end
# In-place ensemble step: each column of `x` is one particle, updated with
# independent Gaussian noise and wrapped back onto the torus.
function propagate!(x::AbstractMatrix, model::BrownianMotionTorus, dt)
    N = size(x, 2)
    rootdt = sqrt(dt)
    @inbounds for col in 1:N, row in 1:state_dim(model)
        x[row, col] = mod2pi(x[row, col] + rootdt * randn(eltype(x)))
    end
    return x
end
################################
### CONVENIENCE CONSTRUCTORS ###
################################

"""
    BrownianMotionCircle()

Brownian motion on the 1-torus (a circle).
"""
# NOTE: the original `const BrownianMotionCircle() = ...` applied `const` to a
# short-form method definition, which is a syntax error (`const` requires a
# variable assignment); methods are constant bindings already.
BrownianMotionCircle() = BrownianMotionTorus(1)
| [
27,
456,
62,
30783,
29,
15,
198,
31,
15390,
8246,
37811,
198,
220,
220,
220,
4373,
666,
45740,
51,
15125,
7,
77,
3712,
5317,
8,
198,
198,
35561,
257,
7104,
1181,
2746,
11188,
284,
257,
4373,
666,
6268,
319,
281,
4600,
77,
63,
12,
... | 1.791209 | 1,911 |
<gh_stars>1-10
# Helper predicates on resolution advisories (RAs), encoded as Ints:
# positive even codes are "up"-sense advisories, positive odd codes are
# "down"-sense, and two advisories share a sense when their codes have the
# same parity. The originals used elementwise `.&` on scalar Bools; plain
# short-circuit `&&` is the idiomatic (and equivalent) scalar form.
sameSense(pra::Int, ra::Int) = mod(pra, 2) == mod(ra, 2)
downSense(ra::Int) = ra > 0 && isodd(ra)
upSense(ra::Int) = ra > 0 && iseven(ra)
# Reward function for VerticalCAS MDP
#
# State s = (h, vown, vint, pra, resp):
#   h    - relative intruder altitude; vown/vint - own/intruder vertical rates;
#   pra  - previously issued resolution advisory (RA);
#   resp - pilot response state (NOTE(review): extracted but never used below).
# `ra` is the candidate advisory being scored. All rewards are negative costs;
# the specific magnitudes are tuned constants from the VerticalCAS design.
function POMDPs.reward(mdp::VerticalCAS_MDP, s::stateType, ra::actType)
    h = s[1]; vown = s[2]; vint = s[3]; pra = s[4]; resp = s[5];
    tau = mdp.currentTau
    r = 0.0
    sep = abs(h)               # current vertical separation
    closure = abs(vint-vown)   # closure rate between the two aircraft
    # An advisory "crosses" when it commands motion toward the intruder's side.
    crossing = ((h<0) .& downSense(ra)) .| ((h>0) .& upSense(ra))
    deltaVown = 0.0
    corrective = false
    preventitive = false
    weakening = false
    strengthening = false
    reversal = false
    if ra>COC
        # Classify the advisory relative to the previous one:
        # reversal = sense flip (parity change), otherwise weaker/stronger.
        if pra>COC
            reversal = mod(pra,2)!=mod(ra,2)
        end
        if !reversal
            weakening = pra>ra
            strengthening = pra<ra
        end
        # Corrective: the ownship's current rate violates the RA's velocity
        # band and must change; preventive: already compliant.
        vLow, vHigh = mdp.velRanges[ra]
        corrective = (vown>vLow) .& (vown < vHigh)
        preventitive = !corrective
        if corrective
            # Magnitude of vertical-rate change the advisory demands.
            if downSense(ra)
                deltaVown = abs(vLow-vown)
            else
                deltaVown = abs(vHigh-vown)
            end
        end
    end
    # "Low" advisories: do-not-climb / do-not-descend.
    lolo = (ra==DNC) .| (ra==DND)
    # Near mid-air collision: closer than 175 ft vertically at tau = 0.
    if (sep<=175) .& (tau==0)
        r-=1.0
    end
    # Advisory not reachable from the previous one (ra+1 indexes the
    # allowed-transition table, which presumably is 1-based over RA codes —
    # TODO confirm).
    if mdp.allowedTrans[pra][ra+1]==0
        r-=1.0
    end
    if crossing
        # Crossing advisories are heavily penalized unless corrective,
        # and mildly discouraged when separation is still large.
        if preventitive
            r-=1.0
        end
        if sep>500
            r-=0.01
        end
    end
    if corrective
        r-=1e-5
        # Discourage corrective alerts when the encounter is not pressing
        # (large separation, small closure rate; rates in ft/s = fpm/60).
        if (sep>650) .& (closure<2000.0/60.0)
            r-= 0.1
        end
        if (sep>1000) .& (closure<4000.0/60.0)
            r-=0.03
        end
    elseif preventitive
        if (sep>650) .& (closure<2000.0/60.0)
            r-=0.01
        end
    end
    # Small costs for changing the advisory, ordered by operational impact.
    if reversal
        r-= 8e-3
    end
    if strengthening
        r-=5e-3
    end
    if weakening
        r-=1e-3
    end
    if lolo
        r-=1e-4
        if closure > 3000.0/60.0
            r-=5e-4
        end
    elseif (ra!=COC) .& (closure > 3000.0/60.0)
        r-=1.5e-3
    end
    if closure < 3000.0/60.0
        r-=2.3e-3
    end
    if ra==COC
        # Tiny bonus keeps "clear of conflict" preferred when all else is equal.
        r+=1e-9
    else
        # Penalize the demanded maneuver magnitude and high-closure alerts.
        r-=3e-5*deltaVown
        if closure > 3000.0/60.0
            r-=1.5e-3
        end
    end
    return r
end
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
5053,
525,
5499,
198,
31642,
41166,
7,
79,
430,
3712,
5317,
11,
2179,
3712,
5317,
8,
796,
953,
7,
79,
430,
11,
17,
8,
855,
4666,
7,
430,
11,
17,
8,
198,
2902,
41166,
7,
430,
3712,... | 1.688066 | 1,324 |
name = "LLVM"

# Version of the LLVM distribution to build, and of the libLLVM it produces.
llvm_full_version = v"11.0.1+3"
libllvm_version = v"11.0.1+3"

# Include common LLVM stuff (configure_extraction and shared build recipes).
include("../common.jl")

# configure_extraction supplies the standard build_tarballs argument tuple;
# the audit is skipped for this extraction-only build.
build_tarballs(ARGS, configure_extraction(ARGS, llvm_full_version, name, libllvm_version; experimental_platforms=true, assert=true)...; skip_audit=true, julia_compat="1.6")
| [
3672,
796,
366,
3069,
15996,
1,
198,
297,
14761,
62,
12853,
62,
9641,
796,
410,
1,
1157,
13,
15,
13,
16,
10,
18,
1,
198,
8019,
297,
14761,
62,
9641,
796,
410,
1,
1157,
13,
15,
13,
16,
10,
18,
1,
198,
198,
2,
40348,
2219,
271... | 2.525 | 120 |
<reponame>tpprescott/mf-abc
include("MultiFidelityABC.jl")
mkpath("figures")

using StatsPlots, Random

# --- Repressilator benchmark: load the precomputed cloud and reproduce the
# paper's figures and tables.
println("#### Repressilator")
println("# Loading data")
bm = MakeBenchmarkCloud("repressilator/output")
epsilons = (50.0,50.0) # ABC acceptance thresholds
sample_size = 10^4

println("# Fig 1")
fig1a = view_distances(bm[1:sample_size], epsilons)
fig1b = view_distances(bm[1:sample_size], epsilons, 2, L"n")
savefig(plot(fig1a, fig1b, layout=2, size=(900,360)), "figures/fig1.pdf")

println("# Fig 2 and Table 1")
fig2a = compare_efficiencies(bm, sample_size, epsilons, output="theory")
fig2b, table1, latex_table1 = compare_efficiencies(bm, sample_size, epsilons, output="plot")
savefig(plot(fig2a, fig2b, layout=2, size=(1100,440)), "figures/fig2.pdf")

println("# Table 2 and 3")
eta_tab, phi_tab = get_eta(bm, epsilons, Repressilator.F)
Random.seed!(123) # fixed seed so the variance table is reproducible
var_tab = variance_table(bm, 10^3, epsilons, eta_tab, Repressilator.F, 30.0)
Random.seed!() # reseed from entropy for the rest of the script

println("# Supplementary")
# One tau-leap trajectory and its exact (Gillespie) completion.
t,x,p = simulate(Repressilator.tlm)
tt,xx = complete(Repressilator.tlm, p)
# Overlay the j-th state component of each (times, states, label) solution
# on a single figure; extra keyword arguments are forwarded to `plot`.
function show_plots(solutions::NTuple{N,Tuple{Times,States,String}}, j::Integer; kwargs...) where N
    fig = plot(; kwargs...)
    for (times, states, lbl) in solutions
        plot!(times, states[j, :]; label=lbl)
    end
    return fig
end
titles = ["mRNA1", "mRNA2", "mRNA3", "P1", "P2", "P3"]
# One subplot per chemical species, overlaying tau-leap and Gillespie paths.
figs = [show_plots(((t,x,"Tau-leap"),(tt,xx,"Gillespie")),j; title=ttl, legend=:none) for (j, ttl) in enumerate(titles)]
figs[1] = plot!(figs[1]; legend=:topleft) # show a single legend, on panel 1
repressilator_eg = plot(figs..., layout=6)
savefig(repressilator_eg, "./figures/SFig1.pdf")

# --- Viral model benchmark.
println("#### Viral")
println("# Load data")
bm = MakeBenchmarkCloud("viral/output")
mf_smallBI_location = "./viral/output/mf_smallBI/"
mf_largeBI_location = "./viral/output/mf_largeBI/"
epsilons = (0.25,0.25)
eta_0 = 0.01            # lower bound used for continuation probabilities
smallBI_size = 10^3     # burn-in length for the "small burn-in" clouds
largeBI_size = length(bm)
# Split a cloud at index `s`: stage "bm" returns the burn-in prefix,
# stage "inc" the remainder; any other stage is an error.
function divide_cloud(c::MFABCCloud, s::Integer; stage::String)
    if stage == "bm"
        return c[1:s]
    elseif stage == "inc"
        return c[s+1:end]
    else
        error("What stage? bm or inc")
    end
end
bm_set = Array{MFABCCloud,1}()
mf_set = Array{MFABCCloud,1}()
inc_smallBI_set = Array{MFABCCloud,1}()
inc_largeBI_set = Array{MFABCCloud,1}()

# Split each small-burn-in cloud into its burn-in and post-adaptation parts.
for cloud_location in mf_smallBI_location.*readdir(mf_smallBI_location)
    c = MakeMFABCCloud(cloud_location)
    push!(mf_set, c)
    push!(bm_set, divide_cloud(c, smallBI_size, stage="bm"))
    push!(inc_smallBI_set, divide_cloud(c, smallBI_size, stage="inc"))
end
# For the large-burn-in clouds only the post-burn-in portion is used.
for cloud_location in mf_largeBI_location.*readdir(mf_largeBI_location)
    c = MakeMFABCCloud(cloud_location)
    push!(inc_largeBI_set, divide_cloud(c, largeBI_size, stage="inc"))
end

println("# Fig 3")
savefig(view_distances(bm[1:10000], epsilons, epsilons.*2), "figures/fig3.pdf")

println("# Fig 4")
fig4 = plot_eta_estimates(bm, epsilons, (bm_set,"After burn-in"), (mf_set,"After adaptation"); method="mf", lower_eta=eta_0)
plot!(xlim=(0,0.4),ylim=(0,0.2))
savefig(fig4, "figures/fig4.pdf")

println("# Fig 5")
savefig(plot_apost_efficiencies((inc_largeBI_set,"After large burn-in"), (inc_smallBI_set, "After small burn-in"), (bm_set, "During burn-in")), "figures/fig5.pdf")

println("# Supplementary")
# NOTE(review): this supplementary figure simulates Repressilator.tlm although
# we are in the viral section and the labels below name viral species —
# confirm the model reference is intentional.
t,x,p = simulate(Repressilator.tlm)
tt,xx = complete(Repressilator.tlm, p)
f = [plot(t,x',label=["template" "genome" "struct" "virus"]), plot(tt,xx',legend=:none)]
savefig(plot(f..., layout=2, size=(1000,400)), "./figures/SFig2.pdf")
| [
27,
7856,
261,
480,
29,
83,
381,
411,
14612,
14,
76,
69,
12,
39305,
198,
17256,
7203,
29800,
37,
23091,
24694,
13,
20362,
4943,
198,
28015,
6978,
7203,
5647,
942,
4943,
198,
198,
3500,
20595,
3646,
1747,
11,
14534,
198,
198,
35235,
... | 2.321282 | 1,466 |
<gh_stars>10-100
# Spatial lattice of sites with typed states and explicit neighbor lists.
# Type parameters: D = spatial dimension, T = coordinate element type,
# M = neighborhood topology type, U = element type of the species mapping.
struct Lattice{D,T,M,U}
    site::Vector{Site{D,T}}        # sites, sorted by ID
    coord_order::Vector{Site{D,T}} # same sites, sorted by coordinates
    neighbors::Vector{Vector{Int}} # neighbors[i]: sorted IDs adjacent to site i
    types::Vector{U}               # (label => internal code) pairs per species
end
# Build a Lattice from raw coordinates (matrix columns, or a vector of
# NTuples) and a parallel vector of site types.
function Lattice(coord, types::Vector;
        nbhood = VonNeumann(),    # topology; fixes neighbor-list capacity
        type_list = unique(types) # closed species set; order fixes label codes
    )
    dimension = coord isa Matrix ? size(coord, 1) : length(coord[1])
    unique_types = type_list
    # NOTE(review): number_types is computed but unused in this constructor.
    number_types = length(unique_types)
    number_neighbors = capacity(nbhood, dimension)

    # Internal state codes start at 2; code 1 appears reserved (see
    # __simple_copy, which filters out code-1 sites) — TODO confirm.
    labels = OrderedDict(unique_types[i] => i+1 for i in eachindex(unique_types))

    site = build_sites(coord, types, labels)
    site_by_coord = sort(site, by = coordinates)
    neighbors = [sizehint!(Int[], number_neighbors) for i in eachindex(site)]
    mapping = [(label => l) for (label, l) in labels]

    T = coord isa Matrix ? eltype(coord) : eltype(coord[1])
    U = eltype(mapping)
    return Lattice{dimension,T,typeof(nbhood),U}(site, site_by_coord, neighbors, mapping)
end
# Construct the Site vector: ID = input order, state code looked up from the
# labels dictionary, coordinates taken from matrix columns...
function build_sites(coord::Matrix, types, labels)
    return [Site(k, State(labels[types[k]]), tuple(coord[:,k]...)) for k in eachindex(types)]
end

# ...or directly from a vector of coordinate tuples.
function build_sites(coord::Vector{NTuple{N,T}}, types, labels) where {N,T}
    return [Site(k, State(labels[types[k]]), coord[k]) for k in eachindex(types)]
end
##### accessors

dimension(::Lattice{D}) where D = D            # spatial dimension
topology(::Lattice{D,T,M}) where {D,T,M} = M() # fresh instance of the topology type
number_types(x::Lattice) = length(x.types)     # number of registered species
number_sites(x::Lattice) = length(x.site)      # total number of tracked sites
##### IO

# One-line description used by both show methods below.
function Base.summary(io::IO, ::Lattice{D,T,M}) where {D,T,M}
    print(io, "$(D)-D Lattice with $(M) neighborhoods")
end

function Base.show(io::IO, lattice::Lattice)
    Base.summary(io, lattice)
    print(io, "\n species: ")
    show(io, lattice.types)
end

# Multi-line REPL display.
function Base.show(io::IO, m::MIME"text/plain", lattice::Lattice)
    Base.summary(io, lattice)
    print(io, "\n", "species: ")
    show(io, m, lattice.types)
end

##### other Base overloads

# Deep copy: sites and neighbor lists must not alias the original.
Base.copy(lattice::Lattice) = deepcopy(lattice)

# Compatibility shim: SamplePath queries size(), so forward it to the
# species mapping (acknowledged workaround, not a meaningful lattice size).
Base.size(lattice::Lattice) = size(lattice.types)
# Compact snapshot of the lattice: coordinates and type codes of every site
# whose state code is not 1 (code 1 presumably marks the background/empty
# state — TODO confirm).
function __simple_copy(lattice)
    coord = coordinates.(lattice.site)
    tcode = get_ptype.(lattice.site)
    idx = findall(!isequal(1), tcode)
    return coord[idx], tcode[idx]
end
##### query API

# Whether a site exists at `coord`; relies on coord_order staying sorted by
# coordinates so binary search applies.
function istracked(lattice::Lattice, coord)
    idx = searchsorted(lattice.coord_order, coord, by = coordinates, lt = <)
    return !isempty(idx)
end

# get the site living at the given coordinates
# Assumes the site exists (errors on an empty search) — call istracked first.
function get_site(lattice::Lattice, coord)
    idx = searchsorted(lattice.coord_order, coord, by = coordinates, lt = <)
    lattice.coord_order[idx[1]]
end
# get the site with the given id
# Site IDs coincide with positions in `lattice.site` (kept sorted by ID), so
# lookup is direct indexing; out-of-range ids raise a BoundsError.
function get_site(lattice::Lattice, id::Int)
    lattice.site[id]
end
# IDs of the sites adjacent to `x`.
neighborhood(lattice::Lattice, x::Site) = lattice.neighbors[label(x)]

##### update API

# Create (but do not register) a site at `coord` with the next available ID
# and a default state.
function spawn_new_site(lattice::Lattice, coord)
    next_id = number_sites(lattice) + 1
    return Site(next_id, State(), coord)
end
# this function assumes new_site is not tracked!
# Registers the site in all three parallel structures: neighbor lists,
# the ID-sorted site list, and the coordinate-sorted list.
function add_site!(lattice::Lattice, new_site::Site)
    # add neighborhood for site
    nbmax = capacity(topology(lattice), dimension(lattice))
    push!(lattice.neighbors, sizehint!(Int[], nbmax))

    # new sites are always added to the end of the list sorted by IDs
    push!(lattice.site, new_site)

    # add site to list sorted by coord (searchsortedfirst returns an Int;
    # idx[1] is just that Int, since scalars are 1-indexable)
    idx = searchsortedfirst(lattice.coord_order, new_site, by = coordinates)
    insert!(lattice.coord_order, idx[1], new_site)

    new_site
end
# Register `y` as a neighbor of `x` (one direction only; callers add both
# directions). Throws if `y` is already present or the neighborhood is at the
# topology's capacity for dimension D.
function add_neighbor!(lattice::Lattice{D}, x::Site{D}, y::Site{D}) where D
    nb = neighborhood(lattice, x)
    nbtype = topology(lattice)
    neighbor_count = length(nb)
    nbhood_capacity = capacity(nbtype, D)

    if label(y) in nb
        error("$(y) is already a neighbor of $(x)")
    end

    if neighbor_count ≥ nbhood_capacity
        msg = """
        Neighborhood of $(x) at capacity ($(nbhood_capacity)).
        Failed to add $(y) as a neighbor.
        """
        # Emit a detailed diagnostic before failing; visible with debug logging.
        nb_y = neighborhood(lattice, y)
        @debug """
        Neighbor data:
        x: $(label(x)) / $(get_ptype(x))|$(get_neighbor_class(x)) @ $(x)
        y: $(label(y)) / $(get_ptype(y))|$(get_neighbor_class(y)) @ $(y)
        Neighborhood:
        x: $(nb)
        y: $(nb_y)
        Sites:
        x: $(lattice.site[nb])
        y: $(lattice.site[nb_y])
        """
        throw(ErrorException(msg))
    end

    # store neighbors in sorted fashion
    i = label(y)
    idx = searchsortedfirst(nb, i)
    insert!(nb, idx, i)
end
# Remove `y` from `x`'s neighbor list (one direction only; callers remove
# both directions). Throws if the two sites are not currently adjacent.
function rmv_neighbor!(lattice::Lattice, x::Site, y::Site)
    # NOTE(review): `i` is computed but unused below.
    i = label(x)
    j = label(y)
    nb = neighborhood(lattice, x)
    idx = findfirst(isequal(j), nb)

    if idx isa Nothing
        msg = """
        $(y) is not adjacent to $(x) under $(topology(lattice)) structure.
        Failed to remove neighbor.
        """
        throw(ErrorException(msg))
    end

    # delete item, which preserves ordering
    deleteat!(nb, idx)
end
##### recipes

# Plots.jl recipe: scatter the sites of a Von Neumann lattice as squares,
# one series (and legend entry) per species.
@recipe function f(lattice::Lattice{D,T,VonNeumann}) where {D,T}
    site = lattice.site
    types = lattice.types
    seriestype := :scatter
    aspect_ratio --> 1
    markerstrokewidth --> 0
    markershape --> :square
    for t in types
        # Sites whose internal code matches this species' code.
        myseries = filter(x -> get_ptype(x) == last(t), site)
        @series begin
            label --> first(t)
            [tuple(s...) for s in myseries]
        end
    end
end

# Plots.jl recipe for 2-D hexagonal lattices: skew the integer lattice
# coordinates onto 60°-separated hexagonal axes and draw hexagon markers.
@recipe function f(lattice::Lattice{2,T,Hexagonal}) where {T}
    site = lattice.site
    types = lattice.types
    seriestype := :scatter
    aspect_ratio --> 1
    markerstrokewidth --> 0
    markershape --> :hexagon
    # Components of the second hexagonal basis vector (cos 60°, sin 60°).
    A = cos(pi / 3)
    B = sin(pi / 3)
    for t in types
        s = filter(x -> get_ptype(x) == last(t), site)
        @series begin
            label --> first(t)
            new_coords = Vector{NTuple{2,Float64}}(undef, length(s))
            for i in eachindex(s)
                p = s[i]
                x = p[1] + A * p[2]
                y = B * p[2]
                new_coords[i] = (x, y)
            end
            new_coords
        end
    end
end
##### building neighborhoods

# Von Neumann adjacency: sort the sites along each axis in turn and link
# consecutive sites at lattice distance 1 — one sweep per dimension.
function build_neighborhoods!(nbtype::VonNeumann, lattice::Lattice{1})
    sites = lattice.coord_order
    sort_by_x_1D!(sites)
    sweep_neighbors!(nbtype, lattice, sites)
    return nothing
end

function build_neighborhoods!(nbtype::VonNeumann, lattice::Lattice{2})
    sites = lattice.coord_order
    sort_by_x_2D!(sites)
    sweep_neighbors!(nbtype, lattice, sites)
    sort_by_y_2D!(sites)
    sweep_neighbors!(nbtype, lattice, sites)
    return nothing
end

function build_neighborhoods!(nbtype::VonNeumann, lattice::Lattice{3})
    sites = lattice.coord_order
    sort_by_x_3D!(sites)
    sweep_neighbors!(nbtype, lattice, sites)
    sort_by_y_3D!(sites)
    sweep_neighbors!(nbtype, lattice, sites)
    sort_by_z_3D!(sites)
    sweep_neighbors!(nbtype, lattice, sites)
    return nothing
end
# Link consecutive sites in the current sort order whose lattice distance is
# exactly 1, skipping pairs already linked by a previous axis sweep.
function sweep_neighbors!(nbtype::VonNeumann, lattice, sites)
    for i = 2:length(sites)
        x = sites[i-1]
        y = sites[i]
        d = distance(nbtype, x, y)
        if d == 1 && !(label(y) in neighborhood(lattice, x))
            # Adjacency is symmetric, so register both directions.
            add_neighbor!(lattice, x, y)
            add_neighbor!(lattice, y, x)
        end
    end
    return sites
end
function build_neighborhoods!(nbtype::Hexagonal, lattice::Lattice{2})
    sites = lattice.coord_order
    sweep_neighbors!(nbtype, lattice, sites)
end

# Hexagonal adjacency checks every unordered pair of sites — O(n^2) — since a
# single-axis sweep cannot enumerate the six hexagonal directions.
function sweep_neighbors!(nbtype::Hexagonal, lattice, sites)
    n = length(sites)
    for i in 1:n
        for j in 1:i-1 # j < i
            x = sites[i]
            y = sites[j]
            d = distance(nbtype, x, y)
            if d == 1 && !(label(y) in neighborhood(lattice, x))
                add_neighbor!(lattice, x, y)
                add_neighbor!(lattice, y, x)
            end
        end
    end
    return sites
end
### TODO: 3D
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
7249,
406,
1078,
501,
90,
35,
11,
51,
11,
44,
11,
52,
92,
198,
220,
2524,
3712,
38469,
90,
29123,
90,
35,
11,
51,
11709,
220,
1303,
23243,
416,
4522,
198,
220,
6349,
62,
2875,
3712,
... | 2.442959 | 3,217 |
<gh_stars>1-10
using LinearAlgebra, Jets, JetPack, Test

# Operator dimensions shared by every test set below.
n1,n2 = 33,44

# Correctness: F*x must equal the elementwise natural log. Random inputs are
# shifted by +0.0001 to stay away from log's singularity at zero.
@testset "JopLog, correctness T=$(T)" for T in (Float64,Float32,Complex{Float64},Complex{Float32})
    F = JopLog(JetSpace(T,n1,n2))
    x1 = rand(domain(F)) .+ T(0.0001)
    @test F*x1 ≈ log.(x1)
end

# The Jacobian of the nonlinear operator must act linearly.
@testset "JopLog, linearity test, T=$(T)" for T in (Float64,Float32,Complex{Float64},Complex{Float32})
    F = JopLog(JetSpace(T,n1,n2))
    J = jacobian(F, rand(domain(F)) .+ T(0.0001))
    lhs,rhs = linearity_test(J)
    @test lhs ≈ rhs
end

# Adjoint (dot-product) test: <J*dm, d> == <dm, J'*d>.
@testset "JopLog, dot product test, T=$(T)" for T in (Float64,Float32,Complex{Float64},Complex{Float32})
    F = JopLog(JetSpace(T,n1,n2))
    J = jacobian!(F, rand(domain(F)) .+ T(0.0001))
    lhs, rhs = dot_product_test(J, -1 .+ 2 .* rand(domain(J)), -1 .+ 2 .* rand(range(J)))
    @test lhs ≈ rhs
end

# note the key here is to increase the size of the nonlinear vector
@testset "JopLog, linearization test, T=$(T)" for T in (Float64,Float32,Complex{Float64},Complex{Float32})
    F = JopLog(JetSpace(T,n1,n2))
    m0 = 1000 .* rand(domain(F)) .+ T(1)
    # Geometric sequence of step scalings for the linearization error decay.
    μ = sqrt.([1/1,1/2,1/4,1/8,1/16,1/32,1/64,1/128,1/256,1/512,1/1024,1/2048])
    δm = rand(domain(F)) .+ T(0.0001)
    observed, expected = linearization_test(F, m0, μ = μ, δm = δm)
    δ = minimum(abs, observed - expected)
    # Uncomment for a detailed per-μ report:
    #write(stdout, @sprintf("\nLinearization test -- type(%s)\n", T))
    #for i = 1:length(observed)
    #write(stdout, @sprintf("mu,observed,expected,diff; %12.6f %12.6f %12.6f %12.6f\n", μ[i], observed[i], expected[i], abs(observed[i] - expected[i])))
    #end
    #write(stdout, @sprintf("minimum difference %12.6f\n", minimum(abs,observed .- expected)))
    @test δ < 0.1
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
44800,
2348,
29230,
11,
14728,
11,
19013,
11869,
11,
6208,
198,
198,
77,
16,
11,
77,
17,
796,
4747,
11,
2598,
198,
198,
31,
9288,
2617,
366,
41,
404,
11187,
11,
29409,
309,
43641,
... | 2.106117 | 801 |
using FileIO, BedgraphFiles
using Bedgraph

using IteratorInterfaceExtensions
using TableTraits

using DataFrames
using Query

using Test
using Logging

# old_logger = global_logger(ConsoleLogger(stdout, Logging.Debug))

# Fixture data shared by the test sets below: nine chr19 intervals with
# evenly spaced values, plus the paths of the on-disk sample files.
module Bag

using Bedgraph

const chroms = ["chr19", "chr19", "chr19", "chr19", "chr19", "chr19", "chr19", "chr19", "chr19"]
const firsts = [49302000, 49302300, 49302600, 49302900, 49303200, 49303500, 49303800, 49304100, 49304400]
const lasts = [49302300, 49302600, 49302900, 49303200, 49303500, 49303800, 49304100, 49304400, 49304700]
const values = [-1.0, -0.75, -0.50, -0.25, 0.0, 0.25, 0.50, 0.75, 1.00]

const record = Bedgraph.Record("chr1", 1, 1, 0)
const records = Bedgraph.Record.(Bag.chroms, Bag.firsts, Bag.lasts, Bag.values)

const file = joinpath(@__DIR__, "data.bedgraph")
const file_headerless = joinpath(@__DIR__, "data-headerless.bedgraph")

# Scratch path for the save/load round-trip tests.
const tmp_output_path = tempname() * ".bedgraph"

end # module Bag

using .Bag

@testset "BedgraphFiles" begin

    @test isfile(Bag.file) == true
    @test isfile(Bag.file_headerless) == true

    # Load tests: the loader must be an iterable table for both the headered
    # and headerless sample files, and materialize to Vector{Bedgraph.Record}.
    loader = load(Bag.file)
    @test IteratorInterfaceExtensions.isiterable(loader) == true
    @test TableTraits.isiterabletable(loader) == true

    loaded = Vector{Bedgraph.Record}(loader)
    @test Vector{Bedgraph.Record} == typeof(loaded)

    loader_from_headerless = load(Bag.file_headerless)
    @test isiterable(loader_from_headerless) == true
    @test TableTraits.isiterabletable(loader_from_headerless) == true

    loaded_from_headerless = Vector{Bedgraph.Record}(loader_from_headerless)
    @test Vector{Bedgraph.Record} == typeof(loaded_from_headerless)

    @test IteratorInterfaceExtensions.isiterable(Bag.records) == true
    @test TableTraits.isiterabletable(Bag.records) == true

    # Both files must decode to exactly the fixture records.
    @test Bag.records == loaded
    @test Bag.records == loaded_from_headerless

    # Save and load from Vector{Bedgraph.Record}.
    save(Bag.tmp_output_path, Bag.records)

    @debug "direct load into Vector{Bedgraph.Record} - commencing"
    @test Bag.records == Vector{Bedgraph.Record}(load(Bag.tmp_output_path))
    @debug "direct load into Vector{Bedgraph.Record} - complete"

    @test Bag.records == load(Bag.tmp_output_path) |> Vector{Bedgraph.Record}

    # Save usign query.
    Bag.records |> save(Bag.tmp_output_path)

    @test Bag.records == Vector{Bedgraph.Record}(load(Bag.tmp_output_path))
    @test Bag.records == load(Bag.tmp_output_path) |> Vector{Bedgraph.Record}

    # Check return of data from save method.
    @test Bag.records == Bag.records |> save(Bag.tmp_output_path)

    # Check piping/continuations through Query.jl.
    load("data.bedgraph") |> @filter(_.chrom == "chr19" && _.first > 49302900 && _.last < 49303800) |> save(Bag.tmp_output_path)
    @test [Bedgraph.Record("chr19", 49303200, 49303500, 0.0)] == load(Bag.tmp_output_path) |> Vector{Bedgraph.Record}

    @testset "Integrations" begin
        include("integrations/test-DataFrames.jl")
    end # testset Transformers

    println()
    show(load(Bag.file))
    println()

end
| [
3500,
9220,
9399,
11,
15585,
34960,
25876,
198,
3500,
15585,
34960,
198,
198,
3500,
40806,
1352,
39317,
11627,
5736,
198,
3500,
8655,
15721,
896,
198,
198,
3500,
6060,
35439,
198,
3500,
43301,
198,
198,
3500,
6208,
198,
3500,
5972,
2667,
... | 2.7 | 1,080 |
<reponame>cvdlab/ViewerGL.js<filename>test/GLUtils.jl<gh_stars>1-10
using Test
using LinearAlgebraicRepresentation
Lar = LinearAlgebraicRepresentation
using ViewerGL
GL = ViewerGL

# Test skeleton for GLUtils.jl.
#
# NOTE: the original placeholders were bare `@test` macro calls with no
# expression; `@test` requires an argument, so macro expansion failed and the
# whole file refused to load. Each pending testset now carries a single
# `@test_skip` placeholder (reported as Broken/skipped) until real
# assertions are written.
@testset "GLUtils.jl" begin

    # function lar4mesh(verts,cells) # cells are triangles
    @testset "lar4mesh" begin
        @test_skip false # TODO: add real assertions
    end

    # function two2three(points)
    @testset "two2three" begin
        @test_skip false # TODO: add real assertions
    end

    # function glGenBuffer()
    @testset "glGenBuffer" begin
        @test_skip false # TODO: add real assertions
    end

    # function glGenVertexArray()
    @testset "glGenVertexArray" begin
        @test_skip false # TODO: add real assertions
    end

    # function glLineWidth()
    @testset "glLineWidth" begin
        @test_skip false # TODO: add real assertions
    end

    # function glPointSize()
    @testset "glPointSize" begin
        @test_skip false # TODO: add real assertions
    end

    # function glCheckError(actionName="")
    @testset "glCheckError" begin
        @test_skip false # TODO: add real assertions
    end

    # function glErrorMessage()
    @testset "glErrorMessage" begin
        @test_skip false # TODO: add real assertions
    end

    # function glDeleteLater(fun::Function)
    @testset "glDeleteLater" begin
        @test_skip false # TODO: add real assertions
    end

    # function glDeleteNow()
    @testset "glDeleteNow" begin
        @test_skip false # TODO: add real assertions
    end

    # function normalize2(V::Lar.Points; flag=true, fold=-1)
    @testset "normalize2" begin
        @test_skip false # TODO: add real assertions
    end

    # function normalize3(V::Lar.Points, flag=true)
    @testset "normalize3" begin
        @test_skip false # TODO: add real assertions
    end

end
27,
7856,
261,
480,
29,
66,
20306,
23912,
14,
7680,
263,
8763,
13,
8457,
27,
34345,
29,
9288,
14,
8763,
18274,
4487,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
6208,
198,
3500,
44800,
2348,
29230,
291,
40171,
341,
... | 2.128676 | 816 |
<reponame>sd109/BlackBoxOptim.jl<filename>spikes/subset_tournament_cmsa_es/subset_tournament_cmsa_es.jl
# This is Subset Tournament CMSA-ES as proposed by <NAME> in the paper:
# <NAME>, "Covariate Subset Tournaments for High-Dimensional Blackbox Optimization with Covariance Matrix Adapting Evolutionary Strategies", 2014
# Normalize a utility (row) vector so its entries sum to one.
function normalize_utilities(utilities)
    utilities / sum(utilities)
end

# Equal weight on the mu best individuals, zero on the remaining lambda-mu.
function linear_utilities(mu, lambda)
    normalize_utilities(hcat(ones(1, mu), zeros(1, lambda-mu)))
end

# Logarithmically decreasing weights on the mu best individuals, zero on the
# rest (standard CMA-ES-style rank weighting).
function log_utilities(mu, lambda)
    # NOTE: `log` must be broadcast over the range; the original scalar form
    # `log(1:mu)` is a MethodError on Julia >= 1.0 (the rest of this file
    # already uses 1.0-only names such as `argmin`).
    normalize_utilities(hcat((log(mu+1) .- log.(1:mu))', zeros(1, lambda-mu)))
end
# Optimize the n-dimensional objective func with a (mu,lambda) CMSA-ES using
# subset tournaments to optimize subsets of variables in rounds.
#
# Returns (xbest, fbest, num_fevals, termination_reason, archive).
#
# NOTE(review): this file mixes Julia vintages — `int(...)` and `repmat(...)`
# below are pre-1.0 names while `argmin` is 1.0-only; on a modern Julia the
# defaults/hot loop would need round(Int, ...)/floor(Int, ...) and repeat(...).
function st_cmsa_es(p;
    trace = true,

    # Stopping criteria related
    # NOTE(review): xtol is accepted but never used in the body.
    max_seconds = 4*numdims(p), max_evals_per_dim = 1e7,
    ftol = 1e-7, xtol = 1e-10, stol = 1e-10,
    max_rounds_without_improvement = 200,

    # Starting points, will be random unless specified
    # NOTE(review): xmean is unconditionally overwritten with a random
    # individual below, so a user-supplied starting point is ignored.
    xmean = false,

    # Algorithm specific params:
    covarMatrixSampler = SubsetCholeskyCovarSampler,
    utilitiesFunc = log_utilities,
    lambda = 4*numdims(p),
    mu = int(max(ceil(lambda/rand(4:20)), 1.0)),
    tau = 1 / sqrt(2*numdims(p)), # Equation (1) on page 5 in Beyer2008
    tau_c = 1 + numdims(p) * (numdims(p) + 1) / (2 * mu), # Equation (2) on page 5 in Beyer2008
    sigma = 0.05*rand(1:8)*minimum(diameters(search_space(p))),
    decompose_covar_prob = 0.4,

    # Subsetting specific parameters:
    subset_size = int(floor(0.20*numdims(p))),
    subsets_per_tournament = 2,
    num_rounds_per_tournament = 1,
    num_rounds_of_optimization_between_tournaments = 30,
    subset_selection_mechanism = :random
    )

    N = numdims(p)
    ss = search_space(p)
    max_evals = max_evals_per_dim * N

    # Covariance learning-rate complement: C <- a*C + (1-a)*update.
    a = 1 - 1 / tau_c

    C = covarMatrixSampler(N)
    utilities = utilitiesFunc(mu, lambda)
    xbest = xmean = rand_individual(ss) # Current best mean value.
    fbest = eval1(xbest, p)
    num_fevals = 1

    # Keep the 10 best candidates seen so far.
    archive = BlackBoxOptim.TopListArchive(N, 10)
    add_candidate!(archive, fbest, xbest[:], num_fevals)
    fevals_last_best = num_fevals

    next_print_covar = 100
    termination_reason = "?"

    # Init for subset tournaments
    st_state = :optimization # Can be either :tournament or :optimization and starts with :optimization to ensure we set up a new tournament
    num_optimization_rounds_for_this_subset = num_rounds_of_optimization_between_tournaments # Also ensures we are end of opt round => new tournament will be set up
    st_current_subset = 1 # When in :tournament mode this is the index to the subset currently being evaluated, can be in range 1-subsets_per_tournament
    st_subsets = Array{Int, 1}[] # Subsets that are currently in a tournament
    num_tournament_rounds_for_this_subset = 0 # Number of eval rounds we have ran with current subset in tournament mode
    fitness_per_tournament_round = zeros(num_rounds_per_tournament, subsets_per_tournament)

    # Keep stats per covariate being optimized for how effective the optimization is when they are included.
    # We save the expected relative change per round of optimization and use a history parameter in [0.0, 1.0]
    # to decide how much history should be saved when updating it, i.e. newval = oldvalue * h + delta * (1-h).
    # These stats values are used in multiple tournaments like so:
    #  1. Repeat until the right number of vars has been added to selected set
    #  2. Randomly sample two unselected vars
    #  3. Select the one with the best stats value (lowest value if minimizing)
    # NOTE(review): improvement_per_var is initialized here but never updated
    # or read in this function.
    improvement_per_var = 0.01 * ones(N) # Start with some value since it will be adapted as we go...

    start_time = time()

    # Now lets optimize! Ensure we run at least one iteration.
    while(true)

        if (time() - start_time) > max_seconds
            termination_reason = "Exceeded time budget"
            break
        end

        if num_fevals > max_evals
            termination_reason = "Exceeded function eval budget"
            break
        end

        # State machine: alternate between tournament rounds (evaluating
        # candidate subsets) and optimization rounds (using the winner).
        if st_state == :tournament
            if num_tournament_rounds_for_this_subset < num_rounds_per_tournament
                num_tournament_rounds_for_this_subset += 1
            else
                st_current_subset += 1
                if st_current_subset <= subsets_per_tournament
                    set_subset!(C, st_subsets[st_current_subset])
                    num_tournament_rounds_for_this_subset = 1
                else # No more tournaments needed, select best subset and start optimizing with it
                    winning_subset = select_winning_subset(st_subsets, fitness_per_tournament_round)
                    set_subset!(C, st_subsets[winning_subset])
                    st_current_subset = winning_subset
                    st_state = :optimization
                    num_optimization_rounds_for_this_subset = 1
                end
            end
        else # In optimization mode
            if num_optimization_rounds_for_this_subset < num_rounds_of_optimization_between_tournaments
                num_optimization_rounds_for_this_subset += 1
            else
                # We have now used the current subset for a full set of optimization rounds so we set up for a new tournament
                st_subsets = select_new_subsets(st_subsets, N, subsets_per_tournament, st_current_subset, subset_selection_mechanism, subset_size)
                st_state = :tournament
                st_current_subset = 1
                set_subset!(C, st_subsets[st_current_subset])
                num_tournament_rounds_for_this_subset = 1
            end
        end

        # Decompose only with a given probability => saves time
        if rand() <= decompose_covar_prob
            decompose!(C)
        end

        # Generate new population
        sigmas = sigma * exp( tau * randn(1, lambda) ) # 1*lambda
        s = multivariate_normal_sample(C, N, lambda)
        z = broadcast(*, sigmas, s) # n*lambda
        xs = repmat(xmean, 1, lambda) + z # n*lambda

        # Evaluate fitness
        fitnesses = eval_fitnesses(p, xs, lambda)
        num_fevals += lambda

        # Check if best new fitness is best ever and print some info if tracing.
        indbest = argmin(fitnesses)
        fbest_new = fitnesses[indbest]

        # Save info about the fitnesses if we are in tournament mode
        if st_state == :tournament
            fitness_per_tournament_round[num_tournament_rounds_for_this_subset, st_current_subset] = fbest_new
        end

        if fbest_new < fbest
            xbest = xs[:, indbest]
            fbest = fbest_new
            add_candidate!(archive, fbest, xbest, num_fevals)
            fevals_last_best = num_fevals

            if fitness_is_within_ftol(p, ftol, fbest)
                termination_reason = "Within ftol"
                break
            end

            if trace
                println("$(num_fevals): Best fitness = $(fbest)")
                if num_fevals > next_print_covar
                    next_print_covar = num_fevals + 100
                    #println("covar summary: ", sumstats(C.C, (x) -> @sprintf("%.2e", x)))
                    println("sigma: ", sigma)
                end
            end
        else
            # Stagnation check: generations (lambda evals each) since last record.
            if (num_fevals - fevals_last_best) > max_rounds_without_improvement * lambda
                termination_reason = "Max rounds without improvement reached"
                break
            end
        end

        # Assign weights to the best individuals according to the utilities vector.
        weights = assign_weights(lambda, fitnesses, utilities)

        # Calculate new mean value based on weighted version of steps in population.
        xmean += (z * weights)

        # Update the covariance matrix
        # Rank-mu update from the weighted outer products of the raw samples.
        uc = zeros(Float64, N, N)
        for i in 1:lambda
            if weights[i] > 0.0
                se = s[:,i]
                uc += weights[i] * (se * se')
            end
        end
        update_covariance_matrix!(C, uc, a)
        #ws = broadcast(*, weights', s)
        #update_covariance_matrix!(C, (ws * ws'), covar_learning_rate)

        # Adapt sigma for next round
        sigma = sigmas * weights

        # Terminate if sigma very small
        #println("sigma = $(sigma[1,1])")
        if sigma[1,1] < stol
            termination_reason = "Sigma too small"
            break
        end
    end

    return xbest, fbest, num_fevals, termination_reason, archive
end
"""
    select_winning_subset(subsets, fitness_per_tournament_round)

Return the (integer) index of the subset with the best, i.e. lowest, fitness.
When `fitness_per_tournament_round` is a matrix (rounds × subsets), subsets
are compared by their column means; for a vector the minimum entry wins.
`subsets` is unused but kept for interface compatibility with callers.
"""
function select_winning_subset(subsets, fitness_per_tournament_round)
    if ndims(fitness_per_tournament_round) > 1
        # Average over tournament rounds (rows). NOTE: `mean(A, 1)` was removed
        # in Julia 1.0 in favor of the `dims` keyword; `vec` ensures `argmin`
        # returns a plain integer column index, not a CartesianIndex.
        fitness_summary = mean(fitness_per_tournament_round, dims = 1)
        return argmin(vec(fitness_summary))
    else
        return argmin(fitness_per_tournament_round)
    end
end
"""
    select_new_subsets(subsets, n, num_subsets, winning_subset, subset_selection_mechanism, subset_size)

Produce the subsets for the next tournament round. With no previous subsets,
or when the selection mechanism is `:random`, all subsets are drawn fresh;
otherwise the previous winner is carried over and the remaining slots are
filled with random subsets.
"""
function select_new_subsets(subsets, n, num_subsets, winning_subset, subset_selection_mechanism, subset_size)
    fresh_draw = isempty(subsets) || subset_selection_mechanism == :random
    if fresh_draw
        return generate_random_subsets(num_subsets, subset_size, n)
    end
    # Keep the winner: draw one fewer random subset and append the winning one.
    next_subsets = generate_random_subsets(num_subsets - 1, subset_size, n)
    push!(next_subsets, subsets[winning_subset])
    return next_subsets
end
# Build subsets by running binary tournaments between per-variable stats values.
function select_new_subsets_based_on_stats(n, num_subsets, subset_size, stats, smaller_stats_is_better)
    return map(1:num_subsets) do _
        generate_subset_based_on_stats(subset_size, n, stats, smaller_stats_is_better)
    end
end
"""
    generate_subset_based_on_stats(subset_size, n, stats, smaller_stats_is_better = false)

Select `subset_size` distinct variable indices from `1:n` via binary
tournaments: repeatedly pick a random adjacent pair from a shuffled candidate
list, keep the index whose `stats` value compares better, and remove it from
the candidate pool.
"""
function generate_subset_based_on_stats(subset_size, n, stats, smaller_stats_is_better = false)
    # Every round consumes one candidate but needs an adjacent pair left,
    # so at most n-1 rounds are possible (guards `rand(1:0)` below).
    subset_size <= n - 1 ||
        throw(ArgumentError("subset_size must be at most n-1, got $subset_size (n = $n)"))
    subset = Int[]
    candidates = shuffle(collect(1:n))
    len = n - 1
    # The "winner" of a pair is the one with the better stats value.
    op = smaller_stats_is_better ? (<) : (>)
    for i in 1:subset_size
        index = rand(1:len)
        candidate1 = candidates[index]
        candidate2 = candidates[index+1]
        # (Removed a leftover debug `println` that compared the candidates.)
        if op(stats[candidate1], stats[candidate2])
            splice!(candidates, index)       # winner leaves the candidate pool
            push!(subset, candidate1)
        else
            splice!(candidates, index+1)
            push!(subset, candidate2)
        end
        len -= 1
    end
    return subset
end
# Draw `num_subsets` random subsets, each a sorted vector of `subset_size`
# distinct indices chosen uniformly from `1:n`.
function generate_random_subsets(num_subsets, subset_size, n)
    return [sort(shuffle(collect(1:n))[1:subset_size]) for _ in 1:num_subsets]
end
# Evaluate the fitness of each of the `lambda` columns of `xs` on `problem`;
# returns a Float64 vector aligned with the columns.
function eval_fitnesses(problem, xs, lambda = size(xs, 2))
    fitnesses = zeros(lambda)
    for col in 1:lambda
        fitnesses[col] = eval1(xs[:, col], problem)
    end
    return fitnesses
end
# Map `utilities` onto individuals by fitness rank: the best individual
# (lowest fitness when `minimize`, highest otherwise) receives `utilities[1]`,
# the second best `utilities[2]`, and so on. Returns an n×1 matrix aligned
# with the original individual order.
function assign_weights(n, fitnesses, utilities; minimize = true)
    ranked = sortperm(fitnesses, rev = !minimize)
    weights = zeros(n, 1)
    for rank in 1:n
        weights[ranked[rank]] = utilities[rank]
    end
    return weights
end
# Base class for covariance-matrix samplers built on different decompositions.
abstract type CovarianceMatrixSampler end

# Exponentially blend the stored covariance toward `delta` with memory `a`,
# then rebuild it from the upper triangle so it stays exactly symmetric
# (guards against numerical drift; should not be needed, investigate...).
function update_covariance_matrix!(cms::CovarianceMatrixSampler, delta, a)
    blended = a * cms.C + (1 - a) * delta
    cms.C = triu(blended) + triu(blended, 1)'
    return cms.C
end
# Covariance sampler based on an eigendecomposition of C.
# Invariant maintained by decompose!: C ≈ B * Diagonal(diagD.^2) * B'.
mutable struct EigenCovarSampler <: CovarianceMatrixSampler
C::Array{Float64,2}       # current covariance matrix
B::Array{Float64,2}       # eigenvectors of C (as columns)
diagD::Array{Float64,1}   # square roots of the eigenvalues of C
# Start from the identity covariance (B = I, unit eigenvalues).
EigenCovarSampler(n) = begin
new(Matrix{Float64}(I, n,n), Matrix{Float64}(I, n,n), ones(n))
end
end
"""
    decompose!(cms::EigenCovarSampler)

Refresh the cached eigendecomposition of `cms.C`. On any failure (e.g. a
slightly indefinite matrix) the previous factors are kept and sampling
proceeds with stale ones.
"""
function decompose!(cms::EigenCovarSampler)
    try
        # `eig` was removed in Julia 1.0; `eigen` of a Symmetric wrapper
        # guarantees a real factorization of the (symmetric) covariance.
        F = eigen(Symmetric(cms.C))
        cms.B = F.vectors
        cms.diagD = sqrt.(F.values)
    catch
        # We don't update if there is some problem
    end
end
# Draw `m` sample columns of dimension `n` from N(0, C) using the
# eigendecomposition: x = B * (diagD .* z) with z ~ N(0, I).
function multivariate_normal_sample(cms::CovarianceMatrixSampler, n, m)
    noise = randn(n, m)
    return cms.B * (cms.diagD .* noise)
end
# Covariance sampler based on a Cholesky factorization.
# Invariant maintained by decompose!: C ≈ sqrtC * sqrtC' with sqrtC lower triangular.
mutable struct CholeskyCovarSampler <: CovarianceMatrixSampler
C::Array{Float64,2}       # current covariance matrix
sqrtC::Array{Float64,2}   # lower-triangular Cholesky factor of C
# Start from the identity covariance (sqrtC = I).
CholeskyCovarSampler(n) = begin
new(Matrix{Float64}(I, n,n), Matrix{Float64}(I, n,n))
end
end
"""
    decompose!(cms::CholeskyCovarSampler)

Refresh the cached lower-triangular Cholesky factor of `cms.C`. On failure
(e.g. `C` not positive definite) the previous factor is kept.
"""
function decompose!(cms::CholeskyCovarSampler)
    try
        # `chol` was removed in Julia 1.0; `cholesky(C).L` is the old `chol(C)'`.
        cms.sqrtC = Matrix(cholesky(Symmetric(cms.C)).L)
    catch
        # We don't update if there is some problem
    end
end
# Draw `m` sample columns from N(0, C) via the Cholesky factor: x = L * z.
function multivariate_normal_sample(cms::CholeskyCovarSampler, n, m)
    noise = randn(n, m)
    return cms.sqrtC * noise
end
# The subset CholeskyCovarSampler only uses a subset of the variables in the
# (expensive) cholesky decomposition, the other variables are kept constant.
# However, the covariance matrix itself is always updated and saved in full
# so that the overall learning of the shape of the fitness landscape is not lost.
mutable struct SubsetCholeskyCovarSampler <: CovarianceMatrixSampler
C::Array{Float64,2}       # full covariance matrix (always updated in full)
sqrtC::Array{Float64,2}   # Cholesky factor of C restricted to `subset`
subset::Array{Int, 1} # Indices of the currently active subset of variables
# Start from the identity covariance with all variables active.
SubsetCholeskyCovarSampler(n) = begin
new(Matrix{Float64}(I, n,n), Matrix{Float64}(I, n,n), collect(1:n))
end
end
# Set new subset.
# Replaces the active variable subset and immediately refreshes the cached
# Cholesky factor so subsequent sampling matches the new subset.
function set_subset!(cms::SubsetCholeskyCovarSampler, subset)
cms.subset = subset
decompose!(cms)
end
using JSON
"""
    decompose!(cms::SubsetCholeskyCovarSampler)

Refresh the Cholesky factor for the covariance restricted to the active
subset of variables. On failure the previous factor is kept and a message
is printed.
"""
function decompose!(cms::SubsetCholeskyCovarSampler)
    try
        subset_C = cms.C[cms.subset, cms.subset]
        # `chol` was removed in Julia 1.0; `cholesky(C).L` is the old `chol(C)'`.
        cms.sqrtC = Matrix(cholesky(Symmetric(subset_C)).L)
    catch err   # renamed: `catch error` shadowed `Base.error`
        # We don't update if there is some problem
        println("ERROR: Could not do cholesky decomposition!")
        show(err)
    end
end
"""
    multivariate_normal_sample(cms::SubsetCholeskyCovarSampler, n, m)

Draw `m` correlated sample columns for the active variable subset and embed
them into an `n`×`m` matrix; rows for variables outside the subset stay zero.
"""
function multivariate_normal_sample(cms::SubsetCholeskyCovarSampler, n, m)
    subset_size = length(cms.subset)
    # BUGFIX: initialize to a zero matrix rather than the Int sentinel `1`,
    # so a failed multiply below still leaves a valid, indexable matrix.
    subset_samples = zeros(subset_size, m)
    try
        subset_samples = cms.sqrtC * randn(subset_size, m)
    catch err
        println("Error in subset sample multiplication, size(cms.sqrtC) = $(size(cms.sqrtC)), subset_size = $(subset_size), m = $(m), size(cms.C) = $(size(cms.C))")
        show(err)
    end
    # Scatter the subset rows into the full n×m sample. The rest should be
    # zero. Maybe faster with a sparse matrix since most entries are zero?
    full_samples = zeros(n, m)
    for i in 1:subset_size
        si = cms.subset[i]
        for j in 1:m
            full_samples[si, j] = subset_samples[i, j]
        end
    end
    return full_samples
end
| [
27,
7856,
261,
480,
29,
21282,
14454,
14,
9915,
14253,
27871,
320,
13,
20362,
27,
34345,
29,
2777,
7938,
14,
7266,
2617,
62,
83,
5138,
62,
46406,
64,
62,
274,
14,
7266,
2617,
62,
83,
5138,
62,
46406,
64,
62,
274,
13,
20362,
198,
... | 2.602315 | 5,097 |
<filename>C/Chafa/build_tarballs.jl
# BinaryBuilder recipe that cross-compiles the Chafa terminal-graphics
# library and CLI for all supported non-Windows platforms.
using BinaryBuilder
name = "Chafa"
version = v"1.4.1"
# Upstream release tarball, pinned by SHA-256.
sources = [
ArchiveSource("https://hpjansson.org/chafa/releases/chafa-$(version).tar.xz",
"46d34034f4c96d120e0639f87a26590427cc29e95fe5489e903a48ec96402ba3"),
]
# Shell script run inside the build sandbox for each target platform.
script = raw"""
cd ${WORKSPACE}/srcdir/chafa-*/
if [[ "${target}" == *darwin* ]]; then
# For some reason building with Clang for macOS doesn't work
export CC=gcc
fi
if [[ "${proc_family}" == intel ]]; then
BUILTIN_FUNCS=yes
else
BUILTIN_FUNCS=no
fi
./autogen.sh \
--prefix=${prefix} \
--build=${MACHTYPE} \
--host=${target} \
ax_cv_gcc_check_x86_cpu_init="${BUILTIN_FUNCS}" \
ax_cv_gcc_check_x86_cpu_supports="${BUILTIN_FUNCS}"
make -j${nproc}
make install
"""
# Chafa itself does not support Windows
platforms = filter!(!Sys.iswindows, supported_platforms())
# Artifacts the build must produce (verified by BinaryBuilder).
products = [
LibraryProduct("libchafa", :libchafa),
ExecutableProduct("chafa", :chafa),
]
# Runtime library dependencies (JLL packages).
dependencies = [
Dependency("FreeType2_jll"),
Dependency("Glib_jll", v"2.59.0"; compat="2.59.0"),
Dependency("ImageMagick_jll"),
]
# Build the tarballs.
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies)
| [
27,
34345,
29,
34,
14,
1925,
28485,
14,
11249,
62,
18870,
21591,
13,
20362,
198,
3500,
45755,
32875,
198,
198,
3672,
796,
366,
1925,
28485,
1,
198,
9641,
796,
410,
1,
16,
13,
19,
13,
16,
1,
198,
198,
82,
2203,
796,
685,
198,
220... | 2.35249 | 522 |
"""
ULMFiT - Text Classifier

Wrapper around the LanguageModel struct. It has three fields:

vocab         : the vocabulary, shared with the LanguageModel
rnn_layers    : the LanguageModel's embedding, LSTM (AWD_LSTM) and VarDrop
                layers, without its final softmax layer
linear_layers : Chain of two Dense layers [PooledDense and Dense] with a
                softmax output layer

To train, create an instance and pass it as the first argument to the
`train_classifier!` function.
"""
mutable struct TextClassifier
vocab::Vector
rnn_layers::Flux.Chain
linear_layers::Flux.Chain
end
# Build a classifier on top of a (pre-trained) LanguageModel: reuse its first
# 8 layers and attach a pooled-dense classification head of `clsfr_hidden_sz`
# hidden units producing `clsfr_out_sz` softmax outputs.
# NOTE(review): `lm.layers[7].layer.cell.h` is assumed to be the hidden state
# of the last LSTM, sizing the pooled layer's input — confirm against the
# LanguageModel layout.
function TextClassifier(lm::LanguageModel=LanguageModel(), clsfr_out_sz::Integer=1, clsfr_hidden_sz::Integer=50, clsfr_hidden_drop::Float64=0.4)
return TextClassifier(
lm.vocab,
lm.layers[1:8],
Chain(
gpu(PooledDense(length(lm.layers[7].layer.cell.h), clsfr_hidden_sz)),
gpu(BatchNorm(clsfr_hidden_sz, relu)),
Dropout(clsfr_hidden_drop),
gpu(Dense(clsfr_hidden_sz, clsfr_out_sz)),
gpu(BatchNorm(clsfr_out_sz)),
softmax
)
)
end
Flux.@functor TextClassifier
"""
    validate(tc::TextClassifier, gen::Channel, num_of_batches::Union{Colon, Integer})

Cross-validate the classifier.

Arguments:

tc             : instance of TextClassifier
gen            : `Channel` yielding the number of available batches first,
                 then alternating X / Y mini-batches from the validation set
num_of_batches : number of batches to validate on; pass `:` to use every
                 batch the channel can provide

Returns `(loss, accuracy, precisions, recalls, F1)` where the last three are
per-class column vectors.
"""
function validate(tc::TextClassifier, gen::Channel, num_of_batches::Union{Colon, Integer})
    n_classes = size(tc.linear_layers[end-2].W, 1)
    classifier = tc
    Flux.testmode!(classifier)
    total_loss = 0
    iters = take!(gen)   # first item on the channel is the batch count
    # BUGFIX: use short-circuit `&&` so the `<` comparison is skipped when
    # `num_of_batches` is `:` (Colon has no `<` method; `&` evaluated both sides).
    ((num_of_batches != :) && (num_of_batches < iters)) && (iters = num_of_batches)
    TP, TN = gpu(zeros(Float32, n_classes, 1)), gpu(zeros(Float32, n_classes, 1))
    FP, FN = gpu(zeros(Float32, n_classes, 1)), gpu(zeros(Float32, n_classes, 1))
    # BUGFIX: iterate the clamped count `iters`; the original looped over
    # `num_of_batches`, which fails for `:` and can overrun the channel.
    for i = 1:iters
        X = take!(gen)
        Y = gpu(take!(gen))
        X = map(x -> indices(x, classifier.vocab, "_unk_"), X)
        H = classifier.rnn_layers.(X)
        H = classifier.linear_layers(H)
        l = crossentropy(H, Y)
        Flux.reset!(classifier.rnn_layers)
        # Accumulate per-class confusion counts (predictions ⋅ targets).
        TP .+= sum(H .* Y, dims=2)
        FN .+= sum(((-1 .* H) .+ 1) .* Y, dims=2)
        FP .+= sum(H .* ((-1 .* Y) .+ 1), dims=2)
        TN .+= sum(((-1 .* H) .+ 1) .* ((-1 .* Y) .+ 1), dims=2)
        total_loss += l
    end
    precisions = TP ./ (TP .+ FP)
    recalls = TP ./ (TP .+ FN)
    F1 = (2 .* (precisions .* recalls)) ./ (precisions .+ recalls)
    # NOTE(review): accuracy only uses class 1's counts — binary assumption?
    accuracy = (TP[1] + TN[1])/(TP[1] + TN[1] + FP[1] + FN[1])
    return (total_loss, accuracy, precisions, recalls, F1)
end
"""
Forward pass

Does the main computation for one mini-batch: runs all layers [RNN and DENSE
layers] and returns the predicted output, using Truncated Backpropagation
Through Time — only the last `tracked_steps` time-steps carry gradients.

Arguments:

tc            : instance of TextClassifier
gen           : data loader; one call yields `X` for the mini-batch
tracked_steps : number of tracked time-steps for truncated BPTT; gradients
                are calculated only for these final time-steps
"""
function forward(tc::TextClassifier, gen::Channel, tracked_steps::Integer=32)
# switching off tracking
classifier = tc
X = take!(gen)
l = length(X)
# Truncated Backprop through time
Zygote.ignore() do
for i=1:ceil(l/tracked_steps)-1 # Tracking is switched off inside this loop
(i == 1 && l%tracked_steps != 0) ? (last_idx = l%tracked_steps) : (last_idx = tracked_steps)
H = broadcast(x -> indices(x, classifier.vocab, "_unk_"), X[1:last_idx])
H = classifier.rnn_layers.(H)
X = X[last_idx+1:end]
end
end
# copy the latest hidden states / dropout masks back to the tracked model
for (t_layer, unt_layer) in zip(tc.rnn_layers[2:end], classifier.rnn_layers[2:end])
if t_layer isa AWD_LSTM
t_layer.layer.state = unt_layer.layer.state
continue
end
if !unt_layer.reset
t_layer.mask = unt_layer.mask
t_layer.reset = false
end
end
# last part of the sequences in X - Tracking is switched on
H = broadcast(x -> tc.rnn_layers[1](indices(x, classifier.vocab, "_unk_")), X)
H = tc.rnn_layers[2:end].(H)
H = tc.linear_layers(H)
return H
end
"""
    loss(classifier::TextClassifier, gen::Channel, tracked_steps::Integer=32)

Cross-entropy loss for one mini-batch: run the truncated-BPTT forward pass,
fetch the targets from `gen`, and reset the recurrent state afterwards.

Arguments:

classifier    : instance of TextClassifier
gen           : `Channel` [data loader] providing the mini-batch
tracked_steps : number of time-steps for which gradient tracking is on
"""
function loss(classifier::TextClassifier, gen::Channel, tracked_steps::Integer=32)
    predictions = forward(classifier, gen, tracked_steps)
    targets = gpu(take!(gen))
    batch_loss = crossentropy(predictions, targets)
    Flux.reset!(classifier.rnn_layers)
    return batch_loss
end
"""
    discriminative_step!(layers, classifier::TextClassifier, gen::Channel, tracked_steps::Integer, ηL::Float64, opts::Vector)

One optimization step with discriminative fine-tuning: take gradients of
`loss` on one mini-batch, then update each layer group with its own
optimizer, scaling the learning rate by 2.6 per layer up to `ηL` at the top.
"""
function discriminative_step!(layers, classifier::TextClassifier, gen::Channel, tracked_steps::Integer, ηL::Float64, opts::Vector)
    length(opts) == length(layers) ||
        throw(ArgumentError("need exactly one optimizer per layer group"))
    # Gradient calculation.
    # BUGFIX: `loss` takes `tracked_steps` positionally; the previous keyword
    # call `loss(...; tracked_steps = tracked_steps)` raised a MethodError.
    grads = Zygote.gradient(() -> loss(classifier, gen, tracked_steps), get_trainable_params(layers))
    # Discriminative step: lowest group gets ηL / 2.6^(n-1).
    ηl = ηL/(2.6^(length(layers)-1))
    for (layer, opt) in zip(layers, opts)
        opt.eta = ηl
        for ps in get_trainable_params([layer])
            Flux.Optimise.update!(opt, ps, grads[ps])
        end
        ηl *= 2.6
    end
    return
end
"""
    train_classifier!(classifier::TextClassifier=TextClassifier(), classes::Integer=1,
        data_loader::Channel=imdb_classifier_data, hidden_layer_size::Integer=50; kw...)

Main training loop for a defined classifier on the given classes and data,
using slanted triangular learning rates, gradual unfreezing and
discriminative fine-tuning. Usage is discussed in the docs.
"""
function train_classifier!(classifier::TextClassifier=TextClassifier(), classes::Integer=1,
        data_loader::Channel=imdb_classifier_data, hidden_layer_size::Integer=50;
        stlr_cut_frac::Float64=0.1, stlr_ratio::Number=32, stlr_η_max::Float64=0.01,
        val_loader::Union{Channel, Nothing}=nothing, cross_val_batches::Union{Colon, Integer}=:,
        epochs::Integer=1, checkpoint_itvl=5000, tracked_steps::Integer=32)
    # Layer groups, ordered bottom-to-top, for gradual unfreezing.
    trainable = []
    append!(trainable, [classifier.rnn_layers[[1, 3, 5, 7]]...])
    push!(trainable, [classifier.linear_layers[1:2]...])
    push!(trainable, [classifier.linear_layers[4:5]...])
    opts = [ADAM(0.001, (0.7, 0.99)) for i=1:length(trainable)]
    gpu!.(classifier.rnn_layers)
    for epoch=1:epochs
        println("Epoch: $epoch")
        gen = data_loader()
        num_of_iters = take!(gen)
        cut = num_of_iters * epochs * stlr_cut_frac
        for iter=1:num_of_iters
            # Slanted triangular learning rates
            t = iter + (epoch-1)*num_of_iters
            p_frac = (iter < cut) ? iter/cut :
                (1 - ((iter-cut)/(cut*(1/stlr_cut_frac-1))))
            ηL = stlr_η_max*((1+p_frac*(stlr_ratio-1))/stlr_ratio)
            # Gradual-unfreezing step with discriminative fine-tuning
            unfreezed_layers, cur_opts = (epoch < length(trainable)) ?
                (trainable[end-epoch+1:end], opts[end-epoch+1:end]) : (trainable, opts)
            discriminative_step!(unfreezed_layers, classifier, gen, tracked_steps, ηL, cur_opts)
            reset_masks!.(classifier.rnn_layers)    # reset all dropout masks
        end
        # Cross-validate after each epoch when a validation loader is given.
        # BUGFIX: removed a println of the undefined `trn_accu`/`trn_loss`,
        # fixed the `classifer` typo, passed `cross_val_batches` through to
        # `validate`, and made the recalls variable name consistent.
        val_loader === nothing && continue
        val_loss, val_acc, val_precisions, val_recalls, val_F1_scores =
            validate(classifier, val_loader, cross_val_batches)
        println("Cross validation loss: $val_loss")
        println("Cross validation accuracy:\n $val_acc")
        println("Cross validation class wise Precisions:\n $val_precisions")
        println("Cross validation class wise Recalls:\n $val_recalls")
        println("Cross validation class wise F1 scores:\n $val_F1_scores")
    end
end
"""
    predict(tc::TextClassifier, text_sents::Corpus)

Run the trained classifier on each document of the given `Corpus` and return
the vector of predicted class indices. All preprocessing related to the used
vocabulary should be done before calling this function — use `prepare!`.
"""
function predict(tc::TextClassifier, text_sents::Corpus)
    classifier = tc
    Flux.testmode!(classifier)
    predictions = []
    expr(x) = indices(x, classifier.vocab, "_unk_")
    for text in text_sents
        tokens_ = tokens(text)
        h = classifier.rnn_layers.(expr.(tokens_))
        probability_dist = classifier.linear_layers(h)
        # BUGFIX: was `argmax(probaility_dist)` — an undefined variable.
        class = argmax(probability_dist)
        push!(predictions, class)
    end
    return predictions
end
| [
37811,
198,
6239,
44,
10547,
51,
532,
8255,
5016,
7483,
198,
198,
1212,
318,
29908,
1088,
262,
15417,
19076,
2878,
13,
632,
468,
1115,
7032,
25,
198,
198,
18893,
397,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1058,
4909,
262... | 2.38677 | 3,855 |
## 2D Panels
#==========================================================================================#

# Common supertype for 2-D panels; concrete panels store two endpoints p1, p2.
abstract type AbstractPanel2D <: AbstractPanel end

# A surface panel defined by its two endpoints (2-vectors).
struct Panel2D{T <: Real} <: AbstractPanel2D
p1 :: SVector{2,T}
p2 :: SVector{2,T}
end

# A wake panel; same geometry as Panel2D but dispatched separately.
struct WakePanel2D{T <: Real} <: AbstractPanel2D
p1 :: SVector{2,T}
p2 :: SVector{2,T}
end
# A panel counts as a single element (e.g. for broadcasting).
Base.length(:: Panel2D) = 1

# Outer constructors inferring the numeric type from FieldVector endpoints.
Panel2D(p1 :: FieldVector{2,T}, p2 :: FieldVector{2,T}) where T <: Real = Panel2D{T}(p1, p2)
WakePanel2D(p1 :: FieldVector{2,T}, p2 :: FieldVector{2,T}) where T <: Real = WakePanel2D{T}(p1, p2)

# Endpoint-wise panel arithmetic (always yields a Panel2D).
a :: AbstractPanel2D + b :: AbstractPanel2D = Panel2D(p1(a) + p1(b), p2(a) + p2(b))
a :: AbstractPanel2D - b :: AbstractPanel2D = Panel2D(p1(a) - p1(b), p2(a) - p2(b))

# Collocation point as a * (p1 + p2); the midpoint for the default a = 0.5.
# NOTE(review): for a ≠ 0.5 this is a scaling of the endpoint sum, not the
# affine blend (1-a)*p1 + a*p2 — confirm intended semantics.
collocation_point(panel :: AbstractPanel2D, a = 0.5) = a * (p1(panel) + p2(panel))

# Vector from p1 to p2, and its Euclidean length.
panel_vector(panel :: AbstractPanel2D) = p2(panel) - p1(panel)
panel_length(panel :: AbstractPanel2D) = (norm ∘ panel_vector)(panel)
# Express both endpoints of `panel_2` in the local frame of `panel_1`
# (origin at panel_1's first point, x-axis along panel_1).
function transform_panel_points(panel_1 :: AbstractPanel2D, panel_2 :: AbstractPanel2D)
    # BUGFIX: the original destructured four scalars from the 2-tuple
    # `(p1(panel_2), p2(panel_2))`, which throws at runtime; unpack each
    # 2-vector endpoint separately.
    x1, y1 = p1(panel_2)
    x2, y2 = p2(panel_2)
    xs, ys = p1(panel_1)
    xp1, yp1 = affine_2D(x1, y1, xs, ys, panel_angle(panel_1))
    xp2, yp2 = affine_2D(x2, y2, xs, ys, panel_angle(panel_1))
    xp1, yp1, xp2, yp2
end
# Express `point` in the local frame of `panel` (origin at p1, x-axis along
# the panel direction).
function transform_panel(panel :: AbstractPanel2D, point :: SVector{2,<: Real})
xs, ys = p1(panel)
affine_2D(first(point), last(point), xs, ys, panel_angle(panel))
end
# Panel inclination angle of the p1→p2 vector w.r.t. the x-axis, in (-π, π].
panel_angle(panel :: AbstractPanel2D) = let (x, y) = panel_vector(panel); atan(y, x) end
# Unit tangent and normal obtained by rotating the axes by the panel angle.
panel_tangent(panel :: AbstractPanel2D) = rotation(1., 0., -panel_angle(panel))
panel_normal(panel :: AbstractPanel2D) = inverse_rotation(0., 1., panel_angle(panel))
# Classify the panel as belonging to the "lower" or "upper" surface from its angle.
panel_location(panel :: AbstractPanel2D) = let angle = panel_angle(panel); ifelse((π/2 <= angle <= π) || (-π <= angle <= -π/2), "lower", "upper") end
# All p1 points plus the last panel's p2 — the closed node list of the surface.
panel_points(panels) = [ p1.(panels); [(p2 ∘ last)(panels)] ]
# Panel with its endpoints swapped (reversed orientation).
reverse_panel(panel :: AbstractPanel2D) = Panel2D(panel.p2, panel.p1)
# Panel closing the surface: from the last panel's p2 back to the first panel's p1.
trailing_edge_panel(panels) = Panel2D((p2 ∘ last)(panels), (p1 ∘ first)(panels))
# Build a single wake panel trailing the surface, starting at the trailing
# edge's mid-height and extending by `bound` at angle `α`.
function wake_panel(panels, bound, α)
firstx, firsty = (p1 ∘ first)(panels)
lastx, lasty = (p2 ∘ last)(panels)
y_mid = (firsty + lasty) / 2
# sincos(α) returns (sin α, cos α), so y_bound = bound⋅sin α, x_bound = bound⋅cos α.
y_bound, x_bound = bound .* sincos(α)
# NOTE(review): the second endpoint multiplies the trailing-edge coordinates
# by the bound components instead of offsetting them (lastx + x_bound,
# y_mid + y_bound would be the usual form) — confirm this is intended.
WakePanel2D(SVector(lastx, y_mid), SVector(x_bound * lastx, y_bound * y_mid))
end
# Build `num` wake panels behind the trailing edge at its mid-height,
# with x-stations cosine-spaced over `length` starting past the chord.
# NOTE(review): the parameter `length` shadows Base.length inside this body.
function wake_panels(panels, chord, length, num)
_, firsty = (p1 ∘ first)(panels)
_, lasty = (p2 ∘ last)(panels)
y_mid = (firsty + lasty) / 2
bounds = cosine_spacing(chord + length / 2, length, num)
@. WakePanel2D(SVector(bounds[1:end-1], y_mid), SVector(bounds[2:end], y_mid))
end
# Evaluate `scalar_func(strength, x', y', 0, panel_length)` at point (x, y)
# expressed in the panel's local coordinates.
function panel_scalar(scalar_func, strength, panel :: AbstractPanel2D, x, y)
# Transform point to local panel coordinates
xp, yp = transform_panel(panel, SVector(x, y))
scalar_func(strength, xp, yp, 0., panel_length(panel))
end

# Same, evaluated at the collocation point of `panel_i` induced by `panel_j`.
function panel_scalar(scalar_func, strength, panel_j :: AbstractPanel2D, panel_i :: AbstractPanel2D, point = 0.5)
x, y = collocation_point(panel_i, point)
panel_scalar(scalar_func, strength, panel_j, x, y)
end
# Velocity induced at point (x, y) by a panel of given `strength`: computed
# in local panel coordinates, then rotated back to the global frame.
function panel_velocity(velocity_func, strength, panel :: AbstractPanel2D, x, y)
# Transform point to local panel coordinates
xp, yp = transform_panel(panel, SVector(x, y))
# Compute velocity in local panel coordinates
u, w = velocity_func(strength, xp, yp, 0., panel_length(panel))
# Transform velocity to original coordinates
inverse_rotation(u, w, panel_angle(panel))
end

# Same, evaluated at the collocation point of `panel_i`; returns (u, w).
function panel_velocity(velocity_func, strength, panel_j :: AbstractPanel2D, panel_i :: AbstractPanel2D, point = 0.5)
x, y = collocation_point(panel_i, point)
u, w = panel_velocity(velocity_func, strength, panel_j, x, y)
end
# Superpose the velocities induced by two singularity distributions (f1, f2)
# with their respective strengths, at a point or at another panel.
panel_velocity(f1, f2, str1, str2, panel :: AbstractPanel2D, x, y) = panel_velocity(f1, str1, panel, x, y) .+ panel_velocity(f2, str2, panel, x, y)
panel_velocity(f1, f2, str_j1, str_j2, panel_j :: AbstractPanel2D, panel_i :: AbstractPanel2D) = panel_velocity(f1, str_j1, panel_j, panel_i) .+ panel_velocity(f2, str_j2, panel_j, panel_i)
# Partition per-panel values into ("upper"/"lower") surface groups, pairing each
# value with either its panel's p1 point or its collocation point.
get_surface_values(panels, vals, surf = "upper", points = false) = partition(x -> (panel_location ∘ first)(x) == surf, (collect ∘ zip)(panels[2:end], vals), x -> ((first ∘ ifelse(points, p1, collocation_point) ∘ first)(x), last(x)))
| [
198,
2235,
362,
35,
5961,
1424,
198,
2,
23926,
4770,
2559,
855,
2,
198,
198,
397,
8709,
2099,
27741,
26639,
17,
35,
1279,
25,
27741,
26639,
886,
198,
198,
7249,
18810,
17,
35,
90,
51,
1279,
25,
6416,
92,
1279,
25,
27741,
26639,
17... | 2.426029 | 1,798 |
<reponame>bbejanov/ModelBaseEcon.jl<filename>src/transformations.jl
##################################################################################
# This file is part of ModelBaseEcon.jl
# BSD 3-Clause License
# Copyright (c) 2020, Bank of Canada
# All rights reserved.
##################################################################################
export Transformation, NoTransform, LogTransform, NegLogTransform, transformation, inverse_transformation
"""
    transformation(::Type{<:Transformation})

Return a `Function` that will be substituted into the model equations and will be
called to transform the input data before solving. See also
[`inverse_transformation`](@ref).

It is expected that `transformation(T) ∘ inverse_transformation(T) == identity`
and `inverse_transformation(T) ∘ transformation(T) == identity`, but this is
not verified.
"""
function transformation end
"""
    inverse_transformation(::Type{<:Transformation})

Return a `Function` that will be called to transform the simulation data after solving. See also
[`transformation`](@ref).

It is expected that `transformation(T) ∘ inverse_transformation(T) == identity`
and `inverse_transformation(T) ∘ transformation(T) == identity`, but this is
not verified.
"""
function inverse_transformation end
"""
    abstract type Transformation end

The base class for all variable transformations. Concrete subtypes provide
methods for [`transformation`](@ref) and [`inverse_transformation`](@ref).
"""
abstract type Transformation end

# Fallbacks: a subtype that does not define its pair of methods errors loudly.
transformation(T::Type{<:Transformation}) = error("Transformation of type $T is not defined.")
inverse_transformation(T::Type{<:Transformation}) = error("Inverse transformation of type $T is not defined.")
"""
    NoTransform <: Transformation

The identity transformation: data is used as-is in both directions.
"""
struct NoTransform <: Transformation end
transformation(::Type{NoTransform}) = Base.identity
inverse_transformation(::Type{NoTransform}) = Base.identity
"""
    LogTransform <: Transformation

The `log` transformation. The inverse is of course `exp`. This is the default
for variables declared with `@log`.
"""
struct LogTransform <: Transformation end
transformation(::Type{LogTransform}) = Base.log
inverse_transformation(::Type{LogTransform}) = Base.exp
"""
    NegLogTransform <: Transformation

The `log(-x)` transformation, with the inverse being `-exp(x)`. Use this when
the variable is negative with exponential behaviour (toward -∞).
"""
struct NegLogTransform <: Transformation end
# BUGFIX: a docstring and its definition were on the same line, which is a
# parse error ("extra token after end of expression"); split onto two lines
# so the docstring binds to the following definition.
"logm(x) = log(-x)"
@inline logm(x) = log(-x)

"mexp(x) = -exp(x)"
@inline mexp(x) = -exp(x)
# Hook the logm/mexp pair into the Transformation interface for NegLogTransform.
transformation(::Type{NegLogTransform}) = logm
inverse_transformation(::Type{NegLogTransform}) = mexp
export logm, mexp
| [
27,
7856,
261,
480,
29,
65,
1350,
13881,
709,
14,
17633,
14881,
36,
1102,
13,
20362,
27,
34345,
29,
10677,
14,
35636,
602,
13,
20362,
198,
29113,
29113,
14468,
2235,
198,
2,
770,
2393,
318,
636,
286,
9104,
14881,
36,
1102,
13,
20362... | 3.678014 | 705 |
# Optimization model: an objective, inequality constraints and per-variable
# box bounds. `TV` is the bound-vector type (CPU Vector or GPU CuVector).
mutable struct Model{T, TV<:AbstractVector{T}, TC<:AbstractVector{<:Function}}
dim::Int
objective::Function
ineq_constraints::TC
box_max::TV
box_min::TV
end
# A Model lives on whatever device its bound vectors live on.
GPUUtils.whichdevice(m::Model) = whichdevice(m.box_max)

# Problem dimension and per-variable bound accessors.
dim(m::Model) = m.dim
min(m::Model, i::Integer) = m.box_min[i]
max(m::Model, i::Integer) = m.box_max[i]
# Whole-vector bound accessors.
# BUGFIX: these previously returned the opposite bound vector (min returned
# box_max and vice versa), contradicting the indexed methods above.
min(m::Model) = m.box_min
max(m::Model) = m.box_max
objective(m::Model) = m.objective
constraints(m::Model) = m.ineq_constraints
constraint(m::Model, i::Integer) = m.ineq_constraints[i]
# Evaluate the objective; the 2-arg form passes an empty gradient buffer, and
# dispatch continues on the device the objective closure lives on.
eval_objective(m, x::AbstractVector{T}) where {T} = eval_objective(m, x, T[])
eval_objective(m, x, ∇g) = eval_objective(whichdevice(objective(m)), m, x, ∇g)
# CPU objective on a CPU vector: call directly, coerce to the eltype.
function eval_objective(::CPU, m, x::AbstractVector{T}, ∇g) where {T}
return T(m.objective(x, ∇g))
end
# GPU objective on a GPU vector: call directly.
eval_objective(::GPU, m, x::GPUVector{T}, ∇g) where {T} = T(m.objective(x, ∇g))
# GPU objective on a CPU vector: round-trip through CuArrays, copying the
# gradient back into the caller's buffer.
function eval_objective(::GPU, m, x::AbstractVector{T}, ∇g) where {T}
x_gpu = CuArray(x)
∇g_gpu = CuArray(∇g)
obj = T(m.objective(x_gpu, ∇g_gpu))
copyto!(∇g, ∇g_gpu)
return obj
end
# Unsupported combination: GPU data with a CPU objective.
function eval_objective(::CPU, m, x::CuVector{T}, ∇g) where {T}
error("Optimization on the GPU with the objective evaluation on the CPU is weird!")
end
# Evaluate the i-th inequality constraint; mirrors eval_objective's device
# dispatch (the 3-arg form passes an empty gradient buffer).
eval_constraint(m, i, x::AbstractVector{T}) where {T} = eval_constraint(m, i, x, T[])
eval_constraint(m, i, x, ∇g) = eval_constraint(whichdevice(constraint(m, i)), m, i, x, ∇g)
eval_constraint(::CPU, m, i, x::AbstractVector{T}, ∇g) where T = T(constraint(m, i)(x, ∇g))
eval_constraint(::GPU, m, i, x::GPUVector{T}, ∇g) where T = T(constraint(m, i)(x, ∇g))
# GPU constraint on a CPU vector: round-trip through CuArrays, copying the
# gradient back into the caller's buffer.
function eval_constraint(::GPU, m, i, x::AbstractVector{T}, ∇g) where T
x_gpu = CuArray(x)
∇g_gpu = CuArray(∇g)
constr = T(constraint(m, i)(x_gpu, ∇g_gpu))
copyto!(∇g, ∇g_gpu)
return constr
end
# Unsupported combination: GPU data with a CPU constraint.
function eval_constraint(::CPU, m, i, x::GPUVector, ∇g)
error("Optimization on the GPU with the constraint evaluation on the CPU is weird!")
end
# Tolerance accessors/setters.
# NOTE(review): the Model struct defined above has no ftol/xtol/grtol fields —
# presumably these target another model type elsewhere in the package; verify.
ftol(m) = m.ftol
xtol(m) = m.xtol
grtol(m) = m.grtol
ftol!(m, v) = m.ftol = v
xtol!(m, v) = m.xtol = v
grtol!(m, v) = m.grtol = v
# Convenience constructors: default to CPU; the device singleton selects the
# concrete vector type (Vector on CPU, CuVector on GPU).
Model(args...; kwargs...) = Model{CPU}(args...; kwargs...)
Model{T}(args...; kwargs...) where T = Model(T(), args...; kwargs...)
Model(::CPU, args...; kwargs...) = Model{Float64, Vector{Float64}, Vector{Function}}(args...; kwargs...)
Model(::GPU, args...; kwargs...) = Model{Float64, CuVector{Float64}, Vector{Function}}(args...; kwargs...)
# Construct an unconstrained model: no inequality constraints, and box
# bounds defaulting to (-Inf, +Inf) for every variable.
function Model{T, TV, TC}(dim, objective::Function) where {T, TV, TC}
    mins = ninfsof(TV, dim)   # vector of -Inf lower bounds (per its name)
    maxs = infsof(TV, dim)    # vector of +Inf upper bounds
    # BUGFIX: the struct's field order is (..., box_max, box_min); the
    # original passed (mins, maxs), storing -Inf as upper bounds and +Inf as
    # lower bounds.
    Model{T, TV, TC}(dim, objective, Function[],
        maxs, mins)
end
# Box constraints
# Set the box constraint [minb, maxb] for the single variable `i`.
function box!(m::Model, i::Integer, minb::T, maxb::T) where {T}
    1 <= i <= dim(m) ||
        throw(ArgumentError("box constraint need to applied to an existing variable"))
    m.box_min[i] = minb
    m.box_max[i] = maxb
end
# Apply the same box constraint [minb, maxb] to every variable of the model.
function box!(m::Model, minb::T, maxb::T) where {T}
    nvars = dim(m)
    fill!(view(m.box_min, 1:nvars), minb)
    fill!(view(m.box_max, 1:nvars), maxb)
end
# Set per-variable box constraints from vectors of lower/upper bounds.
function box!(m::Model, minbs::AbstractVector{T}, maxbs::AbstractVector{T}) where {T}
    # BUGFIX: the original tested `length(minbs)` twice and never checked
    # `maxbs`.
    if (length(minbs) != dim(m)) || (length(maxbs) != dim(m))
        throw(ArgumentError("box constraint vector must have same size as problem dimension"))
    end
    map!(identity, m.box_min, minbs)
    map!(identity, m.box_max, maxbs)
end
# Register an inequality constraint function g(x, ∇g) with the model.
function ineq_constraint!(m::Model, f::Function)
    push!(m.ineq_constraints, f)
end

# Register several inequality constraints at once.
function ineq_constraint!(m::Model, fs::Vector{Function})
    append!(m.ineq_constraints, fs)   # idiomatic bulk insert, same effect as pushing each
end
| [
76,
18187,
2878,
9104,
90,
51,
11,
3195,
27,
25,
23839,
38469,
90,
51,
5512,
17283,
27,
25,
23839,
38469,
90,
27,
25,
22203,
11709,
198,
220,
220,
220,
5391,
3712,
5317,
198,
220,
220,
220,
9432,
3712,
22203,
198,
220,
220,
220,
2... | 2.231539 | 1,598 |
#-------- Inverses of DPR1
# NOTE(review): this section uses pre-1.0 Julia syntax (`inv{T}(...)` method
# parameters, `Array(T,n)`, `sumabs`) plus the DoubleDouble.jl `Double` type;
# it will not parse on Julia >= 1.0 without modernization.
function inv{T}(A::SymDPR1{T},i::Integer,tols::Vector{Float64})
# COMPUTES: inverse of a shifted SymDPR1 matrix A=diagm(A.D)+A.r*A.u*A.u',
# inv(A-A.D[i]*I) which is a SymArrow.
# Uses higher precision to compute top of the arrow element accurately, if
# needed.
# tols=[tolb,tolz] are tolerances, usually [1e3, 10*n]
# [0.0,0.0] forces DoubleDouble, [1e50,1e50] would never use it
# RETURNS: SymArrow(D,z,b,i), Kb, Kz, Qout
# Kb - condition Kb, Kz - condition Kz, Qout = 1 / 0 - double was / was not used
n=length(A.D)
D=Array(T,n-1)
z=Array(T,n-1)
wz=one(T)/A.u[i]
shift=A.D[i]
# Shifted-inverse diagonal and arrow column, skipping the pivot index i.
for k=1:i-1
D[k]=one(T)/(A.D[k]-shift)
z[k]=-A.u[k]*D[k]*wz
end
for k=i+1:n
D[k-1]=one(T)/(A.D[k]-shift)
z[k-1]=-A.u[k]*D[k-1]*wz
end
# compute the sum in a plain loop
# P accumulates the positive terms, Q the negative ones, so Kb below
# measures cancellation in P+Q.
P=zero(T)
Q=zero(T)
Qout=0
for k=1:i-1
D[k]>0.0 ? P+=A.u[k]^2*D[k] : Q+=A.u[k]^2*D[k]
end
for k=i:n-1
D[k]>0.0 ? P+=A.u[k+1]^2*D[k] : Q+=A.u[k+1]^2*D[k]
end
A.r>0 ? P=P+one(T)/A.r : Q=Q+one(T)/A.r
Kb=(P-Q)/abs(P+Q)
# compute Kz
Kz=(sumabs(A.u)-abs(A.u[i]))*abs(wz)
# Kz=maxabs(A.z)*abs(wz)
if Kb<tols[1] || Kz<tols[2]
b=(P+Q)*wz*wz
else # recompute in Double
# Too much cancellation: redo the sum in double-double arithmetic.
Qout=1
shiftd=map(Double,A.D[i])
Dd=[Double{Float64}[Double(A.D[k])-shiftd for k=1:i-1];
Double{Float64}[Double(A.D[k])-shiftd for
k=i+1:length(A.D)]]
wzd=Double(A.u[i])
Pd,Qd=map(Double,(0.0,0.0))
for k=1:i-1
Dd[k].hi>0.0 ? Pd+=Double(A.u[k])^2/Dd[k] :
Qd+=Double(A.u[k])^2/Dd[k]
end
for k=i+1:n
Dd[k-1].hi>0.0 ? Pd+=Double(A.u[k])^2/Dd[k-1] :
Qd+=Double(A.u[k])^2/Dd[k-1]
# @show P,Q
end
A.r > 0 ? Pd+=Double(1.0)/Double(A.r) : Qd+=Double(1.0)/Double(A.r)
bd=(Pd+Qd)/(wzd*wzd)
b=bd.hi+bd.lo
end
# return this
SymArrow(D,z,b,i),Kb,Kz,Qout
end # inv
# NOTE(review): pre-1.0 Julia syntax; see the note at the top of this section.
function inv{T}(A::SymDPR1{T}, shift::Float64, tolr::Float64)
# COMPUTES: inverse of the shifted SymDPR1 A = diagm(A.D)+A.r*A.u*A.u',
# inv(A-shift*I) = D + rho*u*u', shift!=A.D[i], which is again a SymDPR1
# uses DoubleDouble to compute A.r accurately, if needed.
# tolr is tolerance, usually 1e3, 0.0 forces Double, 1e50 would never use it
# RETURNS: SymDPR1(D,u,rho), Krho, Qout
# Krho - condition Krho, Qout = 1 / 0 - Double was / was not used
n=length(A.D)
D=Array(T,n)
u=Array(T,n)
for k=1:n
D[k]=one(T)/(A.D[k]-shift)
u[k]=A.u[k]*D[k]
end
# compute gamma and Kgamma
#--- compute the sum in a plain loop
# P holds positive terms, Q negative; Krho measures their cancellation.
P=zero(T)
Q=zero(T)
Qout=0
for k=1:n
D[k]>0.0 ? P=P+A.u[k]^2*D[k] : Q=Q+A.u[k]^2*D[k]
end
A.r>0 ? P=P+one(T)/A.r : Q=Q+one(T)/A.r
# Condition of rho
Krho=(P-Q)/abs(P+Q)
if Krho < tolr
rho=-one(T)/(P+Q)
else # recompute in Double
Qout=1
Pd,Qd=map(Double,(0.0,0.0))
shiftd=Double(shift)
for k=1:n
D[k]>0.0 ? Pd=Pd+Double(A.u[k])^2/(Double(A.D[k])-shiftd) : Qd=Qd+Double(A.u[k])^2/(Double(A.D[k])-shiftd)
end
A.r > 0 ? Pd+=Double(1.0)/Double(A.r) : Qd+=Double(1.0)/Double(A.r)
r=Double(1.0)/(Pd+Qd)
rho=-(r.hi+r.lo)
end
# returns the following
SymDPR1(D,u,rho), Krho, Qout
end # inv
# NOTE(review): pre-1.0 Julia syntax; see the note at the top of this section.
function inv{T}(A::SymDPR1{T}, shift::Double)
# COMPUTES: inverse of the shifted SymDPR1 A = diagm(A.D)+A.r*A.u*A.u',
# inv(A-shift*I) = D + rho*u*u', shift!=A.D[i], which is again a SymDPR1
# here shift is Double so it uses Double to compute everything
# RETURNS: SymDPR1(D,u,rho), Qout
# Qout = 1 on exit meaning Double was used
n=length(A.D)
D=Array(Double,n)
u=Array(Double,n)
for k=1:n
D[k]=Double(A.D[k])-shift
end
u=map(Double,A.u)
oned=Double(1.0)
zerod=Double(0.0)
# u <- u ./ (D - shift), D <- 1 ./ (D - shift), all in double-double.
for k=1:n
u[k]=u[k]/D[k]
D[k]=oned/D[k]
end
# compute rho
# compute the sum in a plain loop
P,Q=zerod,zerod
Qout=1
for k=1:n
D[k].hi > 0.0 ? P=P+Double(A.u[k])*u[k] : Q=Q+Double(A.u[k])*u[k]
end
A.r > 0 ? P+=Double(1.0)/Double(A.r) : Q+=Double(1.0)/Double(A.r)
r=oned/(P+Q)
rho=-(r.hi+r.lo)
# returns the following
# SymDPR1(T[x.hi+x.lo for x=D],T[x.hi+x.lo for x=u],rho), Qout
# Collapse the double-double values back to T before constructing the result.
D1=Array(T,n)
u1=Array(T,n)
for k=1:n
D1[k]=D[k].hi+D[k].lo
u1[k]=u[k].hi+u[k].lo
end
SymDPR1(D1,u1,rho), Qout
end # inv
# NOTE(review): pre-1.0 Julia syntax (`find`, `sumabs`, two-argument
# value-searching `findfirst(A,v)`, `Double`); kept verbatim, comments only.
function eig( A::SymDPR1,k::Integer,tols::Vector{Float64})
# COMPUTES: k-th eigenpair of an ordered irreducible SymDPR1
# A = diagm(A.D)+A.r*A.u*A.u', A.r > 0
# tols=[tolb,tolz,tolnu,tolrho,tollambda] = [1e3,10.0*n,1e3,1e3,1e3]
# RETURNS: lambda, v, Sind, Kb, Kz, Knu, Krho, Qout
# lambda - k-th eigenvalue in descending order
# v - lambda's normalized eigenvector
# Kb, Kz, Knu, Krho - condition numbers
# Qout = 1 / 0 - Double was / was not used
# Set the dimension
n = length(A.D)
# Set all conditions initially to zero
Kb,Kz,Knu,Krho=0.0,0.0,0.0,0.0
Qout=0
v=zeros(n)
# Kz is former kappa_nu
# Determine the shift sigma, the shift index i, and whether lambda
# is on the left or the right side of the nearest pole
# Exterior eigenvalues (k = 1 or k = n):
if k == 1
sigma,i,side = A.D[1],1,'R'
else
# Interior eigenvalues (k in (2,...n-1) ):
# Sign of the secular function at the interval midpoint decides the side.
Dtemp = A.D-A.D[k]
middle = Dtemp[k-1]/2.0
Fmiddle = 1.0+A.r*sum(A.u.^2./(Dtemp-middle))
sigma,i,side = Fmiddle > 0.0 ? (A.D[k],k,'R') : (A.D[k-1],k-1,'L')
end
# Compute the inverse of the shifted matrix, A_i^(-1), Kb and Kz
Ainv,Kb,Kz,Qout = inv(A,i,tols[1:2])
# Compute the eigenvalue of the inverse shifted matrix
nu = bisect( Ainv,side )
# nu=fastzero([invD1; 0; invD2], [w1;wz;w2], b, side); # implement later
# [nu-nu1]/nu, [nu-nueva]/nu, pause # and compare
if abs(nu)==Inf
# this is nonstandard
# Deflation in dpr1eig (nu=Inf)
v[i]=1.0
lambda=sigma
else
# standard case, full computation
# nu1 is the F- or 1-norm of the inverse of the shifted matrix
# nu10=maximum([sum(abs(Ainv.z))+abs(Ainv.a), maximum(abs(Ainv.D)+abs(Ainv.z))])
nu1=0.0
for k=1:n-1
nu1=max(nu1,abs(Ainv.D[k])+abs(Ainv.z[k]))
end
nu1=max(sumabs(Ainv.z)+abs(Ainv.a), nu1)
Knu=nu1/abs(nu)
if Knu<tols[3]
# Accuracy is fine, compute the eigenvector
mu = 1.0/nu
# v=[ A.u./((A.D-sigma)-mu)]
for l=1:n
v[l]= A.u[l]/((A.D[l]-sigma)-mu)
end
# for k=1:n-1
# v[k] = A.z[k]/((A.D[k]-sigma)-mu)
# end
# v[n]=-one
lambda, v = mu+sigma, v/norm(v)
else
# Remedies according to Remark 3 - we shift between original
# eigenvalues and compute DPR1 matrix
# 1/nu1+sigma, 1/nu+sigma
println("Remedy 3 ")
nu = side=='R' ? abs(nu) : -abs(nu)
nu1=-sign(nu)*nu1
sigma1=(nu1+nu)/(2.0*nu*nu1)+sigma
# NOTE(review): `findfirst(A.D-sigma1,0.0)` is the deprecated two-arg
# value search (first index equal to 0.0); needs porting on Julia >= 0.7.
if findfirst(A.D-sigma1,0.0)>0 # we came back with a pole
# recompute sigmav more accurately according with dekker
sigmav=(Double(nu1)+Double(nu))/(Double(2.0)*Double(nu)*Double(nu1))+Double(sigma)
# Compute the inverse of the shifted arrowhead (DPR1)
Ainv,Qout1=inv(A,sigmav) # Ainv is Float64
nu1=bisect(Ainv,side)
mu1 = 1.0/nu1
D0=map(A.D,Double)-sigmav
D1=D0.hi+D0.lo
if findfirst(D1-mu1,0.0)>0
ind=find(D1-mu1==0.0);
v=zeros(n)
v[ind]=1.0;
else
v=[ A.u./(D1-mu1)]
end
# Shift the eigenvalue back in Double
lam = Double(1.0)/Double(nu1)+sigmav
# Return this
lambda,v = lam.hi+lam.lo, v/norm(v)
else
# Compute the inverse of the shifted arrowhead (DPR1)
Ainv, Krho,Qout1=inv(A,sigma1,tols[4]) # Ainv is Float64
# Compute the eigenvalue by bisect for DPR1
# Note: instead of bisection could use dlaed4 (need a wrapper) but
# it is not faster. There norm(u)==1
nu1= bisect(Ainv,side)
mu1=1.0/nu1
# standard v
v=A.u./((A.D-sigma1)-mu1)
# Return this - shift the eigenvalue back and normalize the vector
lambda, v = mu1+sigma1, v/norm(v)
end
Qout==1 && (Qout=Qout+2)
end
# Remedy according to Remark 1 - we recompute the the eigenvalue
# near zero from the inverse of the original matrix (a DPR1 matrix).
if (abs(A.D[i])+abs(1.0/nu))/abs(lambda)>tols[5]
if (k==1 && A.D[1]<0.0 || side=='L' && sign(A.D[i])+sign(A.D[i+1])==0 ||
side=='R' && sign(A.D[i])+sign(A.D[i-1])==0)
println("Remedy 1 ")
# Compute the inverse of the original arrowhead (DPR1)
Ainv,Krho,Qout1 = inv(A,0.0,tols[4]) # Ainv is Float64
Qout==1 && (Qout=Qout+4)
if abs(Ainv.r)==Inf
lambda=0.0
else
# Here we do not need bisection. We compute the Rayleigh
# quotient by using already computed vectors which is
# componentwise accurate
nu1=sum(v.^2.*Ainv.D)+Ainv.r*sum(v.*Ainv.u)^2;
lambda=1.0/nu1
end
end
end
end
# Return this
lambda,v,i,Kb,Kz,Knu,Krho,Qout
end # eig (k)
function eig(A::SymDPR1, tols::Vector{Float64})
    # COMPUTES: all eigenvalues and eigenvectors of a real symmetric SymDPR1
    # A = diagm(A.D)+A.r*A.u*A.u'
    # tols = [tolb,tolz,tolnu,tolrho,tollambda] = [1e3,10.0*n,1e3,1e3,1e3] or similar
    # RETURNS: U, E, Sind, Kb, Kz, Knu, Krho, Qout
    # U = eigenvectors, E = eigenvalues in decreasing order
    # Sind[k] - shift index i for the k-th eigenvalue
    # Kb, Kz, Knu, Krho [k] - respective conditions for the k-th eigenvalue
    # Qout[k] = 1 / 0 - Double was / was not used when computing k-th eigenvalue
    # NOTE(review): this function uses pre-1.0 Julia APIs (`find`, `eye`,
    # `Array(T, n)`) — the file appears to target an old Julia version;
    # confirm before modernizing.
    n=length(A.D)
    n0=n
    # Checking if A.r > 0
    signr = A.r > 0.0 ? 1.0 : -1.0
    # Ordering the matrix
    # Work with D sorted in decreasing order; `is` records the permutation so
    # results can be mapped back at the end.
    D=signr*A.D
    is=sortperm(D,rev=true)
    D=D[is]
    z=A.u[is]
    rho=signr*A.r
    U=eye(n,n)
    E=zeros(n)
    Kb=zeros(n); Kz=zeros(n); Knu=zeros(n); Krho=zeros(n)
    Qout=zeros(Int,n); Sind=zeros(Int,n)
    # Quick return for 1x1, this is trivial for SymArrow, not so trivial here :)
    if n==1
        U=1;
        if (D==0)&&((rho==0)|(z==0))
            E=0
        else
            E=A.D[1]+A.r*A.u[1]^2
            # Higher accuracy if needed
            KD=(abs(A.D[1])+abs(A.r)*A.u[1]^2)/abs(E)
            if KD>tols[1]
                Ed=Double(A.D[1])+Double(A.r)*Double(A.u[1])^2
                E=Ed.hi+Ed.lo
            end
        end
        return U, [E], Sind,Kb,Kz,Knu,Krho,Qout
    end
    # test for deflation in z
    # Components with z == 0 decouple: their eigenvalues are the diagonal
    # entries themselves.
    z0=find(z.==0)
    zx=find(z.!=0)
    if isempty(zx) # nothing to do
        E=A.D
        isE=sortperm(E,rev=true)
        E=E[isE]
        U=U[:,isE]
        return U,E,Sind,Kb,Kz,Knu,Krho,Qout
    end
    if !isempty(z0)
        # Deflated eigenvalues are copied over directly; the remaining problem
        # is reduced to the non-zero z components.
        E[z0]=D[z0]
        D=D[zx]
        z=z[zx]
        if !isempty(z)
            # return U,E,Sind,Kb,Kz,Knu,Krho,Qout
            n=length(z)
        end
    end
    # Test for deflation in D
    g=D[1:n-1]-D[2:n]
    # Can play with inexact deflation
    # g0=find(abs(g)<eps)
    # gx=find(abs(g)>=eps)
    # Only exact deflation !!
    g0=find(g.==0.0)
    gx=find(g.!=0.0)
    if !isempty(g0)
        # Deflation
        Dgx=D[gx]; zgx=z[gx]
        lg0=length(g0)
        R=Array(Tuple{Givens{Float64},Float64},lg0)
        # For each repeated diagonal entry, a Givens rotation zeroes one of the
        # two z components; the deflated eigenvalue is the diagonal entry.
        for l=lg0:-1:1
            R[l]=givens(z[g0[l]],z[g0[l]+1],zx[g0[l]],zx[g0[l]+1])
            z[g0[l]]=R[l][2]; z[g0[l]+1]=0.0
            # A_mul_Bc!(U,R) # multiply R'*U later
            E[zx[g0[l]+1]]=D[g0[l]+1]
        end
        # remains
        gx=[0;gx]+1
        nn=length(gx)
        zxx=zx[gx]
        # Solve the reduced (deflated) DPR1 eigenproblem one eigenpair at a time.
        for k=1:nn
            E[zxx[k]],U[zxx,zxx[k]],Sind[zxx[k]],Kb[zxx[k]],Kz[zxx[k]],Knu[zxx[k]],Krho[zxx[k]],Qout[zxx[k]]=
            eig(SymDPR1(D[gx],z[gx],rho),k,tols)
        end
        # Apply the stored rotations to the eigenvector matrix (deferred above).
        for l=1:lg0
            U=R[l][1]'*U
        end
    else
        # No deflation in D
        for k=1:n
            E[zx[k]],U[zx,zx[k]],Sind[zx[k]],Kb[zx[k]],Kz[zx[k]],Knu[zx[k]],Krho[zx[k]],Qout[zx[k]]=
            eig(SymDPR1(D,z,rho),k,tols)
        end
    end
    # back permutation of vectors
    isi=sortperm(is)
    # change the sign if A.r was negative
    # must sort E once more
    E=signr*E
    es=sortperm(E,rev=true)
    E=E[es]
    U=U[isi,es]
    # Return this
    U,E,Sind[es],Kb[es],Kz[es],Knu[es],Krho[es],Qout[es]
end # eig (all)
| [
2,
982,
554,
690,
274,
286,
41176,
16,
198,
198,
8818,
800,
90,
51,
92,
7,
32,
3712,
43094,
35,
4805,
16,
90,
51,
5512,
72,
3712,
46541,
11,
83,
10220,
3712,
38469,
90,
43879,
2414,
30072,
628,
220,
220,
220,
1303,
24301,
3843,
... | 1.659746 | 8,188 |
# Tests for Malmquist DEA Model
@testset "MalmquistDEAModel" begin
    ## Test Malmquist DEA Model with 1 input and 1 output
    # Data layout: X[observation, input, period] and Y[observation, output, period].
    X = Array{Float64,3}(undef, 5, 1, 2)
    X[:, :, 1] = [2; 3; 5; 4; 4];
    X[:, :, 2] = [1; 2; 4; 3; 4];
    Y = Array{Float64,3}(undef, 5, 1, 2)
    Y[:, :, 1] = [1; 4; 6; 3; 5];
    Y[:, :, 2] = [1; 4; 6; 3; 3];
    # Default Malmquist Productivity Index
    mprod = malmquist(X, Y)
    @test typeof(mprod) == MalmquistDEAModel
    @test nobs(mprod) == 5
    @test ninputs(mprod) == 1
    @test noutputs(mprod) == 1
    @test nperiods(mprod) == 2
    @test prodchange(mprod) ≈ [2.0000000000;
                               1.5000000000;
                               1.2500000000;
                               1.3333333333;
                               0.6000000000]
    # :Prod is the default component of prodchange
    @test prodchange(mprod, :Prod) == prodchange(mprod)
    # Efficiency change (EC) component
    @test prodchange(mprod, :EC) ≈ [1.3333333333;
                                    1.0000000000;
                                    0.8333333333;
                                    0.8888888889;
                                    0.4000000000];
    # Technical change (TC) component
    @test prodchange(mprod, :TC) ≈ [1.5; 1.5; 1.5; 1.5; 1.5];
    # Default output oriented
    mprodoo = malmquist(X, Y, orient = :Output)
    @test prodchange(mprodoo) == prodchange(mprod)
    @test prodchange(mprodoo, :Prod) == prodchange(mprod, :Prod)
    @test prodchange(mprodoo, :EC) == prodchange(mprod, :EC)
    @test prodchange(mprodoo, :TC) == prodchange(mprod, :TC)
    # Test geomean is the geometric mean of TC
    mprodbase = malmquist(X, Y, refperiod = :Base)
    mprodcomparison = malmquist(X, Y, refperiod = :Comparison)
    @test prodchange(mprod, :TC) == sqrt.( prodchange(mprodbase, :TC) .* prodchange(mprodcomparison, :TC) )
    ## Test Malmquist DEA Model with 1 input and 1 output; and 3 years
    X = Array{Float64,3}(undef, 5, 1, 3)
    X[:, :, 1] = [2; 3; 5; 4; 4];
    X[:, :, 2] = [1; 2; 4; 3; 4];
    X[:, :, 3] = [0.5; 1.5; 3; 2; 4]
    Y = Array{Float64,3}(undef, 5, 1, 3)
    Y[:, :, 1] = [1; 4; 6; 3; 5];
    Y[:, :, 2] = [1; 4; 6; 3; 3];
    Y[:, :, 3] = [2; 4; 6; 3; 1];
    # Default Malmquist Productivity Index
    # With 3 periods, prodchange returns one column per period transition (2 columns).
    mprod3 = malmquist(X, Y)
    @test nobs(mprod3) == 5
    @test ninputs(mprod3) == 1
    @test noutputs(mprod3) == 1
    @test nperiods(mprod3) == 3
    @test prodchange(mprod3) ≈ [2.0000000000 4.0000000;
                                1.5000000000 1.3333333;
                                1.2500000000 1.3333333;
                                1.3333333333 1.5000000;
                                0.6000000000 0.3333333]
    @test prodchange(mprod3, :Prod) == prodchange(mprod3)
    @test prodchange(mprod3, :EC) ≈ [1.3333333333 2.0000000;
                                     1.0000000000 0.6666667;
                                     0.8333333333 0.6666667;
                                     0.8888888889 0.7500000;
                                     0.4000000000 0.1666667] atol = 1e-7;
    @test prodchange(mprod3, :TC) ≈ [1.5 2.0; 1.5 2.0; 1.5 2.0; 1.5 2.0; 1.5 2.0];
    # Print
    # Smoke test: show must not throw for either model.
    show(IOBuffer(), mprod)
    show(IOBuffer(), mprod3)
    # Test errors
    @test_throws DimensionMismatch malmquist(X[1:4,:,:], X[1:5,:,:]) # Different number of observations in inputs and outputs
    @test_throws DimensionMismatch malmquist(X[:,:,1:2], X[:,:,1:3]) # Different number of time periods in inputs and outputs
    @test_throws ArgumentError malmquist(X, Y, refperiod = :Error) # Invalid reference period
    @test_throws ArgumentError prodchange(mprod, :Error)
end
| [
2,
30307,
329,
29926,
75,
76,
30062,
28647,
9104,
198,
31,
9288,
2617,
366,
15029,
76,
30062,
7206,
2390,
375,
417,
1,
2221,
628,
220,
220,
220,
22492,
6208,
29926,
75,
76,
30062,
28647,
9104,
351,
352,
5128,
290,
352,
5072,
198,
22... | 1.82359 | 1,950 |
# Implements node data types and associated helper functions
export
    SumNode,
    ProductNode,
    CategoricalDistribution,
    IndicatorFunction,
    GaussianDistribution
"""
Root of the node type hierarchy: sum nodes, product nodes and leaf
distribution nodes all subtype `Node`.
"""
abstract type Node end
"""
Sum node data type.

- `children`: indices of the child nodes (presumably positions in the
  network's node vector — confirm against the container that stores nodes)
- `weights`: one weight per child — TODO confirm the pairing is positional
"""
struct SumNode <: Node
    children::Vector{UInt}
    weights::Vector{Float64}
    # SumNode() = new(Vector{UInt}(),Vector{Float64}())
    # SumNode(children::Vector{<:Integer},weights::Vector{Float64}) = new(children,weights)
end
"""
Product node data type.

- `children`: indices of the child nodes (same indexing scheme as `SumNode`)
"""
struct ProductNode <: Node
    children::Vector{UInt}
    #ProductNode() = new(Vector{UInt}())
    #ProductNode(children::Vector{<:Integer}) = new(children)
    #ProductNode(children) = new(children)
end
" Abstract leaf node type "
abstract type LeafNode <: Node end
"""
Indicator Function Node. Tolerance sets a maximum discrepancy when evaluating the node at a given value. Its default value is 1e-6.

- `scope`: index of the variable this leaf refers to
- `value`: the value at which the indicator is 1
- `tolerance`: maximum |value - x| still treated as a match (default 1e-6)
"""
struct IndicatorFunction <: LeafNode
    scope::UInt
    value::Float64
    tolerance::Float64
    # Both constructors fix tolerance at 1e-6; Integer values are converted.
    IndicatorFunction(scope::Integer,value::Float64) = new(scope,value,1e-6)
    IndicatorFunction(scope::Integer,value::Integer) = new(scope,Float64(value),1e-6)
end
"""
Univariate Categorical Distribution Node

- `scope`: index of the variable this leaf refers to
- `values`: probability of each category, indexed by the category value
"""
struct CategoricalDistribution <: LeafNode
    scope::UInt
    values::Vector{Float64}
end
"""
Univariate Gaussian Distribution Node

- `scope`: index of the variable this leaf refers to
- `mean`, `variance`: distribution parameters (mutable so they can be updated,
  e.g. during learning — TODO confirm intent)
"""
mutable struct GaussianDistribution <: LeafNode
    scope::UInt
    mean::Float64
    variance::Float64
end
# LeafNode = Union{IndicatorFunction,CategoricalDistribution,GaussianDistribution}
"""
    (n::IndicatorFunction)(x::AbstractVector{<:Real})::Float64

Evaluate the indicator leaf at configuration `x`. A `NaN` entry at the node's
scope is treated as marginalized out and yields 1.0.
NOTE(review): the comparison uses `≈` (default relative tolerance), not the
node's `tolerance` field — confirm that is intended.
"""
function (n::IndicatorFunction)(x::AbstractVector{<:Real})::Float64
    observed = x[n.scope]
    isnan(observed) && return 1.0
    return n.value ≈ observed ? 1.0 : 0.0
end
"""
    (n::CategoricalDistribution)(x::AbstractVector{<:Real})::Float64

Evaluate the categorical leaf at configuration `x`; `NaN` at the node's scope
yields 1.0 (variable marginalized out).
"""
function (n::CategoricalDistribution)(x::AbstractVector{<:Real})::Float64
    observed = x[n.scope]
    isnan(observed) && return 1.0
    return n.values[Int(observed)]
end
"""
    (n::GaussianDistribution)(x::AbstractVector{<:Real})::Float64

Evaluate the Gaussian density at configuration `x`; `NaN` at the node's scope
yields 1.0 (variable marginalized out).
"""
function (n::GaussianDistribution)(x::AbstractVector{<:Real})::Float64
    observed = x[n.scope]
    isnan(observed) && return 1.0
    deviation = observed - n.mean
    return exp(-deviation^2 / (2 * n.variance)) / sqrt(2 * π * n.variance)
end
"Is this a leaf node?"
@inline isleaf(n::Node) = isa(n,LeafNode)
"Is this a sum node?"
@inline issum(n::Node) = isa(n,SumNode)
"Is this a product node?"
@inline isprod(n::Node) = isa(n,ProductNode)
"""
    logpdf(node,value)
Evaluates leaf `node` at the given `value` in log domain.
A `NaN` value means the variable is marginalized out, so the density is 1
and the log-density is 0.0.
"""
# isnan(value) is always false for Integer arguments, so the NaN guard is a no-op there.
@inline logpdf(n::IndicatorFunction, value::Integer) = isnan(value) ? 0.0 : value == Int(n.value) ? 0.0 : -Inf
# Float comparison honors the node's tolerance field (unlike the functor, which uses ≈).
@inline logpdf(n::IndicatorFunction, value::Float64) = isnan(value) ? 0.0 : abs(value - n.value) < n.tolerance ? 0.0 : -Inf
@inline logpdf(n::CategoricalDistribution, value::Integer) = log(n.values[value])
@inline logpdf(n::CategoricalDistribution, value::Float64) = isnan(value) ? 0.0 : logpdf(n,Int(value))
# Gaussian log-density: -(x-μ)²/(2σ²) - ½log(2πσ²).
@inline logpdf(n::GaussianDistribution, value::Float64)::Float64 = isnan(value) ? 0.0 : (-(value-n.mean)^2/(2*n.variance)) - log(2*π*n.variance)/2
"""
    maximum(node)
Returns the maximum value of the distribution
"""
@inline Base.maximum(n::IndicatorFunction) = 1.0
@inline Base.maximum(n::CategoricalDistribution) = Base.maximum(n.values)
# Peak of the Gaussian density, attained at the mean.
@inline Base.maximum(n::GaussianDistribution) = 1/sqrt(2*π*n.variance)
"""
    argmax(node)
Returns the value at which the distribution is maximum
"""
@inline Base.argmax(n::IndicatorFunction) = n.value
# For a categorical leaf the "value" is the category index of highest probability.
@inline Base.argmax(n::CategoricalDistribution) = Base.argmax(n.values)
@inline Base.argmax(n::GaussianDistribution) = n.mean
"""
    scope(node)
Returns the scope (variable index) of a leaf node
"""
scope(n::LeafNode) = n.scope
| [
2,
1846,
1154,
902,
10139,
1366,
3858,
290,
3917,
31904,
5499,
198,
39344,
220,
198,
220,
220,
220,
5060,
19667,
11,
220,
198,
220,
220,
220,
8721,
19667,
11,
198,
220,
220,
220,
327,
2397,
12409,
20344,
3890,
11,
198,
220,
220,
220... | 2.827586 | 1,334 |
<reponame>KlausC/Multroot.jl<filename>test/testsuit/petk06.m.jl
function petk06()
#
#  <NAME> testing polynomials, page 146
#
# Builds a test polynomial with known multiple roots and returns the pair
# (coefficients, expected roots with multiplicities).
    # Roots: -1 (x4), 3 (x3), -i (x2, once here and once more via duplication below)
    y = [-1.0*[1;1;1;1];3*[1;1;1];-im;-im];
    p1 = reverse(poly(y).a)
    # p2 has roots 1±2i; squaring it via self-convolution gives multiplicity 2.
    p2 = [1.0;-2;5];
    p2 = conv(p2,p2);
    p = conv(p1,p2);
    # Expected root/multiplicity table: [root multiplicity] per row.
    z = [-1.0 4; 3 3; -im 2; 1+2*im 2; 1-2*im 2];
    p, PolyZeros(z)
end
| [
27,
7856,
261,
480,
29,
42,
38024,
34,
14,
15205,
15763,
13,
20362,
27,
34345,
29,
9288,
14,
9288,
6063,
14,
6449,
74,
3312,
13,
76,
13,
20362,
198,
8818,
4273,
74,
3312,
3419,
198,
2,
198,
2,
1279,
20608,
29,
4856,
745,
6213,
2... | 1.705 | 200 |
<reponame>hasundue/TPT.jl
"""
TPT — perturbation-theory toolkit for liquid-metal thermodynamics
(presumably "thermodynamic perturbation theory" — confirm). This module only
declares exports and wires together the implementation files via `include`;
the include order matters (abstract reference types before concrete ones).
"""
module TPT
export TPTSystem,
       # Basic information
       ncomp,
       composition,
       numberdensity,
       totalnumberdensity,
       temperature,
       # Structural properties
       structurefactor,
       paircorrelation,
       cavityfunction,
       nndistance,
       # Interatomic interaction
       pairpotential,
       pairpotential_minimum,
       pairpotential_minimizer,
       hsdiameter_estimate,
       # Thermodynamic properties
       kinetic,
       entropy,
       entropy_gas,
       entropy_conf,
       internal,
       helmholtz,
       # Reference systems
       AHS,
       WCA,
       # Hard-sphere system
       hsdiameter,
       packingfraction,
       totalpackingfraction,
       contactvalue,
       contactgradient,
       # Perturbation
       LennardJones,
       NFE,
       WHTB,
       NFETB,
       BOTB,
       # WCA
       blipfunction,
       # NFE
       Ashcroft,
       BretonnetSilbert,
       fermiwavenumber,
       formfactor,
       screenedformfactor,
       dielectric,
       localfiled,
       wnechar,
       # TB
       bandwidth,
       # utils
       spline,
       # constants
       kB
# External optimization backends used by the implementation files.
import Optim
import NLopt
using Dierckx
using Polynomials
# Shared helpers, types and constants.
include("utils.jl")
include("types.jl")
include("tptsystem.jl")
include("constants.jl")
# Reference systems (abstract interfaces first, then concrete implementations).
include(joinpath("reference", "reference.jl"))
include(joinpath("reference", "abstractwca.jl"))
include(joinpath("reference", "ahs.jl"))
include(joinpath("reference", "wca.jl"))
include(joinpath("reference", "lwca.jl"))
# Perturbation contributions.
include(joinpath("perturbation", "perturbation.jl"))
include(joinpath("perturbation", "nullpert.jl"))
include(joinpath("perturbation", "lennardjones.jl"))
include(joinpath("perturbation", "nfetb.jl"))
include(joinpath("perturbation", "nfe.jl"))
include(joinpath("perturbation", "ashcroft.jl"))
include(joinpath("perturbation", "bretonnet_silbert.jl"))
include(joinpath("perturbation", "harrison.jl"))
include(joinpath("perturbation", "hausleitner.jl"))
end # module
| [
27,
7856,
261,
480,
29,
10134,
917,
518,
14,
7250,
51,
13,
20362,
198,
21412,
309,
11571,
201,
198,
201,
198,
39344,
24525,
4694,
6781,
11,
201,
198,
201,
198,
220,
220,
220,
220,
220,
220,
1303,
14392,
1321,
201,
198,
220,
220,
2... | 2.12475 | 1,002 |
import SMC
import Distributions
using DataFrames
include("hmm_serialization.jl")
include("schema.jl")
@everywhere begin
using SMC
using Distributions
include("smc_samplers.jl")
include("../aide.jl")
end
"""
    generate_aide_estimates(hmm, observations, num_particles_list,
                            num_metainference_list, num_replicates)

Run AIDE comparisons between each SMC sampler configuration (prior proposal
and optimal proposal, for every particle count and meta-inference count) and
two gold standards: the exact HMM posterior sampler and a large-particle
(1000) optimal-proposal SMC sampler. Returns a `DataFrame` with one row per
(particles, meta-inference, proposal, gold standard) combination, holding the
AIDE estimate and its standard error.
"""
function generate_aide_estimates(hmm::HiddenMarkovModel,
                                 observations::Vector{Int},
                                 num_particles_list::Vector{Int},
                                 num_metainference_list::Vector{Int},
                                 num_replicates::Int)
    data = DataFrame()
    num_particles_column = Int[]
    num_metainference_column = Int[]
    proposal_name_column = String[]
    aide_estimate_column = Float64[]
    aide_stderr_column = Float64[]
    gold_standard_name_column = String[]
    exact_sampler = HMMExactSampler(hmm, observations)
    # Approximate gold standard: optimal-proposal SMC with many (1000) particles.
    gold_standard_sampler = make_optimal_proposal_smc_sampler(hmm, observations, 1000)
    # Run AIDE for one (gold standard, approximate sampler) pair and append one
    # row's worth of column entries. Factored out of four copy-pasted blocks.
    function record!(gold_sampler, smc_sampler, num_particles, num_metainference,
                     proposal_name, gold_standard_name)
        kls = aide(gold_sampler, smc_sampler, 1, num_metainference, num_replicates, num_replicates)
        push!(num_particles_column, num_particles)
        push!(num_metainference_column, num_metainference)
        push!(proposal_name_column, proposal_name)
        push!(aide_estimate_column, mean(kls))
        push!(aide_stderr_column, std(kls)/sqrt(length(kls)))
        push!(gold_standard_name_column, gold_standard_name)
    end
    for num_particles in num_particles_list
        for num_metainference in num_metainference_list
            println("generating data for num_partices=$num_particles, num_metainference=$num_metainference...")
            # Prior-proposal SMC against each gold standard (fresh sampler per run,
            # matching the original behavior).
            record!(exact_sampler,
                    make_prior_proposal_smc_sampler(hmm, observations, num_particles),
                    num_particles, num_metainference, PRIOR_PROPOSAL_NAME, EXACT_GOLD_STANDARD)
            record!(gold_standard_sampler,
                    make_prior_proposal_smc_sampler(hmm, observations, num_particles),
                    num_particles, num_metainference, PRIOR_PROPOSAL_NAME, APPROXIMATE_GOLD_STANDARD)
            # Optimal-proposal SMC against each gold standard.
            record!(exact_sampler,
                    make_optimal_proposal_smc_sampler(hmm, observations, num_particles),
                    num_particles, num_metainference, OPTIMAL_PROPOSAL_NAME, EXACT_GOLD_STANDARD)
            record!(gold_standard_sampler,
                    make_optimal_proposal_smc_sampler(hmm, observations, num_particles),
                    num_particles, num_metainference, OPTIMAL_PROPOSAL_NAME, APPROXIMATE_GOLD_STANDARD)
        end
    end
    data[COL_NUM_PARTICLES] = num_particles_column
    data[COL_NUM_METAINFERENCE] = num_metainference_column
    data[COL_PROPOSAL_NAME] = proposal_name_column
    data[COL_AIDE_ESTIMATE] = aide_estimate_column
    data[COL_AIDE_STDERR] = aide_stderr_column
    data[COL_GOLD_STANDARD_NAME] = gold_standard_name_column
    return data
end
# do experiment
data_dir = "data"
plot_dir = "plots"
# load HMM and observations
hmm = load_hmm("$data_dir/hmm.json")
# NOTE: use variable names distinct from the accessor functions. The original
# `num_states = num_states(hmm)` assigned over the very function binding being
# called, which errors in Julia (cannot reassign a const function binding).
n_states = num_states(hmm)
num_obs = num_observations(hmm)
observations = load_observations("$data_dir/observations.json")
num_steps = length(observations)
# do AIDE experiment, save data to CSV file
num_particles_list = [1, 3, 10, 30, 100]
num_metainference_list = [1, 100]
num_replicates = 100
aide_estimates = generate_aide_estimates(hmm, observations, num_particles_list,
    num_metainference_list, num_replicates)
writetable("$data_dir/aide_estimates.csv", aide_estimates)
println("done!")
| [
11748,
9447,
34,
198,
11748,
46567,
507,
198,
3500,
6060,
35439,
198,
17256,
7203,
71,
3020,
62,
46911,
1634,
13,
20362,
4943,
198,
17256,
7203,
15952,
2611,
13,
20362,
4943,
198,
198,
31,
16833,
3003,
2221,
198,
220,
220,
220,
1262,
... | 2.177498 | 2,062 |
<reponame>andrew-saydjari/disCovErr.jl<gh_stars>1-10
## utility functions
import OffsetArrays
import ImageFiltering
import ShiftedArrays
export cov_avg!
export boxsmooth!
export outest_bounds #
"""
    outest_bounds(cx,sx) -> px0

Helper function to find maximum padding in pixels required to accommodate all query points `cx` outside of the image size 1:`sx`.

# Arguments:
- `cx`: list of integer star centers (in either x or y)
- `sx`: image dimension along the axis indexed by `cx`

# Outputs:
- `px0`: maximum padding in pixels required to accommodate all query points
"""
function outest_bounds(cx, sx)
    # No query points need no padding (the previous sort-based version threw a
    # BoundsError on empty input).
    isempty(cx) && return 0
    # Only the extreme coordinates matter; `extrema` is O(n) and copy-free,
    # whereas sorting a copy of `cx` was O(n log n) and allocated.
    lo, hi = extrema(cx)
    # Padding on the low side: how far the smallest center falls below pixel 1.
    px0 = lo < 1 ? 1 - lo : 0
    # Padding on the high side: how far the largest center exceeds sx.
    if hi > sx
        px0 = max(px0, hi - sx)
    end
    return px0
end
"""
    boxsmooth!(out::AbstractArray, arr::AbstractArray, tot::Array{T,1}, widx::Int, widy::Int)

Boxcar smooths an input image (or paddedview) `arr` with window size `widx` by
`widy`, writing sums of each window into `out`. Running column sums are kept in
the preallocated buffer `tot`, so the cost per output pixel is O(1) rather than
O(widx*widy). `tot` must be zeroed by the caller before each call; the function
accumulates into it.

# Arguments:
- `out::AbstractArray`: preallocated output array of size `(sx-widx+1, sy-widy+1)`
  where `(sx, sy) = size(arr)`
- `arr::AbstractArray`: input array for which boxcar smoothing is computed (generally paddedview)
- `tot::Array{T,1}`: preallocated length-`sx` buffer holding moving sums along 1 dimension
- `widx::Int`: size of boxcar smoothing window in x
- `widy::Int`: size of boxcar smoothing window in y
"""
function boxsmooth!(out::AbstractArray, arr::AbstractArray, tot::Array{T,1}, widx::Int, widy::Int) where T
    (sx, sy) = size(arr)
    # (The half-widths Δx/Δy computed by the previous revision were unused and
    # have been removed.)
    for j=1:(sy-widy+1)
        if (j==1)
            # First window column: build the per-row running sums from scratch.
            for n = 1:widy
                @simd for m = 1:sx
                    @inbounds tot[m] += arr[m,n]
                end
            end
        else
            # Slide the window one column: add the entering column, subtract the leaving one.
            @simd for m = 1:sx
                @inbounds tot[m] += arr[m,j+widy-1]-arr[m,j-1]
            end
        end
        # Sweep along x using the same add/subtract trick on `tot`.
        tt=zero(eltype(out))
        for i=1:(sx-widx+1)
            if (i==1)
                @simd for n=1:widx
                    @inbounds tt += tot[n]
                end
            else
                @inbounds tt += tot[i+widx-1]-tot[i-1]
            end
            @inbounds out[i,j] = tt
        end
    end
end
"""
    cov_avg!(bimage, ism, bism, in_image; Np::Int=33, widx::Int=129, widy::Int=129, ftype::Int=32)

Key function for constructing the (shifted and multiplied) versions of the input image used to quickly
estimate the local covariance matrix at a large number of locations. The main output is in the preallocated
`bism` which is used as an input to `build_cov!`.

# Arguments:
- `bimage`: preallocated output array for the boxcar smoothed unshifted image
- `ism`: preallocated intermediate array for the input image times itself shifted
- `bism`: preallocated output array to store boxcar-smoothed image products for all shifts
- `in_image`: input image the local covariance of which we want to estimate

# Keywords:
- `Np::Int`: size of local covariance matrix in pixels (default 33)
- `widx::Int`: width of boxcar window in x which determines size of region used for samples for the local covariance estimate (default 129)
- `widy::Int`: width of boxcar window in y which determines size of region used for samples for the local covariance estimate (default 129)
- `ftype::Int`: determine the Float precision, 32 is Float32, otherwise Float64
"""
function cov_avg!(bimage, ism, bism, in_image; Np::Int=33, widx::Int=129, widy::Int=129, ftype::Int=32)
    # Working precision of the boxcar running-sum buffer.
    if ftype == 32
        T = Float32
    else
        T = Float64
    end
    # (Unused locals Δx, Δy and halfNp from the previous revision removed.)
    (sx1, sy1) = size(in_image)
    tot = zeros(T,sx1);
    # Boxcar-smoothed, unshifted image.
    boxsmooth!(bimage,in_image,tot,widx,widy)
    # loop over shifts
    for dc=0:Np-1 # column shift loop
        for dr=1-Np:Np-1 # row loop, incl negatives
            if (dr < 0) & (dc == 0)
                # Skipped: presumably redundant with the (−dr, 0) shift by
                # symmetry of the product — confirm against build_cov!.
                continue
            end
            # ism = image, shifted and multipled
            @inbounds ism .= in_image .* ShiftedArrays.circshift(in_image,(-dr, -dc))
            fill!(tot,0)  # boxsmooth! accumulates into tot, so reset per shift
            boxsmooth!(view(bism,:,:,dr+Np,dc+1),ism,tot,widx,widy) # bism = boxcar(ism)
        end
    end
    return
end
| [
27,
7856,
261,
480,
29,
392,
1809,
12,
16706,
28241,
2743,
14,
6381,
34,
709,
9139,
81,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2235,
10361,
5499,
198,
11748,
3242,
2617,
3163,
20477,
198,
11748,
7412,
11928,
20212,
198... | 2.221299 | 1,925 |
<reponame>justinjhlo/TextGrids.jl
# insert functions
"""
    insert_boundary!(tier, time; split_at = 0)
    insert_boundary!(tg, num, time; split_at = 0)
Insert a boundary at `time` in an interval `tier`, which can also be specified
by its number in a `TextGrid`. This action splits an existing interval and
increases the size of the tier by 1.
The keyword argument `split_at` indicates the starting position in the label of
the interval being split that belongs to the right interval. The remaining part
of the label belongs to the left interval. A value outside any valid index for
the label (i.e. the default value 0, any negative integer, or any positive
integer greater than its length) means that the whole original label belongs to
the left interval.
"""
function insert_boundary!(tier::Tier, time::Real; split_at::Int = 0)
    tier.class == "interval" || error("Cannot insert boundaries in point tiers.")
    # Locate the interval containing `time`.
    index = findfirst(x -> x.xmin ≤ time ≤ x.xmax, tier.contents)
    isnothing(index) && error("Time out of bounds of tier.")
    if tier.contents[index].xmin == time || tier.contents[index].xmax == time
        error("Boundary already exists in Tier $tier at $(time)s.")
    end
    label_length = length(tier.contents[index].label)
    # Out-of-range split_at is normalized so the right interval gets an empty label.
    split_at ∈ 1:label_length || (split_at = label_length + 1)
    # Insert the right half first, then shrink the original (left) interval;
    # this order keeps the original interval's data available while splitting.
    insert!(tier.contents, index + 1, Interval(index + 1, time, tier.contents[index].xmax, tier.contents[index].label[split_at:end]))
    tier.contents[index].xmax = time
    tier.contents[index].label = tier.contents[index].label[1:(split_at - 1)]
    tier.size += 1
    # Renumber all intervals to the right of the split.
    reindex_intervals!(tier, from = index + 1)
end
insert_boundary!(tg::TextGrid, num::Int, time::Real; split_at::Int = 0) = insert_boundary!(tg[num], time, split_at = split_at)
"""
    insert_interval!(tier, start, stop, label; split_at = 0)
    insert_interval!(tg, num, start, stop, label; split_at = 0)
Insert an interval from `start` to `stop` in an interval `tier`. Inserted
intervals must not straddle an existing boundary, but if either `start` or
`stop` coincides with an existing edge of an interval, then the other boundary
is added to split the original interval.
The keyword argument `split_at` indicates the starting position in the label of
the interval being split that belongs to the interval immediately to the right
of the inserted interval. Note that if either `start` or `stop` coincides with
an existing edge, `split_at` is automatically overridden and the whole original
label goes to the split interval. See also `insert_boundary!`.
"""
function insert_interval!(tier::Tier, start::Real, stop::Real, label::AbstractString; split_at::Int = 0)
    tier.class == "interval" || error("Cannot insert intervals in point tiers.")
    # Validates bounds/straddling and returns the enclosing interval index (twice).
    start_index, stop_index = is_interval_insertable(tier, start, stop)
    if start == tier.contents[start_index].xmin
        # If only left edge already exists, insert new right edge and force original label to the right
        insert_boundary!(tier, stop, split_at = 1)
        relabel!(tier, start_index, label)
    elseif stop == tier.contents[stop_index].xmax
        # If only right edge already exists, insert new left edge and force original label to the left
        insert_boundary!(tier, start)
        relabel!(tier, start_index + 1, label)
    else
        # General case: insert the right boundary first (so the index for the
        # left insertion remains valid), then the left one.
        insert_boundary!(tier, stop, split_at = split_at)
        insert_boundary!(tier, start)
        relabel!(tier, start_index + 1, label)
    end
    return tier
end
insert_interval!(tg::TextGrid, num::Int, start::Real, stop::Real, label::AbstractString; split_at::Int = 0) = insert_interval!(tg[num], start, stop, label, split_at = split_at)
"""
    copy_interval!(tier, source_tier, index; split_at = 0)
    copy_interval!(tg, num, source_num, index; split_at = 0)
Copy the `index`-th interval (boundaries and label) from `source_tier` to
`tier`. Fails with the same errors as `insert_interval!` if the target tier
cannot accommodate the interval.
See also `insert_interval!`.
"""
copy_interval!(tier::Tier, source_tier::Tier, index::Int; split_at::Int = 0) = insert_interval!(tier, source_tier.contents[index].xmin, source_tier.contents[index].xmax, source_tier.contents[index].label, split_at = split_at)
copy_interval!(tg::TextGrid, num::Int, source_num::Int, index::Int; split_at::Int = 0) = copy_interval!(tg[num], tg[source_num], index, split_at = split_at)
"""
    insert_intervals!(tier, starts, stops, labels; split_at = 0)
    insert_intervals!(tg, num, starts, stops, labels; split_at = 0)

Insert multiple intervals to an interval `tier`, with boundaries and labels
defined by `starts`, `stops` and `labels`. The keyword argument `split_at` can
be either a single integer if constant across inserted intervals or a vector of
integers if custom per interval.
Intervals are inserted in the specified order, and intervals that cannot be
inserted are simply discarded rather than throw an error.
See also `insert_interval!`
"""
function insert_intervals!(tier::Tier, starts::Vector{<:Real}, stops::Vector{<:Real}, labels::Vector{<:AbstractString}; split_at::Union{Int, Vector{Int}} = 0)
    # Only as many intervals as the shortest of the three vectors provides.
    num_added = minimum(length, [starts, stops, labels])
    # Normalize `split_at` into one iterable so the scalar and per-interval
    # cases share a single insertion loop (previously two duplicated loops).
    if split_at isa Vector
        length(split_at) < num_added && error("`split_at` must not be shorter than `starts`, `stops` and `labels`.")
        splits = split_at
    else
        splits = Iterators.repeated(split_at)
    end
    for (start, stop, label, split_at_i) in zip(starts, stops, labels, splits)
        # Best effort: skip intervals that cannot be inserted (e.g. straddling
        # an existing boundary) instead of aborting the whole batch.
        try
            insert_interval!(tier, start, stop, label, split_at = split_at_i)
        catch
            continue
        end
    end
    return tier
end
insert_intervals!(tg::TextGrid, num::Int, starts::Vector{<:Real}, stops::Vector{<:Real}, labels::Vector{<:AbstractString}; split_at::Union{Int, Vector{Int}} = 0) = insert_intervals!(tg[num], starts, stops, labels, split_at = split_at)
# Validate that an interval from `start` to `stop` can be inserted into `tier`;
# returns the index of the enclosing interval (as both elements of a pair) or
# raises a descriptive error.
function is_interval_insertable(tier::Tier, start::Real, stop::Real)
    if start ≥ stop
        error("Start time must come before stop time.")
    end
    left = find_interval(tier, start)
    right = find_low_interval(tier, stop)
    # A zero index means the time fell outside the tier.
    if left == 0 || right == 0
        error("Time out of bounds of tier.")
    end
    if left != right
        error("Cannot insert interval that straddles a boundary.")
    end
    if start == tier.contents[left].xmin && stop == tier.contents[right].xmax
        error("Interval already exists with the same boundaries.")
    end
    return left, right
end
# remove functions
"""
    remove_left_boundary!(tier, index; delim = "")
    remove_left_boundary!(tg, num, index; delim = "")
Remove the left boundary of the `index`-th interval in a `tier`, merging it
into the preceding interval and decreasing the tier size by 1.
The keyword argument `delim` defines the string used to join the labels from
the two intervals being combined.
"""
function remove_left_boundary!(tier::Tier, index::Int; delim::AbstractString = "")
    tier.class == "interval" || error("Cannot remove boundaries from point tiers.")
    index == 1 && error("Cannot remove left edge of tier.")
    # Extend the previous interval to cover this one, join the labels, then
    # delete this interval. Order matters: the merge reads contents[index]
    # before it is deleted.
    tier.contents[index - 1].xmax = tier.contents[index].xmax
    relabel!(tier, index - 1, tier.contents[index - 1].label * delim * tier.contents[index].label)
    deleteat!(tier.contents, index)
    tier.size -= 1
    reindex_intervals!(tier, from = index)
end
remove_left_boundary!(tg::TextGrid, num::Int, index::Int; delim::AbstractString = "") = remove_left_boundary!(tg[num], index, delim = delim)
"""
    remove_right_boundary!(tier, index; delim = "")
    remove_right_boundary!(tg, num, index; delim = "")
Remove the right boundary of the `index`-th interval in an interval `tier`,
merging it with the interval to its right.
The keyword argument `delim` defines the string used to join the labels from
the two intervals being combined. Delegates to `remove_left_boundary!` on the
following interval.
"""
function remove_right_boundary!(tier::Tier, index::Int; delim::AbstractString = "")
    if tier.class != "interval"
        error("Cannot remove boundaries from point tiers.")
    end
    if index == tier.size
        error("Cannot remove right edge of tier.")
    end
    return remove_left_boundary!(tier, index + 1, delim = delim)
end
remove_right_boundary!(tg::TextGrid, num::Int, index::Int; delim::AbstractString = "") = remove_right_boundary!(tg[num], index, delim = delim)
"""
    remove_boundary!(tier, time; delim = "", tolerance = 0.0001)
    remove_boundary!(tier, num, time; delim = "", tolerance = 0.0001)
Remove the boundary at `time` (± `tolerance`) from an interval `tier`. If no
boundary lies within tolerance, the tier is left unchanged.
The keyword argument `delim` defines the string used to join the labels from
the two intervals being combined.
"""
function remove_boundary!(tier::Tier, time::AbstractFloat; delim::AbstractString = "", tolerance::Real = 0.0001)
    # Boundaries are matched via interval start times: the left edge of
    # interval `index` is the boundary between intervals `index - 1` and `index`.
    index = findlast(x -> abs(x.xmin - time) ≤ tolerance, tier.contents)
    if !isnothing(index)
        # BUG FIX: `delim` is a keyword argument of `remove_left_boundary!`;
        # passing it positionally raised a MethodError.
        remove_left_boundary!(tier, index, delim = delim)
    end
end
remove_boundary!(tg::TextGrid, num::Int, time::AbstractFloat; delim::AbstractString = "", tolerance::Real = 0.0001) = remove_boundary!(tg[num], time, delim = delim, tolerance = tolerance)
"""
    remove_boundary!(tier, index, edge; delim = "")
    remove_boundary!(tier, num, index, edge; delim = "")
Remove left and/or right boundaries of the `index`-th interval in `tier`.
Depending on value of `edge`, equivalent to:
- `"left"`: `remove_left_boundary!`
- `"right"`: `remove_right_boundary!`
- `"both"`: a combination of the two
The keyword argument `delim` defines the string used to join the labels from
the two intervals being combined.
"""
function remove_boundary!(tier::Tier, index::Int, edge::AbstractString; delim::AbstractString = "")
    edge ∈ ("left", "right", "both") || error("`edge` must be `left`, `right` or `both`.")
    # For "both": remove the right boundary first. That merge deletes interval
    # index + 1, so `index` still refers to the same (now wider) interval when
    # the left boundary is removed afterwards.
    edge != "left" && remove_right_boundary!(tier, index, delim = delim)
    edge != "right" && remove_left_boundary!(tier, index, delim = delim)
    return tier
end
remove_boundary!(tg::TextGrid, num::Int, index::Int, edge::AbstractString; delim::AbstractString = "") = remove_boundary!(tg[num], index, edge, delim = delim)
"""
    remove_interval!(tier, index)
    remove_interval!(tg, num, index)
Remove the `index`-th interval from `tier`: the interval's label is cleared and
its boundaries are removed so that neighboring intervals absorb its time span.
Errors if the interval is the only one in the tier.
"""
function remove_interval!(tier::Tier, index::Int)
    tier.xmin == tier.contents[index].xmin && tier.xmax == tier.contents[index].xmax && error("Cannot remove only interval in tier $tier.")
    # Clear the label first so the merge does not propagate it to a neighbor.
    relabel!(tier, index, "")
    # Tier edges cannot be removed, so only the inner boundary/boundaries go.
    edge = if tier.contents[index].xmin == tier.xmin
        "right"
    elseif tier.contents[index].xmax == tier.xmax
        "left"
    else
        "both"
    end
    remove_boundary!(tier, index, edge)
end
remove_interval!(tg::TextGrid, num::Int, index::Int) = remove_interval!(tg[num], index)
"""
move_left_boundary!(tier, index; by = 0, to = -1)
move_left_boundary!(tg, num, index; by = 0, to = -1)
Move the left boundary of the `index`-th interval in an interval `tier`, either
`by` a specified shift or `to` a specified destination. Boundary movement is
not allowed to cross or overlap with other boundaries.
See also `move_right_boundary!`, `move_boundary!` and `move_interval!`.
"""
function move_left_boundary!(tier::Tier, index::Int; by::Real = 0, to::Real = -1)
index == 1 && error("Cannot move left edge of tier.")
by == 0 && to == -1 && error("Boundary movement not specified.")
by != 0 && to != -1 && error("Both `by` and `to` specified.")
by != 0 && (to = tier.contents[index].xmin + by)
(to < tier.xmin || to > tier.xmax) && error("Cannot move boundary out of bounds.")
(to ≤ tier.contents[index - 1].xmin || to ≥ tier.contents[index].xmax) && error("Cannot move boundary beyond interval duration.")
tier.contents[index].xmin = to
tier.contents[index - 1].xmax = to
return tier
end
move_left_boundary!(tg::TextGrid, num::Int, index::Int; by::Real = 0, to::Real = -1) = move_left_boundary!(tg[num], index, by = by, to = to)
"""
    move_right_boundary!(tier, index; by = 0, to = -1)
    move_right_boundary!(tg, num, index; by = 0, to = -1)
Move the right boundary of the `index`-th interval in an interval `tier`, either
`by` a specified shift or `to` a specified destination. Boundary movement is
not allowed to cross or overlap with other boundaries.
Delegates to `move_left_boundary!` on the following interval.
See also `move_left_boundary!`, `move_boundary!` and `move_interval!`.
"""
function move_right_boundary!(tier::Tier, index::Int; by::Real = 0, to::Real = -1)
    if index == tier.size
        error("Cannot move right edge of tier.")
    end
    return move_left_boundary!(tier, index + 1, by = by, to = to)
end
move_right_boundary!(tg::TextGrid, num::Int, index::Int; by::Real = 0, to::Real = -1) = move_right_boundary!(tg[num], index, by = by, to = to)
"""
    move_interval!(tier, index; by = 0, to = -1)
    move_interval!(tg, num, index; by = 0, to = -1)
Move both left and right boundaries of the `index`-th interval in an interval
`tier`, either `by` a specified shift or `to` a specified destination. `to`
specifies the destination of the left boundary. Boundary movement is not allowed
to cross or overlap with other boundaries.
See also `move_left_boundary!`, `move_right_boundary!` and `move_boundary!`.
"""
function move_interval!(tier::Tier, index::Int; by::Real = 0, to::Real = -1)
    index == 1 && error("Cannot move left edge of tier.")
    index == tier.size && error("Cannot move right edge of tier.")
    by == 0 && to == -1 && error("Boundary movement not specified.")
    by != 0 && to != -1 && error("Both `by` and `to` specified.")
    # Compute both destination times; the interval's duration is preserved.
    if by != 0
        left_to = tier.contents[index].xmin + by
        right_to = tier.contents[index].xmax + by
    else
        left_to = to
        right_to = to + tier.contents[index].xmax - tier.contents[index].xmin
    end
    (left_to < tier.xmin || right_to > tier.xmax) && error("Cannot move boundary out of bounds.")
    (left_to ≤ tier.contents[index - 1].xmin || right_to ≥ tier.contents[index + 1].xmax) && error("Cannot move boundary beyond interval duration.")
    # Update both edges and keep each shared boundary consistent on the neighbor.
    tier.contents[index].xmin = left_to
    tier.contents[index - 1].xmax = left_to
    tier.contents[index].xmax = right_to
    tier.contents[index + 1].xmin = right_to
    return tier
end
move_interval!(tg::TextGrid, num::Int, index::Int; by::Real = 0, to::Real = -1) = move_interval!(tg[num], index, by = by, to = to)
"""
move_boundary!(tier, index, edge; by = 0, to = -1)
move_boundary!(tg, num, index, edge; by = 0, to = -1)
Move one or both boundaries of the `index`-th interval in an interval `tier`,
either `by` a specified shift or `to` a specified destination. Equivalent to
the following depending on value of `edge`:
- "left": `move_left_boundary!`
- "right": `move_right_boundary!`
- "both": `move_interval!`
"""
function move_boundary!(tier::Tier, index::Int, edge::AbstractString; by::Real = 0, to::Real = -1)
if edge == "left"
return move_left_boundary!(tier, index, by = by, to = to)
elseif edge == "right"
return move_right_boundary!(tier, index, by = by, to = to)
elseif edge == "both"
return move_interval!(tier, index, by = by, to = to)
else
error("`edge` must be `left`, `right` or `both`.")
end
end
move_boundary!(tg::TextGrid, num::Int, index::Int, edge::AbstractString; by::Real = 0, to::Real = -1) = move_boundary!(tg[num], index, edge, by = by, to = to)
"""
move_boundary!(tier, time; by = 0, to = -1, tolerance = 0.0001)
move_boundary!(tg, num, time; by = 0, to = -1, tolerance = 0.0001)
Move an interval boundary located at `time` (± `tolerance`), either `by` a
specified shift or `to` a specified destination. Boundary movement is not
allowed to cross or overlap with other boundaries. Nothing happens if no
boundary is found within the tolerance limit.
"""
function move_boundary!(tier::Tier, time::AbstractFloat; by::Real = 0, to::Real = -1, tolerance::Real = 0.0001)
index = find_interval(tier, time)
if time - tier.contents[index].xmin ≤ tolerance
return move_left_boundary!(tier, index, by = by, to = to)
elseif tier.contents[index].xmax - time ≤ tolerance
return move_right_boundary!(tier, index, by = by, to = to)
end
end
move_boundary!(tg::TextGrid, num::Int, time::AbstractFloat; by::Real = 0, to::Real = -1, tolerance::Real = 0.0001) = move_boundary!(tg[num], time, by = by, to = to, tolerance = tolerance) | [
27,
7856,
261,
480,
29,
3137,
259,
73,
71,
5439,
14,
8206,
8642,
2340,
13,
20362,
198,
2,
7550,
5499,
198,
198,
37811,
198,
220,
220,
220,
7550,
62,
7784,
560,
0,
7,
24948,
11,
640,
26,
6626,
62,
265,
796,
657,
8,
198,
220,
22... | 2.857143 | 5,600 |
######################
# 1: The Julia type for ToricVarieties
######################
abstract type AbstractNormalToricVariety end
struct NormalToricVariety <: AbstractNormalToricVariety
polymakeNTV::Polymake.BigObject
end
export NormalToricVariety
struct AffineNormalToricVariety <: AbstractNormalToricVariety
polymakeNTV::Polymake.BigObject
end
export AffineNormalToricVariety
function pm_ntv(v::AbstractNormalToricVariety)
return v.polymakeNTV
end
######################
# 2: Generic constructors
######################
@doc Markdown.doc"""
AffineNormalToricVariety(C::Cone)
Construct the affine normal toric variety $U_{C}$ corresponding to a polyhedral
cone `C`.
# Examples
Set `C` to be the positive orthant in two dimensions.
```jldoctest
julia> C = Oscar.positive_hull([1 0; 0 1])
A polyhedral cone in ambient dimension 2
julia> antv = AffineNormalToricVariety(C)
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function AffineNormalToricVariety(C::Cone)
pmc = Oscar.pm_cone(C)
fan = Polymake.fan.check_fan_objects(pmc)
pmntv = Polymake.fulton.NormalToricVariety(fan)
return AffineNormalToricVariety(pmntv)
end
@doc Markdown.doc"""
NormalToricVariety(C::Cone)
Construct the (affine) normal toric variety $X_{\Sigma}$ corresponding to a
polyhedral fan $\Sigma = C$ consisting only of the cone `C`.
# Examples
Set `C` to be the positive orthant in two dimensions.
```jldoctest
julia> C = Oscar.positive_hull([1 0; 0 1])
A polyhedral cone in ambient dimension 2
julia> ntv = NormalToricVariety(C)
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function NormalToricVariety(C::Cone)
return AffineNormalToricVariety(C)
end
@doc Markdown.doc"""
NormalToricVariety(PF::PolyhedralFan)
Construct the normal toric variety $X_{PF}$ corresponding to a polyhedral fan `PF`.
# Examples
Take `PF` to be the normal fan of the square.
```jldoctest
julia> square = Oscar.cube(2)
A polyhedron in ambient dimension 2
julia> nf = Oscar.normal_fan(square)
A polyhedral fan in ambient dimension 2
julia> ntv = NormalToricVariety(nf)
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function NormalToricVariety(PF::PolyhedralFan)
fan = Oscar.pm_fan(PF)
pmntv = Polymake.fulton.NormalToricVariety(fan)
if fan.N_MAXIMAL_CONES == 1
return AffineNormalToricVariety( pmntv )
end
return NormalToricVariety(pmntv)
end
@doc Markdown.doc"""
NormalToricVariety(P::Polyhedron)
Construct the normal toric variety $X_{\Sigma_P}$ corresponding to the normal
fan $\Sigma_P$ of the given polyhedron `P`.
Note that this only coincides with the projective variety associated to `P`, if
`P` is normal.
# Examples
Set `P` to be a square.
```jldoctest
julia> square = Oscar.cube(2)
A polyhedron in ambient dimension 2
julia> ntv = NormalToricVariety(square)
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function NormalToricVariety(P::Polyhedron)
fan = normal_fan(P)
return NormalToricVariety(fan)
end
@doc Markdown.doc"""
NormalToricVariety( r::Matrix{Int}, c::Vector{Vector{Int}} )
Construct the normal toric variety whose fan has ray generators `r` and maximal cones `c`.
# Examples
```jldoctest
julia> NormalToricVariety( [-1 5; 0 1; 1 0; 0 -1], [[1,2],[2,3],[3,4],[4,1]] )
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function NormalToricVariety( rays::Matrix{Int}, cones::Vector{Vector{Int}} )
Incidence = Oscar.IncidenceMatrix(cones)
arr = Polymake.@convert_to Array{Set{Int}} Polymake.common.rows(Incidence.pm_incidencematrix)
pmntv = Polymake.fulton.NormalToricVariety(
RAYS = Oscar.matrix_for_polymake(rays),
MAXIMAL_CONES = arr,
)
if length( cones ) == 1
return AffineNormalToricVariety( pmntv )
end
return NormalToricVariety( pmntv )
end
export NormalToricVariety
######################
# 3: Special constructors
######################
@doc Markdown.doc"""
projective_space( d::Int )
Construct the projective space of dimension `d`.
# Examples
```jldoctest
julia> projective_space( 2 )
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function projective_space( d::Int )
f = normal_fan(Oscar.simplex(d))
pm_ntv = Polymake.fulton.NormalToricVariety(Oscar.pm_fan(f))
return NormalToricVariety(pm_ntv)
end
export projective_space
@doc Markdown.doc"""
hirzebruch_surface( r::Int )
Constructs the r-th Hirzebruch surface.
# Examples
```jldoctest
julia> hirzebruch_surface( 5 )
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function hirzebruch_surface( r::Int )
Rays = [ 1 0; 0 1; -1 r; 0 -1]
Cones = [[1,2],[2,3],[3,4],[4,1]]
return NormalToricVariety( Rays, Cones )
end
export hirzebruch_surface
@doc Markdown.doc"""
delPezzo( b::Int )
Constructs the delPezzo surface with b blowups for b at most 3.
# Examples
```jldoctest
julia> del_pezzo( 3 )
A normal toric variety corresponding to a polyhedral fan in ambient dimension 2
```
"""
function del_pezzo( b::Int )
if b < 0
throw(ArgumentError("Number of blowups for construction of delPezzo surfaces must be non-negative."))
return 0
end
if b == 0
return projective_space( 2 )
end
if b == 1
Rays = [ 1 0; 0 1; -1 0; -1 -1 ]
Cones = [ [1,2],[2,3],[3,4],[4,1] ]
return NormalToricVariety( Rays, Cones )
end
if b == 2
Rays = [ 1 0; 0 1; -1 0; -1 -1; 0 -1 ]
Cones = [ [1,2],[2,3],[3,4],[4,5],[5,1] ]
return NormalToricVariety( Rays, Cones )
end
if b == 3
Rays = [ 1 0; 1 1; 0 1; -1 0; -1 -1; 0 -1 ]
Cones = [ [1,2],[2,3],[3,4],[4,5],[5,6],[6,1] ]
return NormalToricVariety( Rays, Cones )
end
if b > 3
throw(ArgumentError("delPezzo surfaces with more than 3 blowups are realized as subvarieties of toric ambient spaces. This is currently not supported."))
return 0
end
end
export del_pezzo
###############################################################################
###############################################################################
### Display
###############################################################################
###############################################################################
function Base.show(io::IO, ntv::AbstractNormalToricVariety)
# fan = get_polyhedral_fan(ntv)
pmntv = pm_ntv(ntv)
ambdim = pmntv.FAN_AMBIENT_DIM
print(io, "A normal toric variety corresponding to a polyhedral fan in ambient dimension $(ambdim)")
end
| [
14468,
4242,
2235,
198,
2,
352,
25,
383,
22300,
2099,
329,
4022,
291,
19852,
9545,
198,
14468,
4242,
2235,
198,
397,
8709,
2099,
27741,
26447,
51,
8146,
19852,
1905,
886,
198,
198,
7249,
14435,
51,
8146,
19852,
1905,
1279,
25,
27741,
... | 2.788588 | 2,436 |
<reponame>joshday/OfflinePluto.jl
# Download links in `assets.csv`
dir = joinpath(@__DIR__, "Pluto", "frontend", "offline_assets")
rm(dir, force=true, recursive=true)
mkpath(dir)
for url in readlines(joinpath(@__DIR__, "assets.csv") )
@info "Downloading: $url"
file = touch(joinpath(dir, basename(url)))
download(url, file)
end | [
27,
7856,
261,
480,
29,
73,
3768,
820,
14,
28657,
3646,
9390,
13,
20362,
198,
2,
10472,
6117,
287,
4600,
19668,
13,
40664,
63,
198,
198,
15908,
796,
4654,
6978,
7,
31,
834,
34720,
834,
11,
366,
3646,
9390,
1600,
366,
8534,
437,
16... | 2.544776 | 134 |
import sbp
err = sbp.MMS
using Test
using LinearAlgebra
using DataStructures
@testset "extract_vars" begin
n = [4, 8, 12]
data = Array(1:sum(n))
vars = OrderedDict("u" => n[1], "v" => n[2], "w" => n[3])
ans_u = Array(1:n[1])
ans_v = Array(n[1]+1:n[1] + n[2])
ans_w = Array(n[1]+n[2]+1:sum(n))
expected = Dict("u" => ans_u,
"v" => ans_v,
"w" => ans_w)
extracted_vars = err.extract_vars(data, vars)
@test extracted_vars == expected
end
@testset "l2_errors" begin
u = ones(4)
v = ones(4)
H = Matrix(I, 4, 4)
solution = Dict("u" => u, "v" => v)
norms = Dict("u" => H, "v" => H)
mms = Dict("u" => u, "v" => v)
errors = err.l2_errors(solution, mms, norms)
expected = Dict("u" => 0.0, "v" => 0.0)
@test errors == expected
end
@testset "functional_errors" begin
u = ones(4)
v = ones(4)
H = Matrix(I, 4, 4)
solution = Dict("u" => u, "v" => v)
norms = Dict("u" => H, "v" => H)
mms = Dict("u" => 4.0, "v" => 4.0)
errors = err.functional_errors(solution, mms, norms)
expected = Dict("u" => 0.0, "v" => 0.0)
@test errors == expected
end
@testset "log2_convergence_rates" begin
u = [16, 8, 4, 2]
q = err.log2_convergence_rates(u)
ans = [Inf, 1, 1, 1]
@test q == ans
end
| [
11748,
264,
46583,
198,
8056,
796,
264,
46583,
13,
44,
5653,
198,
3500,
6208,
198,
3500,
44800,
2348,
29230,
198,
3500,
6060,
44909,
942,
198,
198,
31,
9288,
2617,
366,
2302,
974,
62,
85,
945,
1,
2221,
198,
220,
220,
220,
220,
220,
... | 1.797781 | 811 |
{"timestamp": 1580591738.0, "score_count": 39747, "score": 7.45}
{"timestamp": 1580205822.0, "score_count": 39008, "score": 7.45}
{"timestamp": 1579597688.0, "score_count": 37448, "score": 7.46}
{"timestamp": 1578990311.0, "score_count": 35377, "score": 7.46}
{"timestamp": 1578377542.0, "score_count": 31908, "score": 7.47}
{"timestamp": 1578031145.0, "score_count": 28115, "score": 7.48}
{"timestamp": 1577768201.0, "score_count": 23767, "score": 7.5}
{"timestamp": 1577552588.0, "score_count": 11925, "score": 7.62}
{"timestamp": 1577338744.0, "score_count": 14489, "score": 7.61}
{"timestamp": 1577158935.0, "score_count": 14103, "score": 7.61}
{"timestamp": 1576708171.0, "score_count": 13283, "score": 7.61}
{"timestamp": 1576551396.0, "score_count": 13101, "score": 7.61}
{"timestamp": 1575925174.0, "score_count": 12164, "score": 7.6}
{"timestamp": 1575321002.0, "score_count": 11485, "score": 7.59}
{"timestamp": 1574546706.0, "score_count": 10572, "score": 7.61}
{"timestamp": 1574466320.0, "score_count": 10510, "score": 7.61}
{"timestamp": 1573863108.0, "score_count": 9788, "score": 7.63}
{"timestamp": 1573853347.0, "score_count": 9788, "score": 7.63}
{"timestamp": 1573245084.0, "score_count": 8993, "score": 7.66}
{"timestamp": 1572472448.0, "score_count": 7895, "score": 7.67}
{"timestamp": 1572284247.0, "score_count": 7628, "score": 7.68}
{"timestamp": 1572021916.0, "score_count": 7141, "score": 7.69}
{"timestamp": 1571657639.0, "score_count": 6511, "score": 7.71}
{"timestamp": 1571047292.0, "score_count": 5042, "score": 7.74}
{"timestamp": 1570438884.0, "score_count": 2647, "score": 7.8}
| [
4895,
16514,
27823,
1298,
1315,
1795,
3270,
1558,
2548,
13,
15,
11,
366,
26675,
62,
9127,
1298,
5014,
48882,
11,
366,
26675,
1298,
767,
13,
2231,
92,
198,
4895,
16514,
27823,
1298,
1315,
1795,
1238,
3365,
1828,
13,
15,
11,
366,
26675,... | 2.358187 | 684 |
<filename>src/pruning.jl
#================================
All kinds of functions related to pruning
=================================#
"""
get_next_prune_constraint(com::CS.CoM, constraint_idxs_vec)
Check which function will be called for pruning next. This is based on `constraint_idxs_vec`. The constraint with the lowest
value is chosen and if two have the same lowest value the first one is chosen.
Return the best value and the constraint index. Return a constraint index of 0 if there is no constraint with a less than maximal value
"""
function get_next_prune_constraint(com::CS.CoM, constraint_idxs_vec)
best_ci = 0
best_open = typemax(Int)
for ci in 1:length(constraint_idxs_vec)
if constraint_idxs_vec[ci] < best_open
best_ci = ci
best_open = constraint_idxs_vec[ci]
end
end
return best_open, best_ci
end
"""
open_possibilities(search_space, indices)
Return the sum of possible values for the given list of indices. Does not count 1 if the value is fixed
"""
function open_possibilities(search_space, indices)
open = 0
for vi in indices
if !isfixed(search_space[vi])
open += nvalues(search_space[vi])
end
end
return open
end
"""
prune!(com::CS.CoM; pre_backtrack=false, all=false, only_once=false, initial_check=false)
Prune based on changes by initial solve or backtracking. The information for it is stored in each variable.
There are several parameters:
`pre_backtrack` when set to true `com.info.in_backtrack_calls` is incremented
`all` instead of only looking at changes each constraint is check at least once (if there are open values)
`only_once` Just run on the changed constraints or `all` once instead of repeatedly until nothing can be pruned
`initial_check` Checks on `all` constraints and also checks if variables are set for the whole constraint
whether the constraint is fulfilled or the problem is infeasible.
Return whether it's still feasible
"""
function prune!(
com::CS.CoM;
pre_backtrack = false,
all = false,
only_once = false,
initial_check = false,
)
feasible = true
N = typemax(Int)
search_space = com.search_space
prev_var_length = zeros(Int, length(search_space))
constraint_idxs_vec = fill(N, length(com.constraints))
# get all constraints which need to be called (only once)
for var in search_space
new_var_length = num_changes(var, com.c_step_nr)
if new_var_length > 0 || all || initial_check
prev_var_length[var.idx] = new_var_length
for ci in com.subscription[var.idx]
com.constraints[ci].is_deactivated && continue
inner_constraint = com.constraints[ci]
constraint_idxs_vec[inner_constraint.idx] =
open_possibilities(search_space, inner_constraint.indices)
end
end
end
# while we haven't called every constraint
while true
open_pos, ci = get_next_prune_constraint(com, constraint_idxs_vec)
# no open values => don't need to call again
if open_pos == 0 && !initial_check
constraint_idxs_vec[ci] = N
continue
end
# checked all
if open_pos == N
break
end
constraint_idxs_vec[ci] = N
constraint = com.constraints[ci]
changed!(com, constraint, constraint.fct, constraint.set)
feasible =
prune_constraint!(com, constraint, constraint.fct, constraint.set; logs = false)
if !pre_backtrack
com.info.in_backtrack_calls += 1
else
com.info.pre_backtrack_calls += 1
end
if !feasible
break
end
# if we changed another variable increase the level of the constraints to call them later
for vidx in constraint.indices
var = search_space[vidx]
new_var_length = num_changes(var, com.c_step_nr)
if new_var_length > prev_var_length[var.idx]
prev_var_length[var.idx] = new_var_length
for ci in com.subscription[var.idx]
com.constraints[ci].is_deactivated && continue
# don't call the same constraint again.
# Each constraint should prune as much as possible
if ci != constraint.idx
inner_constraint = com.constraints[ci]
# if initial check or don't add constraints => update only those which already have open possibilities
if (only_once || initial_check) &&
constraint_idxs_vec[inner_constraint.idx] == N
continue
end
constraint_idxs_vec[inner_constraint.idx] =
open_possibilities(search_space, inner_constraint.indices)
end
end
end
end
end
return feasible
end
"""
restore_prune!(com::CS.CoM, prune_steps)
Prune the search space based on a list of backtracking indices `prune_steps`.
"""
function restore_prune!(com::CS.CoM, prune_steps)
search_space = com.search_space
for backtrack_idx in prune_steps
step_nr = com.backtrack_vec[backtrack_idx].step_nr
for var in search_space
for change in view_changes(var, step_nr)
fct_symbol = change[1]
val = change[2]
if fct_symbol == :fix
fix!(com, var, val; changes = false, check_feasibility = false)
elseif fct_symbol == :rm
rm!(com, var, val; changes = false, check_feasibility = false)
elseif fct_symbol == :remove_above
remove_above!(com, var, val; changes = false, check_feasibility = false)
elseif fct_symbol == :remove_below
remove_below!(com, var, val; changes = false, check_feasibility = false)
else
throw(ErrorException("There is no pruning function for $fct_symbol"))
end
end
end
com.c_backtrack_idx = backtrack_idx
end
call_restore_pruning!(com, prune_steps)
end
"""
single_reverse_pruning!(search_space, vidx::Int, prune_int::Int, prune_fix::Int)
Reverse a single variable using `prune_int` (number of value removals) and `prune_fix` (new last_ptr if not 0).
"""
function single_reverse_pruning!(search_space, vidx::Int, prune_int::Int, prune_fix::Int)
if prune_int > 0
var = search_space[vidx]
l_ptr = max(1, var.last_ptr)
new_l_ptr = var.last_ptr + prune_int
min_val, max_val = extrema(var.values[l_ptr:new_l_ptr])
if min_val < var.min
var.min = min_val
end
if max_val > var.max
var.max = max_val
end
var.last_ptr = new_l_ptr
end
if prune_fix > 0
var = search_space[vidx]
var.last_ptr = prune_fix
min_val, max_val = extrema(var.values[1:prune_fix])
if min_val < var.min
var.min = min_val
end
if max_val > var.max
var.max = max_val
end
var.first_ptr = 1
end
end
"""
reverse_pruning!(com, backtrack_idx)
Reverse the changes made by a specific backtrack object
"""
function reverse_pruning!(com::CS.CoM, backtrack_idx::Int)
com.c_backtrack_idx = backtrack_idx
step_nr = com.backtrack_vec[backtrack_idx].step_nr
search_space = com.search_space
for var in search_space
v_idx = var.idx
changes = var.changes.changes
ch_indices = var.changes.indices
for change_id in ch_indices[step_nr+1]-1:-1:ch_indices[step_nr]
single_reverse_pruning!(search_space, v_idx, changes[change_id][4], changes[change_id][3])
end
end
subscriptions = com.subscription
constraints = com.constraints
for var in search_space
num_changes(var, step_nr) == 0 && continue
var.idx > length(subscriptions) && continue
@inbounds for ci in subscriptions[var.idx]
constraint = constraints[ci]
single_reverse_pruning_constraint!(
com,
constraint,
constraint.fct,
constraint.set,
var,
backtrack_idx,
)
end
end
for constraint in constraints
reverse_pruning_constraint!(
com,
constraint,
constraint.fct,
constraint.set,
backtrack_idx,
)
end
com.c_backtrack_idx = com.backtrack_vec[backtrack_idx].parent_idx
end
| [
27,
34345,
29,
10677,
14,
1050,
46493,
13,
20362,
198,
2,
10052,
628,
220,
220,
220,
1439,
6982,
286,
5499,
3519,
284,
778,
46493,
198,
198,
10052,
46249,
198,
198,
37811,
198,
220,
220,
220,
651,
62,
19545,
62,
1050,
1726,
62,
1102... | 2.186191 | 4,012 |
using SuiteSparseMatrixCollection
using MatrixMarket
using SuiteSparseGraphBLAS
SuiteSparseGraphBLAS.gbset(SuiteSparseGraphBLAS.FORMAT, SuiteSparseGraphBLAS.BYROW)
using BenchmarkTools
using SparseArrays
using LinearAlgebra
include("tc.jl")
include("pr.jl")
graphs = [
#"karate",
#"com-Youtube",
#"as-Skitter",
#"com-LiveJournal",
#"com-Orkut",
"com-Friendster",
]
ssmc = ssmc_db()
matrices = filter(row -> row.name ∈ graphs, ssmc)
BenchmarkTools.DEFAULT_PARAMETERS.gcsample = true
for name ∈ graphs
path = fetch_ssmc(matrices[matrices.name .== name, :])[1]
G = GBMatrix(convert(SparseMatrixCSC{Float64}, MatrixMarket.mmread(joinpath(path, "$name.mtx"))))
SuiteSparseGraphBLAS.gbset(G, SuiteSparseGraphBLAS.FORMAT, SuiteSparseGraphBLAS.BYROW)
GC.gc()
G[:,:, mask=G, desc=SuiteSparseGraphBLAS.S] = 1
diag(G)
println("$name | $(size(G)) | $(nnz(G)) edges")
d = reduce(+, G; dims=2)
# for centrality in [PR, TC1, TC3]
for centrality in [TC3]
println("Benchmarking $(string(centrality)) on $(name)")
i = 0.0
centrality(G, d) #warmup
for run ∈ 1:3
i += @elapsed centrality(G, d)
end
println("$(string(centrality)) on $(name) over 3 runs took an average of: $(i / 3)s")
end
end
| [
3500,
26264,
50,
29572,
46912,
36307,
198,
3500,
24936,
27470,
198,
3500,
26264,
50,
29572,
37065,
9148,
1921,
198,
5606,
578,
50,
29572,
37065,
9148,
1921,
13,
70,
1443,
316,
7,
5606,
578,
50,
29572,
37065,
9148,
1921,
13,
21389,
1404,... | 2.270506 | 573 |
### A Pluto.jl notebook ###
# v0.17.4
using Markdown
using InteractiveUtils
# ╔═╡ 3668f786-9597-11eb-01a1-87d34b49eef9
begin
#packages for I/O, interpolation, etc
using MITgcmTools, MeshArrays, Plots, PlutoUI
PICKUP_hs94_download()
🏁 = "🏁"
"Downloads and packages : complete."
end
# ╔═╡ 19095067-33f5-495f-bc4d-ee6dacbf6ca8
begin
imgB="https://user-images.githubusercontent.com/20276764/113531401-b1780d00-9596-11eb-8e96-990cf9533ada.png"
md"""# Simple Atmosphere
###
In this notebook we:
1. run the Held and Suarez Benchmark
1. read MITgcm output for temperature
1. interpolate it via **`MeshArrays.jl`**
1. plot and animate
This should generate something like this:
$(Resource(imgB, :width => 240))
$(TableOfContents())
!!! note
If you use a live version of this notebook, selecting a different configuration from the list below will make the other notebook cells react (e.g. displayed contents). If you visualize an html version of this notebook, then cells wont react.
"""
end
# ╔═╡ 207e4c15-7818-4dc3-a048-1dd36ba5a73e
begin
myexp=verification_experiments("hs94.cs-32x32x5")
pth_run=joinpath(myexp.folder,string(myexp.ID),"run")
md""" ## Model Configuration
This is a data structure of type `MITgcm_config` (a concrete type of `AbstractModelConfig`).
"""
end
# ╔═╡ 2fd1ddf0-c3ee-4076-9f7f-b066da2baf50
myexp
# ╔═╡ bd0803d8-c70d-47b8-a76e-5765f4ba01c6
md""" ## Worflow Steps
The animation just above results from the following workflow steps:
- setup & compile
- run model
- process output
"""
# ╔═╡ 9238b863-dd69-42ce-8f36-995b4757cc1a
md""" #### Contents of model run folder
The first cell below list all files found in files the run directory. The second displays the end of the standard output file (e.g. `output.txt`) generated during the model run. The third is the result of the `scan_rundir` function.
"""
# ╔═╡ 88a5819f-6f35-40e8-9a82-8fd6f97001b1
md""" #### Contents of the pickup folder"""
# ╔═╡ 37294d8a-a70e-419a-a60b-11d09930c6b0
readdir(PICKUP_hs94_path)
# ╔═╡ f0185b52-2297-4e8c-b44b-8c29b634607a
md""" ## Appendices"""
# ╔═╡ fa968801-6892-4475-9b27-56472ca611b4
begin
#function used to modify model parameters (e.g. duration)
function modify_params_HS94(myexp)
par_path=joinpath(myexp.folder,string(myexp.ID),"log","tracked_parameters")
fil=joinpath(par_path,"data")
nml=read(fil,MITgcm_namelist())
nml.params[1][:useSingleCpuIO]=true
nml.params[3][:nIter0]=43200
nml.params[3][:nTimeSteps]=720
nml.params[3][:monitorFreq]= 21600.0
write(fil,nml)
#ClimateModels.git_log_fil(myexp,fil,"update parameter file : "*split(fil,"/")[end])
fil=joinpath(par_path,"data.pkg")
nml=read(fil,MITgcm_namelist())
nml.params[1][:useDiagnostics]=false
nml.params[1][:useMNC]=false
write(fil,nml)
#ClimateModels.git_log_fil(myexp,fil,"update parameter file : "*split(fil,"/")[end])
end
"helper function"
end
# ╔═╡ aad7e042-ba39-4518-8f3e-da59b77c13cb
begin
#set up run directory
setup(myexp)
#compile model if not already done
build(myexp,"--allow-skip")
#modify parameters to start from time step 43200, etc
modify_params_HS94(myexp)
#provide initial condition for time step 43200
fil1=joinpath(PICKUP_hs94_path,"pickup.0000043200.data")
fil2=joinpath(pth_run,"pickup.0000043200.data")
!isfile(fil2) ? cp(fil1,fil2) : nothing
fil1=joinpath(PICKUP_hs94_path,"pickup.0000043200.meta")
fil2=joinpath(pth_run,"pickup.0000043200.meta")
!isfile(fil2) ? cp(fil1,fil2) : nothing
#readdir(joinpath(myexp.folder,string(myexp.ID),"log"))
step1=🏁
end
# ╔═╡ 0aa37844-b4b9-4f58-adf7-15ae9a490993
begin
step1==🏁
MITgcmTools.launch(myexp)
step2=🏁
end
# ╔═╡ b77f7ff2-da7e-41b3-b3f6-3819b09cd33c
begin
step2==🏁
# isfile(joinpath(MITgcm_path[1],"verification",myexp.configuration,"build","mitgcmuv"))
isfile(joinpath(pth_run,"output.txt")) ? sc=scan_rundir(pth_run) : sc=(completed=false,)
#copy files to known location for subsequent notebooks (Makie, particles, etc)
function cp_run_dir()
p2=joinpath(PICKUP_hs94_path,"run")
tst = sc.completed&(!isdir(p2))
tst ? run(`cp -pr $pth_run $p2`) : nothing
isdir(p2)
end
cp_run_dir()
## read grid variables (for interpolation)
Γ = GridLoad_mdsio(myexp)
## setup interpolation (for plotting)
lon=[i for i=-179.5:1.0:179.5, j=-89.5:1.0:89.5]
lat=[j for i=-179.5:1.0:179.5, j=-89.5:1.0:89.5]
(f,i,j,w,_,_,_)=InterpolationFactors(Γ,vec(lon),vec(lat))
IntFac=(f,i,j,w)
#list of output files (1 per time record)
ff=readdir(pth_run); fil="T.0000"
ff=ff[findall(occursin.(fil,ff).*occursin.(".data",ff))]
nt=length(ff)
γ=Γ.XC.grid
step3=🏁
end
# ╔═╡ ee0e6f28-aa26-48de-8ddd-8bb2d1102ee9
begin
#helper: read one output file, interpolate to the lon/lat grid, and contour it
function myplot(fil,pth)
temp=read(joinpath(pth,fil),MeshArray(γ,Float64))
interpolated=Interpolate(temp,IntFac...)
contourf(vec(lon[:,1]),vec(lat[1,:]),interpolated,clims=(260.,320.))
end
#animate every `dt`-th model output record
dt=6
anim = @animate for rec in 1:dt:nt
myplot(ff[rec],pth_run)
end
#write the animation as a gif in the temporary directory
pp=tempdir()*"/"
gif(anim,pp*"hs94.cs.gif", fps = 8)
end
# ╔═╡ 0ca84f4e-f5bf-40d0-bf46-7a0e70b7aded
begin
# reference step3 so this cell runs after the post-processing cell
step3==🏁
# list the contents of the run directory for inspection
ls_run_dir=readdir(pth_run)
ls_run_dir
end
# ╔═╡ ca299148-6aa8-4379-88e3-c4500ddc779f
begin
# reference step3 so this cell runs after the post-processing cell
step3==🏁
# NOTE(review): the name `stdout` shadows Base.stdout in the notebook namespace;
# consider renaming (e.g. `run_log`) — verify no other cell reads this binding first.
stdout=readlines(joinpath(pth_run,"output.txt"))
Dump(stdout)
end
# ╔═╡ b1ca8b16-7b63-470b-90d0-6ea41eeb5211
# display the run-directory scan result (named tuple; (completed=false,) when output.txt was absent)
sc
# ╔═╡ 00000000-0000-0000-0000-000000000001
# Embedded Project.toml (package UUIDs and compat bounds) for this notebook;
# presumably maintained by Pluto's built-in package manager — do not edit by hand.
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
MITgcmTools = "62725fbc-3a66-4df3-9000-e33e85b3a198"
MeshArrays = "cb8c808f-1acf-59a3-9d2b-6e38d009f683"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
[compat]
MITgcmTools = "~0.1.32"
MeshArrays = "~0.2.31"
Plots = "~1.25.2"
PlutoUI = "~0.7.25"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.7.0"
manifest_format = "2.0"
[[deps.AWS]]
deps = ["Base64", "Compat", "Dates", "Downloads", "GitHub", "HTTP", "IniFile", "JSON", "MbedTLS", "Mocking", "OrderedCollections", "Retry", "Sockets", "URIs", "UUIDs", "XMLDict"]
git-tree-sha1 = "07d944e4d9946c2061f97c1564d1b7ae8ea8f189"
uuid = "fbe9abb3-538b-5e4e-ba9e-bc94f4f92ebc"
version = "1.61.0"
[[deps.AbstractPlutoDingetjes]]
deps = ["Pkg"]
git-tree-sha1 = "abb72771fd8895a7ebd83d5632dc4b989b022b5b"
uuid = "6e696c72-6542-2067-7265-42206c756150"
version = "1.1.2"
[[deps.Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.Blosc]]
deps = ["Blosc_jll"]
git-tree-sha1 = "575bdd70552dd9a7eaeba08ef2533226cdc50779"
uuid = "a74b3585-a348-5f62-a45c-50e91977d574"
version = "0.7.2"
[[deps.Blosc_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Lz4_jll", "Pkg", "Zlib_jll", "Zstd_jll"]
git-tree-sha1 = "91d6baa911283650df649d0aea7c28639273ae7b"
uuid = "0b7ba130-8d10-5ba8-a3d6-c5182647fed9"
version = "1.21.1+0"
[[deps.Bzip2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c3598e525718abcc440f69cc6d5f60dda0a1b61e"
uuid = "6e34b625-4abd-537c-b88f-471c36dfa7a0"
version = "1.0.6+5"
[[deps.CFTime]]
deps = ["Dates", "Printf"]
git-tree-sha1 = "bca6cb6ee746e6485ca4535f6cc29cf3579a0f20"
uuid = "179af706-886a-5703-950a-314cd64e0468"
version = "0.1.1"
[[deps.CSV]]
deps = ["CodecZlib", "Dates", "FilePathsBase", "InlineStrings", "Mmap", "Parsers", "PooledArrays", "SentinelArrays", "Tables", "Unicode", "WeakRefStrings"]
git-tree-sha1 = "49f14b6c56a2da47608fe30aed711b5882264d7a"
uuid = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
version = "0.9.11"
[[deps.Cairo_jll]]
deps = ["Artifacts", "Bzip2_jll", "Fontconfig_jll", "FreeType2_jll", "Glib_jll", "JLLWrappers", "LZO_jll", "Libdl", "Pixman_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "e2f47f6d8337369411569fd45ae5753ca10394c6"
uuid = "83423d85-b0ee-5818-9007-b63ccbeb887a"
version = "1.16.0+6"
[[deps.CatViews]]
deps = ["Random", "Test"]
git-tree-sha1 = "23d1f1e10d4e24374112fcf800ac981d14a54b24"
uuid = "81a5f4ea-a946-549a-aa7e-2a7f63a27d31"
version = "1.0.0"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "4c26b4e9e91ca528ea212927326ece5918a04b47"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.2"
[[deps.ChangesOfVariables]]
deps = ["ChainRulesCore", "LinearAlgebra", "Test"]
git-tree-sha1 = "bf98fa45a0a4cee295de98d4c1462be26345b9a1"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.2"
[[deps.ClimateModels]]
deps = ["AWS", "CFTime", "CSV", "DataFrames", "Dates", "Downloads", "Git", "NetCDF", "OrderedCollections", "Pkg", "Statistics", "Suppressor", "TOML", "Test", "UUIDs", "Zarr"]
git-tree-sha1 = "0e5e942b23049bca23fd66a08d549c234373dd1c"
uuid = "f6adb021-9183-4f40-84dc-8cea6f651bb0"
version = "0.1.21"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[deps.ColorSchemes]]
deps = ["ColorTypes", "Colors", "FixedPointNumbers", "Random"]
git-tree-sha1 = "a851fec56cb73cfdf43762999ec72eff5b86882a"
uuid = "35d6a980-a343-548e-a6ea-1d62b119f2f4"
version = "3.15.0"
[[deps.ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[deps.Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[deps.Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "44c37b4636bc54afac5c574d2d02b625349d6582"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.41.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[deps.ConstructionBase]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f74e9d5388b8620b4cee35d4c5a618dd4dc547f4"
uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9"
version = "1.3.0"
[[deps.Contour]]
deps = ["StaticArrays"]
git-tree-sha1 = "9f02045d934dc030edad45944ea80dbd1f0ebea7"
uuid = "d38c429a-6771-53c6-b99e-75d170b6e991"
version = "0.5.7"
[[deps.Crayons]]
git-tree-sha1 = "3f71217b538d7aaee0b69ab47d9b7724ca8afa0d"
uuid = "a8cc5b0e-0ffa-5ad4-8c14-923d3ee1735f"
version = "4.0.4"
[[deps.DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[deps.DataFrames]]
deps = ["Compat", "DataAPI", "Future", "InvertedIndices", "IteratorInterfaceExtensions", "LinearAlgebra", "Markdown", "Missings", "PooledArrays", "PrettyTables", "Printf", "REPL", "Reexport", "SortingAlgorithms", "Statistics", "TableTraits", "Tables", "Unicode"]
git-tree-sha1 = "cfdfef912b7f93e4b848e80b9befdf9e331bc05a"
uuid = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
version = "1.3.1"
[[deps.DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "3daef5523dd2e769dad2365274f760ff5f282c7d"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.11"
[[deps.DataValueInterfaces]]
git-tree-sha1 = "bfc1187b79289637fa0ef6d4436ebdfe6905cbd6"
uuid = "e2d170a0-9d28-54be-80f0-106bbe20a464"
version = "1.0.0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[deps.DiskArrays]]
git-tree-sha1 = "6a50d800025a1664c99a8e819e0568c75e3ac0c7"
uuid = "3c3547ce-8d99-4f5e-a174-61eb10b00ae3"
version = "0.2.12"
[[deps.Distances]]
deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "3258d0659f812acde79e8a74b11f17ac06d0ca04"
uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
version = "0.10.7"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[deps.Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[deps.EarCut_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "3f3a2501fa7236e9b911e0f7a588c657e822bb6d"
uuid = "5ae413db-bbd1-5e63-b57d-d24a61df00f5"
version = "2.2.3+0"
[[deps.Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b3bfd02e98aedfa5cf885665493c5598c350cd2f"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.2.10+0"
[[deps.ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[deps.EzXML]]
deps = ["Printf", "XML2_jll"]
git-tree-sha1 = "0fa3b52a04a4e210aeb1626def9c90df3ae65268"
uuid = "8f5d6c58-4d21-5cfd-889c-e3ad7ee6a615"
version = "1.1.0"
[[deps.FFMPEG]]
deps = ["FFMPEG_jll"]
git-tree-sha1 = "b57e3acbe22f8484b4b5ff66a7499717fe1a9cc8"
uuid = "c87230d0-a227-11e9-1b43-d7ebe4e7570a"
version = "0.4.1"
[[deps.FFMPEG_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "LAME_jll", "LibVPX_jll", "Libdl", "Ogg_jll", "OpenSSL_jll", "Opus_jll", "Pkg", "Zlib_jll", "libass_jll", "libfdk_aac_jll", "libvorbis_jll", "x264_jll", "x265_jll"]
git-tree-sha1 = "3cc57ad0a213808473eafef4845a74766242e05f"
uuid = "b22a6f82-2f65-5046-a5b2-351ab43fb4e5"
version = "4.3.1+4"
[[deps.FilePathsBase]]
deps = ["Compat", "Dates", "Mmap", "Printf", "Test", "UUIDs"]
git-tree-sha1 = "04d13bfa8ef11720c24e4d840c0033d145537df7"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.17"
[[deps.FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[deps.Fontconfig_jll]]
deps = ["Artifacts", "Bzip2_jll", "Expat_jll", "FreeType2_jll", "JLLWrappers", "Libdl", "Libuuid_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "35895cf184ceaab11fd778b4590144034a167a2f"
uuid = "a3f928ae-7b40-5064-980b-68af3947d34b"
version = "2.13.1+14"
[[deps.Formatting]]
deps = ["Printf"]
git-tree-sha1 = "8339d61043228fdd3eb658d86c926cb282ae72a8"
uuid = "59287772-0a20-5a39-b81b-1366585eb4c0"
version = "0.4.2"
[[deps.FreeType2_jll]]
deps = ["Artifacts", "Bzip2_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "cbd58c9deb1d304f5a245a0b7eb841a2560cfec6"
uuid = "d7e528f0-a631-5988-bf34-fe36492bcfd7"
version = "2.10.1+5"
[[deps.FriBidi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "aa31987c2ba8704e23c6c8ba8a4f769d5d7e4f91"
uuid = "559328eb-81f9-559d-9380-de523a88c83c"
version = "1.0.10+0"
[[deps.Future]]
deps = ["Random"]
uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"
[[deps.GLFW_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libglvnd_jll", "Pkg", "Xorg_libXcursor_jll", "Xorg_libXi_jll", "Xorg_libXinerama_jll", "Xorg_libXrandr_jll"]
git-tree-sha1 = "0c603255764a1fa0b61752d2bec14cfbd18f7fe8"
uuid = "0656b61e-2033-5cc2-a64a-77c0f6c09b89"
version = "3.3.5+1"
[[deps.GR]]
deps = ["Base64", "DelimitedFiles", "GR_jll", "HTTP", "JSON", "Libdl", "LinearAlgebra", "Pkg", "Printf", "Random", "Serialization", "Sockets", "Test", "UUIDs"]
git-tree-sha1 = "30f2b340c2fff8410d89bfcdc9c0a6dd661ac5f7"
uuid = "28b8d3ca-fb5f-59d9-8090-bfdbd6d07a71"
version = "0.62.1"
[[deps.GR_jll]]
deps = ["Artifacts", "Bzip2_jll", "Cairo_jll", "FFMPEG_jll", "Fontconfig_jll", "GLFW_jll", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Libtiff_jll", "Pixman_jll", "Pkg", "Qt5Base_jll", "Zlib_jll", "libpng_jll"]
git-tree-sha1 = "d59e8320c2747553788e4fc42231489cc602fa50"
uuid = "d2c73de3-f751-5644-a686-071e5b155ba9"
version = "0.58.1+0"
[[deps.GeometryBasics]]
deps = ["EarCut_jll", "IterTools", "LinearAlgebra", "StaticArrays", "StructArrays", "Tables"]
git-tree-sha1 = "58bcdf5ebc057b085e58d95c138725628dd7453c"
uuid = "5c1252a2-5f33-56bf-86c9-59e7332b4326"
version = "0.4.1"
[[deps.Gettext_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "8c14294a079216000a0bdca5ec5a447f073ddc9d"
uuid = "78b55507-aeef-58d4-861c-77aaff3498b1"
version = "0.20.1+7"
[[deps.Git]]
deps = ["Git_jll"]
git-tree-sha1 = "d7bffc3fe097e9589145493c08c41297b457e5d0"
uuid = "d7ba0133-e1db-5d97-8f8c-041e4b3a1eb2"
version = "1.2.1"
[[deps.GitHub]]
deps = ["Base64", "Dates", "HTTP", "JSON", "MbedTLS", "Sockets", "SodiumSeal"]
git-tree-sha1 = "c8594dff1ed76e232d8063b2a2555335900af6f3"
uuid = "bc5e4493-9b4d-5f90-b8aa-2b2bcaad7a26"
version = "5.7.0"
[[deps.Git_jll]]
deps = ["Artifacts", "Expat_jll", "Gettext_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "33be385f3432a5a5b7f6965af9592d4407f3167f"
uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb"
version = "2.31.0+0"
[[deps.Glib_jll]]
deps = ["Artifacts", "Gettext_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Libiconv_jll", "Libmount_jll", "PCRE_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "04690cc5008b38ecbdfede949220bc7d9ba26397"
uuid = "7746bdde-850d-59dc-9ae8-88ece973131d"
version = "2.59.0+4"
[[deps.Grisu]]
git-tree-sha1 = "53bb909d1151e57e2484c3d1b53e19552b887fb2"
uuid = "42e2da0e-8278-4e71-bc24-59509adca0fe"
version = "1.0.2"
[[deps.HDF5_jll]]
deps = ["Artifacts", "JLLWrappers", "LibCURL_jll", "Libdl", "OpenSSL_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "fd83fa0bde42e01952757f01149dd968c06c4dba"
uuid = "0234f1f7-429e-5d53-9886-15a909be8d59"
version = "1.12.0+1"
[[deps.HTTP]]
deps = ["Base64", "Dates", "IniFile", "Logging", "MbedTLS", "NetworkOptions", "Sockets", "URIs"]
git-tree-sha1 = "0fa77022fe4b511826b39c894c90daf5fce3334a"
uuid = "cd3eb016-35fb-5094-929b-558a96fad6f3"
version = "0.9.17"
[[deps.Hyperscript]]
deps = ["Test"]
git-tree-sha1 = "8d511d5b81240fc8e6802386302675bdf47737b9"
uuid = "47d2ed2b-36de-50cf-bf87-49c2cf4b8b91"
version = "0.0.4"
[[deps.HypertextLiteral]]
git-tree-sha1 = "2b078b5a615c6c0396c77810d92ee8c6f470d238"
uuid = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
version = "0.9.3"
[[deps.IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2"
[[deps.IniFile]]
deps = ["Test"]
git-tree-sha1 = "098e4d2c533924c921f9f9847274f2ad89e018b8"
uuid = "83e8ac13-25f8-5344-8a64-a9f2b223428f"
version = "0.5.0"
[[deps.InlineStrings]]
deps = ["Parsers"]
git-tree-sha1 = "8d70835a3759cdd75881426fced1508bb7b7e1b6"
uuid = "842dd82b-1e85-43dc-bf29-5d0ee9dffc48"
version = "1.1.1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.2"
[[deps.InvertedIndices]]
git-tree-sha1 = "bee5f1ef5bf65df56bdd2e40447590b272a5471f"
uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
version = "1.1.0"
[[deps.IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[deps.IterTools]]
git-tree-sha1 = "fa6287a4469f5e048d763df38279ee729fbd44e5"
uuid = "c8e1da08-722c-5040-9ed9-7db0dc04731e"
version = "1.4.0"
[[deps.IteratorInterfaceExtensions]]
git-tree-sha1 = "a3f24677c21f5bbe9d2a714f95dcd58337fb2856"
uuid = "82899510-4779-5014-852e-03e436cf321d"
version = "1.0.0"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.2"
[[deps.JpegTurbo_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "d735490ac75c5cb9f1b00d8b5509c11984dc6943"
uuid = "aacddb02-875f-59d6-b918-886e6ef4fbf8"
version = "2.1.0+0"
[[deps.LAME_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "f6250b16881adf048549549fba48b1161acdac8c"
uuid = "c1c5ebd0-6772-5130-a774-d5fcae4a789d"
version = "3.100.1+0"
[[deps.LRUCache]]
git-tree-sha1 = "d64a0aff6691612ab9fb0117b0995270871c5dfc"
uuid = "8ac3fa9e-de4c-5943-b1dc-09c6b5f20637"
version = "1.3.0"
[[deps.LZO_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "e5b909bcf985c5e2605737d2ce278ed791b89be6"
uuid = "dd4b983a-f0e5-5f8d-a1b7-129d4a5fb1ac"
version = "2.10.1+0"
[[deps.LaTeXStrings]]
git-tree-sha1 = "f2355693d6778a178ade15952b7ac47a4ff97996"
uuid = "b964fa9f-0449-5b57-a5c2-d3ea65f4040f"
version = "1.3.0"
[[deps.Latexify]]
deps = ["Formatting", "InteractiveUtils", "LaTeXStrings", "MacroTools", "Markdown", "Printf", "Requires"]
git-tree-sha1 = "a8f4f279b6fa3c3c4f1adadd78a621b13a506bce"
uuid = "23fbe1c1-3f47-55db-b15f-69d7ec21a316"
version = "0.15.9"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[deps.LibVPX_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "12ee7e23fa4d18361e7c2cde8f8337d4c3101bc7"
uuid = "dd192d2f-8180-539f-9fb4-cc70b1dcf69a"
version = "1.10.0+0"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.Libffi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "0b4a5d71f3e5200a7dff793393e09dfc2d874290"
uuid = "e9f186c6-92d2-5b65-8a66-fee21dc1b490"
version = "3.2.2+1"
[[deps.Libgcrypt_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgpg_error_jll", "Pkg"]
git-tree-sha1 = "64613c82a59c120435c067c2b809fc61cf5166ae"
uuid = "d4300ac3-e22c-5743-9152-c294e39db1e4"
version = "1.8.7+0"
[[deps.Libglvnd_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll", "Xorg_libXext_jll"]
git-tree-sha1 = "7739f837d6447403596a75d19ed01fd08d6f56bf"
uuid = "7e76a0d4-f3c7-5321-8279-8d96eeed0f29"
version = "1.3.0+3"
[[deps.Libgpg_error_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "c333716e46366857753e273ce6a69ee0945a6db9"
uuid = "7add5ba3-2f88-524e-9cd5-f83b8a55f7b8"
version = "1.42.0+0"
[[deps.Libiconv_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778"
uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531"
version = "1.16.1+1"
[[deps.Libmount_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9c30530bf0effd46e15e0fdcf2b8636e78cbbd73"
uuid = "4b2f31a3-9ecc-558c-b454-b3730dcb73e9"
version = "2.35.0+0"
[[deps.Libtiff_jll]]
deps = ["Artifacts", "JLLWrappers", "JpegTurbo_jll", "Libdl", "Pkg", "Zlib_jll", "Zstd_jll"]
git-tree-sha1 = "340e257aada13f95f98ee352d316c3bed37c8ab9"
uuid = "89763e89-9b03-5906-acba-b20f662cd828"
version = "4.3.0+0"
[[deps.Libuuid_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7f3efec06033682db852f8b3bc3c1d2b0a0ab066"
uuid = "38a345b3-de98-5d2b-a5d3-14cd9215e700"
version = "2.36.0+0"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "e5718a00af0ab9756305a0392832c8952c7426c1"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.6"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.Lz4_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "5d494bc6e85c4c9b626ee0cab05daa4085486ab1"
uuid = "5ced341a-0733-55b8-9ab6-a4889d929147"
version = "1.9.3+0"
[[deps.MITgcmTools]]
deps = ["Artifacts", "ClimateModels", "DataFrames", "Dates", "LazyArtifacts", "MeshArrays", "NetCDF", "OrderedCollections", "Printf", "SparseArrays", "Suppressor", "UUIDs"]
git-tree-sha1 = "85fd18c07803b16af3e317e9568d526f5ca61853"
uuid = "62725fbc-3a66-4df3-9000-e33e85b3a198"
version = "0.1.32"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS]]
deps = ["Dates", "MbedTLS_jll", "Random", "Sockets"]
git-tree-sha1 = "1c38e51c3d08ef2278062ebceade0e46cefc96fe"
uuid = "739be429-bea8-5141-9913-cc70e7f3736d"
version = "1.0.3"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[deps.Measures]]
git-tree-sha1 = "e498ddeee6f9fdb4551ce855a46f54dbd900245f"
uuid = "442fdcdd-2543-5da2-b0f3-8c86c306513e"
version = "0.3.1"
[[deps.MeshArrays]]
deps = ["CatViews", "Dates", "Downloads", "LazyArtifacts", "NearestNeighbors", "Pkg", "Printf", "SparseArrays", "Statistics", "Unitful"]
git-tree-sha1 = "c5b9b98540a900934d9b531f8a153ce49016cec7"
uuid = "cb8c808f-1acf-59a3-9d2b-6e38d009f683"
version = "0.2.31"
[[deps.Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.Mocking]]
deps = ["Compat", "ExprTools"]
git-tree-sha1 = "29714d0a7a8083bba8427a4fbfb00a540c681ce7"
uuid = "78c3b35d-d492-501b-9361-3d52fe80e533"
version = "0.7.3"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[deps.NaNMath]]
git-tree-sha1 = "f755f36b19a5116bb580de457cda0c140153f283"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.6"
[[deps.NearestNeighbors]]
deps = ["Distances", "StaticArrays"]
git-tree-sha1 = "16baacfdc8758bc374882566c9187e785e85c2f0"
uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce"
version = "0.4.9"
[[deps.NetCDF]]
deps = ["DiskArrays", "Formatting", "NetCDF_jll"]
git-tree-sha1 = "23b0e32fde256a4e2e497e678abcf956ed26204b"
uuid = "30363a11-5582-574a-97bb-aa9a979735b9"
version = "0.11.3"
[[deps.NetCDF_jll]]
deps = ["Artifacts", "HDF5_jll", "JLLWrappers", "LibCURL_jll", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Pkg", "Zlib_jll", "nghttp2_jll"]
git-tree-sha1 = "0cf4d1bf2ef45156aed85c9ac5f8c7e697d9288c"
uuid = "7243133f-43d8-5620-bbf4-c2c921802cf3"
version = "400.702.400+0"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[deps.OffsetArrays]]
deps = ["Adapt"]
git-tree-sha1 = "043017e0bdeff61cfbb7afeb558ab29536bbb5ed"
uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881"
version = "1.10.8"
[[deps.Ogg_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7937eda4681660b4d6aeeecc2f7e1c81c8ee4e2f"
uuid = "e7412a2a-1a6e-54c0-be00-318e2571c051"
version = "1.3.5+0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "15003dcb7d8db3c6c857fda14891a539a8f2705a"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "1.1.10+0"
[[deps.Opus_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "51a08fb14ec28da2ec7a927c4337e4332c2a4720"
uuid = "91d4177d-7536-5919-b921-800302f37372"
version = "1.3.2+0"
[[deps.OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[deps.PCRE2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15"
[[deps.PCRE_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b2a7af664e098055a7529ad1a900ded962bca488"
uuid = "2f80f16e-611a-54ab-bc61-aa92de5b98fc"
version = "8.44.0+0"
[[deps.Parsers]]
deps = ["Dates"]
git-tree-sha1 = "d7fa6237da8004be601e19bd6666083056649918"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.1.3"
[[deps.Pixman_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "b4f5d02549a10e20780a24fce72bea96b6329e29"
uuid = "30392449-352a-5448-841d-b1acce4e97dc"
version = "0.40.1+0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[deps.PlotThemes]]
deps = ["PlotUtils", "Requires", "Statistics"]
git-tree-sha1 = "a3a964ce9dc7898193536002a6dd892b1b5a6f1d"
uuid = "ccf2f8ad-2431-5c83-bf29-c5338b663b6a"
version = "2.0.1"
[[deps.PlotUtils]]
deps = ["ColorSchemes", "Colors", "Dates", "Printf", "Random", "Reexport", "Statistics"]
git-tree-sha1 = "e4fe0b50af3130ddd25e793b471cb43d5279e3e6"
uuid = "995b91a9-d308-5afd-9ec6-746e21dbc043"
version = "1.1.1"
[[deps.Plots]]
deps = ["Base64", "Contour", "Dates", "Downloads", "FFMPEG", "FixedPointNumbers", "GR", "GeometryBasics", "JSON", "Latexify", "LinearAlgebra", "Measures", "NaNMath", "PlotThemes", "PlotUtils", "Printf", "REPL", "Random", "RecipesBase", "RecipesPipeline", "Reexport", "Requires", "Scratch", "Showoff", "SparseArrays", "Statistics", "StatsBase", "UUIDs", "UnicodeFun"]
git-tree-sha1 = "65ebc27d8c00c84276f14aaf4ff63cbe12016c70"
uuid = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
version = "1.25.2"
[[deps.PlutoUI]]
deps = ["AbstractPlutoDingetjes", "Base64", "ColorTypes", "Dates", "Hyperscript", "HypertextLiteral", "IOCapture", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "UUIDs"]
git-tree-sha1 = "93cf0910f09a9607add290a3a2585aa376b4feb6"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.25"
[[deps.PooledArrays]]
deps = ["DataAPI", "Future"]
git-tree-sha1 = "db3a23166af8aebf4db5ef87ac5b00d36eb771e2"
uuid = "2dfb63ee-cc39-5dd5-95bd-886bf059d720"
version = "1.4.0"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[deps.PrettyTables]]
deps = ["Crayons", "Formatting", "Markdown", "Reexport", "Tables"]
git-tree-sha1 = "b7ff9f9ce50eab241e978cd975ad4ae113f5a41f"
uuid = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
version = "1.3.0"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.Qt5Base_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Fontconfig_jll", "Glib_jll", "JLLWrappers", "Libdl", "Libglvnd_jll", "OpenSSL_jll", "Pkg", "Xorg_libXext_jll", "Xorg_libxcb_jll", "Xorg_xcb_util_image_jll", "Xorg_xcb_util_keysyms_jll", "Xorg_xcb_util_renderutil_jll", "Xorg_xcb_util_wm_jll", "Zlib_jll", "xkbcommon_jll"]
git-tree-sha1 = "16626cfabbf7206d60d84f2bf4725af7b37d4a77"
uuid = "ea2cea3b-5b76-57ae-a6ef-0a8af62496e1"
version = "5.15.2+0"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.RecipesBase]]
git-tree-sha1 = "6bf3f380ff52ce0832ddd3a2a7b9538ed1bcca7d"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
version = "1.2.1"
[[deps.RecipesPipeline]]
deps = ["Dates", "NaNMath", "PlotUtils", "RecipesBase"]
git-tree-sha1 = "7ad0dfa8d03b7bcf8c597f59f5292801730c55b8"
uuid = "01d81517-befc-4cb6-b9ec-a95719d0359c"
version = "0.4.1"
[[deps.Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[deps.Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "8f82019e525f4d5c669692772a6f4b0a58b06a6a"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.2.0"
[[deps.Retry]]
git-tree-sha1 = "41ac127cd281bb33e42aba46a9d3b25cd35fc6d5"
uuid = "20febd7b-183b-5ae2-ac4a-720e7ce64774"
version = "0.4.1"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[deps.Scratch]]
deps = ["Dates"]
git-tree-sha1 = "0b4b7f1393cff97c33891da2a0bf69c6ed241fda"
uuid = "6c6a2e73-6563-6170-7368-637461726353"
version = "1.1.0"
[[deps.SentinelArrays]]
deps = ["Dates", "Random"]
git-tree-sha1 = "244586bc07462d22aed0113af9c731f2a518c93e"
uuid = "91c51154-3ec4-41a3-a24f-3f23e20d615c"
version = "1.3.10"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.Showoff]]
deps = ["Dates", "Grisu"]
git-tree-sha1 = "91eddf657aca81df9ae6ceb20b959ae5653ad1de"
uuid = "992d4aef-0814-514b-bc4d-f2e9a6c4116f"
version = "1.0.3"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SodiumSeal]]
deps = ["Base64", "Libdl", "libsodium_jll"]
git-tree-sha1 = "80cef67d2953e33935b41c6ab0a178b9987b1c99"
uuid = "2133526b-2bfb-4018-ac12-889fb3908a75"
version = "0.1.1"
[[deps.SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.13"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.StatsAPI]]
git-tree-sha1 = "0f2aa8e32d511f758a2ce49208181f7733a0936a"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.1.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "2bb0cb32026a66037360606510fca5984ccc6b75"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.13"
[[deps.StructArrays]]
deps = ["Adapt", "DataAPI", "StaticArrays", "Tables"]
git-tree-sha1 = "2ce41e0d042c60ecd131e9fb7154a3bfadbf50d3"
uuid = "09ab397b-f2b6-538f-b94a-2f83cf4a842a"
version = "0.6.3"
[[deps.Suppressor]]
git-tree-sha1 = "a819d77f31f83e5792a76081eee1ea6342ab8787"
uuid = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
version = "0.2.0"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[deps.TableTraits]]
deps = ["IteratorInterfaceExtensions"]
git-tree-sha1 = "c06b2f539df1c6efa794486abfb6ed2022561a39"
uuid = "3783bdb8-4a98-5b6b-af9a-565f29a5fe9c"
version = "1.0.1"
[[deps.Tables]]
deps = ["DataAPI", "DataValueInterfaces", "IteratorInterfaceExtensions", "LinearAlgebra", "TableTraits", "Test"]
git-tree-sha1 = "bb1064c9a84c52e277f1096cf41434b675cd368b"
uuid = "bd369af6-aec1-5ad0-b16a-f7cc5008161c"
version = "1.6.1"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[deps.URIs]]
git-tree-sha1 = "97bbe755a53fe859669cd907f2d96aee8d2c1355"
uuid = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
version = "1.3.0"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.UnicodeFun]]
deps = ["REPL"]
git-tree-sha1 = "53915e50200959667e78a92a418594b428dffddf"
uuid = "1cfade01-22cf-5700-b092-accc4b62d6e1"
version = "0.4.1"
[[deps.Unitful]]
deps = ["ConstructionBase", "Dates", "LinearAlgebra", "Random"]
git-tree-sha1 = "0992ed0c3ef66b0390e5752fe60054e5ff93b908"
uuid = "1986cc42-f94f-5a68-af5c-568840ba703d"
version = "1.9.2"
[[deps.Wayland_jll]]
deps = ["Artifacts", "Expat_jll", "JLLWrappers", "Libdl", "Libffi_jll", "Pkg", "XML2_jll"]
git-tree-sha1 = "3e61f0b86f90dacb0bc0e73a0c5a83f6a8636e23"
uuid = "a2964d1f-97da-50d4-b82a-358c7fce9d89"
version = "1.19.0+0"
[[deps.Wayland_protocols_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "66d72dc6fcc86352f01676e8f0f698562e60510f"
uuid = "2381bf8a-dfd0-557d-9999-79630e7b1b91"
version = "1.23.0+0"
[[deps.WeakRefStrings]]
deps = ["DataAPI", "InlineStrings", "Parsers"]
git-tree-sha1 = "c69f9da3ff2f4f02e811c3323c22e5dfcb584cfa"
uuid = "ea10d353-3f73-51f8-a26c-33c1cb351aa5"
version = "1.4.1"
[[deps.XML2_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"]
git-tree-sha1 = "1acf5bdf07aa0907e0a37d3718bb88d4b687b74a"
uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a"
version = "2.9.12+0"
[[deps.XMLDict]]
deps = ["EzXML", "IterTools", "OrderedCollections"]
git-tree-sha1 = "d9a3faf078210e477b291c79117676fca54da9dd"
uuid = "228000da-037f-5747-90a9-8195ccbf91a5"
version = "0.4.1"
[[deps.XSLT_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Libgcrypt_jll", "Libgpg_error_jll", "Libiconv_jll", "Pkg", "XML2_jll", "Zlib_jll"]
git-tree-sha1 = "91844873c4085240b95e795f692c4cec4d805f8a"
uuid = "aed1982a-8fda-507f-9586-7b0439959a61"
version = "1.1.34+0"
[[deps.Xorg_libX11_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll", "Xorg_xtrans_jll"]
git-tree-sha1 = "5be649d550f3f4b95308bf0183b82e2582876527"
uuid = "4f6342f7-b3d2-589e-9d20-edeb45f2b2bc"
version = "1.6.9+4"
[[deps.Xorg_libXau_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4e490d5c960c314f33885790ed410ff3a94ce67e"
uuid = "0c0b7dd1-d40b-584c-a123-a41640f87eec"
version = "1.0.9+4"
[[deps.Xorg_libXcursor_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXfixes_jll", "Xorg_libXrender_jll"]
git-tree-sha1 = "12e0eb3bc634fa2080c1c37fccf56f7c22989afd"
uuid = "935fb764-8cf2-53bf-bb30-45bb1f8bf724"
version = "1.2.0+4"
[[deps.Xorg_libXdmcp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "4fe47bd2247248125c428978740e18a681372dd4"
uuid = "a3789734-cfe1-5b06-b2d0-1dd0d9d62d05"
version = "1.1.3+4"
[[deps.Xorg_libXext_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "b7c0aa8c376b31e4852b360222848637f481f8c3"
uuid = "1082639a-0dae-5f34-9b06-72781eeb8cb3"
version = "1.3.4+4"
[[deps.Xorg_libXfixes_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "0e0dc7431e7a0587559f9294aeec269471c991a4"
uuid = "d091e8ba-531a-589c-9de9-94069b037ed8"
version = "5.0.3+4"
[[deps.Xorg_libXi_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXfixes_jll"]
git-tree-sha1 = "89b52bc2160aadc84d707093930ef0bffa641246"
uuid = "a51aa0fd-4e3c-5386-b890-e753decda492"
version = "1.7.10+4"
[[deps.Xorg_libXinerama_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll"]
git-tree-sha1 = "26be8b1c342929259317d8b9f7b53bf2bb73b123"
uuid = "d1454406-59df-5ea1-beac-c340f2130bc3"
version = "1.1.4+4"
[[deps.Xorg_libXrandr_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libXext_jll", "Xorg_libXrender_jll"]
git-tree-sha1 = "34cea83cb726fb58f325887bf0612c6b3fb17631"
uuid = "ec84b674-ba8e-5d96-8ba1-2a689ba10484"
version = "1.5.2+4"
[[deps.Xorg_libXrender_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "19560f30fd49f4d4efbe7002a1037f8c43d43b96"
uuid = "ea2f1a96-1ddc-540d-b46f-429655e07cfa"
version = "0.9.10+4"
[[deps.Xorg_libpthread_stubs_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6783737e45d3c59a4a4c4091f5f88cdcf0908cbb"
uuid = "14d82f49-176c-5ed1-bb49-ad3f5cbd8c74"
version = "0.1.0+3"
[[deps.Xorg_libxcb_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "XSLT_jll", "Xorg_libXau_jll", "Xorg_libXdmcp_jll", "Xorg_libpthread_stubs_jll"]
git-tree-sha1 = "daf17f441228e7a3833846cd048892861cff16d6"
uuid = "c7cfdc94-dc32-55de-ac96-5a1b8d977c5b"
version = "1.13.0+3"
[[deps.Xorg_libxkbfile_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libX11_jll"]
git-tree-sha1 = "926af861744212db0eb001d9e40b5d16292080b2"
uuid = "cc61e674-0454-545c-8b26-ed2c68acab7a"
version = "1.1.0+4"
[[deps.Xorg_xcb_util_image_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "0fab0a40349ba1cba2c1da699243396ff8e94b97"
uuid = "12413925-8142-5f55-bb0e-6d7ca50bb09b"
version = "0.4.0+1"
[[deps.Xorg_xcb_util_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxcb_jll"]
git-tree-sha1 = "e7fd7b2881fa2eaa72717420894d3938177862d1"
uuid = "2def613f-5ad1-5310-b15b-b15d46f528f5"
version = "0.4.0+1"
[[deps.Xorg_xcb_util_keysyms_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "d1151e2c45a544f32441a567d1690e701ec89b00"
uuid = "975044d2-76e6-5fbe-bf08-97ce7c6574c7"
version = "0.4.0+1"
[[deps.Xorg_xcb_util_renderutil_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "dfd7a8f38d4613b6a575253b3174dd991ca6183e"
uuid = "0d47668e-0667-5a69-a72c-f761630bfb7e"
version = "0.3.9+1"
[[deps.Xorg_xcb_util_wm_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xcb_util_jll"]
git-tree-sha1 = "e78d10aab01a4a154142c5006ed44fd9e8e31b67"
uuid = "c22f9ab0-d5fe-5066-847c-f4bb1cd4e361"
version = "0.4.1+1"
[[deps.Xorg_xkbcomp_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_libxkbfile_jll"]
git-tree-sha1 = "4bcbf660f6c2e714f87e960a171b119d06ee163b"
uuid = "35661453-b289-5fab-8a00-3d9160c6a3a4"
version = "1.4.2+4"
[[deps.Xorg_xkeyboard_config_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Xorg_xkbcomp_jll"]
git-tree-sha1 = "5c8424f8a67c3f2209646d4425f3d415fee5931d"
uuid = "33bec58e-1273-512f-9401-5d533626f822"
version = "2.27.0+4"
[[deps.Xorg_xtrans_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "79c31e7844f6ecf779705fbc12146eb190b7d845"
uuid = "c5fb5394-a638-5e4d-96e5-b29de1b5cf10"
version = "1.4.0+3"
[[deps.Zarr]]
deps = ["AWS", "Blosc", "CodecZlib", "DataStructures", "Dates", "DiskArrays", "HTTP", "JSON", "LRUCache", "OffsetArrays", "Pkg", "URIs"]
git-tree-sha1 = "7238cf588d2def313a65b63ea9bba07aa762f26b"
uuid = "0a941bbe-ad1d-11e8-39d9-ab76183a1d99"
version = "0.7.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[deps.Zstd_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "cc4bf3fdde8b7e3e9fa0351bdeedba1cf3b7f6e6"
uuid = "3161d3a3-bdf6-5164-811a-617609db77b4"
version = "1.5.0+0"
[[deps.libass_jll]]
deps = ["Artifacts", "Bzip2_jll", "FreeType2_jll", "FriBidi_jll", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "acc685bcf777b2202a904cdcb49ad34c2fa1880c"
uuid = "0ac62f75-1d6f-5e53-bd7c-93b484bb37c0"
version = "0.14.0+4"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
[[deps.libfdk_aac_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "7a5780a0d9c6864184b3a2eeeb833a0c871f00ab"
uuid = "f638f0a6-7fb0-5443-88ba-1cc74229b280"
version = "0.1.6+4"
[[deps.libpng_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"]
git-tree-sha1 = "94d180a6d2b5e55e447e2d27a29ed04fe79eb30c"
uuid = "b53b4c65-9356-5827-b1ea-8c7a1a84506f"
version = "1.6.38+0"
[[deps.libsodium_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "848ab3d00fe39d6fbc2a8641048f8f272af1c51e"
uuid = "a9144af2-ca23-56d9-984f-0d03f7b5ccf8"
version = "1.0.20+0"
[[deps.libvorbis_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Ogg_jll", "Pkg"]
git-tree-sha1 = "c45f4e40e7aafe9d086379e5578947ec8b95a8fb"
uuid = "f27f6e37-5d2b-51aa-960f-b287f2bc3b7a"
version = "1.3.7+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
[[deps.x264_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "d713c1ce4deac133e3334ee12f4adff07f81778f"
uuid = "1270edf5-f2f9-52d2-97e9-ab00b5d0237a"
version = "2020.7.14+2"
[[deps.x265_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "487da2f8f2f0c8ee0e83f39d13037d6bbf0a45ab"
uuid = "dfaa095f-4041-5dcd-9319-2fabd8486b76"
version = "3.0.0+3"
[[deps.xkbcommon_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Wayland_jll", "Wayland_protocols_jll", "Xorg_libxcb_jll", "Xorg_xkeyboard_config_jll"]
git-tree-sha1 = "ece2350174195bb31de1a63bea3a41ae1aa593b6"
uuid = "d8fb68d0-12a3-5cfd-a85a-d49703b185fd"
version = "0.9.1+5"
"""
# ╔═╡ Cell order:
# ╟─19095067-33f5-495f-bc4d-ee6dacbf6ca8
# ╟─207e4c15-7818-4dc3-a048-1dd36ba5a73e
# ╟─2fd1ddf0-c3ee-4076-9f7f-b066da2baf50
# ╟─ee0e6f28-aa26-48de-8ddd-8bb2d1102ee9
# ╟─bd0803d8-c70d-47b8-a76e-5765f4ba01c6
# ╠═aad7e042-ba39-4518-8f3e-da59b77c13cb
# ╠═0aa37844-b4b9-4f58-adf7-15ae9a490993
# ╟─b77f7ff2-da7e-41b3-b3f6-3819b09cd33c
# ╟─9238b863-dd69-42ce-8f36-995b4757cc1a
# ╟─0ca84f4e-f5bf-40d0-bf46-7a0e70b7aded
# ╟─ca299148-6aa8-4379-88e3-c4500ddc779f
# ╟─b1ca8b16-7b63-470b-90d0-6ea41eeb5211
# ╟─88a5819f-6f35-40e8-9a82-8fd6f97001b1
# ╟─37294d8a-a70e-419a-a60b-11d09930c6b0
# ╟─f0185b52-2297-4e8c-b44b-8c29b634607a
# ╟─3668f786-9597-11eb-01a1-87d34b49eef9
# ╟─fa968801-6892-4475-9b27-56472ca611b4
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
| [
21017,
317,
32217,
13,
20362,
20922,
44386,
198,
2,
410,
15,
13,
1558,
13,
19,
198,
198,
3500,
2940,
2902,
198,
3500,
21365,
18274,
4487,
198,
198,
2,
2343,
243,
242,
28670,
22880,
94,
513,
35809,
69,
46302,
12,
24,
43239,
12,
1157,... | 1.859883 | 24,815 |
<filename>src/Geometry/Primitives/Chebyshev.jl
# MIT License
# Copyright (c) Microsoft Corporation.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
"""
Module to enclose [Chebyshev polynomial](https://en.wikipedia.org/wiki/Chebyshev_polynomials) specific functionality.
"""
module Chebyshev
using OpticSim: NaNsafeacos
"""
T(n::Int, q::R, fast::Bool = true) -> R
Evaluate Chebyshev polynomial of the first kind ``T_n(q)``.
`fast` will use trigonometric definition, rather than the recursive definition which is much faster but slightly less precise.
"""
@inline function T(n::Int, q::R, fast::Bool = true)::R where {R<:Real}
@assert n >= 0
if n === 0
return one(R)
elseif n === 1
return q
elseif fast && n > 3
if abs(q) < one(R)
return cos(n * NaNsafeacos(q))
elseif q >= one(R)
return cosh(n * acosh(q))
else
return (-1)^n * cosh(n * acosh(-q))
end
else
return 2q * T(n - 1, q, fast) - T(n - 2, q, fast)
end
end
"""
U(n::Int, q::R, fast::Bool = true) -> R
Evaluate Chebyshev polynomial of the second kind ``U_n(q)``.
`fast` will use trigonometric definition, rather than the recursive definition which is much faster but slightly less precise.
"""
@inline function U(n::Int, q::R, fast::Bool = true)::R where {R<:Real}
@assert n >= 0
if n === 0
return one(R)
elseif n === 1
return 2q
elseif abs(q) < one(R) && fast && q > 3
# much faster but not stable at |q| = 1
aq = NaNsafeacos(q)
return sin((n + 1) * aq) / sin(aq)
else
return 2q * U(n - 1, q, fast) - U(n - 2, q, fast)
end
end
"""
dTdq(n::Int, q::R, fast::Bool = true) -> R
Evaluate derivative of Chebyshev polynomial of the first kind ``\\frac{dT_n}{dq}(q)``.
`fast` will use trigonometric definition, rather than the recursive definition which is much faster but slightly less precise.
"""
@inline function dTdq(n::Int, q::R, fast::Bool = true)::R where {R<:Real}
@assert n >= 0
if n === 0
return zero(R)
elseif n === 1
return one(R)
elseif fast && n > 4
if abs(q) == one(R)
return q^(n + 1) * n^2
elseif abs(q) < one(R)
return n * sin(n * acos(q)) / sqrt(1 - q^2)
elseif q > one(R)
return n * sinh(n * acosh(q)) / sqrt(q^2 - 1)
else
return -n * (-1)^n * sinh(n * acosh(-q)) / sqrt(q^2 - 1)
end
else
return n * U(n - 1, q, fast)
end
end
# dUdq(n::Int, q::R)::R where {R<:Real} = ((n + 1) * T(n + 1, q) - q * U(n, q)) / (q^2 - one(T))
# d2Tq2(n::Int, q::R)::R where {R<:Real} = n * ((n + 1) * T(n, q) - U(n, q)) / (q^2 - 1)
end # module Chebyshev
"""
ChebyshevSurface{T,N,P,Q} <: ParametricSurface{T,N}
Rectangular surface incorporating Chebyshev polynomials as well as radius and conic terms.
`T` is the datatype, `N` is the dimensionality, `P` is the number of Chebyshev terms in u and `Q` is the number of Chebyshev terms in v.
The surface is centered at the origin and treated as being the cap of an infinite rectangular prism, thus creating a true half-space.
**Note that the surface is vertically offset so that the center (i.e., `(u,v) == (0,0)`) lies at 0 on the z-axis.**
```julia
ChebyshevSurface(halfsizeu, halfsizev, chebycoeff; radius = Inf, conic = 0)
```
`chebycoeff` is a vector containing tuples of the form `(i, j, v)` where `v` is the value of the coefficient ``c_{ij}``.
The sag is defined by the equation
```math
z(u,v) = \\frac{c(u^2 + v^2)^2}{1 + \\sqrt{1 - (1+k)c^2(u^2 + v^2)}} + \\sum_{i}^{P}\\sum_{j}^{Q}c_{ij}T_i(u)T_j(v)
```
where ``c = \\frac{1}{\\texttt{radius}}``, ``k = \\texttt{conic}`` and ``T_n`` is the nᵗʰ Chebyshev polynomial of the first kind.
"""
struct ChebyshevSurface{T,N,P} <: ParametricSurface{T,N}
halfsizeu::T
halfsizev::T
curvature::T
conic::T
boundingprism::BoundingBox{T}
chebycoeff::SVector{P,Tuple{Int,Int,T}}
offset::T
function ChebyshevSurface(halfsizeu::T, halfsizev::T, chebycoeff::Union{Nothing,Vector{Tuple{Int,Int,T}}}; radius::T = typemax(T), conic::T = zero(T)) where {T<:Real}
@assert !isnan(halfsizeu) && !isnan(halfsizev) && !isnan(radius) && !isnan(conic)
@assert halfsizeu > zero(T) && halfsizev > zero(T)
@assert one(T) - (1 / radius)^2 * (conic + one(T)) * (halfsizeu^2 + halfsizev^2) > 0 "Invalid surface (conic/radius combination: $radius, $conic)"
offset = zero(T)
if chebycoeff === nothing
P = 0
else
for (i, j, c) in chebycoeff
@assert i >= 0 && j >= 0 "i and j must be non-negative"
if i % 2 == 0 && j % 2 == 0
offset += c * (-1)^(i ÷ 2) * (-1)^(j ÷ 2)
end
end
chebycoeff = filter(k -> abs(k[3]) > zero(T), chebycoeff)
P = length(chebycoeff)
end
bounding_prism = BoundingBox(-halfsizeu, halfsizeu, -halfsizev, halfsizev, typemin(T), typemax(T))
return new{T,3,P}(halfsizeu, halfsizev, 1 / radius, conic, bounding_prism, SVector{P,Tuple{Int,Int,T}}(P === 0 ? [] : chebycoeff), offset)
end
end
export ChebyshevSurface
uvrange(::Type{ChebyshevSurface{T,N,P}}) where {T<:Real,N,P} = ((-one(T), one(T)), (-one(T), one(T)))
boundingobj(z::ChebyshevSurface{T}) where {T<:Real} = z.boundingprism
halfsizeu(z::ChebyshevSurface{T}) where {T<:Real} = z.halfsizeu
halfsizev(z::ChebyshevSurface{T}) where {T<:Real} = z.halfsizev
function point(s::ChebyshevSurface{T,3,P}, u::T, v::T)::SVector{3,T} where {T<:Real,P}
x = u * s.halfsizeu
y = v * s.halfsizev
r2 = (x^2 + y^2)
q = (one(T) + s.conic) * s.curvature^2 * r2
if q > one(T)
return SVector{3,T}(NaN, NaN, NaN)
end
z = s.curvature * r2 / (one(T) + sqrt(one(T) - q))
@inbounds @simd for ci in 1:P
i, j, c = s.chebycoeff[ci]
z += c * Chebyshev.T(i, u) * Chebyshev.T(j, v)
end
return SVector{3,T}(x, y, z - s.offset)
end
function partials(s::ChebyshevSurface{T,3,P}, u::T, v::T)::Tuple{SVector{3,T},SVector{3,T}} where {T<:Real,P}
x = u * s.halfsizeu
y = v * s.halfsizev
r2 = x^2 + y^2
t = one(T) - s.curvature^2 * (1 + s.conic) * r2
if t < zero(T)
return SVector{3,T}(NaN, NaN, NaN), SVector{3,T}(NaN, NaN, NaN)
end
q = s.curvature * sqrt(t) / t
dhdu = x * q * s.halfsizeu
dhdv = y * q * s.halfsizev
@inbounds @simd for k in s.chebycoeff
i, j, c = k
dhdu += c * Chebyshev.dTdq(i, u) * Chebyshev.T(j, v)
dhdv += c * Chebyshev.T(i, u) * Chebyshev.dTdq(j, v)
end
return SVector{3,T}(s.halfsizeu, 0.0, dhdu), SVector{3,T}(0.0, s.halfsizev, dhdv)
end
function normal(s::ChebyshevSurface{T,3,P}, u::T, v::T)::SVector{3,T} where {T<:Real,P}
du, dv = partials(s, u, v)
return normalize(cross(du, dv))
end
function uv(s::ChebyshevSurface{T,3,P}, p::SVector{3,T}) where {T<:Real,P}
return SVector{2,T}(p[1] / s.halfsizeu, p[2] / s.halfsizev)
end
function onsurface(surf::ChebyshevSurface{T,3,P}, p::SVector{3,T}) where {T<:Real,P}
u, v = uv(surf, p)
if abs(u) > one(T) || abs(v) > one(T)
return false
else
surfpoint = point(surf, u, v)
return samepoint(p[3], surfpoint[3])
end
end
function inside(surf::ChebyshevSurface{T,3,P}, p::SVector{3,T}) where {T<:Real,P}
u, v = uv(surf, p)
if abs(u) > one(T) || abs(v) > one(T)
return false
else
surfpoint = point(surf, u, v)
return p[3] < surfpoint[3]
end
end
#########################################################################################################
# Assumes the ray has been transformed into the canonical coordinate frame which has the vertical axis passing through (0,0,0) and aligned with the z axis.
function surfaceintersection(surf::AcceleratedParametricSurface{T,3,ChebyshevSurface{T,3,P}}, r::AbstractRay{T,3}) where {T<:Real,P}
bboxint = surfaceintersection(surf.surface.boundingprism, r)
if bboxint isa EmptyInterval{T}
return EmptyInterval(T)
else
if doesintersect(surf.triangles_bbox, r) || inside(surf.triangles_bbox, origin(r))
surfint = triangulatedintersection(surf, r)
if !(surfint isa EmptyInterval{T})
return intervalintersection(bboxint, surfint)
end
end
# hasn't hit the surface
if lower(bboxint) isa RayOrigin{T} && upper(bboxint) isa Infinity{T}
if inside(surf.surface, origin(r))
return Interval(RayOrigin(T), Infinity(T))
else
return EmptyInterval(T)
end
# otherwise check that the intersection is underneath the surface
else
p = point(closestintersection(bboxint, false))
ρ, ϕ = uv(surf, p)
surfpoint = point(surf.surface, ρ, ϕ)
if p[3] < surfpoint[3]
return bboxint # TODO!! UV (and interface) issues?
else
return EmptyInterval(T)
end
end
end
end
function BoundingBox(surf::ChebyshevSurface{T,3,P}) where {T<:Real,P}
xmin = -surf.halfsizeu
xmax = surf.halfsizeu
ymin = -surf.halfsizev
ymax = surf.halfsizev
# polynomials range between -1 and 1 so we have to sum the absolute value of every coefficient to get the theoretical max
zmax = P > 0 ? sum(abs(c) for (_, _, c) in surf.chebycoeff) : zero(T)
zmin = -zmax
q = one(T) - (one(T) + surf.conic) * surf.curvature^2 * (surf.halfsizeu^2 + surf.halfsizev^2)
if q < zero(T)
throw(ErrorException("The surface is invalid, no bounding box can be constructed"))
end
hmax = surf.curvature * (surf.halfsizeu^2 + surf.halfsizev^2) / (one(T) + sqrt(q))
if hmax > zero(T)
zmax += hmax
else
zmin += hmax
end
return BoundingBox(xmin, xmax, ymin, ymax, zmin, zmax)
end
| [
27,
34345,
29,
10677,
14,
10082,
15748,
14,
23828,
20288,
14,
7376,
48209,
258,
85,
13,
20362,
198,
2,
17168,
13789,
198,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
... | 2.198321 | 5,002 |
<filename>distributed/dfract.jl
using Distributed
using Images
using FileIO
using Mmap
# calculate a distributed fractal
width = 40000
height = 20000
rmin = -2.5
rmax = 1.5
imin = -1.25
imax = 1.25
iter = 500
epsilon = 0.25
mapfile = "zset-$(width)-$(height)"
s = open(mapfile)
n = read(s, Int)
println("size of zset is $n")
if n != (width * height)
@warn("zset doesn't match dims")
exit()
end
zset = Mmap.mmap(s, Array{ComplexF64,1}, n)
close(s)
num_procs = 0
hosts = ["<EMAIL>"]
for i in 1:num_procs
println("started proc $(i)")
sshflags=`-i /Users/robert/src/julia/juliaSnippets/distributed/ms-keys`
addprocs(hosts, tunnel=false, sshflags=sshflags, max_parallel=10, dir="/Users/robert/tmp", topology=:all_to_all)
end
# define mandel everywhere
include("mandel.jl")
println("starting pmap on $(workers())")
#
#generate image from results
# time measurements
print("starting...\n")
tStart=time()
#zset = Array{ComplexF64,1}(undef,0)
m_set = mandelbrot_set(zset, rmin, rmax, imin, imax, width, height, iter, epsilon)
tStop = time()
# write the image-file
img = colorview(RGB, m_set)
save("mandel.jpg", img)
print("done. took ", tStop-tStart, " seconds\n");
| [
27,
34345,
29,
17080,
6169,
14,
7568,
974,
13,
20362,
198,
3500,
4307,
6169,
198,
3500,
5382,
198,
3500,
9220,
9399,
198,
3500,
337,
8899,
628,
198,
2,
15284,
257,
9387,
12999,
282,
198,
10394,
796,
604,
2388,
198,
17015,
796,
939,
... | 2.4625 | 480 |
<gh_stars>100-1000
const POWER_SYSTEM_DESCRIPTOR_FILE =
joinpath(dirname(pathof(PowerSystems)), "descriptors", "power_system_inputs.json")
const INPUT_CATEGORY_NAMES = [
("branch", InputCategory.BRANCH),
("bus", InputCategory.BUS),
("dc_branch", InputCategory.DC_BRANCH),
("gen", InputCategory.GENERATOR),
("load", InputCategory.LOAD),
("reserves", InputCategory.RESERVE),
("storage", InputCategory.STORAGE),
]
struct PowerSystemTableData
base_power::Float64
category_to_df::Dict{InputCategory, DataFrames.DataFrame}
timeseries_metadata_file::Union{String, Nothing}
directory::String
user_descriptors::Dict
descriptors::Dict
generator_mapping::Dict{NamedTuple, DataType}
end
function PowerSystemTableData(
data::Dict{String, Any},
directory::String,
user_descriptors::Union{String, Dict},
descriptors::Union{String, Dict},
generator_mapping::Union{String, Dict};
timeseries_metadata_file = joinpath(directory, "timeseries_pointers"),
)
category_to_df = Dict{InputCategory, DataFrames.DataFrame}()
if !haskey(data, "bus")
throw(DataFormatError("key 'bus' not found in input data"))
end
if !haskey(data, "base_power")
@warn "key 'base_power' not found in input data; using default=$(DEFAULT_BASE_MVA)"
end
base_power = get(data, "base_power", DEFAULT_BASE_MVA)
for (name, category) in INPUT_CATEGORY_NAMES
val = get(data, name, nothing)
if isnothing(val)
@debug "key '$name' not found in input data, set to nothing" _group =
IS.LOG_GROUP_PARSING
else
category_to_df[category] = val
end
end
if !isfile(timeseries_metadata_file)
if isfile(string(timeseries_metadata_file, ".json"))
timeseries_metadata_file = string(timeseries_metadata_file, ".json")
elseif isfile(string(timeseries_metadata_file, ".csv"))
timeseries_metadata_file = string(timeseries_metadata_file, ".csv")
else
timeseries_metadata_file = nothing
end
end
if user_descriptors isa AbstractString
user_descriptors = _read_config_file(user_descriptors)
end
if descriptors isa AbstractString
descriptors = _read_config_file(descriptors)
end
if generator_mapping isa AbstractString
generator_mapping = get_generator_mapping(generator_mapping)
end
return PowerSystemTableData(
base_power,
category_to_df,
timeseries_metadata_file,
directory,
user_descriptors,
descriptors,
generator_mapping,
)
end
"""
Reads in all the data stored in csv files
The general format for data is
folder:
gen.csv
branch.csv
bus.csv
..
load.csv
# Arguments
- `directory::AbstractString`: directory containing CSV files
- `base_power::Float64`: base power for System
- `user_descriptor_file::AbstractString`: customized input descriptor file
- `descriptor_file=POWER_SYSTEM_DESCRIPTOR_FILE`: PowerSystems descriptor file
- `generator_mapping_file=GENERATOR_MAPPING_FILE`: generator mapping configuration file
"""
function PowerSystemTableData(
directory::AbstractString,
base_power::Float64,
user_descriptor_file::AbstractString;
descriptor_file = POWER_SYSTEM_DESCRIPTOR_FILE,
generator_mapping_file = GENERATOR_MAPPING_FILE,
timeseries_metadata_file = joinpath(directory, "timeseries_pointers"),
)
files = readdir(directory)
REGEX_DEVICE_TYPE = r"(.*?)\.csv"
REGEX_IS_FOLDER = r"^[A-Za-z]+$"
data = Dict{String, Any}()
if length(files) == 0
error("No files in the folder")
else
data["base_power"] = base_power
end
encountered_files = 0
for d_file in files
try
if match(REGEX_IS_FOLDER, d_file) !== nothing
@info "Parsing csv files in $d_file ..."
d_file_data = Dict{String, Any}()
for file in readdir(joinpath(directory, d_file))
if match(REGEX_DEVICE_TYPE, file) !== nothing
@info "Parsing csv data in $file ..."
encountered_files += 1
fpath = joinpath(directory, d_file, file)
raw_data = DataFrames.DataFrame(CSV.File(fpath))
d_file_data[split(file, r"[.]")[1]] = raw_data
end
end
if length(d_file_data) > 0
data[d_file] = d_file_data
@info "Successfully parsed $d_file"
end
elseif match(REGEX_DEVICE_TYPE, d_file) !== nothing
@info "Parsing csv data in $d_file ..."
encountered_files += 1
fpath = joinpath(directory, d_file)
raw_data = DataFrames.DataFrame(CSV.File(fpath))
data[split(d_file, r"[.]")[1]] = raw_data
@info "Successfully parsed $d_file"
end
catch ex
@error "Error occurred while parsing $d_file" exception = ex
throw(ex)
end
end
if encountered_files == 0
error("No csv files or folders in $directory")
end
return PowerSystemTableData(
data,
directory,
user_descriptor_file,
descriptor_file,
generator_mapping_file,
timeseries_metadata_file = timeseries_metadata_file,
)
end
"""
Return the custom name stored in the user descriptor file.
Throws DataFormatError if a required value is not found in the file.
"""
function get_user_field(
data::PowerSystemTableData,
category::InputCategory,
field::AbstractString,
)
if !haskey(data.user_descriptors, category)
throw(DataFormatError("Invalid category=$category"))
end
try
for item in data.user_descriptors[category]
if item["name"] == field
return item["custom_name"]
end
end
catch
(err)
if err == KeyError
msg = "Failed to find category=$category field=$field in input descriptors $err"
throw(DataFormatError(msg))
else
throw(err)
end
end
end
"""Return a vector of user-defined fields for the category."""
function get_user_fields(data::PowerSystemTableData, category::InputCategory)
if !haskey(data.user_descriptors, category)
throw(DataFormatError("Invalid category=$category"))
end
return [x["name"] for x in data.user_descriptors[category]]
end
"""Return the dataframe for the category."""
function get_dataframe(data::PowerSystemTableData, category::InputCategory)
df = get(data.category_to_df, category, DataFrames.DataFrame())
isempty(df) && @warn("Missing $category data.")
return df
end
"""
Return a NamedTuple of parameters from the descriptor file for each row of a dataframe,
making type conversions as necessary.
Refer to the PowerSystems descriptor file for field names that will be created.
"""
function iterate_rows(data::PowerSystemTableData, category; na_to_nothing = true)
df = get_dataframe(data, category)
field_infos = _get_field_infos(data, category, names(df))
Channel() do channel
for row in eachrow(df)
obj = _read_data_row(data, row, field_infos; na_to_nothing = na_to_nothing)
put!(channel, obj)
end
end
end
"""
Construct a System from PowerSystemTableData data.
# Arguments
- `time_series_resolution::Union{DateTime, Nothing}=nothing`: only store time_series that match
this resolution.
- `time_series_in_memory::Bool=false`: Store time series data in memory instead of HDF5 file
- `time_series_directory=nothing`: Store time series data in directory instead of tmpfs
- `runchecks::Bool=true`: Validate struct fields.
Throws DataFormatError if time_series with multiple resolutions are detected.
- A time_series has a different resolution than others.
- A time_series has a different horizon than others.
"""
function System(
data::PowerSystemTableData;
time_series_resolution = nothing,
time_series_in_memory = false,
time_series_directory = nothing,
runchecks = true,
kwargs...,
)
sys = System(
data.base_power;
time_series_in_memory = time_series_in_memory,
time_series_directory = time_series_directory,
runchecks = runchecks,
kwargs...,
)
set_units_base_system!(sys, IS.UnitSystem.DEVICE_BASE)
loadzone_csv_parser!(sys, data)
bus_csv_parser!(sys, data)
# Services and time_series must be last.
parsers = (
(get_dataframe(data, InputCategory.BRANCH), branch_csv_parser!),
(get_dataframe(data, InputCategory.DC_BRANCH), dc_branch_csv_parser!),
(get_dataframe(data, InputCategory.GENERATOR), gen_csv_parser!),
(get_dataframe(data, InputCategory.LOAD), load_csv_parser!),
(get_dataframe(data, InputCategory.RESERVE), services_csv_parser!),
)
for (val, parser) in parsers
if !isnothing(val)
parser(sys, data)
end
end
timeseries_metadata_file =
get(kwargs, :timeseries_metadata_file, getfield(data, :timeseries_metadata_file))
if !isnothing(timeseries_metadata_file)
add_time_series!(sys, timeseries_metadata_file; resolution = time_series_resolution)
end
check(sys)
return sys
end
"""
Add buses and areas to the System from the raw data.
"""
function bus_csv_parser!(sys::System, data::PowerSystemTableData)
for bus in iterate_rows(data, InputCategory.BUS)
name = bus.name
bus_type =
isnothing(bus.bus_type) ? nothing : get_enum_value(BusTypes, bus.bus_type)
voltage_limits = make_minmaxlimits(bus.voltage_limits_min, bus.voltage_limits_max)
area_name = string(get(bus, :area, "area"))
area = get_component(Area, sys, area_name)
if isnothing(area)
area = Area(area_name)
add_component!(sys, area)
end
zone = get(bus, :zone, nothing)
ps_bus = Bus(;
number = bus.bus_id,
name = name,
bustype = bus_type,
angle = bus.angle,
magnitude = bus.voltage,
voltage_limits = voltage_limits,
base_voltage = bus.base_voltage,
area = area,
load_zone = get_component(LoadZone, sys, string(zone)),
)
add_component!(sys, ps_bus)
# add load if the following info is nonzero
if (bus.max_active_power != 0.0) || (bus.max_reactive_power != 0.0)
load = PowerLoad(
name = name,
available = true,
bus = ps_bus,
model = LoadModels.ConstantPower,
active_power = bus.active_power,
reactive_power = bus.reactive_power,
base_power = bus.base_power,
max_active_power = bus.max_active_power,
max_reactive_power = bus.max_reactive_power,
)
add_component!(sys, load)
end
end
end
"""
Add branches to the System from the raw data.
"""
function branch_csv_parser!(sys::System, data::PowerSystemTableData)
available = true
for branch in iterate_rows(data, InputCategory.BRANCH)
bus_from = get_bus(sys, branch.connection_points_from)
bus_to = get_bus(sys, branch.connection_points_to)
name = get(branch, :name, get_name(bus_from) * "_" * get_name(bus_to))
connection_points = Arc(bus_from, bus_to)
pf = branch.active_power_flow
qf = branch.reactive_power_flow
#TODO: noop math...Phase-Shifting Transformer angle
alpha = (branch.primary_shunt / 2) - (branch.primary_shunt / 2)
branch_type =
get_branch_type(branch.tap, alpha, get(branch, :is_transformer, nothing))
if branch_type == Line
b = branch.primary_shunt / 2
value = Line(
name = name,
available = available,
active_power_flow = pf,
reactive_power_flow = qf,
arc = connection_points,
r = branch.r,
x = branch.x,
b = (from = b, to = b),
rate = branch.rate,
angle_limits = (
min = branch.min_angle_limits,
max = branch.max_angle_limits,
),
)
elseif branch_type == Transformer2W
value = Transformer2W(
name = name,
available = available,
active_power_flow = pf,
reactive_power_flow = qf,
arc = connection_points,
r = branch.r,
x = branch.x,
primary_shunt = branch.primary_shunt,
rate = branch.rate,
)
elseif branch_type == TapTransformer
value = TapTransformer(
name = name,
available = available,
active_power_flow = pf,
reactive_power_flow = qf,
arc = connection_points,
r = branch.r,
x = branch.x,
primary_shunt = branch.primary_shunt,
tap = branch.tap,
rate = branch.rate,
)
elseif branch_type == PhaseShiftingTransformer
# TODO create PhaseShiftingTransformer
error("Unsupported branch type $branch_type")
else
error("Unsupported branch type $branch_type")
end
add_component!(sys, value)
end
end
"""
Add DC branches to the System from raw data.
"""
function dc_branch_csv_parser!(sys::System, data::PowerSystemTableData)
function make_dc_limits(dc_branch, min, max)
min_lim = dc_branch[min]
if isnothing(dc_branch[min]) && isnothing(dc_branch[max])
throw(DataFormatError("valid limits required for $min , $max"))
elseif isnothing(dc_branch[min])
min_lim = dc_branch[max] * -1.0
end
return (min = min_lim, max = dc_branch[max])
end
for dc_branch in iterate_rows(data, InputCategory.DC_BRANCH)
available = true
bus_from = get_bus(sys, dc_branch.connection_points_from)
bus_to = get_bus(sys, dc_branch.connection_points_to)
connection_points = Arc(bus_from, bus_to)
if dc_branch.control_mode == "Power"
mw_load = dc_branch.mw_load
activepowerlimits_from = make_dc_limits(
dc_branch,
:min_active_power_limit_from,
:max_active_power_limit_from,
)
activepowerlimits_to = make_dc_limits(
dc_branch,
:min_active_power_limit_to,
:max_active_power_limit_to,
)
reactivepowerlimits_from = make_dc_limits(
dc_branch,
:min_reactive_power_limit_from,
:max_reactive_power_limit_from,
)
reactivepowerlimits_to = make_dc_limits(
dc_branch,
:min_reactive_power_limit_to,
:max_reactive_power_limit_to,
)
loss = (l0 = 0.0, l1 = dc_branch.loss) #TODO: Can we infer this from the other data?,
value = HVDCLine(
name = dc_branch.name,
available = available,
active_power_flow = dc_branch.active_power_flow,
arc = connection_points,
active_power_limits_from = activepowerlimits_from,
active_power_limits_to = activepowerlimits_to,
reactive_power_limits_from = reactivepowerlimits_from,
reactive_power_limits_to = reactivepowerlimits_to,
loss = loss,
)
else
rectifier_taplimits = (
min = dc_branch.rectifier_tap_limits_min,
max = dc_branch.rectifier_tap_limits_max,
)
rectifier_xrc = dc_branch.rectifier_xrc #TODO: What is this?,
rectifier_firingangle = dc_branch.rectifier_firingangle
inverter_taplimits = (
min = dc_branch.inverter_tap_limits_min,
max = dc_branch.inverter_tap_limits_max,
)
inverter_xrc = dc_branch.inverter_xrc #TODO: What is this?
inverter_firingangle = (
min = dc_branch.inverter_firing_angle_min,
max = dc_branch.inverter_firing_angle_max,
)
value = VSCDCLine(
name = dc_branch.name,
available = true,
active_power_flow = pf,
arc = connection_points,
rectifier_taplimits = rectifier_taplimits,
rectifier_xrc = rectifier_xrc,
rectifier_firingangle = rectifier_firingangle,
inverter_taplimits = inverter_taplimits,
inverter_xrc = inverter_xrc,
inverter_firingangle = inverter_firingangle,
)
end
add_component!(sys, value)
end
end
"""
Add generators to the System from the raw data.
"""
struct _HeatRateColumns
columns::Base.Iterators.Zip{Tuple{Array{Symbol, 1}, Array{Symbol, 1}}}
end
"""
Paired (cost_point, output_point) column names used to build a generator's
piecewise cost curve from direct cost points.
"""
struct _CostPointColumns
    columns::Base.Iterators.Zip{Tuple{Array{Symbol, 1}, Array{Symbol, 1}}}
end
"""
Add generators to the System from the raw data.

Classifies user-defined generator columns into output points and either
heat-rate or cost-point columns (defining both is an error), then constructs
and adds one generator per GENERATOR row.
"""
function gen_csv_parser!(sys::System, data::PowerSystemTableData)
    output_point_fields = Vector{Symbol}()
    heat_rate_fields = Vector{Symbol}()
    cost_point_fields = Vector{Symbol}()
    fields = get_user_fields(data, InputCategory.GENERATOR)
    for field in fields
        if occursin("output_point", field)
            push!(output_point_fields, Symbol(field))
        elseif occursin("heat_rate_", field)
            push!(heat_rate_fields, Symbol(field))
        elseif occursin("cost_point_", field)
            push!(cost_point_fields, Symbol(field))
        end
    end

    # Robustness fix: validate with explicit errors instead of @assert (which
    # can be compiled out), and fail clearly when neither heat-rate nor
    # cost-point columns exist (previously `cost_colnames` was left undefined,
    # producing an UndefVarError below).
    isempty(output_point_fields) &&
        throw(DataFormatError("no output_point fields defined in generator data"))

    if !isempty(heat_rate_fields) && !isempty(cost_point_fields)
        throw(IS.ConflictingInputsError("Heat rate and cost points are both defined"))
    elseif !isempty(heat_rate_fields)
        cost_colnames = _HeatRateColumns(zip(heat_rate_fields, output_point_fields))
    elseif !isempty(cost_point_fields)
        cost_colnames = _CostPointColumns(zip(cost_point_fields, output_point_fields))
    else
        throw(
            DataFormatError(
                "no heat_rate_ or cost_point_ fields defined in generator data",
            ),
        )
    end

    for gen in iterate_rows(data, InputCategory.GENERATOR)
        @debug "making generator:" _group = IS.LOG_GROUP_PARSING gen.name
        bus = get_bus(sys, gen.bus_id)
        if isnothing(bus)
            throw(DataFormatError("could not find $(gen.bus_id)"))
        end

        generator = make_generator(data, gen, cost_colnames, bus)
        @debug "adding gen:" _group = IS.LOG_GROUP_PARSING generator
        if !isnothing(generator)
            add_component!(sys, generator)
        end
    end
end
"""
load_csv_parser!(sys::System, data::PowerSystemTableData)
Add loads to the System from the raw load data.
"""
function load_csv_parser!(sys::System, data::PowerSystemTableData)
for rawload in iterate_rows(data, InputCategory.LOAD)
bus = get_bus(sys, rawload.bus_id)
if isnothing(bus)
throw(
DataFormatError(
"could not find bus_number=$(rawload.bus_id) for load=$(rawload.name)",
),
)
end
load = PowerLoad(
name = rawload.name,
available = rawload.available,
bus = bus,
model = LoadModels.ConstantPower,
active_power = rawload.active_power,
reactive_power = rawload.reactive_power,
max_active_power = rawload.max_active_power,
max_reactive_power = rawload.max_reactive_power,
base_power = rawload.base_power,
)
add_component!(sys, load)
end
end
"""
loadzone_csv_parser!(sys::System, data::PowerSystemTableData)
Add branches to the System from the raw data.
"""
function loadzone_csv_parser!(sys::System, data::PowerSystemTableData)
buses = get_dataframe(data, InputCategory.BUS)
zone_column = get_user_field(data, InputCategory.BUS, "zone")
if !in(zone_column, names(buses))
@warn "Missing Data : no 'zone' information for buses, cannot create loads based on zones"
return
end
zones = unique(buses[!, zone_column])
for zone in zones
bus_numbers = Set{Int}()
active_powers = Vector{Float64}()
reactive_powers = Vector{Float64}()
for bus in iterate_rows(data, InputCategory.BUS)
if bus.zone == zone
bus_number = bus.bus_id
push!(bus_numbers, bus_number)
active_power = bus.max_active_power
push!(active_powers, active_power)
reactive_power = bus.max_reactive_power
push!(reactive_powers, reactive_power)
end
end
name = string(zone)
load_zone = LoadZone(name, sum(active_powers), sum(reactive_powers))
add_component!(sys, load_zone)
end
end
"""
Add services to the System from the raw data.
"""
function services_csv_parser!(sys::System, data::PowerSystemTableData)
bus_id_column = get_user_field(data, InputCategory.BUS, "bus_id")
bus_area_column = get_user_field(data, InputCategory.BUS, "area")
# Shortcut for data that looks like "(val1,val2,val3)"
make_array(x) = isnothing(x) ? x : split(strip(x, ['(', ')']), ",")
function _add_device!(contributing_devices, device_categories, name)
component = []
for dev_category in device_categories
component_type = _get_component_type_from_category(dev_category)
components = get_components_by_name(component_type, sys, name)
if length(components) == 0
# There multiple categories, so we might not find a match in some.
continue
elseif length(components) == 1
push!(component, components[1])
else
msg = "Found duplicate names type=$component_type name=$name"
throw(DataFormatError(msg))
end
end
if length(component) > 1
msg = "Found duplicate components with name=$name"
throw(DataFormatError(msg))
elseif length(component) == 1
push!(contributing_devices, component[1])
end
end
for reserve in iterate_rows(data, InputCategory.RESERVE)
device_categories = make_array(reserve.eligible_device_categories)
device_subcategories =
make_array(get(reserve, :eligible_device_subcategories, nothing))
devices = make_array(get(reserve, :contributing_devices, nothing))
regions = make_array(reserve.eligible_regions) #TODO: rename to "area"
requirement = get(reserve, :requirement, nothing)
contributing_devices = Vector{Device}()
if isnothing(device_subcategories)
@info("Adding contributing components for $(reserve.name) by component name")
for device in devices
_add_device!(contributing_devices, device_categories, device)
end
else
@info("Adding contributing generators for $(reserve.name) by category")
for gen in iterate_rows(data, InputCategory.GENERATOR)
buses = get_dataframe(data, InputCategory.BUS)
bus_ids = buses[!, bus_id_column]
gen_type =
get_generator_type(gen.fuel, gen.unit_type, data.generator_mapping)
sys_gen = get_component(
get_generator_type(gen.fuel, gen.unit_type, data.generator_mapping),
sys,
gen.name,
)
area = string(
buses[bus_ids .== get_number(get_bus(sys_gen)), bus_area_column][1],
)
if gen.category in device_subcategories && area in regions
_add_device!(contributing_devices, device_categories, gen.name)
end
end
unused_categories = setdiff(
device_subcategories,
get_dataframe(data, InputCategory.GENERATOR)[
!,
get_user_field(data, InputCategory.GENERATOR, "category"),
],
)
for cat in unused_categories
@warn(
"Device category: $cat not found in generators data; adding contributing devices by category only supported for generator data"
)
end
end
if length(contributing_devices) == 0
throw(
DataFormatError(
"did not find contributing devices for service $(reserve.name)",
),
)
end
direction = get_reserve_direction(reserve.direction)
if isnothing(requirement)
service = StaticReserve{direction}(reserve.name, true, reserve.timeframe, 0.0)
else
service = VariableReserve{direction}(
reserve.name,
true,
reserve.timeframe,
requirement,
)
end
add_service!(sys, service, contributing_devices)
end
end
"""
    get_reserve_direction(direction::AbstractString)

Map the direction string "Up"/"Down" to `ReserveUp`/`ReserveDown`; any other
value raises a `DataFormatError`.
"""
function get_reserve_direction(direction::AbstractString)
    direction == "Up" && return ReserveUp
    direction == "Down" && return ReserveDown
    throw(DataFormatError("invalid reserve direction $direction"))
end
"""Creates a generator of any type."""
function make_generator(data::PowerSystemTableData, gen, cost_colnames, bus)
generator = nothing
gen_type =
get_generator_type(gen.fuel, get(gen, :unit_type, nothing), data.generator_mapping)
if isnothing(gen_type)
@error "Cannot recognize generator type" gen.name
elseif gen_type == ThermalStandard
generator = make_thermal_generator(data, gen, cost_colnames, bus)
elseif gen_type == ThermalMultiStart
generator = make_thermal_generator_multistart(data, gen, cost_colnames, bus)
elseif gen_type <: HydroGen
generator = make_hydro_generator(gen_type, data, gen, cost_colnames, bus)
elseif gen_type <: RenewableGen
generator = make_renewable_generator(gen_type, data, gen, cost_colnames, bus)
elseif gen_type == GenericBattery
storage = get_storage_by_generator(data, gen.name).head
generator = make_storage(data, gen, storage, bus)
else
@error "Skipping unsupported generator" gen.name gen_type
end
return generator
end
"""
    calculate_variable_cost(data, gen, cost_colnames::_HeatRateColumns, base_power)

Build a variable-cost curve from heat-rate columns.

Returns `(var_cost, fixed, fuel_cost)`: `var_cost` is a vector of cumulative
`(cost, power)` points — or a single constant \$/MW value when only one point
exists — and `fixed` is the no-load cost extrapolated from the first segment.
"""
function calculate_variable_cost(
    data::PowerSystemTableData,
    gen,
    cost_colnames::_HeatRateColumns,
    base_power,
)
    # Fuel price columns are given per 1000 units.
    fuel_cost = gen.fuel_price / 1000.0

    vom = isnothing(gen.variable_cost) ? 0.0 : gen.variable_cost

    if fuel_cost > 0.0
        var_cost =
            [(getfield(gen, hr), getfield(gen, mw)) for (hr, mw) in cost_colnames.columns]
        # Drop pairs with missing entries and coerce both members to Float64.
        var_cost = unique([
            (tryparse(Float64, string(c[1])), tryparse(Float64, string(c[2]))) for
            c in var_cost if !in(nothing, c)
        ])
    else
        var_cost = [(0.0, 0.0)]
    end

    if length(var_cost) > 1
        # Turn heat rates into incremental segment costs between consecutive
        # output points, scaled to device base power.
        var_cost[2:end] = [
            (
                (
                    var_cost[i][1] * fuel_cost * (var_cost[i][2] - var_cost[i - 1][2]) +
                    var_cost[i][2] * vom
                ),
                var_cost[i][2],
            ) .* gen.active_power_limits_max .* base_power for i in 2:length(var_cost)
        ]
        var_cost[1] =
            ((var_cost[1][1] * fuel_cost + vom) * var_cost[1][2], var_cost[1][2]) .*
            gen.active_power_limits_max .* base_power

        # Extrapolate the zero-output (fixed/no-load) cost from the first
        # segment's slope, clamped at zero.
        fixed = max(
            0.0,
            var_cost[1][1] -
            (var_cost[2][1] / (var_cost[2][2] - var_cost[1][2]) * var_cost[1][2]),
        )
        var_cost[1] = (var_cost[1][1] - fixed, var_cost[1][2])
        # Accumulate segment costs into a cumulative curve.
        for i in 2:length(var_cost)
            var_cost[i] = (var_cost[i - 1][1] + var_cost[i][1], var_cost[i][2])
        end
    elseif length(var_cost) == 1
        # if there is only one point, use it to determine the constant $/MW cost
        var_cost = var_cost[1][1] * fuel_cost + vom
        fixed = 0.0
    end
    # NOTE(review): if every point is filtered out above, `var_cost` is empty
    # and `fixed` is never assigned, so this return raises UndefVarError —
    # confirm whether that input can occur.
    return var_cost, fixed, fuel_cost
end
"""
    calculate_variable_cost(data, gen, cost_colnames::_CostPointColumns, base_power)

Build a variable-cost curve from direct cost-point columns.

Returns `(var_cost, fixed, fuel_cost)` with `fuel_cost` fixed at 0.0; see the
`_HeatRateColumns` method for the meaning of `var_cost` and `fixed`.
"""
function calculate_variable_cost(
    data::PowerSystemTableData,
    gen,
    cost_colnames::_CostPointColumns,
    base_power,
)
    vom = isnothing(gen.variable_cost) ? 0.0 : gen.variable_cost
    var_cost = [(getfield(gen, c), getfield(gen, mw)) for (c, mw) in cost_colnames.columns]
    # Drop pairs with missing entries and coerce both members to Float64.
    var_cost = unique([
        (tryparse(Float64, string(c[1])), tryparse(Float64, string(c[2]))) for
        c in var_cost if !in(nothing, c)
    ])
    # Scale each point's cost (plus VOM) by its output and the device base power.
    var_cost = [
        ((var_cost[i][1] + vom) * var_cost[i][2], var_cost[i][2]) .*
        gen.active_power_limits_max .* base_power for i in 1:length(var_cost)
    ]

    if length(var_cost) > 1
        # Extrapolate the zero-output (fixed) cost from the first segment.
        fixed = max(
            0.0,
            var_cost[1][1] -
            (var_cost[2][1] / (var_cost[2][2] - var_cost[1][2]) * var_cost[1][2]),
        )
        var_cost = [(var_cost[i][1] - fixed, var_cost[i][2]) for i in 1:length(var_cost)]
    elseif length(var_cost) == 1
        var_cost = var_cost[1][1] + vom
        fixed = 0.0
    end
    # NOTE(review): an empty `var_cost` leaves `fixed` unassigned here — see
    # the _HeatRateColumns method.
    return var_cost, fixed, 0.0
end
"""
    calculate_uc_cost(data, gen, fuel_cost)

Return `(startup_cost, shutdown_cost)` for unit-commitment cost modeling.

When no explicit startup cost exists, falls back to
`startup_heat_cold_cost * fuel_cost * 1000`, or to `0.0` with a warning.
A missing shutdown cost defaults to `0.0` with a warning. `data` is unused but
kept for call-site compatibility.
"""
function calculate_uc_cost(data, gen, fuel_cost)
    start = gen.startup_cost
    if isnothing(start)
        if hasfield(typeof(gen), :startup_heat_cold_cost)
            # Heat input times fuel price (stored per 1000 units).
            start = gen.startup_heat_cold_cost * fuel_cost * 1000
        else
            start = 0.0
            @warn "No startup_cost defined for $(gen.name), setting to $start" maxlog = 5
        end
    end

    stop = get(gen, :shutdown_cost, nothing)
    if isnothing(stop)
        @warn "No shutdown_cost defined for $(gen.name), setting to 0.0" maxlog = 1
        stop = 0.0
    end

    return start, stop
end
"""
    make_minmaxlimits(min, max)

Return a `(min = ..., max = ...)` NamedTuple, or `nothing` when both bounds
are absent.
"""
function make_minmaxlimits(min::Union{Nothing, Float64}, max::Union{Nothing, Float64})
    # A limits tuple is only meaningful when at least one bound is present.
    both_missing = isnothing(min) && isnothing(max)
    return both_missing ? nothing : (min = min, max = max)
end
"""
    make_ramplimits(gen; ramplimcol = :ramp_limits, rampupcol = :ramp_up, rampdncol = :ramp_down)

Build an `(up, down)` ramp-limit NamedTuple from generator row `gen`.

A symmetric `ramplimcol` value takes precedence over the directional columns;
string-valued directional cells are parsed as `Float64`. Returns `nothing`
when no ramp information is present.
"""
function make_ramplimits(
    gen;
    ramplimcol = :ramp_limits,
    rampupcol = :ramp_up,
    rampdncol = :ramp_down,
)
    # Coerce string-typed CSV cells to Float64; pass other values through.
    asfloat(x) = x isa String ? tryparse(Float64, x) : x

    symmetric = get(gen, ramplimcol, nothing)
    if isnothing(symmetric)
        up = asfloat(get(gen, rampupcol, nothing))
        down = asfloat(get(gen, rampdncol, nothing))
    else
        up = symmetric
        down = symmetric
    end

    return isnothing(up) && isnothing(down) ? nothing : (up = up, down = down)
end
"""
    make_timelimits(gen, up_column::Symbol, down_column::Symbol)

Build an `(up, down)` time-limit NamedTuple from the given columns of `gen`,
parsing string cells as `Float64`. Returns `nothing` when both are absent.
"""
function make_timelimits(gen, up_column::Symbol, down_column::Symbol)
    # CSV cells may arrive as strings; normalize them to Float64.
    parsetime(col) = begin
        raw = get(gen, col, nothing)
        raw isa String ? tryparse(Float64, raw) : raw
    end

    up_time = parsetime(up_column)
    down_time = parsetime(down_column)
    if isnothing(up_time) && isnothing(down_time)
        return nothing
    end
    return (up = up_time, down = down_time)
end
"""
    make_reactive_params(gen; powerfield = :reactive_power, minfield = :reactive_power_limits_min, maxfield = :reactive_power_limits_max)

Return `(reactive_power, reactive_power_limits)` read from row `gen`.

`reactive_power` defaults to `0.0`. The limits are `nothing` when both bounds
are absent; a missing minimum alone defaults to `0.0`.
"""
function make_reactive_params(
    gen;
    powerfield = :reactive_power,
    minfield = :reactive_power_limits_min,
    maxfield = :reactive_power_limits_max,
)
    reactive_power = get(gen, powerfield, 0.0)
    lim_min = get(gen, minfield, nothing)
    lim_max = get(gen, maxfield, nothing)
    if isnothing(lim_min) && isnothing(lim_max)
        limits = nothing
    else
        # A missing minimum defaults to 0.0 rather than dropping the limits.
        limits = (min = something(lim_min, 0.0), max = lim_max)
    end
    return reactive_power, limits
end
"""
Create a `ThermalStandard` generator from a GENERATOR row, building limits,
ramp/time constraints, and a three-part operating cost.
"""
function make_thermal_generator(data::PowerSystemTableData, gen, cost_colnames, bus)
    # NOTE(review): "ThermaStandard" typo in the debug message below.
    @debug "Making ThermaStandard" _group = IS.LOG_GROUP_PARSING gen.name
    active_power_limits =
        (min = gen.active_power_limits_min, max = gen.active_power_limits_max)
    (reactive_power, reactive_power_limits) = make_reactive_params(gen)
    rating = calculate_rating(active_power_limits, reactive_power_limits)
    ramplimits = make_ramplimits(gen)
    timelimits = make_timelimits(gen, :min_up_time, :min_down_time)
    primemover = parse_enum_mapping(PrimeMovers, gen.unit_type)
    fuel = parse_enum_mapping(ThermalFuels, gen.fuel)
    base_power = gen.base_mva
    var_cost, fixed, fuel_cost =
        calculate_variable_cost(data, gen, cost_colnames, base_power)
    startup_cost, shutdown_cost = calculate_uc_cost(data, gen, fuel_cost)
    op_cost = ThreePartCost(var_cost, fixed, startup_cost, shutdown_cost)

    return ThermalStandard(
        name = gen.name,
        available = gen.available,
        status = gen.status_at_start,
        bus = bus,
        active_power = gen.active_power,
        reactive_power = reactive_power,
        rating = rating,
        prime_mover = primemover,
        fuel = fuel,
        active_power_limits = active_power_limits,
        reactive_power_limits = reactive_power_limits,
        ramp_limits = ramplimits,
        time_limits = timelimits,
        operation_cost = op_cost,
        base_power = base_power,
    )
end
"""
Create a `ThermalMultiStart` generator, reusing `make_thermal_generator` for
the shared fields and adding multi-start cost, ramp, and timing data.
"""
function make_thermal_generator_multistart(
    data::PowerSystemTableData,
    gen,
    cost_colnames,
    bus,
)
    thermal_gen = make_thermal_generator(data, gen, cost_colnames, bus)
    @debug "Making ThermalMultiStart" _group = IS.LOG_GROUP_PARSING gen.name
    base_power = get_base_power(thermal_gen)
    var_cost, fixed, fuel_cost =
        calculate_variable_cost(data, gen, cost_colnames, base_power)
    if var_cost isa Float64
        no_load_cost = 0.0
        var_cost = VariableCost(var_cost)
    else
        # Shift the curve so the first point is the origin; its cost becomes
        # the explicit no-load cost.
        no_load_cost = var_cost[1][1]
        var_cost =
            VariableCost([(c - no_load_cost, pp - var_cost[1][2]) for (c, pp) in var_cost])
    end
    lag_hot =
        isnothing(gen.hot_start_time) ? get_time_limits(thermal_gen).down :
        gen.hot_start_time
    lag_warm = isnothing(gen.warm_start_time) ? 0.0 : gen.warm_start_time
    lag_cold = isnothing(gen.cold_start_time) ? 0.0 : gen.cold_start_time
    startup_timelimits = (hot = lag_hot, warm = lag_warm, cold = lag_cold)
    start_types = sum(values(startup_timelimits) .> 0.0)
    startup_ramp = isnothing(gen.startup_ramp) ? 0.0 : gen.startup_ramp
    shutdown_ramp = isnothing(gen.shutdown_ramp) ? 0.0 : gen.shutdown_ramp
    power_trajectory = (startup = startup_ramp, shutdown = shutdown_ramp)
    hot_start_cost = isnothing(gen.hot_start_cost) ? gen.startup_cost : gen.hot_start_cost
    if isnothing(hot_start_cost)
        if hasfield(typeof(gen), :startup_heat_cold_cost)
            hot_start_cost = gen.startup_heat_cold_cost * fuel_cost * 1000
        else
            hot_start_cost = 0.0
            # BUG FIX: previously interpolated the undefined variable
            # `startup_cost`, raising UndefVarError whenever this warning fired.
            @warn "No hot_start_cost or startup_cost defined for $(gen.name), setting to $hot_start_cost" maxlog =
                5
        end
    end
    # BUG FIX: previously fell back to `gen.hot_start_cost` when a warm start
    # cost WAS provided; use the warm value itself. #TODO revisit START_COST default.
    warm_start_cost = isnothing(gen.warm_start_cost) ? START_COST : gen.warm_start_cost
    cold_start_cost = isnothing(gen.cold_start_cost) ? START_COST : gen.cold_start_cost
    startup_cost = (hot = hot_start_cost, warm = warm_start_cost, cold = cold_start_cost)

    shutdown_cost = gen.shutdown_cost
    if isnothing(shutdown_cost)
        @warn "No shutdown_cost defined for $(gen.name), setting to 0.0" maxlog = 1
        shutdown_cost = 0.0
    end

    op_cost = MultiStartCost(var_cost, no_load_cost, fixed, startup_cost, shutdown_cost)

    return ThermalMultiStart(;
        name = get_name(thermal_gen),
        available = get_available(thermal_gen),
        status = get_status(thermal_gen),
        bus = get_bus(thermal_gen),
        active_power = get_active_power(thermal_gen),
        reactive_power = get_reactive_power(thermal_gen),
        rating = get_rating(thermal_gen),
        prime_mover = get_prime_mover(thermal_gen),
        fuel = get_fuel(thermal_gen),
        active_power_limits = get_active_power_limits(thermal_gen),
        reactive_power_limits = get_reactive_power_limits(thermal_gen),
        ramp_limits = get_ramp_limits(thermal_gen),
        power_trajectory = power_trajectory,
        time_limits = get_time_limits(thermal_gen),
        start_time_limits = startup_timelimits,
        start_types = start_types,
        operation_cost = op_cost,
        base_power = get_base_power(thermal_gen),
        time_at_status = get_time_at_status(thermal_gen),
        must_run = gen.must_run,
    )
end
"""
Create a hydro generator of the concrete `gen_type` from a GENERATOR row.

`HydroEnergyReservoir` and `HydroPumpedStorage` require rows in storage.csv;
`HydroDispatch` needs no storage data. Throws for unsupported hydro types.
"""
function make_hydro_generator(gen_type, data::PowerSystemTableData, gen, cost_colnames, bus)
    @debug "Making HydroGen" _group = IS.LOG_GROUP_PARSING gen.name

    active_power_limits =
        (min = gen.active_power_limits_min, max = gen.active_power_limits_max)
    (reactive_power, reactive_power_limits) = make_reactive_params(gen)
    rating = calculate_rating(active_power_limits, reactive_power_limits)
    ramp_limits = make_ramplimits(gen)
    # NOTE(review): the two locals below are unused; make_timelimits reads the
    # columns itself — confirm they can be removed.
    min_up_time = gen.min_up_time
    min_down_time = gen.min_down_time
    time_limits = make_timelimits(gen, :min_up_time, :min_down_time)
    base_power = gen.base_mva

    if gen_type == HydroEnergyReservoir || gen_type == HydroPumpedStorage
        if !haskey(data.category_to_df, InputCategory.STORAGE)
            throw(DataFormatError("Storage information must defined in storage.csv"))
        end

        storage = get_storage_by_generator(data, gen.name)

        var_cost, fixed, fuel_cost =
            calculate_variable_cost(data, gen, cost_colnames, base_power)
        operation_cost = TwoPartCost(var_cost, fixed)

        if gen_type == HydroEnergyReservoir
            @debug "Creating $(gen.name) as HydroEnergyReservoir" _group =
                IS.LOG_GROUP_PARSING

            hydro_gen = HydroEnergyReservoir(
                name = gen.name,
                available = gen.available,
                bus = bus,
                active_power = gen.active_power,
                reactive_power = reactive_power,
                prime_mover = parse_enum_mapping(PrimeMovers, gen.unit_type),
                rating = rating,
                active_power_limits = active_power_limits,
                reactive_power_limits = reactive_power_limits,
                ramp_limits = ramp_limits,
                time_limits = time_limits,
                operation_cost = operation_cost,
                base_power = base_power,
                storage_capacity = storage.head.storage_capacity,
                inflow = storage.head.input_active_power_limit_max,
                initial_storage = storage.head.energy_level,
            )
        elseif gen_type == HydroPumpedStorage
            @debug "Creating $(gen.name) as HydroPumpedStorage" _group =
                IS.LOG_GROUP_PARSING

            # Pump-side parameters come from pump_* columns; reservoirs use the
            # head and tail storage rows.
            pump_active_power_limits = (
                min = gen.pump_active_power_limits_min,
                max = gen.pump_active_power_limits_max,
            )
            (pump_reactive_power, pump_reactive_power_limits) = make_reactive_params(
                gen,
                powerfield = :pump_reactive_power,
                minfield = :pump_reactive_power_limits_min,
                maxfield = :pump_reactive_power_limits_max,
            )
            pump_rating =
                calculate_rating(pump_active_power_limits, pump_reactive_power_limits)
            pump_ramp_limits = make_ramplimits(
                gen;
                ramplimcol = :pump_ramp_limits,
                rampupcol = :pump_ramp_up,
                rampdncol = :pump_ramp_down,
            )
            pump_time_limits = make_timelimits(gen, :pump_min_up_time, :pump_min_down_time)
            hydro_gen = HydroPumpedStorage(
                name = gen.name,
                available = gen.available,
                bus = bus,
                active_power = gen.active_power,
                reactive_power = reactive_power,
                rating = rating,
                base_power = base_power,
                prime_mover = parse_enum_mapping(PrimeMovers, gen.unit_type),
                active_power_limits = active_power_limits,
                reactive_power_limits = reactive_power_limits,
                ramp_limits = ramp_limits,
                time_limits = time_limits,
                rating_pump = pump_rating,
                active_power_limits_pump = pump_active_power_limits,
                reactive_power_limits_pump = pump_reactive_power_limits,
                ramp_limits_pump = pump_ramp_limits,
                time_limits_pump = pump_time_limits,
                storage_capacity = (
                    up = storage.head.storage_capacity,
                    down = storage.head.storage_capacity,
                ),
                inflow = storage.head.input_active_power_limit_max,
                outflow = storage.tail.input_active_power_limit_max,
                initial_storage = (
                    up = storage.head.energy_level,
                    down = storage.tail.energy_level,
                ),
                storage_target = (
                    up = storage.head.storage_target,
                    down = storage.tail.storage_target,
                ),
                operation_cost = operation_cost,
                pump_efficiency = storage.tail.efficiency,
            )
        end
    elseif gen_type == HydroDispatch
        @debug "Creating $(gen.name) as HydroDispatch" _group = IS.LOG_GROUP_PARSING
        hydro_gen = HydroDispatch(
            name = gen.name,
            available = gen.available,
            bus = bus,
            active_power = gen.active_power,
            reactive_power = reactive_power,
            rating = rating,
            prime_mover = parse_enum_mapping(PrimeMovers, gen.unit_type),
            active_power_limits = active_power_limits,
            reactive_power_limits = reactive_power_limits,
            ramp_limits = ramp_limits,
            time_limits = time_limits,
            base_power = base_power,
        )
    else
        error("Tabular data parser does not currently support $gen_type creation")
    end
    return hydro_gen
end
"""
Return `(head = ..., tail = ...)` storage rows for generator `gen_name` from
the STORAGE data. `tail` is `nothing` when no tail storage is defined.

Throws `DataFormatError` when no head storage exists or when more than one
tail storage is defined.
"""
function get_storage_by_generator(data::PowerSystemTableData, gen_name::AbstractString)
    head = []
    tail = []
    for s in iterate_rows(data, InputCategory.STORAGE)
        if s.generator_name == gen_name
            position = get(s, :position, "head")
            if position == "tail"
                push!(tail, s)
            else
                push!(head, s)
            end
        end
    end

    if isempty(head)
        # Robustness fix: previously fell through to `head[1]` below and raised
        # an uninformative BoundsError.
        throw(
            DataFormatError("no head storage defined for storage generator $gen_name"),
        )
    elseif length(head) > 1
        @warn "storage generator should have exactly 1 head storage defined: this will throw an error in v1.3.x" maxlog =
            1 # this currently selects the first head storage with no control on how to make that selection, in the future throw an error.
        #throw(DataFormatError("storage generator must have exactly 1 head storage defined")) #TODO: uncomment this in next version
    end
    if length(tail) > 1
        throw(
            DataFormatError(
                "storage generator cannot have more than 1 tail storage defined",
            ),
        )
    end
    tail = length(tail) > 0 ? tail[1] : nothing
    return (head = head[1], tail = tail)
end
"""
Create a renewable generator of the concrete `gen_type` (`RenewableDispatch`
or `RenewableFix`) from a GENERATOR row; other types raise an error.
"""
function make_renewable_generator(
    gen_type,
    data::PowerSystemTableData,
    gen,
    cost_colnames,
    bus,
)
    @debug "Making RenewableGen" _group = IS.LOG_GROUP_PARSING gen.name
    generator = nothing
    active_power_limits =
        (min = gen.active_power_limits_min, max = gen.active_power_limits_max)
    (reactive_power, reactive_power_limits) = make_reactive_params(gen)
    rating = calculate_rating(active_power_limits, reactive_power_limits)
    base_power = gen.base_mva
    var_cost, fixed, fuel_cost =
        calculate_variable_cost(data, gen, cost_colnames, base_power)
    # NOTE(review): the cost is only attached to RenewableDispatch below;
    # RenewableFix has no operating cost.
    operation_cost = TwoPartCost(var_cost, fixed)

    if gen_type == RenewableDispatch
        @debug "Creating $(gen.name) as RenewableDispatch" _group = IS.LOG_GROUP_PARSING
        generator = RenewableDispatch(
            name = gen.name,
            available = gen.available,
            bus = bus,
            active_power = gen.active_power,
            reactive_power = reactive_power,
            rating = rating,
            prime_mover = parse_enum_mapping(PrimeMovers, gen.unit_type),
            reactive_power_limits = reactive_power_limits,
            power_factor = gen.power_factor,
            operation_cost = operation_cost,
            base_power = base_power,
        )
    elseif gen_type == RenewableFix
        @debug "Creating $(gen.name) as RenewableFix" _group = IS.LOG_GROUP_PARSING
        generator = RenewableFix(
            name = gen.name,
            available = gen.available,
            bus = bus,
            active_power = gen.active_power,
            reactive_power = reactive_power,
            rating = rating,
            prime_mover = parse_enum_mapping(PrimeMovers, gen.unit_type),
            power_factor = gen.power_factor,
            base_power = base_power,
        )
    else
        error("Unsupported type $gen_type")
    end

    return generator
end
"""
Create a `GenericBattery` from a GENERATOR row and its matching storage row.

The output maximum falls back to the generator's active power limit when the
storage row does not define one.
"""
function make_storage(data::PowerSystemTableData, gen, storage, bus)
    # BUG FIX: corrected "Storge" typo in the debug message.
    @debug "Making Storage" _group = IS.LOG_GROUP_PARSING storage.name
    state_of_charge_limits =
        (min = storage.min_storage_capacity, max = storage.storage_capacity)
    input_active_power_limits = (
        min = storage.input_active_power_limit_min,
        max = storage.input_active_power_limit_max,
    )
    output_active_power_limits = (
        min = storage.output_active_power_limit_min,
        max = isnothing(storage.output_active_power_limit_max) ?
              gen.active_power_limits_max : storage.output_active_power_limit_max,
    )
    efficiency = (in = storage.input_efficiency, out = storage.output_efficiency)
    (reactive_power, reactive_power_limits) = make_reactive_params(storage)

    battery = GenericBattery(;
        name = gen.name,
        available = storage.available,
        bus = bus,
        prime_mover = parse_enum_mapping(PrimeMovers, gen.unit_type),
        initial_energy = storage.energy_level,
        state_of_charge_limits = state_of_charge_limits,
        rating = storage.rating,
        active_power = storage.active_power,
        input_active_power_limits = input_active_power_limits,
        output_active_power_limits = output_active_power_limits,
        efficiency = efficiency,
        reactive_power = reactive_power,
        reactive_power_limits = reactive_power_limits,
        base_power = storage.base_power,
    )

    return battery
end
# Maps the category strings used in the reserve/service input files to the
# corresponding component abstract types.
const CATEGORY_STR_TO_COMPONENT = Dict{String, DataType}(
    "Bus" => Bus,
    "Generator" => Generator,
    "Reserve" => Service,
    "LoadZone" => LoadZone,
    "ElectricLoad" => ElectricLoad,
)
"""
    _get_component_type_from_category(category::AbstractString)

Resolve a category string to its component type via
`CATEGORY_STR_TO_COMPONENT`; unknown categories raise a `DataFormatError`.
"""
function _get_component_type_from_category(category::AbstractString)
    ctype = get(CATEGORY_STR_TO_COMPONENT, category, nothing)
    isnothing(ctype) && throw(DataFormatError("unsupported category=$category"))
    return ctype
end
"""
    _read_config_file(file_path::String)

Load a YAML configuration file and return it keyed by `InputCategory` enum
values. The legacy "reserves" key is normalized to "reserve".
"""
function _read_config_file(file_path::String)
    return open(file_path) do io
        raw = YAML.load(io)
        config_data = Dict{InputCategory, Vector}()
        for (key, val) in raw
            # TODO: need to change user_descriptors.yaml to use reserve instead.
            normalized = key == "reserves" ? "reserve" : key
            config_data[get_enum_value(InputCategory, normalized)] = val
        end
        return config_data
    end
end
"""Stores user-customized information for required dataframe columns."""
struct _FieldInfo
name::String
custom_name::String
per_unit_conversion::NamedTuple{
(:From, :To, :Reference),
Tuple{UnitSystem, UnitSystem, String},
}
unit_conversion::Union{NamedTuple{(:From, :To), Tuple{String, String}}, Nothing}
default_value::Any
# TODO unit, value ranges and options
end
"""
    _get_field_infos(data::PowerSystemTableData, category::InputCategory, df_names)

Build a `_FieldInfo` for every descriptor of `category`, combining the
canonical descriptors with the user's column descriptors (`df_names` are the
columns actually present in the dataframe).
"""
function _get_field_infos(data::PowerSystemTableData, category::InputCategory, df_names)
    if !haskey(data.user_descriptors, category)
        throw(DataFormatError("Invalid category=$category"))
    end

    if !haskey(data.descriptors, category)
        throw(DataFormatError("Invalid category=$category"))
    end

    # Cache whether PowerSystems uses a column's values as system-per-unit.
    # The user's descriptors indicate that the raw data is already system-per-unit or not.
    per_unit = Dict{String, IS.UnitSystem}()
    unit = Dict{String, Union{String, Nothing}}()
    custom_names = Dict{String, String}()
    for descriptor in data.user_descriptors[category]
        custom_name = descriptor["custom_name"]
        if descriptor["custom_name"] in df_names
            per_unit[descriptor["name"]] = get_enum_value(
                IS.UnitSystem,
                get(descriptor, "unit_system", "NATURAL_UNITS"),
            )
            unit[descriptor["name"]] = get(descriptor, "unit", nothing)
            custom_names[descriptor["name"]] = custom_name
        else
            @warn "User-defined column name $custom_name is not in dataframe."
        end
    end

    fields = Vector{_FieldInfo}()
    for item in data.descriptors[category]
        name = item["name"]
        item_unit_system =
            get_enum_value(IS.UnitSystem, get(item, "unit_system", "NATURAL_UNITS"))
        per_unit_reference = get(item, "base_reference", "base_power")
        default_value = get(item, "default_value", "required")
        if default_value == "system_base_power"
            default_value = data.base_power
        end

        if name in keys(custom_names)
            custom_name = custom_names[name]
            # The user may not supply per-unit data for a natural-unit column.
            if item_unit_system == IS.UnitSystem.NATURAL_UNITS &&
               per_unit[name] != IS.UnitSystem.NATURAL_UNITS
                throw(DataFormatError("$name cannot be defined as $(per_unit[name])"))
            end
            pu_conversion = (
                From = per_unit[name],
                To = item_unit_system,
                Reference = per_unit_reference,
            )
            expected_unit = get(item, "unit", nothing)
            if !isnothing(expected_unit) &&
               !isnothing(unit[name]) &&
               expected_unit != unit[name]
                unit_conversion = (From = unit[name], To = expected_unit)
            else
                unit_conversion = nothing
            end
        else
            # Column not customized by the user: identity conversion.
            custom_name = name
            pu_conversion = (
                From = item_unit_system,
                To = item_unit_system,
                Reference = per_unit_reference,
            )
            unit_conversion = nothing
        end

        push!(
            fields,
            _FieldInfo(name, custom_name, pu_conversion, unit_conversion, default_value),
        )
    end
    return fields
end
"""Reads values from dataframe row and performs necessary conversions."""
function _read_data_row(data::PowerSystemTableData, row, field_infos; na_to_nothing = true)
fields = Vector{String}()
vals = Vector()
for field_info in field_infos
if field_info.custom_name in names(row)
value = row[field_info.custom_name]
else
value = field_info.default_value
value == "required" && throw(DataFormatError("$(field_info.name) is required"))
@debug "Column $(field_info.custom_name) doesn't exist in df, enabling use of default value of $(field_info.default_value)" _group =
IS.LOG_GROUP_PARSING maxlog = 1
end
if ismissing(value)
throw(DataFormatError("$(field_info.custom_name) value missing"))
end
if na_to_nothing && value == "NA"
value = nothing
end
if !isnothing(value)
if field_info.per_unit_conversion.From == IS.UnitSystem.NATURAL_UNITS &&
field_info.per_unit_conversion.To == IS.UnitSystem.SYSTEM_BASE
@debug "convert to $(field_info.per_unit_conversion.To)" _group =
IS.LOG_GROUP_PARSING field_info.custom_name
value = value isa String ? tryparse(Float64, value) : value
value = data.base_power == 0.0 ? 0.0 : value / data.base_power
elseif field_info.per_unit_conversion.From == IS.UnitSystem.NATURAL_UNITS &&
field_info.per_unit_conversion.To == IS.UnitSystem.DEVICE_BASE
reference_idx = findfirst(
x -> x.name == field_info.per_unit_conversion.Reference,
field_infos,
)
isnothing(reference_idx) && throw(
DataFormatError(
"$(field_info.per_unit_conversion.Reference) not found in table with $(field_info.custom_name)",
),
)
reference_info = field_infos[reference_idx]
@debug "convert to $(field_info.per_unit_conversion.To) using $(reference_info.custom_name)" _group =
IS.LOG_GROUP_PARSING field_info.custom_name maxlog = 1
reference_value =
get(row, reference_info.custom_name, reference_info.default_value)
reference_value == "required" && throw(
DataFormatError(
"$(reference_info.name) is required for p.u. conversion",
),
)
value = value isa String ? tryparse(Float64, value) : value
value = reference_value == 0.0 ? 0.0 : value / reference_value
elseif field_info.per_unit_conversion.From != field_info.per_unit_conversion.To
throw(
DataFormatError(
"conversion not supported from $(field_info.per_unit_conversion.From) to $(field_info.per_unit_conversion.To) for $(field_info.custom_name)",
),
)
end
else
@debug "$(field_info.custom_name) is nothing" _group = IS.LOG_GROUP_PARSING maxlog =
1
end
# TODO: need special handling for units
if !isnothing(field_info.unit_conversion)
@debug "convert units" _group = IS.LOG_GROUP_PARSING field_info.custom_name maxlog =
1
value = convert_units!(value, field_info.unit_conversion)
end
# TODO: validate ranges and option lists
push!(fields, field_info.name)
push!(vals, value)
end
return NamedTuple{Tuple(Symbol.(fields))}(vals)
end
| [
27,
456,
62,
30783,
29,
3064,
12,
12825,
198,
198,
9979,
40295,
62,
23060,
25361,
62,
30910,
36584,
32961,
62,
25664,
796,
198,
220,
220,
220,
4654,
6978,
7,
15908,
3672,
7,
6978,
1659,
7,
13434,
11964,
82,
36911,
366,
20147,
1968,
... | 2.18274 | 25,342 |
<filename>test/runtests.jl
using Inequality
using StatsBase
using Test
using DataFrames
# Test fixtures: nine sample incomes with weights 0.1–0.9 (df_1), and the same
# data duplicated across two groups (df_2) to exercise grouped operations.
sample_values = [8, 5, 1, 3, 5, 6, 7, 6, 3]
sample_weights = collect(0.1:0.1:0.9)
df_1 = DataFrame(v = sample_values, w = sample_weights)
df_2 = DataFrame(v = repeat(sample_values, 2),
                 w = repeat(sample_weights, 2),
                 group = repeat([1, 2], inner = 9))
# Test suite for Inequality.jl. Each measure gets three testsets:
#   "<measure> checks"          — input validation: negative/NaN weights and
#                                 length mismatches raise ArgumentError,
#                                 `missing` weights raise MethodError;
#   "<measure>"                 — unweighted vs weighted values against fixed
#                                 references, StatsBase weights/pweights, and
#                                 the w-prefixed convenience variants;
#   "<measure> with DataFrames" — integration via combine/groupby on df_1/df_2.
# ---- Atkinson index ----
@testset "atkinson checks" begin
    @test_throws ArgumentError atkinson([8, 5, 1, 3, 5], -1)
    @test_throws ArgumentError atkinson([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4, 0.5], -1)
    @test_throws ArgumentError atkinson([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4], 1)
    @test_throws ArgumentError atkinson([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4], 1)
    @test_throws MethodError atkinson([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4], 1)
    @test_throws ArgumentError atkinson([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4], 1) # different length `v` and `w`
end
@testset "atkinson" begin
    @test atkinson([8,5,1,3,5], 1) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 1) atol=0.00000001
    @test atkinson([8,5,1,3,5], 0.8) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 0.8) atol=0.00000001
    @test atkinson([8,5,1,3,5], 1.2) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 1.2) atol=0.00000001
    @test atkinson([8,5,1,3,5], 1) ≈ 0.183083677559 atol=0.00000001
    @test atkinson([8,5,1,3,5], 0.8) ≈ 0.14249378376024 atol=0.00000001
    @test atkinson([8,5,1,3,5], 1.2) ≈ 0.2248733447899 atol=0.00000001
    @test atkinson([8,5,1,3,5], [1, 2, 1, 3, 1], 1) ≈ atkinson([8,5,5,1,3,3,3,5], 1) atol=0.00000001 # same result for probability and frequency weights
    @test atkinson([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 1) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 1) atol=0.00000001
    @test atkinson([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 1) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 1) atol=0.00000001
    @test atkinson([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 0.8) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 0.8) atol=0.00000001
    @test atkinson([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 0.8) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 0.8) atol=0.00000001
    @test atkinson([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 1.2) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 1.2) atol=0.00000001
    @test atkinson([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 1.2) ≈ atkinson([8,5,1,3,5], [1,1,1,1,1], 1.2) atol=0.00000001
    @test watkinson([8,5,1,3,5], [1, 2, 1, 3, 1], 1) ≈ atkinson([8,5,1,3,5], [1, 2, 1, 3, 1], 1)
    @test watkinson([8,5,1,3,5], weights([1, 2, 1, 3, 1]), 1) ≈ atkinson([8,5,1,3,5], [1, 2, 1, 3, 1], 1)
end
@testset "atkinson with DataFrames" begin
    @test combine(df_1, :v => x -> atkinson(x,2))[!,:v_function][1] ≈ atkinson([8,5,1,3,5,6,7,6,3],2)
    @test combine(df_1, :v => x -> atkinson(x,1))[!,:v_function][1] ≈ atkinson([8,5,1,3,5,6,7,6,3],1)
    @test combine(df_1, :v => x -> atkinson(x,0.8))[!,:v_function][1] ≈ atkinson([8,5,1,3,5,6,7,6,3],0.8)
    @test combine(df_1, [:v, :w] => (x, y) -> atkinson(x,weights(y),2))[!,:v_w_function][1] ≈ atkinson([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2)
    @test combine(df_1, [:v, :w] => (x, y) -> atkinson(x,y,2))[!,:v_w_function][1] ≈ atkinson([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2)
    @test combine(groupby(df_2, :group), :v => x -> atkinson(x,2))[!,:v_function][1] ≈ atkinson([8,5,1,3,5,6,7,6,3],2)
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> atkinson(x,y,2))[!,:v_w_function][1] ≈ atkinson([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2)
end
# ---- Gini coefficient ----
@testset "gini checks" begin
    @test_throws ArgumentError gini([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError gini([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4])
    @test_throws MethodError gini([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError gini([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4]) # different length `v` and `w`
end
@testset "gini" begin
    @test gini([8,5,1,3,5]) ≈ gini([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test gini([8,5,1,3,5,6,7,6,3]) ≈ 0.237373737373737 atol=0.00000001
    @test gini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9)) ≈ 0.2065239551478077 atol=0.00000001
    @test gini([8,5,1,3,5], [1, 2, 1, 3, 1]) ≈ gini([8,5,5,1,3,3,3,5]) atol=0.00000001 # same result for probability and frequency weights
    @test gini([8,5,1,3,5], StatsBase.weights([1,1,1,1,1])) ≈ gini([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test gini([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1])) ≈ gini([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test wgini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9)) ≈ gini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test wgini([8,5,1,3,5,6,7,6,3], weights(collect(0.1:0.1:0.9))) ≈ gini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
end
@testset "gini with DataFrames" begin
    @test combine(df_1, :v => gini)[!,:v_gini][1] ≈ gini([8,5,1,3,5,6,7,6,3])
    @test combine(df_1, [:v, :w] => (x, y) -> gini(x, weights(y)))[!,:v_w_function][1] ≈ gini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test combine(df_1, [:v, :w] => (x, y) -> gini(x, y))[!,:v_w_function][1] ≈ gini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test combine(groupby(df_2, :group), :v => gini)[!,:v_gini][1] ≈ gini([8,5,1,3,5,6,7,6,3])
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> gini(x, y))[!,:v_w_function][1] ≈ gini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    # @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> gini(x, weights(y)))[!,:v_w_function][1] ≈ gini([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9)) broken=true
end
# ---- Lorenz curve (returns (cumulative population share, cumulative income share)) ----
@testset "lorenz_curve checks" begin
    @test_throws ArgumentError lorenz_curve([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError lorenz_curve([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4])
    @test_throws MethodError lorenz_curve([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError lorenz_curve([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4]) # different length `v` and `w`
end
@testset "lorenz_curve" begin
    @test all(lorenz_curve([8,5,1,3,5])[2] .≈ lorenz_curve([8,5,1,3,5], [1,1,1,1,1])[2])
    @test all(lorenz_curve([8,5,1,3,5,6,7,6,3])[1] .≈ Vector([0.0, 0.1111111111111111, 0.2222222222222222, 0.3333333333333333, 0.4444444444444444, 0.5555555555555556, 0.6666666666666666, 0.7777777777777778, 0.8888888888888888, 1.0]))
    @test all(lorenz_curve([8,5,1,3,5,6,7,6,3])[2] .≈ [0.0, 0.022727272727272728, 0.09090909090909091, 0.1590909090909091, 0.2727272727272727, 0.38636363636363635, 0.5227272727272727, 0.6590909090909091, 0.8181818181818182, 1.0])
    @test all(lorenz_curve([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))[2] .≈ [0.0, 0.013761467889908256, 0.05045871559633028, 0.0963302752293578, 0.1513761467889908, 0.2660550458715596, 0.38990825688073394, 0.555045871559633, 0.7752293577981653, 1.0])
    @test all(lorenz_curve([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]))[2] ≈ lorenz_curve([8,5,1,3,5], [1,1,1,1,1])[2])
    @test all(lorenz_curve([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]))[2] ≈ lorenz_curve([8,5,1,3,5], [1,1,1,1,1])[2])
    @test all(wlorenz_curve([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))[2] .≈ [0.0, 0.013761467889908256, 0.05045871559633028, 0.0963302752293578, 0.1513761467889908, 0.2660550458715596, 0.38990825688073394, 0.555045871559633, 0.7752293577981653, 1.0])
    @test all(wlorenz_curve([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]))[2] ≈ lorenz_curve([8,5,1,3,5], [1,1,1,1,1])[2])
end
@testset "lorenz_curve with DataFrames" begin
    @test all(combine(df_1, :v => lorenz_curve)[!,:v_lorenz_curve][1][2] .≈ lorenz_curve([8,5,1,3,5,6,7,6,3])[2])
    @test all(combine(df_1, [:v, :w] => (x, y) -> lorenz_curve(x, weights(y)))[!,:v_w_function][1][2] .≈ lorenz_curve([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))[2])
    @test all(combine(df_1, [:v, :w] => (x, y) -> lorenz_curve(x, y))[!,:v_w_function][1][2] .≈ lorenz_curve([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))[2])
    @test all(combine(groupby(df_2, :group), :v => lorenz_curve)[!,:v_lorenz_curve][1][2] .≈ lorenz_curve([8,5,1,3,5,6,7,6,3])[2])
    @test all(combine(groupby(df_2, :group), [:v, :w] => (x, y) -> lorenz_curve(x, y))[!,:v_w_function][1][2] ≈ lorenz_curve([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))[2])
end
# ---- Mean log deviation ----
@testset "mld checks" begin
    @test_throws ArgumentError mld([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError mld([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4])
    @test_throws MethodError mld([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError mld([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4]) # different length `v` and `w`
end
@testset "mld" begin
    @test mld([8,5,1,3,5]) ≈ mld([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test mld([8,5,1,3,5,6,7,6,3]) ≈ 0.1397460530936332 atol=0.00000001
    @test mld([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9)) ≈ 0.10375545537468207 atol=0.00000001
    @test mld([8,5,1,3,5], [1, 2, 1, 3, 1]) ≈ mld([8,5,5,1,3,3,3,5]) atol=0.00000001 # same result for probability and frequency weights
    @test mld([8,5,1,3,5], StatsBase.weights([1,1,1,1,1])) ≈ mld([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test mld([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1])) ≈ mld([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test wmld([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9)) ≈ mld([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test wmld([8,5,1,3,5,6,7,6,3], StatsBase.weights(collect(0.1:0.1:0.9))) ≈ mld([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
end
@testset "mld with DataFrames" begin
    @test combine(df_1, :v => mld)[!,:v_mld][1] ≈ mld([8,5,1,3,5,6,7,6,3])
    @test combine(df_1, [:v, :w] => (x, y) -> mld(x, weights(y)))[!,:v_w_function][1] ≈ mld([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test combine(df_1, [:v, :w] => (x, y) -> mld(x, y))[!,:v_w_function][1] ≈ mld([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test combine(groupby(df_2, :group), :v => mld)[!,:v_mld][1] ≈ mld([8,5,1,3,5,6,7,6,3])
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> mld(x, y))[!,:v_w_function][1] ≈ mld([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
end
# ---- Watts poverty index (last argument is the poverty line) ----
@testset "watts checks" begin
    @test_throws ArgumentError watts([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4], 4)
    @test_throws ArgumentError watts([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4], 4)
    @test_throws MethodError watts([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4], 4)
    @test_throws ArgumentError watts([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4], 4) # different length `v` and `w`
end
@testset "watts" begin
    @test watts([8,5,1,3,5], 4) ≈ watts([8,5,1,3,5], [1,1,1,1,1], 4) atol=0.00000001
    @test watts([8,5,1,3,5,6,7,6,3], 4) ≈ 0.217962056224828 atol=0.00000001
    @test watts([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4) ≈ 0.17552777833850716 atol=0.00000001
    @test watts([8,5,1,3,5], [1, 2, 1, 3, 1], 4) ≈ watts([8,5,5,1,3,3,3,5], 4) atol=0.00000001 # same result for probability and frequency weights
    @test watts([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 4) ≈ watts([8,5,1,3,5], [1,1,1,1,1], 4) atol=0.00000001
    @test watts([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 4) ≈ watts([8,5,1,3,5], [1,1,1,1,1], 4) atol=0.00000001
    @test wwatts([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4) ≈ watts([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4)
    @test wwatts([8,5,1,3,5,6,7,6,3], weights(collect(0.1:0.1:0.9)), 4) ≈ watts([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4)
end
@testset "watts with DataFrames" begin
    @test combine(df_1, :v => x -> watts(x,4))[!,:v_function][1] ≈ watts([8,5,1,3,5,6,7,6,3],4)
    @test combine(df_1, [:v, :w] => (x, y) -> watts(x, weights(y),4))[!,:v_w_function][1] ≈ watts([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
    @test combine(df_1, [:v, :w] => (x, y) -> watts(x,y,4))[!,:v_w_function][1] ≈ watts([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
    @test combine(groupby(df_2, :group), :v => x-> watts(x,4))[!,:v_function][1] ≈ watts([8,5,1,3,5,6,7,6,3],4)
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> watts(x,y,4))[!,:v_w_function][1] ≈ watts([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
end
# ---- Theil index ----
@testset "theil checks" begin
    @test_throws ArgumentError theil([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError theil([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4])
    @test_throws MethodError theil([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4])
    @test_throws ArgumentError theil([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4]) # different length `v` and `w`
end
@testset "theil" begin
    @test theil([8,5,1,3,5]) ≈ theil([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test theil([8,5,1,3,5,6,7,6,3]) ≈ 0.10494562214323544 atol=0.00000001
    @test theil([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9)) ≈ 0.08120013911680612 atol=0.00000001
    @test theil([8,5,1,3,5], [1, 2, 1, 3, 1]) ≈ theil([8,5,5,1,3,3,3,5]) atol=0.00000001 # same result for probability and frequency weights
    @test theil([8,5,1,3,5], StatsBase.weights([1,1,1,1,1])) ≈ theil([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test theil([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1])) ≈ theil([8,5,1,3,5], [1,1,1,1,1]) atol=0.00000001
    @test wtheil([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9)) ≈ theil([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test wtheil([8,5,1,3,5,6,7,6,3], weights(collect(0.1:0.1:0.9))) ≈ theil([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
end
@testset "theil with DataFrames" begin
    @test combine(df_1, :v => theil)[!,:v_theil][1] ≈ theil([8,5,1,3,5,6,7,6,3])
    @test combine(df_1, [:v, :w] => (x, y) -> theil(x, weights(y)))[!,:v_w_function][1] ≈ theil([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test combine(df_1, [:v, :w] => (x, y) -> theil(x, y))[!,:v_w_function][1] ≈ theil([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
    @test combine(groupby(df_2, :group), :v => theil)[!,:v_theil][1] ≈ theil([8,5,1,3,5,6,7,6,3])
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> theil(x, y))[!,:v_w_function][1] ≈ theil([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9))
end
# ---- Generalized entropy (α = 0 reduces to MLD, α = 1 to Theil) ----
@testset "gen_entropy checks" begin
    @test_throws ArgumentError gen_entropy([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws ArgumentError gen_entropy([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws MethodError gen_entropy([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws ArgumentError gen_entropy([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4],2) # different length `v` and `w`
end
@testset "gen_entropy" begin
    @test gen_entropy([8,5,1,3,5], 0) ≈ mld([8,5,1,3,5])
    @test gen_entropy([8,5,1,3,5], 1) ≈ theil([8,5,1,3,5])
    @test gen_entropy([8,5,1,3,5], 2) ≈ gen_entropy([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test gen_entropy([8,5,1,3,5,6,7,6,3], 2) ≈ 0.09039256198347094 atol=0.00000001
    @test gen_entropy([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 2) ≈ 0.0709746654322026 atol=0.00000001
    @test gen_entropy([8,5,1,3,5], [1, 2, 1, 3, 1], 2) ≈ gen_entropy([8,5,5,1,3,3,3,5], 2) atol=0.00000001 # same result for probability and frequency weights
    @test gen_entropy([8,5,1,3,5], collect(0.1:0.1:0.5), 0) ≈ mld([8,5,1,3,5],collect(0.1:0.1:0.5))
    @test gen_entropy([8,5,1,3,5], collect(0.1:0.1:0.5), 1) ≈ theil([8,5,1,3,5],collect(0.1:0.1:0.5))
    @test gen_entropy([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 2) ≈ gen_entropy([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test gen_entropy([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 2) ≈ gen_entropy([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test gen_entropy([8,5,1,3,5], StatsBase.weights(collect(0.1:0.1:0.5)), 0) ≈ mld([8,5,1,3,5],collect(0.1:0.1:0.5))
    @test gen_entropy([8,5,1,3,5], StatsBase.weights(collect(0.1:0.1:0.5)), 1) ≈ theil([8,5,1,3,5],collect(0.1:0.1:0.5))
    @test wgen_entropy([8,5,1,3,5], [1,1,1,1,1], 2) ≈ gen_entropy([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test wgen_entropy([8,5,1,3,5], weights(collect(0.1:0.1:0.5)), 2) ≈ gen_entropy([8,5,1,3,5], collect(0.1:0.1:0.5), 2)
end
@testset "gen_entropy with DataFrames" begin
    @test combine(df_1, :v => x -> gen_entropy(x,0))[!,:v_function][1] ≈ mld([8,5,1,3,5,6,7,6,3])
    @test combine(df_1, :v => x -> gen_entropy(x,1))[!,:v_function][1] ≈ theil([8,5,1,3,5,6,7,6,3])
    @test combine(df_1, :v => x -> gen_entropy(x,2))[!,:v_function][1] ≈ gen_entropy([8,5,1,3,5,6,7,6,3],2)
    @test combine(df_1, :v => x -> gen_entropy(x,1))[!,:v_function][1] ≈ gen_entropy([8,5,1,3,5,6,7,6,3],1)
    @test combine(df_1, :v => x -> gen_entropy(x,0.8))[!,:v_function][1] ≈ gen_entropy([8,5,1,3,5,6,7,6,3],0.8)
    @test combine(df_1, [:v, :w] => (x, y) -> gen_entropy(x,weights(y),2))[!,:v_w_function][1] ≈ gen_entropy([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2)
    @test combine(df_1, [:v, :w] => (x, y) -> gen_entropy(x,y,2))[!,:v_w_function][1] ≈ gen_entropy([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2)
    @test combine(groupby(df_2, :group), :v => x -> gen_entropy(x,2))[!,:v_function][1] ≈ gen_entropy([8,5,1,3,5,6,7,6,3],2)
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> gen_entropy(x,y,2))[!,:v_w_function][1] ≈ gen_entropy([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2)
end
# ---- Headcount ratio (FGT with α = 0) ----
@testset "headcount checks" begin
    @test_throws ArgumentError headcount([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws ArgumentError headcount([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws MethodError headcount([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws ArgumentError headcount([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4],2) # different length `v` and `w`
end
@testset "headcount" begin
    @test headcount([8,5,1,3,5,6,7,6,3], 4) ≈ 0.3333333333333333 atol=0.00000001
    @test headcount([8,5,1,3,5,6,7,6,3], fill(1,9), 4) ≈ 0.3333333333333333 atol=0.00000001
    @test headcount([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4) ≈ 0.35555555555555557 atol=0.00000001
    @test headcount([8,5,1,3,5], [1, 2, 1, 3, 1], 2) ≈ headcount([8,5,5,1,3,3,3,5], 2) atol=0.00000001 # same result for probability and frequency weights
    @test headcount([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 2) ≈ headcount([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test headcount([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 2) ≈ headcount([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test wheadcount([8,5,1,3,5], [1, 2, 1, 3, 1], 2) ≈ headcount([8,5,5,1,3,3,3,5], 2) atol=0.00000001
    @test wheadcount([8,5,1,3,5], StatsBase.pweights([1, 2, 1, 3, 1]), 2) ≈ headcount([8,5,5,1,3,3,3,5], 2) atol=0.00000001
end
@testset "headcount with DataFrames" begin
    @test combine(df_1, :v => x -> headcount(x,4))[!,:v_function][1] ≈ headcount([8,5,1,3,5,6,7,6,3],4)
    @test combine(df_1, [:v, :w] => (x, y) -> headcount(x, weights(y),4))[!,:v_w_function][1] ≈ headcount([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
    @test combine(df_1, [:v, :w] => (x, y) -> headcount(x,y,4))[!,:v_w_function][1] ≈ headcount([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
    @test combine(groupby(df_2, :group), :v => x-> headcount(x,4))[!,:v_function][1] ≈ headcount([8,5,1,3,5,6,7,6,3],4)
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> headcount(x,y,4))[!,:v_w_function][1] ≈ headcount([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
end
# ---- Poverty gap (FGT with α = 1) ----
@testset "poverty_gap checks" begin
    @test_throws ArgumentError poverty_gap([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws ArgumentError poverty_gap([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws MethodError poverty_gap([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4],2)
    @test_throws ArgumentError poverty_gap([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4],2) # different length `v` and `w`
end
@testset "poverty_gap" begin
    @test poverty_gap([8,5,1,3,5,6,7,6,3], 4) ≈ 0.1388888888888889 atol=0.00000001
    @test poverty_gap([8,5,1,3,5,6,7,6,3], fill(1,9), 4) ≈ 0.1388888888888889 atol=0.00000001
    @test poverty_gap([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4) ≈ 0.1222222222222222 atol=0.00000001
    @test poverty_gap([8,5,1,3,5], [1, 2, 1, 3, 1], 2) ≈ poverty_gap([8,5,5,1,3,3,3,5], 2) atol=0.00000001 # same result for probability and frequency weights
    @test poverty_gap([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 2) ≈ poverty_gap([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test poverty_gap([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 2) ≈ poverty_gap([8,5,1,3,5], [1,1,1,1,1], 2) atol=0.00000001
    @test wpoverty_gap([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4) ≈ poverty_gap([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4)
    @test wpoverty_gap([8,5,1,3,5,6,7,6,3], weights(collect(0.1:0.1:0.9)), 4) ≈ poverty_gap([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 4)
end
@testset "poverty_gap with DataFrames" begin
    @test combine(df_1, :v => x -> poverty_gap(x,4))[!,:v_function][1] ≈ poverty_gap([8,5,1,3,5,6,7,6,3],4)
    @test combine(df_1, [:v, :w] => (x, y) -> poverty_gap(x, weights(y),4))[!,:v_w_function][1] ≈ poverty_gap([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
    @test combine(df_1, [:v, :w] => (x, y) -> poverty_gap(x,y,4))[!,:v_w_function][1] ≈ poverty_gap([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
    @test combine(groupby(df_2, :group), :v => x-> poverty_gap(x,4))[!,:v_function][1] ≈ poverty_gap([8,5,1,3,5,6,7,6,3],4)
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> poverty_gap(x,y,4))[!,:v_w_function][1] ≈ poverty_gap([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),4)
end
# ---- Foster–Greer–Thorbecke index fgt(v, [w,] α, z) ----
@testset "fgt checks" begin
    @test_throws ArgumentError fgt([8, 5, 1, 3, 5], [-1, 0.1, 0.2, 0.3, 0.4], 2, 4)
    @test_throws ArgumentError fgt([8, 5, 1, 3, 5], [NaN, 0.1, 0.2, 0.3, 0.4],2, 4)
    @test_throws MethodError fgt([8, 5, 1, 3, 5], [missing, 0.1, 0.2, 0.3, 0.4],2, 4)
    @test_throws ArgumentError fgt([8, 5, 1, 3, 5], [0.1, 0.2, 0.3, 0.4], 2, 4) # different length `v` and `w`
end
@testset "fgt" begin
    @test fgt([8,5,1,3,5], 0, 4) ≈ headcount([8,5,1,3,5], 4)
    @test fgt([8,5,1,3,5], 1, 4) ≈ poverty_gap([8,5,1,3,5], 4)
    @test fgt([8,5,1,3,5], 0, 4) ≈ headcount([8,5,1,3,5], 4)
    @test fgt([8,5,1,3,5], 1, 4) ≈ poverty_gap([8,5,1,3,5], 4)
    @test fgt([8,5,1,3,5], 2, 4) ≈ fgt([8,5,1,3,5], [1,1,1,1,1], 2, 4) atol=0.00000001
    @test fgt([8,5,1,3,5,6,7,6,3], 2, 4) ≈ 0.0763888888888889 atol=0.00000001
    @test fgt([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 2, 4) ≈ 0.05555555555555555 atol=0.00000001
    @test fgt([8,5,1,3,5], [1, 2, 1, 3, 1], 2, 4) ≈ fgt([8,5,5,1,3,3,3,5], 2, 4) atol=0.00000001 # same result for probability and frequency weights
    @test fgt([8,5,1,3,5], StatsBase.weights([1,1,1,1,1]), 2, 4) ≈ fgt([8,5,1,3,5], [1,1,1,1,1], 2, 4) atol=0.00000001
    @test fgt([8,5,1,3,5], StatsBase.pweights([1,1,1,1,1]), 2, 4) ≈ fgt([8,5,1,3,5], [1,1,1,1,1], 2, 4) atol=0.00000001
    @test wfgt([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 2, 4) ≈ fgt([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 2, 4)
    @test wfgt([8,5,1,3,5,6,7,6,3], weights(collect(0.1:0.1:0.9)), 2, 4) ≈ fgt([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9), 2, 4)
end
@testset "fgt with DataFrames" begin
    @test combine(df_1, :v => x -> fgt(x,2,4))[!,:v_function][1] ≈ fgt([8,5,1,3,5,6,7,6,3],2,4)
    @test combine(df_1, :v => x -> fgt(x,1,4))[!,:v_function][1] ≈ poverty_gap([8,5,1,3,5,6,7,6,3],4)
    @test combine(df_1, :v => x -> fgt(x,0,4))[!,:v_function][1] ≈ headcount([8,5,1,3,5,6,7,6,3],4)
    @test combine(df_1, [:v, :w] => (x, y) -> fgt(x, weights(y),2,4))[!,:v_w_function][1] ≈ fgt([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2,4)
    @test combine(df_1, [:v, :w] => (x, y) -> fgt(x,y,2,4))[!,:v_w_function][1] ≈ fgt([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2,4)
    @test combine(groupby(df_2, :group), :v => x-> fgt(x,2,4))[!,:v_function][1] ≈ fgt([8,5,1,3,5,6,7,6,3],2,4)
    @test combine(groupby(df_2, :group), [:v, :w] => (x, y) -> fgt(x,y,2,4))[!,:v_w_function][1] ≈ fgt([8,5,1,3,5,6,7,6,3], collect(0.1:0.1:0.9),2,4)
end | [
27,
34345,
29,
9288,
14,
81,
2797,
3558,
13,
20362,
198,
3500,
554,
48203,
198,
3500,
20595,
14881,
198,
3500,
6208,
198,
3500,
6060,
35439,
628,
198,
7568,
62,
16,
796,
6060,
19778,
7,
85,
796,
685,
23,
11,
20,
11,
16,
11,
18,
... | 1.741275 | 13,667 |
<filename>src/StructuredQueries.jl
# Top-level module: assembles the query framework from its component files.
# The include order matters: utilities first, then grouped-data support,
# then the query verbs, then the node/cursor layers built on top of them.
module StructuredQueries
using Compat
export  Cursor,
        Grouped,
        @with,
        source,
        graph
include("utils.jl")
# grouped-data representation and its display methods
include("grouped/grouped.jl")
include("grouped/show.jl")
# query verbs: the core manipulation operations and their expression handling
include("verbs/verbs.jl")
include("verbs/primitives.jl")
include("verbs/expr/assignment_expr_ops.jl")
include("verbs/expr/scalar.jl")
include("verbs/expr/sym_analysis.jl")
include("verbs/process/generic.jl")
include("verbs/process/select.jl")
include("verbs/process/filter.jl")
include("verbs/process/orderby.jl")
include("verbs/process/groupby.jl")
include("verbs/process/join.jl")
include("verbs/process/summarize.jl")
# query-graph nodes
include("nodes/nodes.jl")
include("nodes/primitives.jl")
include("nodes/show.jl")
# cursors: lazy query handles, the @with macro, and graph construction
include("cursor/cursor.jl")
include("cursor/primitives.jl")
include("cursor/show.jl")
include("cursor/macros.jl")
include("cursor/graph.jl")
# materialization of query results
include("collect/lift.jl")
end
| [
27,
34345,
29,
10677,
14,
44909,
1522,
4507,
10640,
13,
20362,
198,
21412,
32112,
1522,
4507,
10640,
198,
198,
3500,
3082,
265,
198,
198,
39344,
220,
327,
21471,
11,
198,
220,
220,
220,
220,
220,
220,
220,
4912,
276,
11,
198,
220,
2... | 2.639118 | 363 |
<gh_stars>0
# This file is a part of Julia. License is MIT: http://julialang.org/license
# Exact rational number num//den with both components of integer type T.
struct Rational{T<:Integer} <: Real
    num::T
    den::T
    # Inner constructor normalizes: reduces by gcd and keeps the denominator
    # non-negative (a negative den flips the sign of g, negating both parts).
    # The indeterminate form zero//zero is rejected; n//0 (±Inf-like) is allowed.
    function Rational{T}(num::Integer, den::Integer) where T<:Integer
        num == den == zero(T) && throw(ArgumentError("invalid rational: zero($T)//zero($T)"))
        g = den < 0 ? -gcd(den, num) : gcd(den, num)
        new(div(num, g), div(den, g))
    end
end
# Outer constructors: same-type pair, mixed integer types (via promotion),
# and a single integer n (treated as n//1).
Rational(n::T, d::T) where T<:Integer = Rational{T}(n,d)
Rational(n::Integer, d::Integer) = Rational(promote(n,d)...)
Rational(n::Integer) = Rational(n,one(n))
# Divide `x` and `y` by their greatest common divisor.
# Returns the tuple (x ÷ g, y ÷ g) where g = gcd(x, y).
function divgcd(x::Integer, y::Integer)
    common = gcd(x, y)
    return div(x, common), div(y, common)
end
"""
    //(num, den)

Divide two integers or rational numbers, giving a `Rational` result.

```jldoctest
julia> 3 // 5
3//5

julia> (3 // 5) // (2 // 1)
3//10
```
"""
//(n::Integer, d::Integer ) = Rational(n,d)
# Cancel common factors between numerator and denominator before multiplying,
# so intermediate products stay as small as possible; checked_mul still
# raises OverflowError if the reduced product overflows.
function //(x::Rational, y::Integer )
    xn,yn = divgcd(x.num,y)
    xn//checked_mul(x.den,yn)
end
function //(x::Integer, y::Rational)
    xn,yn = divgcd(x,y.num)
    checked_mul(xn,y.den)//yn
end
function //(x::Rational, y::Rational)
    xn,yn = divgcd(x.num,y.num)
    xd,yd = divgcd(x.den,y.den)
    checked_mul(xn,yd)//checked_mul(xd,yn)
end
# Complex and array extensions: componentwise / elementwise application.
//(x::Complex, y::Real) = complex(real(x)//y,imag(x)//y)
//(x::Number, y::Complex) = x*y'//abs2(y)
//(X::AbstractArray, y::Number) = X .// y
# Display a Rational in its canonical `num//den` form (e.g. `3//5`).
function show(io::IO, x::Rational)
    num, den = numerator(x), denominator(x)
    show(io, num)
    print(io, "//")
    show(io, den)
end
# Deserialize a Rational{T}: numerator then denominator, each read as a raw T.
# The `//` call re-normalizes (and rejects 0//0) on reconstruction.
function read{T<:Integer}(s::IO, ::Type{Rational{T}})
    r = read(s,T)
    i = read(s,T)
    r//i
end
# Serialize a Rational as its numerator followed by its denominator.
function write(s::IO, z::Rational)
    write(s,numerator(z),denominator(z))
end
# Conversions between Rational types and to/from other numeric types.
convert{T<:Integer}(::Type{Rational{T}}, x::Rational) = Rational{T}(convert(T,x.num),convert(T,x.den))
convert{T<:Integer}(::Type{Rational{T}}, x::Integer) = Rational{T}(convert(T,x), convert(T,1))
convert(::Type{Rational}, x::Rational) = x
convert(::Type{Rational}, x::Integer) = convert(Rational{typeof(x)},x)
convert(::Type{Bool}, x::Rational) = x==0 ? false : x==1 ? true : throw(InexactError()) # to resolve ambiguity
# Integer conversions succeed only for whole-number rationals (den == 1).
convert(::Type{Integer}, x::Rational) = (isinteger(x) ? convert(Integer, x.num) : throw(InexactError()))
convert{T<:Integer}(::Type{T}, x::Rational) = (isinteger(x) ? convert(T, x.num) : throw(InexactError()))
convert(::Type{AbstractFloat}, x::Rational) = float(x.num)/float(x.den)
# Promote num and den to a common type P before dividing, then narrow to T.
function convert{T<:AbstractFloat,S}(::Type{T}, x::Rational{S})
    P = promote_type(T,S)
    convert(T, convert(P,x.num)/convert(P,x.den))
end
# Float -> Rational must be exact: rationalize with tol=0, then verify that
# converting back reproduces x bit-for-bit; otherwise raise InexactError.
function convert{T<:Integer}(::Type{Rational{T}}, x::AbstractFloat)
    r = rationalize(T, x, tol=0)
    x == convert(typeof(x), r) || throw(InexactError())
    r
end
convert(::Type{Rational}, x::Float64) = convert(Rational{Int64}, x)
convert(::Type{Rational}, x::Float32) = convert(Rational{Int}, x)
big(z::Complex{<:Rational{<:Integer}}) = Complex{Rational{BigInt}}(z)
# Promotion rules: Rational absorbs Integers; floats absorb Rationals.
promote_rule{T<:Integer,S<:Integer}(::Type{Rational{T}}, ::Type{S}) = Rational{promote_type(T,S)}
promote_rule{T<:Integer,S<:Integer}(::Type{Rational{T}}, ::Type{Rational{S}}) = Rational{promote_type(T,S)}
promote_rule{T<:Integer,S<:AbstractFloat}(::Type{Rational{T}}, ::Type{S}) = promote_type(T,S)
widen{T}(::Type{Rational{T}}) = Rational{widen(T)}
"""
    rationalize([T<:Integer=Int,] x; tol::Real=eps(x))

Approximate floating point number `x` as a `Rational` number with components
of the given integer type. The result will differ from `x` by no more than `tol`.
If `T` is not provided, it defaults to `Int`.

```jldoctest
julia> rationalize(5.6)
28//5

julia> a = rationalize(BigInt, 10.3)
103//10

julia> typeof(numerator(a))
BigInt
```
"""
function rationalize{T<:Integer}(::Type{T}, x::AbstractFloat, tol::Real)
    if tol < 0
        throw(ArgumentError("negative tolerance $tol"))
    end
    # NOTE(review): the NaN branch builds zero(T)//zero(T), which the Rational
    # inner constructor rejects — so rationalize(NaN) throws ArgumentError.
    isnan(x) && return zero(T)//zero(T)
    isinf(x) && return (x < 0 ? -one(T) : one(T))//zero(T)
    # p//q and pp//qq are the current and previous convergents; start from
    # ±1//0 and 0//1 so the first iteration yields a//1.
    p, q = (x < 0 ? -one(T) : one(T)), zero(T)
    pp, qq = zero(T), one(T)
    x = abs(x)
    a = trunc(x)
    r = x-a
    y = one(x)
    tolx = oftype(x, tol)
    nt, t, tt = tolx, zero(tolx), tolx
    ia = np = nq = zero(T)
    # compute the successive convergents of the continued fraction
    #  np // nq = (p*a + pp) // (q*a + qq)
    while r > nt
        try
            ia = convert(T,a)
            np = checked_add(checked_mul(ia,p),pp)
            nq = checked_add(checked_mul(ia,q),qq)
            p, pp = np, p
            q, qq = nq, q
        catch e
            # on overflow of T, the best representable convergent so far wins
            isa(e,InexactError) || isa(e,OverflowError) || rethrow(e)
            return p // q
        end

        # naive approach of using
        #   x = 1/r; a = trunc(x); r = x - a
        # is inexact, so we store x as x/y
        x, y = y, r
        a, r = divrem(x,y)

        # maintain
        # x0 = (p + (-1)^i * r) / q
        t, tt = nt, t
        nt = a*t+tt
    end

    # find optimal semiconvergent
    # smallest a such that x-a*y < a*t+tt
    a = cld(x-tt,y+t)
    try
        ia = convert(T,a)
        np = checked_add(checked_mul(ia,p),pp)
        nq = checked_add(checked_mul(ia,q),qq)
        return np // nq
    catch e
        isa(e,InexactError) || isa(e,OverflowError) || rethrow(e)
        return p // q
    end
end
# Keyword-argument front end (default tol = eps(x)) and Int-typed default.
rationalize{T<:Integer}(::Type{T}, x::AbstractFloat; tol::Real=eps(x)) = rationalize(T, x, tol)::Rational{T}
rationalize(x::AbstractFloat; kvs...) = rationalize(Int, x; kvs...)
"""
    numerator(x)

Numerator of the rational representation of `x`.

```jldoctest
julia> numerator(2//3)
2

julia> numerator(4)
4
```
"""
numerator(x::Integer) = x
numerator(x::Rational) = x.num

"""
    denominator(x)

Denominator of the rational representation of `x`.

```jldoctest
julia> denominator(2//3)
3

julia> denominator(4)
1
```
"""
denominator(x::Integer) = one(x)
denominator(x::Rational) = x.den
# The sign lives entirely in the numerator (den is normalized non-negative).
sign(x::Rational) = oftype(x, sign(x.num))
signbit(x::Rational) = signbit(x.num)
copysign(x::Rational, y::Real) = copysign(x.num,y) // x.den
copysign(x::Rational, y::Rational) = copysign(x.num,y.num) // x.den
# Extremes are the infinity-like values ±1//0.
typemin{T<:Integer}(::Type{Rational{T}}) = -one(T)//zero(T)
typemax{T<:Integer}(::Type{Rational{T}}) = one(T)//zero(T)
isinteger(x::Rational) = x.den == 1
-(x::Rational) = (-x.num) // x.den
# Signed: -typemin(T) is not representable in T, so negating it must error.
function -{T<:Signed}(x::Rational{T})
    x.num == typemin(T) && throw(OverflowError())
    (-x.num) // x.den
end
# Unsigned: only zero can be negated without leaving the unsigned range.
function -{T<:Unsigned}(x::Rational{T})
    x.num != zero(T) && throw(OverflowError())
    x
end
# Generate +, -, rem and mod for Rational pairs. Each brings the operands to
# a common denominator (with gcd cancellation) and combines the scaled
# numerators with the checked operation `chop`.
for (op,chop) in ((:+,:checked_add), (:-,:checked_sub),
                  (:rem,:rem), (:mod,:mod))
    @eval begin
        function ($op)(x::Rational, y::Rational)
            xd, yd = divgcd(x.den, y.den)
            Rational(($chop)(checked_mul(x.num,yd), checked_mul(y.num,xd)), checked_mul(x.den,yd))
        end
    end
end
# Multiply with cross-cancellation (num of one against den of the other)
# to keep the checked products as small as possible.
function *(x::Rational, y::Rational)
    xn,yd = divgcd(x.num,y.den)
    xd,yn = divgcd(x.den,y.num)
    checked_mul(xn,yn) // checked_mul(xd,yd)
end
/(x::Rational, y::Rational) = x//y
/(x::Rational, y::Complex{<:Union{Integer,Rational}}) = x//y
# Rational arithmetic is exact, so fma needs no special rounding treatment.
fma(x::Rational, y::Rational, z::Rational) = x*y+z
# Ordering by cross-multiplication; widemul avoids overflow in the products.
==(x::Rational, y::Rational) = (x.den == y.den) & (x.num == y.num)
<( x::Rational, y::Rational) = x.den == y.den ? x.num < y.num :
                               widemul(x.num,y.den) < widemul(x.den,y.num)
<=(x::Rational, y::Rational) = x.den == y.den ? x.num <= y.num :
                               widemul(x.num,y.den) <= widemul(x.den,y.num)
==(x::Rational, y::Integer ) = (x.den == 1) & (x.num == y)
==(x::Integer , y::Rational) = y == x
<( x::Rational, y::Integer ) = x.num < widemul(x.den,y)
<( x::Integer , y::Rational) = widemul(x,y.den) < y.num
<=(x::Rational, y::Integer ) = x.num <= widemul(x.den,y)
<=(x::Integer , y::Rational) = widemul(x,y.den) <= y.num
# A finite float equals a rational only if the denominator is a power of two
# (count_ones == 1); multiplying x by such a den only shifts the exponent,
# so the comparison x*q.den == q.num is exact.
function ==(x::AbstractFloat, q::Rational)
    if isfinite(x)
        (count_ones(q.den) == 1) & (x*q.den == q.num)
    else
        x == q.num/q.den
    end
end
==(q::Rational, x::AbstractFloat) = x == q
for rel in (:<,:<=,:cmp)
for (Tx,Ty) in ((Rational,AbstractFloat), (AbstractFloat,Rational))
@eval function ($rel)(x::$Tx, y::$Ty)
if isnan(x) || isnan(y)
$(rel == :cmp ? :(throw(DomainError())) : :(return false))
end
xn, xp, xd = decompose(x)
yn, yp, yd = decompose(y)
if xd < 0
xn = -xn
xd = -xd
end
if yd < 0
yn = -yn
yd = -yd
end
xc, yc = widemul(xn,yd), widemul(yn,xd)
xs, ys = sign(xc), sign(yc)
if xs != ys
return ($rel)(xs,ys)
elseif xs == 0
# both are zero or ±Inf
return ($rel)(xn,yn)
end
xb, yb = ndigits0z(xc,2) + xp, ndigits0z(yc,2) + yp
if xb == yb
xc, yc = promote(xc,yc)
if xp > yp
xc = (xc<<(xp-yp))
else
yc = (yc<<(yp-xp))
end
return ($rel)(xc,yc)
else
return xc > 0 ? ($rel)(xb,yb) : ($rel)(yb,xb)
end
end
end
end
# needed to avoid ambiguity between ==(x::Real, z::Complex) and ==(x::Rational, y::Number)
==(z::Complex , x::Rational) = isreal(z) & (real(z) == x)
==(x::Rational, z::Complex ) = isreal(z) & (real(z) == x)
for op in (:div, :fld, :cld)
@eval begin
function ($op)(x::Rational, y::Integer )
xn,yn = divgcd(x.num,y)
($op)(xn, checked_mul(x.den,yn))
end
function ($op)(x::Integer, y::Rational)
xn,yn = divgcd(x,y.num)
($op)(checked_mul(xn,y.den), yn)
end
function ($op)(x::Rational, y::Rational)
xn,yn = divgcd(x.num,y.num)
xd,yd = divgcd(x.den,y.den)
($op)(checked_mul(xn,yd), checked_mul(xd,yn))
end
end
end
trunc{T}(::Type{T}, x::Rational) = convert(T,div(x.num,x.den))
floor{T}(::Type{T}, x::Rational) = convert(T,fld(x.num,x.den))
ceil{ T}(::Type{T}, x::Rational) = convert(T,cld(x.num,x.den))
function round{T, Tr}(::Type{T}, x::Rational{Tr}, ::RoundingMode{:Nearest})
if denominator(x) == zero(Tr) && T <: Integer
throw(DivideError())
elseif denominator(x) == zero(Tr)
return convert(T, copysign(one(Tr)//zero(Tr), numerator(x)))
end
q,r = divrem(numerator(x), denominator(x))
s = q
if abs(r) >= abs((denominator(x)-copysign(Tr(4), numerator(x))+one(Tr)+iseven(q))>>1 + copysign(Tr(2), numerator(x)))
s += copysign(one(Tr),numerator(x))
end
convert(T, s)
end
round{T}(::Type{T}, x::Rational) = round(T, x, RoundNearest)
function round{T, Tr}(::Type{T}, x::Rational{Tr}, ::RoundingMode{:NearestTiesAway})
if denominator(x) == zero(Tr) && T <: Integer
throw(DivideError())
elseif denominator(x) == zero(Tr)
return convert(T, copysign(one(Tr)//zero(Tr), numerator(x)))
end
q,r = divrem(numerator(x), denominator(x))
s = q
if abs(r) >= abs((denominator(x)-copysign(Tr(4), numerator(x))+one(Tr))>>1 + copysign(Tr(2), numerator(x)))
s += copysign(one(Tr),numerator(x))
end
convert(T, s)
end
function round{T, Tr}(::Type{T}, x::Rational{Tr}, ::RoundingMode{:NearestTiesUp})
if denominator(x) == zero(Tr) && T <: Integer
throw(DivideError())
elseif denominator(x) == zero(Tr)
return convert(T, copysign(one(Tr)//zero(Tr), numerator(x)))
end
q,r = divrem(numerator(x), denominator(x))
s = q
if abs(r) >= abs((denominator(x)-copysign(Tr(4), numerator(x))+one(Tr)+(numerator(x)<0))>>1 + copysign(Tr(2), numerator(x)))
s += copysign(one(Tr),numerator(x))
end
convert(T, s)
end
function round{T}(::Type{T}, x::Rational{Bool})
if denominator(x) == false && issubtype(T, Union{Integer, Bool})
throw(DivideError())
end
convert(T, x)
end
round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode{:Nearest}) = round(T, x)
round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode{:NearestTiesAway}) = round(T, x)
round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode{:NearestTiesUp}) = round(T, x)
round{T}(::Type{T}, x::Rational{Bool}, ::RoundingMode) = round(T, x)
trunc{T}(x::Rational{T}) = Rational(trunc(T,x))
floor{T}(x::Rational{T}) = Rational(floor(T,x))
ceil{ T}(x::Rational{T}) = Rational(ceil(T,x))
round{T}(x::Rational{T}) = Rational(round(T,x))
function ^(x::Rational, n::Integer)
n >= 0 ? power_by_squaring(x,n) : power_by_squaring(inv(x),-n)
end
^(x::Number, y::Rational) = x^(y.num/y.den)
^{T<:AbstractFloat}(x::T, y::Rational) = x^convert(T,y)
^{T<:AbstractFloat}(x::Complex{T}, y::Rational) = x^convert(T,y)
^(z::Complex{<:Rational}, n::Bool) = n ? z : one(z) # to resolve ambiguity
function ^(z::Complex{<:Rational}, n::Integer)
n >= 0 ? power_by_squaring(z,n) : power_by_squaring(inv(z),-n)
end
iszero(x::Rational) = iszero(numerator(x))
function lerpi(j::Integer, d::Integer, a::Rational, b::Rational)
((d-j)*a)/d + (j*b)/d
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
770,
2393,
318,
257,
636,
286,
22300,
13,
13789,
318,
17168,
25,
2638,
1378,
73,
377,
498,
648,
13,
2398,
14,
43085,
198,
198,
7249,
46863,
90,
51,
27,
25,
46541,
92,
1279,
25,
6416,
198,
220,... | 1.998928 | 6,528 |
# Use baremodule to shave off a few KB from the serialized `.ji` file
baremodule CGAL2_jll
using Base
using Base: UUID
import JLLWrappers
JLLWrappers.@generate_main_file_header("CGAL2")
JLLWrappers.@generate_main_file("CGAL2", UUID("a133c068-ba04-5466-9207-ec1c2ac43820"))
end # module CGAL2_jll
| [
2,
5765,
6247,
21412,
284,
34494,
572,
257,
1178,
14204,
422,
262,
11389,
1143,
4600,
13,
7285,
63,
2393,
198,
49382,
21412,
29925,
1847,
17,
62,
73,
297,
198,
3500,
7308,
198,
3500,
7308,
25,
471,
27586,
198,
11748,
449,
3069,
36918,... | 2.483333 | 120 |
function warning_callback(message::String)
global TEST_CALLBACK = true
error("checking that error are supported")
end
@testset "Errors" begin
err = ChemfilesError("oops")
iobuf = IOBuffer()
show(iobuf, err)
@test String(iobuf.data[1:(19 + length(err.message))]) == "\"Chemfiles error: oops\""
Chemfiles.clear_errors()
@test Chemfiles.last_error() == ""
remove_chemfiles_warning() do
@test_throws ChemfilesError Residue(Topology(), 3)
end
@test_throws UndefVarError TEST_CALLBACK == false
@test Chemfiles.last_error() == "residue index out of bounds in topology: we have 0 residues, but the index is 3"
Chemfiles.clear_errors()
@test Chemfiles.last_error() == ""
Chemfiles.set_warning_callback(warning_callback)
@test_throws ChemfilesError Residue(Topology(), 3)
@test TEST_CALLBACK == true
Chemfiles.set_warning_callback(Chemfiles.__default_warning_callback)
end
@testset "Configuration" begin
config = joinpath(@__DIR__, "data", "config.toml")
Chemfiles.add_configuration(config)
trajectory = joinpath(@__DIR__, "data", "water.xyz")
frame = read(Trajectory(trajectory))
@test name(Atom(frame, 9)) == "Oz"
@test type(Atom(frame, 9)) == "F"
end
@testset "Format list" begin
for metadata in Chemfiles.format_list()
if metadata.name == "XYZ"
@test metadata.description == "XYZ text format"
@test metadata.extension == ".xyz"
@test metadata.reference == "https://openbabel.org/wiki/XYZ"
@test metadata.read == true
@test metadata.write == true
@test metadata.memory == true
@test metadata.positions == true
@test metadata.velocities == false
@test metadata.unit_cell == true
@test metadata.atoms == true
@test metadata.bonds == false
@test metadata.residues == false
end
if metadata.name == "LAMMPD Data"
@test metadata.extension === nothing
end
end
end
@testset "Guess format" begin
@test Chemfiles.guess_format("file.xyz.gz") == "XYZ / GZ"
@test Chemfiles.guess_format("file.nc") == "Amber NetCDF"
end
| [
8818,
6509,
62,
47423,
7,
20500,
3712,
10100,
8,
198,
220,
220,
220,
3298,
43001,
62,
34,
7036,
31098,
796,
2081,
198,
220,
220,
220,
4049,
7203,
41004,
326,
4049,
389,
4855,
4943,
198,
437,
198,
198,
31,
9288,
2617,
366,
9139,
5965... | 2.460177 | 904 |
<filename>test/runtests.jl<gh_stars>1-10
using PySerial
using Base.Test
@test typeof(list_ports()) == Array{Any,1}
| [
27,
34345,
29,
9288,
14,
81,
2797,
3558,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
9485,
32634,
198,
3500,
7308,
13,
14402,
198,
198,
31,
9288,
2099,
1659,
7,
4868,
62,
3742,
28955,
6624,
15690,
90,
7149,
11,
16,
... | 2.521739 | 46 |
using KiteConnect
using Test
@test_throws ArgumentError KiteConnect.ltp("INFY")
| [
198,
3500,
509,
578,
13313,
198,
3500,
6208,
198,
198,
31,
9288,
62,
400,
8516,
45751,
12331,
509,
578,
13313,
13,
2528,
79,
7203,
1268,
43833,
4943,
628
] | 2.964286 | 28 |
# ----------------------
# -- read --
# ----------------------
function read_nodenum(skipnum)
"""
xmax : 仮想セルも含めたnodeのxの数
ymax : 仮想セルも含めたnodeのyの数
"""
fff=[]
open("grid/nodesnum", "r") do f
fff=read(f,String)
end
fff=split(fff,"\n",keepempty=false)
num_nodes=length(fff)-skipnum
for i in 1+skipnum:length(fff)
fff[i]=replace(fff[i]," \r" => "")
end
temp = split(fff[2]," ")
xmax = parse(Int64,temp[1])
ymax = parse(Int64,temp[2])
return xmax, ymax
end
function read_nodes(skipnum,xmax,ymax)
"""
nodes[i][j][k]
i : x点の番号
j : y点の番号
k=1 : 点のx座標
k=2 : 点のy座標
"""
fff=[]
open("grid/nodes", "r") do f
fff=read(f,String)
end
fff=split(fff,"\n",keepempty=false)
num_nodes=length(fff)-skipnum
for i in 1+skipnum:length(fff)
fff[i]=replace(fff[i]," \r" => "")
end
nodes = zeros(xmax,ymax,2)
for i in 1:num_nodes
temp=split(fff[i+skipnum]," ")
xnum = parse(Int64,temp[1])
ynum = parse(Int64,temp[2])
nodes[xnum,ynum,1]=parse(Float64,temp[3])
nodes[xnum,ynum,2]=parse(Float64,temp[4])
end
return nodes
end
function read_nodes_vtk(skipnum)
fff=[]
open("grid/nodes_forvtk", "r") do f
fff=read(f,String)
end
fff=split(fff,"\n",keepempty=false)
num_nodes=length(fff)-skipnum
for i in 1+skipnum:length(fff)
fff[i]=replace(fff[i]," \r" => "")
end
nodes=zeros(num_nodes,3)
for i in 1:num_nodes
temp=split(fff[i+skipnum]," ")
# x = parse(Float64,temp[1])
# y = parse(Float64,temp[2])
# z = parse(Float64,temp[3])
x = parse(Float64,temp[2])
y = parse(Float64,temp[3])
z = 0.0
nodes[i,1] = x
nodes[i,2] = y
nodes[i,3] = z
end
return nodes
end
function read_elements_vtk(skipnum)
fff=[]
open("grid/element_forvtk", "r") do f
fff=read(f,String)
end
fff=split(fff,"\n",keepempty=false)
num_elements=length(fff)-skipnum
for i in 1+skipnum:length(fff)
fff[i]=replace(fff[i]," \r" => "")
end
elements = zeros(Int64,num_elements,4)
for i in 1:num_elements
temp=split(fff[i+skipnum]," ")
elements[i,1] = parse(Int64,temp[2])
elements[i,2] = parse(Int64,temp[3])
elements[i,3] = parse(Int64,temp[4])
elements[i,4] = parse(Int64,temp[5])
end
return elements
end
function read_result(skipnum)
fff=[]
open("test333.dat", "r") do f
fff=read(f,String)
end
fff=split(fff,"\n",keepempty=false)
num_point=length(fff)-skipnum
for i in 1+skipnum:length(fff)
fff[i]=replace(fff[i]," \r" => "")
end
readQ = zeros(num_point,5)
for i in 1:num_point
temp=split(fff[i+skipnum]," ")
k = 1
for j in 1:length(temp)
if temp[j] != ""
readQ[i,k] = parse(Float64,temp[j])
k += 1
end
end
end
return readQ
end
function read_allgrid()
skip=1
xmax,ymax = read_nodenum(skip)
nodes = read_nodes(skip,xmax,ymax)
nodes_vtk = read_nodes_vtk(skip)
elements = read_elements_vtk(skip)
readQ = read_result(skip)
println("fin read grid")
return xmax,ymax,nodes,nodes_vtk,elements,readQ
end
| [
2,
41436,
438,
198,
2,
1377,
1100,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1377,
198,
2,
41436,
438,
198,
8818,
1100,
62,
77,
375,
44709,
7,
48267,
22510,
8,
198,
220,
220,
220,
37227,
220,
198,
220,
220,
220... | 1.837061 | 1,878 |
# This file describes how we decide which logger (e.g. LogText vs LogValue vs LogHistograms)
# to use for what data, and any preprocessing
"""
preprocess(name, val, data)
This method takes a tag `name` and the value `val::T` pair. If type `T` can be
serialized to TensorBoard then the pair is pushed to `data`, otherwise it should
call `preprocess` recursively with some simpler types, until a serializable
type is finally hit.
For a struct, it calls preprocess on every field.
"""
function preprocess(name, val::T, data) where T
if isstructtype(T)
fn = logable_propertynames(val)
for f=fn
prop = getfield(val, f)
preprocess(name*"/$f", prop, data)
end
else
# If we do not know how to serialize a type, then
# it will be simply logged as text
push!(data, name=>val)
end
data
end
"""
logable_propertynames(val::Any)
Returns a tuple with the name of the fields of the structure `val` that
should be logged to TensorBoard. This function should be overridden when
you want TensorBoard to ignore some fields in a structure when logging
it. The default behaviour is to return the same result as `propertynames`.
See also: [`Base.propertynames`](@ref)
"""
logable_propertynames(val::Any) = propertynames(val)
## Default unpacking of key-value dictionaries
function preprocess(name, dict::AbstractDict, data)
for (key, val) in dict
# convert any key into a string, via interpolating it
preprocess("$name/$key", val, data)
end
return data
end
## Default behaviours
########## For things going to LogImage ##############################
function preprocess(name, img::AbstractArray{<:Colorant}, data)
# If it has three dimensions (and we don't have 3D monitors) we log several
# 2D slices under the same tag so that TB shows a slider along the z direction.
dimensions = ndims(img)
if dimensions == 3
#3rd is channel dim as observed in testimages
channels = size(img, 3)
for c in 1:channels
preprocess(name, convert(PngImage, img[:, :, c]), data)
end
else
preprocess(name, convert(PngImage, img), data)
end
return data
end
preprocess(name, val::PngImage, data) = push!(data, name=>val)
summary_impl(name, value::PngImage) = image_summary(name, value)
########## For things going to LogText ##############################
preprocess(name, val::AbstractString, data) where T<:String = push!(data, name=>val)
summary_impl(name, value::Any) = text_summary(name, value)
########## For things going to LogHistograms ########################
# Only consider 1D histograms for histogram plotting
preprocess(name, hist::StatsBase.Histogram{T,1}, data) where T = push!(data, name=>hist)
summary_impl(name, hist::StatsBase.Histogram) = histogram_summary(name, hist)
# TODO: maybe deprecate? tuple means histogram (only if bins/weights match)
function preprocess(name, (bins,weights)::Tuple{AbstractVector,AbstractVector}, data)
# if ... this is an histogram
if length(bins) == length(weights)+1
return preprocess(name, Histogram(bins,weights), data)
end
preprocess(name*"/1", bins, data)
preprocess(name*"/2", weights, data)
end
preprocess(name, val::AbstractArray{<:Real}, data) = push!(data, name=>val)
summary_impl(name, val::AbstractArray{<:Real}) = histogram_arr_summary(name, val)
# Split complex numbers into real/complex pairs
preprocess(name, val::AbstractArray{<:Complex}, data) = push!(data, name*"/re"=>real.(val), name*"/im"=>imag.(val))
########## For things going to LogValue #############################
preprocess(name, val::Real, data) = push!(data, name=>val)
summary_impl(name, value::Real) = scalar_summary(name, value)
# Split complex numbers into real/complex pairs
preprocess(name, val::Complex, data) = push!(data, name*"/re"=>real(val), name*"/im"=>imag(val))
| [
2,
770,
2393,
8477,
703,
356,
5409,
543,
49706,
357,
68,
13,
70,
13,
5972,
8206,
3691,
5972,
11395,
3691,
5972,
13749,
26836,
8,
198,
2,
284,
779,
329,
644,
1366,
11,
290,
597,
662,
36948,
198,
198,
37811,
198,
220,
220,
220,
662,... | 2.941485 | 1,333 |
<filename>abc171-180/abc171/a.jl<gh_stars>0
function solve()
a = readline()
a == uppercase(a) ? "A" : "a"
end
println(solve())
| [
27,
34345,
29,
39305,
27192,
12,
15259,
14,
39305,
27192,
14,
64,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
8818,
8494,
3419,
198,
220,
220,
220,
257,
796,
1100,
1370,
3419,
198,
220,
220,
220,
257,
6624,
334,
39921,
589,
7,
64,... | 2.15873 | 63 |
<gh_stars>0
using BinaryBuilder, Pkg
name = "GLPK"
version = v"5.0"
# Collection of sources required to build GLPK
sources = [
ArchiveSource("http://ftpmirror.gnu.org/gnu/glpk/glpk-$(version.major).$(version.minor).tar.gz",
"4a1013eebb50f728fc601bdd833b0b2870333c3b3e5a816eeba921d95bec6f15"),
]
# Bash recipe for building across all platforms
script = raw"""
cd $WORKSPACE/srcdir/glpk*
if [[ ${target} == *mingw* ]]; then
export CPPFLAGS="-I${prefix}/include -D__WOE__=1"
else
export CPPFLAGS="-I${prefix}/include"
fi
autoreconf -vi
./configure --prefix=${prefix} --host=${target} --build=${MACHTYPE} --with-gmp
make -j${nproc}
make install
"""
# Build for all platforms
platforms = supported_platforms()
# The products that we will ensure are always built
products = [
LibraryProduct("libglpk", :libglpk)
]
# Dependencies that must be installed before this package can be built
dependencies = [
Dependency("GMP_jll", v"6.1.2"),
]
# Build the tarballs, and possibly a `build.jl` as well.
# Use the same preferred_gcc_version as GMP.
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies; preferred_gcc_version=v"6")
| [
27,
456,
62,
30783,
29,
15,
198,
3500,
45755,
32875,
11,
350,
10025,
198,
198,
3672,
796,
366,
8763,
40492,
1,
198,
9641,
796,
410,
1,
20,
13,
15,
1,
198,
198,
2,
12251,
286,
4237,
2672,
284,
1382,
10188,
40492,
198,
82,
2203,
7... | 2.624176 | 455 |
<gh_stars>10-100
module UI
import GLFW
using ModernGL
include("gltools.jl")
GLFW.Init()
# OS X-specific GLFW hints to initialize the correct version of OpenGL
@osx_only begin
GLFW.WindowHint(GLFW.CONTEXT_VERSION_MAJOR, 3)
GLFW.WindowHint(GLFW.CONTEXT_VERSION_MINOR, 2)
GLFW.WindowHint(GLFW.OPENGL_PROFILE, GLFW.OPENGL_CORE_PROFILE)
GLFW.WindowHint(GLFW.OPENGL_FORWARD_COMPAT, GL_TRUE)
end
# Create a windowed mode window and its OpenGL context
window = GLFW.CreateWindow(600, 600, "OpenGL Example")
# Make the window's context current
GLFW.MakeContextCurrent(window)
# The data for our triangle
data = GLfloat[
0.0, 0.5,
0.5, -0.5,
-0.5,-0.5
]
# Generate a vertex array and array buffer for our data
vao = glGenVertexArray()
glBindVertexArray(vao)
vbo = glGenBuffer()
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferData(GL_ARRAY_BUFFER, sizeof(data), data, GL_STATIC_DRAW)
# Create and initialize shaders
const vsh = """
#version 330
in vec2 position;
void main() {
gl_Position = vec4(position, 0.0, 1.0);
}
"""
const fsh = """
#version 330
out vec4 outColor;
void main() {
outColor = vec4(1.0, 1.0, 1.0, 1.0);
}
"""
vertexShader = createShader(vsh, GL_VERTEX_SHADER)
fragmentShader = createShader(fsh, GL_FRAGMENT_SHADER)
program = createShaderProgram(vertexShader, fragmentShader)
glUseProgram(program)
positionAttribute = glGetAttribLocation(program, "position");
glEnableVertexAttribArray(positionAttribute)
glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, false, 0, 0)
t = 0
# Loop until the user closes the window
while !GLFW.WindowShouldClose(window)
# Pulse the background blue
t += 1
glClearColor(0.0, 0.0, 0.5 * (1 + sin(t * 0.02)), 1.0)
glClear(GL_COLOR_BUFFER_BIT)
# Draw our triangle
glDrawArrays(GL_TRIANGLES, 0, 3)
# Swap front and back buffers
GLFW.SwapBuffers(window)
# Poll for and process events
GLFW.PollEvents()
end
GLFW.Terminate()
end | [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
21412,
12454,
198,
220,
198,
11748,
10188,
24160,
198,
3500,
12495,
8763,
198,
198,
17256,
7203,
70,
2528,
10141,
13,
20362,
4943,
198,
198,
8763,
24160,
13,
31768,
3419,
198,
220,
198,
2,
... | 2.420168 | 833 |
abstract type AbstractBC{T} <: AbstractDiffEqAffineOperator{T} end
abstract type AtomicBC{T} <: AbstractBC{T} end
"""
Robin, General, and in general Neumann, Dirichlet and Bridge BCs
are not necessarily linear operators. Instead, they are affine
operators, with a constant term Q*x = Qa*x + Qb.
"""
abstract type AffineBC{T} <: AtomicBC{T} end
struct NeumannBC{N} end
struct Neumann0BC{N} end
struct DirichletBC{N} end
struct Dirichlet0BC{N} end
"""
q = PeriodicBC{T}()
Qx, Qy, ... = PeriodicBC{T}(size(u)) #When all dimensions are to be extended with a periodic boundary condition.
-------------------------------------------------------------------------------------
Creates a periodic boundary condition, where the lower index end of some u is extended with the upper index end and vice versa.
It is not reccomended to concretize this BC type in to a BandedMatrix, since the vast majority of bands will be all 0s. SpatseMatrix concretization is reccomended.
"""
struct PeriodicBC{T} <: AtomicBC{T}
PeriodicBC(T::Type) = new{T}()
end
"""
q = RobinBC(left_coefficients, right_coefficients, dx::T, approximation_order) where T # When this BC extends a dimension with a uniform step size
q = RobinBC(left_coefficients, right_coefficients, dx::Vector{T}, approximation_order) where T # When this BC extends a dimension with a non uniform step size. dx should be the vector of step sizes for the whole dimension
-------------------------------------------------------------------------------------
The variables in l are [αl, βl, γl], and correspond to a BC of the form αl*u(0) + βl*u'(0) = γl imposed on the lower index boundary.
The variables in r are [αl, βl, γl], and correspond to an analagous boundary on the higher index end.
Implements a robin boundary condition operator Q that acts on a vector to give an extended vector as a result
Referring to (https://github.com/JuliaDiffEq/DiffEqOperators.jl/files/3267835/ghost_node.pdf)
Write vector b̄₁ as a vertical concatanation with b0 and the rest of the elements of b̄ ₁, denoted b̄`₁, the same with ū into u0 and ū`. b̄`₁ = b̄`_2 = fill(β/Δx, length(stencil)-1)
Pull out the product of u0 and b0 from the dot product. The stencil used to approximate u` is denoted s. b0 = α+(β/Δx)*s[1]
Rearrange terms to find a general formula for u0:= -b̄`₁̇⋅ū`/b0 + γ/b0, which is dependent on ū` the robin coefficients and Δx.
The non identity part of Qa is qa:= -b`₁/b0 = -β.*s[2:end]/(α+β*s[1]/Δx). The constant part is Qb = γ/(α+β*s[1]/Δx)
do the same at the other boundary (amounts to a flip of s[2:end], with the other set of boundary coeffs)
"""
struct RobinBC{T, V<:AbstractVector{T}} <: AffineBC{T}
a_l::V
b_l::T
a_r::V
b_r::T
function RobinBC(l::NTuple{3,T}, r::NTuple{3,T}, dx::T, order = 1) where {T}
αl, βl, γl = l
αr, βr, γr = r
s = calculate_weights(1, one(T), Array(one(T):convert(T,order+1))) #generate derivative coefficients about the boundary of required approximation order
a_l = -s[2:end]./(αl*dx/βl + s[1])
a_r = s[end:-1:2]./(αr*dx/βr - s[1]) # for other boundary stencil is flippedlr with *opposite sign*
b_l = γl/(αl+βl*s[1]/dx)
b_r = γr/(αr-βr*s[1]/dx)
return new{T, typeof(a_l)}(a_l, b_l, a_r, b_r)
end
function RobinBC(l::Union{NTuple{3,T},AbstractVector{T}}, r::Union{NTuple{3,T},AbstractVector{T}}, dx::AbstractVector{T}, order = 1) where {T}
αl, βl, γl = l
αr, βr, γr = r
s_index = Array(one(T):convert(T,order+1))
dx_l, dx_r = dx[1:length(s_index)], dx[(end-length(s_index)+1):end]
s = calculate_weights(1, one(T), s_index) #generate derivative coefficients about the boundary of required approximation order
denom_l = αl+βl*s[1]/dx_l[1]
denom_r = αr-βr*s[1]/dx_r[end]
a_l = -βl.*s[2:end]./(denom_l*dx_l[2:end])
a_r = βr.*s[end:-1:2]./(denom_r*dx_r[1:(end-1)]) # for other boundary stencil is flippedlr with *opposite sign*
b_l = γl/denom_l
b_r = γr/denom_r
return new{T, typeof(a_l)}(a_l, b_l, a_r, b_r)
end
end
stencil(q::AffineBC{T}, N::Int) where T = ([transpose(q.a_l) transpose(zeros(T, N-length(q.a_l)))], [transpose(zeros(T, N-length(q.a_r))) transpose(q.a_r)])
affine(q::AffineBC) = (q.b_l, q.b_r)
stencil(q::PeriodicBC{T}, N::Int) where T= ([transpose(zeros(T, N-1)) one(T)], [one(T) transpose(zeros(T, N-1))])
affine(q::PeriodicBC{T}) where T = (zero(T), zero(T))
"""
q = GeneralBC(α_leftboundary, α_rightboundary, dx::T, approximation_order)
-------------------------------------------------------------------------------------
Implements a generalization of the Robin boundary condition, where α is a vector of coefficients.
Represents a condition of the form α[1] + α[2]u[0] + α[3]u'[0] + α[4]u''[0]+... = 0
Implemented in a similar way to the RobinBC (see above).
This time there are multiple stencils for multiple derivative orders - these can be written as a matrix S.
All components that multiply u(0) are factored out, turns out to only involve the first column of S, s̄0. The rest of S is denoted S`. the coeff of u(0) is s̄0⋅ᾱ[3:end] + α[2].
the remaining components turn out to be ᾱ[3:end]⋅(S`ū`) or equivalently (transpose(ᾱ[3:end])*S`)⋅ū`. Rearranging, a stencil q_a to be dotted with ū` upon extension can readily be found, along with a constant component q_b
"""
struct GeneralBC{T, L<:AbstractVector{T}, R<:AbstractVector{T}} <:AffineBC{T}
a_l::L
b_l::T
a_r::R
b_r::T
function GeneralBC(αl::AbstractVector{T}, αr::AbstractVector{T}, dx::T, order = 1) where {T}
nl = length(αl)
nr = length(αr)
S_l = zeros(T, (nl-2, order+nl-2))
S_r = zeros(T, (nr-2, order+nr-2))
for i in 1:(nl-2)
S_l[i,:] = [transpose(calculate_weights(i, one(T), Array(one(T):convert(T, order+i)))) transpose(zeros(T, Int(nl-2-i)))]./(dx^i) #am unsure if the length of the dummy_x is correct here
end
for i in 1:(nr-2)
S_r[i,:] = [transpose(calculate_weights(i, convert(T, order+i), Array(one(T):convert(T, order+i)))) transpose(zeros(T, Int(nr-2-i)))]./(dx^i)
end
s0_l = S_l[:,1] ; Sl = S_l[:,2:end]
s0_r = S_r[:,end] ; Sr = S_r[:,(end-1):-1:1]
denoml = αl[2] .+ αl[3:end] ⋅ s0_l
denomr = αr[2] .+ αr[3:end] ⋅ s0_r
a_l = -transpose(transpose(αl[3:end]) * Sl) ./denoml
a_r = reverse(-transpose(transpose(αr[3:end]) * Sr) ./denomr)
b_l = -αl[1]/denoml
b_r = -αr[1]/denomr
new{T, typeof(a_l), typeof(a_r)}(a_l,b_l,a_r,b_r)
end
function GeneralBC(αl::AbstractVector{T}, αr::AbstractVector{T}, dx::AbstractVector{T}, order = 1) where {T}
nl = length(αl)
nr = length(αr)
dx_l, dx_r = (dx[1:(order+nl-2)], reverse(dx[(end-order-nr+3):end]))
S_l = zeros(T, (nl-2, order+nl-2))
S_r = zeros(T, (nr-2, order+nr-2))
for i in 1:(nl-2)
S_l[i,:] = [transpose(calculate_weights(i, one(T), Array(one(T):convert(T, order+i)))) transpose(zeros(T, Int(nl-2-i)))]./(dx_l.^i)
end
for i in 1:(nr-2)
S_r[i,:] = [transpose(calculate_weights(i, convert(T, order+i), Array(one(T):convert(T, order+i)))) transpose(zeros(T, Int(nr-2-i)))]./(dx_r.^i)
end
s0_l = S_l[:,1] ; Sl = S_l[:,2:end]
s0_r = S_r[:,end] ; Sr = S_r[:,(end-1):-1:1]
denoml = αl[2] .+ αl[3:end] ⋅ s0_l
denomr = αr[2] .+ αr[3:end] ⋅ s0_r
a_l = -transpose(transpose(αl[3:end]) * Sl) ./denoml
a_r = reverse(-transpose(transpose(αr[3:end]) * Sr) ./denomr)
b_l = -αl[1]/denoml
b_r = -αr[1]/denomr
new{T, typeof(a_l), typeof(a_r)}(a_l,b_l,a_r,b_r)
end
end
#implement Neumann and Dirichlet as special cases of RobinBC
NeumannBC(α::NTuple{2,T}, dx::Union{AbstractVector{T}, T}, order = 1) where T = RobinBC((zero(T), one(T), α[1]), (zero(T), one(T), α[2]), dx, order)
DirichletBC(αl::T, αr::T) where T = RobinBC((one(T), zero(T), αl), (one(T), zero(T), αr), one(T), 2one(T) )
#specialized constructors for Neumann0 and Dirichlet0
Dirichlet0BC(T::Type) = DirichletBC(zero(T), zero(T))
Neumann0BC(dx::Union{AbstractVector{T}, T}, order = 1) where T = NeumannBC((zero(T), zero(T)), dx, order)
# other acceptable argument signatures
#RobinBC(al::T, bl::T, cl::T, dx_l::T, ar::T, br::T, cr::T, dx_r::T, order = 1) where T = RobinBC([al,bl, cl], [ar, br, cr], dx_l, order)
Base.:*(Q::AffineBC, u::AbstractVector) = BoundaryPaddedVector(Q.a_l ⋅ u[1:length(Q.a_l)] + Q.b_l, Q.a_r ⋅ u[(end-length(Q.a_r)+1):end] + Q.b_r, u)
Base.:*(Q::PeriodicBC, u::AbstractVector) = BoundaryPaddedVector(u[end], u[1], u)
Base.size(Q::AtomicBC) = (Inf, Inf) #Is this nessecary?
gettype(Q::AbstractBC{T}) where T = T
| [
397,
8709,
2099,
27741,
2749,
90,
51,
92,
1279,
25,
27741,
28813,
36,
80,
35191,
500,
18843,
1352,
90,
51,
92,
886,
628,
198,
397,
8709,
2099,
28976,
2749,
90,
51,
92,
1279,
25,
27741,
2749,
90,
51,
92,
886,
198,
198,
37811,
198,
... | 2.220544 | 3,972 |
const HubbardConf = Array{Int8, 2} # conf === hsfield === discrete Hubbard Stratonovich field (Hirsch field)
const HubbardDistribution = Int8[-1,1]
"""
Famous attractive (negative U) Hubbard model on a cubic lattice.
Discrete Hubbard Stratonovich transformation (Hirsch transformation) in the density/charge channel,
such that HS-field is real.
HubbardModelAttractive(; dims, L[, kwargs...])
Create an attractive Hubbard model on `dims`-dimensional cubic lattice
with linear system size `L`. Additional allowed `kwargs` are:
* `mu::Float64=0.0`: chemical potential
* `U::Float64=1.0`: onsite interaction strength, "Hubbard U"
* `t::Float64=1.0`: hopping energy
"""
@with_kw_noshow struct HubbardModelAttractive{C<:AbstractCubicLattice} <: Model
# user mandatory
dims::Int
L::Int
# user optional
mu::Float64 = 0.0
U::Float64 = 1.0
@assert U >= 0. "U must be positive."
t::Float64 = 1.0
# non-user fields
l::C = choose_lattice(HubbardModelAttractive, dims, L)
neighs::Matrix{Int} = neighbors_lookup_table(l)
flv::Int = 1
end
function choose_lattice(::Type{HubbardModelAttractive}, dims::Int, L::Int)
if dims == 1
return Chain(L)
elseif dims == 2
return SquareLattice(L)
else
return CubicLattice(dims, L)
end
end
"""
HubbardModelAttractive(params::Dict)
HubbardModelAttractive(params::NamedTuple)
Create an attractive Hubbard model with (keyword) parameters as specified in the
dictionary/named tuple `params`.
"""
HubbardModelAttractive(params::Dict{Symbol, T}) where T = HubbardModelAttractive(; params...)
HubbardModelAttractive(params::NamedTuple) = HubbardModelAttractive(; params...)
# cosmetics
import Base.summary
import Base.show
Base.summary(model::HubbardModelAttractive) = "$(model.dims)D attractive Hubbard model"
Base.show(io::IO, model::HubbardModelAttractive) = print(io, "$(model.dims)D attractive Hubbard model, L=$(model.L) ($(model.l.sites) sites)")
Base.show(io::IO, m::MIME"text/plain", model::HubbardModelAttractive) = print(io, model)
# implement `Model` interface
@inline nsites(m::HubbardModelAttractive) = nsites(m.l)
# implement `DQMC` interface: mandatory
@inline Base.rand(::Type{DQMC}, m::HubbardModelAttractive, nslices::Int) = rand(HubbardDistribution, nsites(m), nslices)
"""
Calculates the hopping matrix \$T_{i, j}\$ where \$i, j\$ are
site indices.
Note that since we have a time reversal symmetry relating spin-up
to spin-down we only consider one spin sector (one flavor) for the attractive
Hubbard model in the DQMC simulation.
This isn't a performance critical method as it is only used once before the
actual simulation.
"""
function hopping_matrix(mc::DQMC, m::HubbardModelAttractive)
N = nsites(m)
neighs = m.neighs # row = up, right, down, left; col = siteidx
T = diagm(0 => fill(-m.mu, N))
# Nearest neighbor hoppings
@inbounds @views begin
for src in 1:N
for nb in 1:size(neighs,1)
trg = neighs[nb,src]
T[trg,src] += -m.t
end
end
end
return T
end
"""
Calculate the interaction matrix exponential `expV = exp(- power * delta_tau * V(slice))`
and store it in `result::Matrix`.
This is a performance critical method.
"""
@inline function interaction_matrix_exp!(mc::DQMC, m::HubbardModelAttractive,
result::Matrix, conf::HubbardConf, slice::Int, power::Float64=1.)
dtau = mc.p.delta_tau
lambda = acosh(exp(m.U * dtau/2))
result .= spdiagm(0 => exp.(sign(power) * lambda * conf[:,slice]))
nothing
end
@inline function propose_local(mc::DQMC, m::HubbardModelAttractive, i::Int, slice::Int, conf::HubbardConf)
# see for example dos Santos (2002)
greens = mc.s.greens
dtau = mc.p.delta_tau
lambda = acosh(exp(m.U * dtau/2))
ΔE_boson = -2. * lambda * conf[i, slice]
γ = exp(ΔE_boson) - 1
detratio = (1 + γ * (1 - greens[i,i]))^2 # squared because of two spin sectors.
return detratio, ΔE_boson, γ
end
@inline function accept_local!(mc::DQMC, m::HubbardModelAttractive, i::Int, slice::Int, conf::HubbardConf, delta, detratio, ΔE_boson::Float64)
greens = mc.s.greens
γ = delta
u = -greens[:, i]
u[i] += 1.
# TODO: OPT: speed check, maybe @views/@inbounds
greens .-= kron(u * 1. /(1 + γ * u[i]), transpose(γ * greens[i, :]))
conf[i, slice] *= -1
nothing
end
# implement DQMC interface: optional
"""
Green's function is real for the attractive Hubbard model.
"""
@inline greenseltype(::Type{DQMC}, m::HubbardModelAttractive) = Float64
"""
Calculate energy contribution of the boson, i.e. Hubbard-Stratonovich/Hirsch field.
"""
@inline function energy_boson(m::HubbardModelAttractive, hsfield::HubbardConf)
dtau = mc.p.delta_tau
lambda = acosh(exp(m.U * dtau/2))
return lambda * sum(hsfield)
end
include("observables.jl")
| [
9979,
34342,
18546,
796,
15690,
90,
5317,
23,
11,
362,
92,
1303,
1013,
24844,
289,
82,
3245,
24844,
28810,
34342,
4285,
13951,
18198,
2214,
357,
39,
47108,
2214,
8,
198,
9979,
34342,
20344,
3890,
796,
2558,
23,
58,
12,
16,
11,
16,
6... | 2.653783 | 1,837 |
<gh_stars>1-10
export expandModelNearest, getSimilarLinearModel, addAbsorbingLayer
export addAbsorbingLayer, smoothModel, smooth3
export velocityToSlowSquared,slowSquaredToVelocity,velocityToSlow,slowToSlowSquared,slowSquaredToSlow
export slowToLeveledSlowSquared,getModelInvNewton
using LinearAlgebra
using SparseArrays
using Statistics

using jInv.Mesh
"""
    slowToLeveledSlowSquared(s, mid=0.32, a=0.0, b=0.05)

Map a slowness model `s` to a "leveled" squared-slowness model: a smooth
tanh-shaped shift (of depth `(b-a)/2` around threshold `mid`) is added to the
slowness before squaring. Returns `(t, dt)` where `t` is the squared leveled
slowness and `dt` is the sparse diagonal Jacobian of `t` with respect to `s`.
"""
function slowToLeveledSlowSquared(s,mid::Float64 = 0.32,a::Float64 = 0.0,b::Float64 = 0.05)
    d = (b-a)./2.0;
    dinv = 200;  # sharpness of the tanh transition
    tt = dinv.*(mid .- s);
    # The original code applied `tanh`/`sech` and scalar +/- to whole arrays
    # without broadcasting, which errors in Julia >= 1.0; broadcast everything.
    t = -d.*(tanh.(tt) .+ 1) .+ a;
    dt = (dinv*d).*(sech.(tt)).^2 .+ 1;
    # up until here `t + s` is just the (leveled) slowness; now square it
    dt = spdiagm(0 => 2.0.*(t .+ s).*dt);  # modern pair syntax for spdiagm
    t = (t .+ s).^2;
    return t,dt
end
"""
    getModelInvNewton(m, modFun, m0=copy(m))

Numerically invert the model function `modFun` at `m` using damped Newton
iterations (at most 50, damping factor 0.4, stopping when the infinity-norm
residual drops below 1e-5). `modFun(s)` must return `(value, Jacobian)`.
Returns the approximate inverse `s` with `modFun(s)[1] ≈ m`.
"""
function getModelInvNewton(m,modFun::Function,m0 = copy(m))
    # m0: initial guess for the model inverse
    # modFun: the model function to invert.
    s = m0;
    for k=1:50
        (fs,dfs) = modFun(s);
        # `vecnorm` was removed in Julia 1.0; `norm` handles arrays directly
        err = norm(fs - m,Inf);
        println(err)
        if err < 1e-5
            break;
        end
        # damped Newton step (step length 0.4 for robustness)
        s = s - 0.4*(dfs\(fs - m));
    end
    return s;
end
"""
    velocityToSlowSquared(v)

Convert a velocity model `v` to squared slowness. Returns `(s, ds)` where
`s = 1 ./ v.^2` (with a tiny shift guarding against division by zero) and
`ds` is the sparse diagonal Jacobian `-2 ./ v.^3`.
"""
function velocityToSlowSquared(v::Array)
    safe_v = v .+ 1e-16              # avoid division by zero
    s = (1.0 ./ safe_v) .^ 2
    jac_diag = (-2.0) ./ (v[:] .^ 3) # derivative uses the raw velocity
    ds = sparse(Diagonal(jac_diag))
    return s, ds
end
"""
    slowSquaredToVelocity(s)

Convert squared slowness `s` to velocity. Returns `(m, dm)` where
`m = 1 ./ sqrt.(s)` and `dm` is the sparse diagonal Jacobian
`dm/ds = -0.5 .* s.^(-3/2)` (elementwise).
"""
function slowSquaredToVelocity(s::Array)
    m = 1.0./sqrt.(s.+1e-16);
    # The original computed `1.0/(vector)` without broadcasting; in Julia >= 1.0
    # that is not an elementwise reciprocal, so broadcast the whole expression.
    dm = sparse(Diagonal((-0.5).*(1.0./(s[:].^(3.0/2.0)))));
    return m,dm
end
"""
    velocityToSlow(v)

Convert a velocity model `v` to slowness. Returns `(s, ds)` where
`s = 1 ./ v` (guarded against division by zero) and `ds` is the sparse
diagonal Jacobian `-1 ./ v.^2`.
"""
function velocityToSlow(v::Array)
    shifted = v .+ 1e-16
    slowness = 1.0 ./ shifted
    dslow = sparse(Diagonal(-1.0 ./ (v[:] .^ 2)))
    return slowness, dslow
end
"""
    slowToSlowSquared(v)

Square a slowness model. Returns `(s, ds)` with `s = v.^2` and the sparse
diagonal Jacobian `ds = 2v`.
"""
function slowToSlowSquared(v::Array)
    squared = v .^ 2
    deriv = sparse(Diagonal(2.0 .* v[:]))
    return squared, deriv
end
"""
    slowSquaredToSlow(v)

Take the square root of a squared-slowness model. Returns `(s, ds)` with
`s = sqrt.(v)` and the sparse diagonal Jacobian `ds = 0.5 ./ sqrt.(v)`.
"""
function slowSquaredToSlow(v::Array)
    root = sqrt.(v)
    deriv = sparse(Diagonal(0.5 ./ root[:]))
    return root, deriv
end
"""
    expandModelNearest(m, n, ntarget)

Resample the model `m` (of logical size `n`) onto a grid of size `ntarget`
using nearest-neighbor (piecewise-constant) interpolation. Supports 2D and
3D arrays; returns the resampled `Float64` array.
"""
function expandModelNearest(m,n,ntarget)
    if length(size(m))==2
        mnew = zeros(Float64,ntarget[1],ntarget[2]);
        for j=1:ntarget[2]
            for i=1:ntarget[1]
                # `ceil` keeps source indices within 1:n for any ntarget
                jorig = convert(Int64,ceil((j/ntarget[2])*n[2]));
                iorig = convert(Int64,ceil((i/ntarget[1])*n[1]));
                mnew[i,j] = m[iorig,jorig];
            end
        end
    elseif length(size(m))==3
        mnew = zeros(Float64,ntarget[1],ntarget[2],ntarget[3]);
        for k=1:ntarget[3]
            for j=1:ntarget[2]
                for i=1:ntarget[1]
                    # use `ceil` like the 2D branch: the original used `floor`,
                    # which maps the leading samples to index 0 (BoundsError)
                    # whenever ntarget > n
                    korig = convert(Int64,ceil((k/ntarget[3])*n[3]));
                    jorig = convert(Int64,ceil((j/ntarget[2])*n[2]));
                    iorig = convert(Int64,ceil((i/ntarget[1])*n[1]));
                    mnew[i,j,k] = m[iorig,jorig,korig];
                end
            end
        end
    end
    return mnew
end
"""
    getSimilarLinearModel(m, mtop=0.0, mbottom=0.0)

Build a reference model with the same size as the velocity model `m` that
varies linearly with depth from `mtop` (top) to `mbottom` (bottom). When
`mtop`/`mbottom` are left at 0.0 they are estimated as means of near-top and
near-bottom patches of `m`. Supports 2D and 3D models; errors otherwise.
"""
function getSimilarLinearModel(m::Array{Float64},mtop::Float64=0.0,mbottom::Float64=0.0)
    # `m` here is assumed to be a velocity model.
    ndims_m = length(size(m))
    if ndims_m == 2
        (nx, nz) = size(m)
        m_vel = copy(m)
        if mtop == 0.0
            patch = m_vel[1:10, 5:6]
            mtop = Statistics.mean(patch[:])
            println("Mref top = ", mtop)
        end
        if mbottom == 0.0
            patch = m_vel[1:10, end-10:end]
            mbottom = Statistics.mean(patch[:])
            println("Mref bottom = ", mbottom)
        end
        # outer product: every row is the same top-to-bottom linear profile
        mref = ones(nx) * range(mtop, stop=mbottom, length=nz)'
    elseif ndims_m == 3
        (nx, ny, nz) = size(m)
        m_vel = copy(m)
        if mtop == 0.0
            patch = m_vel[1:10, :, 5:15]
            mtop = Statistics.mean(patch[:])
        end
        if mbottom == 0.0
            patch = m_vel[1:10, :, end-10:end]
            mbottom = Statistics.mean(patch[:])
        end
        lin = range(mtop, stop=mbottom, length=nz)
        mref = copy(m)
        plane = ones(nx, ny)
        for k = 1:nz
            mref[:, :, k] = lin[k] * plane
        end
    else
        error("Unhandled Dimensions")
    end
    return mref
end
"""
    addAbsorbingLayer2D(m, pad)

Pad the 2D model `m` with `pad` replicated cells on the left, right and bottom
boundaries (the top is left untouched). Returns `m` itself when `pad <= 0`.
"""
function addAbsorbingLayer2D(m::Array{Float64},pad::Int64)
    pad <= 0 && return m
    (n1, n2) = size(m)
    mnew = zeros(n1 + 2*pad, n2 + pad)
    # interior: the original model
    mnew[pad+1:end-pad, 1:end-pad] = m
    # replicate the first and last rows sideways
    mnew[1:pad, 1:end-pad] = repeat(m[[1],:], pad, 1)
    mnew[end-pad+1:end, 1:end-pad] = repeat(m[[end],:], pad, 1)
    # replicate the (already padded) last column downward
    mnew[:, end-pad+1:end] = repeat(mnew[:, end-pad], 1, pad)
    return mnew
end
# Pad the model `m` with `pad` replicated cells on the lateral boundaries and
# the bottom (top untouched) and return a correspondingly enlarged RegularMesh.
function addAbsorbingLayer(m::Array{Float64},Msh::RegularMesh,pad::Int64)
    if pad<=0
        return m,Msh;
    end
    Omega = Msh.domain;
    if length(size(m))==2
        mnew = addAbsorbingLayer2D(m,pad);
        MshNew = getRegularMesh([Omega[1],Omega[2] + 2*pad*Msh.h[1],Omega[3],Omega[4]+pad*Msh.h[2]],Msh.n.+[2*pad,pad]);
    elseif length(size(m))==3
        mnew = zeros(size(m,1)+2*pad,size(m,2)+2*pad,size(m,3)+pad);
        mnew[pad+1:end-pad,pad+1:end-pad,1:end-pad] = m;
        # replicate the boundary faces of `m` outward into the padded region
        extendedPlane1 = addAbsorbingLayer2D(reshape(m[1,:,:],size(m,2),size(m,3)),pad);
        extendedPlaneEnd = addAbsorbingLayer2D(reshape(m[end,:,:],size(m,2),size(m,3)),pad);
        for k=1:pad
            mnew[k,:,:] = extendedPlane1;
            mnew[end-k+1,:,:] = extendedPlaneEnd;
            mnew[pad+1:end-pad,end-k+1,1:end-pad] = m[:,end,:];
            mnew[pad+1:end-pad,k,1:end-pad] = m[:,1,:];
        end
        # replicate the deepest slice downward into the bottom pad
        t = mnew[:,:,end-pad];
        for k=1:pad
            mnew[:,:,end-pad+k] = t;
        end
        # NOTE(review): the third dimension's extent is grown using Msh.h[2]
        # (`Omega[6]+pad*Msh.h[2]`); this looks like it should be Msh.h[3] --
        # confirm before relying on meshes with anisotropic cell sizes.
        MshNew = getRegularMesh([Omega[1],Omega[2] + 2*pad*Msh.h[1],Omega[3],Omega[4] + 2*pad*Msh.h[2],Omega[5],Omega[6]+pad*Msh.h[2]],Msh.n.+[2*pad,2*pad,pad]);
    end
    return mnew,MshNew;
end
"""
    smoothModel(m, Mesh, times=0)

Smooth the 2D model `m` by applying `times` in-place sweeps of a 9-point
weighted average (center weight 2, the eight neighbors weight 1, total 10).
The model is first padded by `times` cells so the stencil reaches the original
boundary; the returned array is cropped back to the original size.
(`Mesh` is unused here; kept for interface parity with `smooth3`.)
"""
function smoothModel(m,Mesh,times = 0)
    ms = addAbsorbingLayer2D(m,times);
    for k=1:times
        for j = 2:size(ms,2)-1
            for i = 2:size(ms,1)-1
                # In-place sweep: already-updated values feed later stencil
                # evaluations (Gauss-Seidel style). The original summed
                # ms[i,j+1] twice and omitted ms[i+1,j+1], making the
                # 9-point average asymmetric; use the full neighbor set.
                @inbounds ms[i,j] = (2*ms[i,j] + (ms[i-1,j-1]+ms[i-1,j]+ms[i-1,j+1]+ms[i,j-1]+ms[i,j+1]+ms[i+1,j-1]+ms[i+1,j]+ms[i+1,j+1]))/10.0;
            end
        end
    end
    return ms[(times+1):(end-times),1:end-times];
end
# 3D smoothing: pad the model by a fixed 50-cell absorbing layer, then apply
# `times` in-place sweeps of a 27-point weighted average (center weight 2, the
# 26 neighbors weight 1; total weight 28). The sweep is in-place, so already
# smoothed values feed later stencil evaluations (Gauss-Seidel style).
function smooth3(m,Mesh,times = 0)
    pad = 50
    println("Smoothing ", times," times");
    ms, = addAbsorbingLayer(m, Mesh, pad)
    for k=1:times
        for l = 2:size(ms,3)-1
            for j = 2:size(ms,2)-1
                for i = 2:size(ms,1)-1
                    @inbounds ms[i,j,l] = (2*ms[i,j,l] +
                        (ms[i,j,l+1] + ms[i,j,l-1] + ms[i,j-1,l] + ms[i,j-1,l-1] + ms[i,j-1,l+1] + ms[i,j+1,l] + ms[i,j+1,l-1] + ms[i,j+1,l+1] +
                        ms[i-1,j,l] + ms[i-1,j,l+1] + ms[i-1,j,l-1] + ms[i-1,j-1,l] + ms[i-1,j-1,l-1] + ms[i-1,j-1,l+1] + ms[i-1,j+1,l] + ms[i-1,j+1,l-1] + ms[i-1,j+1,l+1] +
                        ms[i+1,j,l] + ms[i+1,j,l+1] + ms[i+1,j,l-1] + ms[i+1,j-1,l] +
                        ms[i+1,j-1,l-1] + ms[i+1,j-1,l+1] + ms[i+1,j+1,l] + ms[i+1,j+1,l-1] + ms[i+1,j+1,l+1]))/28.0;
                end
            end
        end
    end
    # crop the padding away again
    return ms[(pad+1):(end-pad),(pad+1):(end-pad),1:end-pad];
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
39344,
4292,
17633,
8199,
12423,
11,
651,
18925,
14993,
451,
17633,
11,
751,
24849,
273,
4623,
49925,
198,
39344,
751,
24849,
273,
4623,
49925,
11,
7209,
17633,
11,
7209,
18,
198,
39344,
15432... | 1.895064 | 3,059 |
using Test
using MarketData
using TimeSeries
using MarketTechnicals
@testset "Levels" begin
@testset "floor pivots" begin
# values verified by various website calculators
@test isapprox(values(floorpivots(ohlc)[:r3])[1] , 123.310, atol=.01)
@test isapprox(values(floorpivots(ohlc)[:r2])[1] , 119.52, atol=.01)
@test isapprox(values(floorpivots(ohlc)[:r1])[1] , 115.73, atol=.01)
@test isapprox(values(floorpivots(ohlc)[:pivot])[1], 108.71, atol=.01)
@test isapprox(values(floorpivots(ohlc)[:s1])[1] , 104.92, atol=.01)
@test isapprox(values(floorpivots(ohlc)[:s2])[1] , 97.900, atol=.01)
@test isapprox(values(floorpivots(ohlc)[:s3])[1] , 94.110, atol=.01)
@test timestamp(floorpivots(ohlc))[end] == Date(2001,12,31)
end
@testset "woodiespivots" begin
# @test_approx_eq 97.37500000000001 value(wr4)[2] # values NEED to be verified with online calculators
# @test_approx_eq 88.62500000000001 value(ws4)[2]
@test isapprox(values(woodiespivots(ohlc)[:r3])[1] , 124.465, atol=.01)
@test isapprox(values(woodiespivots(ohlc)[:r2])[1] , 118.480, atol=.01)
@test isapprox(values(woodiespivots(ohlc)[:r1])[1] , 113.655, atol=.01)
@test isapprox(values(woodiespivots(ohlc)[:pivot])[1], 107.670, atol=.01)
@test isapprox(values(woodiespivots(ohlc)[:s1])[1] , 102.845, atol=.01)
@test isapprox(values(woodiespivots(ohlc)[:s2])[1] , 96.8625, atol=.01)
@test isapprox(values(woodiespivots(ohlc)[:s3])[1] , 92.035, atol=.01)
@test timestamp(woodiespivots(ohlc))[end] == Date(2001,12,31)
end
end # @testset "Levels"
| [
3500,
6208,
198,
198,
3500,
5991,
6601,
198,
3500,
3862,
27996,
198,
198,
3500,
5991,
25574,
20155,
628,
198,
31,
9288,
2617,
366,
4971,
82,
1,
2221,
628,
198,
31,
9288,
2617,
366,
28300,
16767,
1747,
1,
2221,
198,
220,
220,
220,
13... | 2.117955 | 763 |
"""
# Description
Rewrite an expression to remove all use of backticks.
# Arguments
1. `e::Any`: An expression.
# Return Values
1. `e::Any`: An expression in which backticks have been removed.
# Examples
```
julia> remove_backticks(:(`mean(a)`))
:(mean(a))
```
"""
function remove_backticks(@nospecialize(e::Any))
if isa(e, Expr) && e.head == :macrocall && isa(e.args[1], GlobalRef) && e.args[1].name == Symbol("@cmd")
Meta.parse(e.args[3])
elseif isa(e, Expr)
Expr(
e.head,
[remove_backticks(a) for a in e.args]...,
)
else
e
end
end
| [
198,
37811,
198,
2,
12489,
198,
198,
30003,
6525,
281,
5408,
284,
4781,
477,
779,
286,
736,
83,
3378,
13,
198,
198,
2,
20559,
2886,
198,
198,
16,
13,
4600,
68,
3712,
7149,
63,
25,
1052,
5408,
13,
198,
198,
2,
8229,
27068,
198,
1... | 2.131944 | 288 |
<reponame>f-ttok/Gadfly.jl<gh_stars>0
using Measures
# Aesthetic values that are categorical (index-coded) when present.
const CategoricalAesthetic =
    Union{Nothing, IndirectArray}

# Aesthetic values that are numeric arrays when present.
const NumericalAesthetic =
    Union{Nothing, AbstractMatrix, AbstractVector}

const NumericalOrCategoricalAesthetic =
    Union{CategoricalAesthetic, NumericalAesthetic}
# Declares the `Aesthetics` container: one field per aesthetic a plot can
# carry, given as `name, Type[, default]`.
# NOTE(review): `@varset` is a project macro; presumably it generates the
# mutable struct plus constructors from these declarations -- confirm against
# its definition before relying on specifics.
@varset Aesthetics begin
    x, Union{NumericalOrCategoricalAesthetic, Distribution}
    y, Union{NumericalOrCategoricalAesthetic, Distribution}
    z, Union{Nothing, Function, NumericalAesthetic}
    xend, NumericalAesthetic
    yend, NumericalAesthetic
    size, Union{CategoricalAesthetic,Vector,Nothing}
    shape, Union{CategoricalAesthetic,Vector,Nothing}
    color, Union{CategoricalAesthetic,Vector,Nothing}
    alpha, NumericalOrCategoricalAesthetic
    linestyle, Union{CategoricalAesthetic,Vector,Nothing}

    label, CategoricalAesthetic
    group, CategoricalAesthetic

    xmin, NumericalAesthetic
    xmax, NumericalAesthetic
    ymin, NumericalAesthetic
    ymax, NumericalAesthetic

    # hexagon sizes used for hexbin
    xsize, NumericalAesthetic
    ysize, NumericalAesthetic

    # fixed lines
    xintercept, NumericalAesthetic
    yintercept, NumericalAesthetic
    intercept, NumericalAesthetic
    slope, NumericalAesthetic

    # boxplots
    middle, NumericalAesthetic
    lower_hinge, NumericalAesthetic
    upper_hinge, NumericalAesthetic
    lower_fence, NumericalAesthetic
    upper_fence, NumericalAesthetic
    outliers, NumericalAesthetic
    width, NumericalAesthetic

    # subplots
    xgroup, CategoricalAesthetic
    ygroup, CategoricalAesthetic

    # guides
    xtick, NumericalAesthetic
    ytick, NumericalAesthetic
    xgrid, NumericalAesthetic
    ygrid, NumericalAesthetic
    color_key_colors, Maybe(AbstractDict)
    color_key_title, Maybe(AbstractString)
    color_key_continuous, Maybe(Bool)
    color_function, Maybe(Function)
    titles, Maybe(Dict{Symbol, AbstractString})
    shape_key_title, Maybe(AbstractString)
    size_key_title, Maybe(AbstractString)
    size_key_vals, Maybe(AbstractDict)

    # mark some ticks as initially invisible
    xtickvisible, Maybe(Vector{Bool})
    ytickvisible, Maybe(Vector{Bool})

    # scale at which ticks should become visible
    xtickscale, Maybe(Vector{Float64})
    ytickscale, Maybe(Vector{Float64})

    # plot viewport extents
    xviewmin, Any
    xviewmax, Any
    yviewmin, Any
    yviewmax, Any

    # labeling functions
    x_label, Function, showoff
    y_label, Function, showoff
    xtick_label, Function, showoff
    ytick_label, Function, showoff
    color_label, Function, showoff
    xgroup_label, Function, showoff
    ygroup_label, Function, showoff
    shape_label, Function, showoff
    size_label, Function, showoff

    # pseudo-aesthetics
    pad_categorical_x, Union{Missing,Bool}, missing
    pad_categorical_y, Union{Missing,Bool}, missing
end
# Calculating fieldnames at runtime is expensive, so cache them once here.
const valid_aesthetics = fieldnames(Aesthetics)
# Print an Aesthetics object, listing only the fields that are populated
# (i.e. neither `missing` nor `nothing`), one per line.
function show(io::IO, data::Aesthetics)
    # (the original declared an unused `maxlen` local; removed)
    print(io, "Aesthetics(")
    for name in valid_aesthetics
        val = getfield(data, name)
        # `!==` is the idiomatic identity test against `nothing`
        if !ismissing(val) && val !== nothing
            print(io, "\n ", string(name), "=")
            show(io, getfield(data, name))
        end
    end
    print(io, "\n)\n")
end
# Alternate aesthetic names
# (legacy/underscored spellings mapped to the canonical field names above)
const aesthetic_aliases =
    Dict{Symbol, Symbol}(:colour => :color,
                         :x_min => :xmin,
                         :x_max => :xmax,
                         :y_min => :ymin,
                         :y_max => :ymax,
                         :x_group => :xgroup,
                         :y_group => :ygroup,
                         :x_viewmin => :xviewmin,
                         :x_viewmax => :xviewmax,
                         :y_viewmin => :yviewmin,
                         :y_viewmax => :yviewmax,
                         :x_group_label => :xgroup_label,
                         :y_group_label => :ygroup_label,
                         :x_tick => :xtick,
                         :y_tick => :ytick,
                         :x_grid => :xgrid,
                         :y_grid => :ygrid)
# Index as if this were a data frame:
# row `i` of the aesthetic whose field is named by the string `j`.
getindex(aes::Aesthetics, i::Integer, j::AbstractString) = getfield(aes, Symbol(j))[i]
# Return the set of aesthetic fields that are non-nothing in `aes`.
function defined_aesthetics(aes::Aesthetics)
    present = Set{Symbol}()
    for field in valid_aesthetics
        if getfield(aes, field) !== nothing
            push!(present, field)
        end
    end
    return present
end
# Checking aesthetics and giving reasonable error messages.

# Return the subset of `vars` that is not defined in `aes`.
#
# Args:
#   aes: An Aesthetics object.
#   vars: Symbols that must be defined in the aesthetics.
#
function undefined_aesthetics(aes::Aesthetics, vars::Symbol...)
    return setdiff(Set(vars), defined_aesthetics(aes))
end
# Raise an error naming the caller `who` if any of `vars` is undefined in `aes`.
function assert_aesthetics_defined(who::AbstractString, aes::Aesthetics, vars::Symbol...)
    missing_vars = undefined_aesthetics(aes, vars...)
    if !isempty(missing_vars)
        error("The following aesthetics are required by ", who,
              " but are not defined: ", join(missing_vars, ", "), "\n")
    end
end
# Raise an error naming the caller `who` if any of `vars` IS defined in `aes`
# (used for aesthetics that are incompatible with a given geometry/statistic).
function assert_aesthetics_undefined(who::AbstractString, aes::Aesthetics, vars::Symbol...)
    present = intersect(Set(vars), defined_aesthetics(aes))
    if !isempty(present)
        error("The following aesthetics are defined but incompatible with ",
              who, ": ", join(present, ", "), "\n")
    end
end
# Raise an error naming the caller `who` unless every defined aesthetic among
# `vars` has the same length.
function assert_aesthetics_equal_length(who::AbstractString, aes::Aesthetics, vars::Symbol...)
    present = [var for var in vars if getfield(aes, var) !== nothing]
    isempty(present) && return nothing
    expected = length(getfield(aes, first(present)))
    for var in present
        if length(getfield(aes, var)) != expected
            error("The following aesthetics are required by ", who,
                  " to be of equal length: ", join(present, ", "), "\n")
        end
    end
    return nothing
end
# Replace values in a with non-nothing values in b.
#
# Args:
#   a: Destination.
#   b: Source.
#
# Returns: nothing
#
# Modifies: a
#
function update!(a::Aesthetics, b::Aesthetics)
    for name in valid_aesthetics
        # `setfield` does not exist in Base -- the mutating form is `setfield!`;
        # the original would throw an UndefVarError whenever the guard held.
        issomething(getfield(b, name)) && setfield!(a, name, getfield(b, name))
    end
    nothing
end
# Serialize aesthetics to JSON.
#
# Args:
#  a: aesthetics to serialize.
#
# Returns:
#   JSON data as a string.
#
# The original iterated an undefined `aes_vars` (the const defined above is
# `valid_aesthetics`) and stringified the whole object `a` as each key instead
# of the field name; both are corrected here.
json(a::Aesthetics) = join([string(var, ":", json(getfield(a, var))) for var in valid_aesthetics], ",\n")
# Concatenate aesthetics.
#
# A new Aesthetics instance is produced with data vectors in each of the given
# Aesthetics concatenated, nothing being treated as an empty vector.
#
# Args:
#   aess: One or more aesthetics.
#
# Returns:
#   A new Aesthetics instance with vectors concatenated.
#
function concat(aess::Aesthetics...)
    cataes = Aesthetics()
    for aes in aess
        for var in valid_aesthetics
            if var in [:xviewmin, :yviewmin]
                # view minima combine by keeping the smaller defined value
                # (`===` is used consistently; the original mixed `==`/`===`)
                mu, mv = getfield(cataes, var), getfield(aes, var)
                setfield!(cataes, var,
                          mu === nothing ? mv :
                          mv === nothing ? mu :
                          min(mu, mv))
            elseif var in [:xviewmax, :yviewmax]
                # view maxima combine by keeping the larger defined value
                mu, mv = getfield(cataes, var), getfield(aes, var)
                setfield!(cataes, var,
                          mu === nothing ? mv :
                          mv === nothing ? mu :
                          max(mu, mv))
            else
                # everything else concatenates pairwise via cat_aes_var!
                setfield!(cataes, var,
                          cat_aes_var!(getfield(cataes, var), getfield(aes, var)))
            end
        end
    end
    return cataes
end
# Base cases for pairwise aesthetic concatenation:
cat_aes_var!(a::(Nothing), b::(Nothing)) = a
# functions/strings are taken wholesale from `b` rather than concatenated
cat_aes_var!(a::(Nothing), b::Union{Function,AbstractString}) = b
cat_aes_var!(a::(Nothing), b) = copy(b)
cat_aes_var!(a, b::(Nothing)) = a
# keep `a` unless it is one of the default labelers (`string`/`showoff`)
cat_aes_var!(a::Function, b::Function) = a === string || a == showoff ? b : a
# Dict-valued aesthetics merge in place: entries in `b` overwrite entries in
# `a`, and the (mutated) `a` is returned.
cat_aes_var!(a::Dict, b::Dict) = merge!(a, b)
# Vectors of callables with the same eltype concatenate in place.
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{T}) where {T <: Base.Callable} = append!(a, b)
# Mixed callable eltypes: build a fresh vector containing both.
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{U}) where {T <: Base.Callable, U <: Base.Callable} =
    a = [a...,b...]

# Let arrays of numbers clobber arrays of functions. This is slightly odd
# behavior, comes up with function statistics applied on a layer-wise
# basis.
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{U}) where {T <: Base.Callable, U} = b
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{U}) where {T, U <: Base.Callable} = a

# Same-eltype vectors concatenate in place.
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{T}) where {T} = append!(a, b)
# Fallback for scalar aesthetics: keep the first value.
cat_aes_var!(a, b) = a
# Vectors with differing eltypes: concatenate into a fresh vector whose
# element type is the promotion of both inputs' eltypes.
function cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{U}) where {T, U}
    R = promote_type(T, U)
    out = Array{R}(undef, length(a) + length(b))
    copyto!(out, 1, a, 1, length(a))
    copyto!(out, length(a) + 1, b, 1, length(b))
    return out
end
# IndirectArrays ("pooled" categorical vectors): promote the value types to a
# common type, then concatenate while preserving the indexed representation.
function cat_aes_var!(xs::IndirectArray{T,1}, ys::IndirectArray{S,1}) where {T, S}
    TS = promote_type(T, S)
    return append!(IndirectArray(xs.index, convert(Array{TS},xs.values)),
                   IndirectArray(ys.index, convert(Array{TS},ys.values)))
end
# Measure-valued vectors concatenate directly.
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{U}) where {T<:Measure, U<:Measure} = [a..., b...]
# Mixed Measure/non-Measure: concatenate when the Measure eltype is absolute;
# otherwise the non-Measure side replaces it.
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{U}) where {T<:Measure, U} =
    isabsolute(T) ? [a..., b...] : b
cat_aes_var!(a::AbstractVector{T}, b::AbstractVector{U}) where {T, U<:Measure} =
    isabsolute(U) ? a : [a..., b...]
# Summarizing aesthetics

# Produce a matrix of Aesthetic or Data objects partitioning the original
# Aesthetics or Data object by the cartesian product of xgroup and ygroup.
#
# This is useful primarily for drawing facets and subplots.
#
# Args:
#   aes: Aesthetics or Data objects to partition.
#
# Returns:
#   A Array{Aesthetics} of size max(1, length(xgroup)) by
#   max(1, length(ygroup))
#
function by_xy_group(aes::T, xgroup, ygroup,
                     num_xgroups, num_ygroups) where T <: Union{Data, Aesthetics}

    @assert xgroup === nothing || ygroup === nothing || length(xgroup) == length(ygroup)

    n = num_ygroups
    m = num_xgroups

    # treat "no grouping" along an axis as a single group with index 1
    xrefs = xgroup === nothing ? [1] : xgroup
    yrefs = ygroup === nothing ? [1] : ygroup

    aes_grid = Array{T}(undef, n, m)
    staging = Array{AbstractArray}(undef, n, m)
    for i in 1:n, j in 1:m
        aes_grid[i, j] = T()
    end

    # nothing to partition: every cell stays an empty object
    xgroup === nothing && ygroup === nothing && return aes_grid

    # rebuild an IndirectArray ("pooled" array) from plainly staged values
    function make_pooled_array(::Type{IndirectArray{T,N,A,V}}, arr::AbstractArray) where {T,N,A,V}
        uarr = unique(arr)
        return IndirectArray(A(indexin(arr, uarr)), V(uarr))
    end
    make_pooled_array(::Type{IndirectArray{T,R,N,RA}},
                      arr::IndirectArray{T,R,N,RA}) where {T,R,N,RA} = arr

    for var in fieldnames(T)
        # Skipped aesthetics. Don't try to partition aesthetics for which it
        # makes no sense to pass on to subplots.
        if var == :xgroup || var == :ygroup||
           var == :xtick || var == :ytick ||
           var == :xgrid || var == :ygrid ||
           var == :x_viewmin || var == :y_viewmin ||
           var == :x_viewmax || var == :y_viewmax ||
           var == :color_key_colors
            continue
        end

        vals = getfield(aes, var)
        if isa(vals, AbstractArray) && length(vals)>1
            # only partition per-datum vectors whose length matches the grouping
            if xgroup !== nothing && length(vals) !== length(xgroup) ||
               ygroup !== nothing && length(vals) !== length(ygroup)
                continue
            end

            for i in 1:n, j in 1:m
                staging[i, j] = similar(vals, 0)
            end

            # scatter each datum into the subplot cell it belongs to
            for (i, j, v) in zip(cycle(yrefs), cycle(xrefs), vals)
                push!(staging[i, j], v)
            end

            for i in 1:n, j in 1:m
                if typeof(vals) <: IndirectArray
                    setfield!(aes_grid[i, j], var,
                              make_pooled_array(typeof(vals), staging[i, j]))
                else
                    # when direct conversion is impossible, fall back to a
                    # Missing-tolerant array (colors widen to the Color type)
                    if !applicable(convert, typeof(vals), staging[i, j])
                        T2 = eltype(vals)
                        if T2 <: Color T2 = Color end
                        da = Array{Union{Missing,T2}}(undef, length(staging[i, j]))
                        copy!(da, staging[i, j])
                        setfield!(aes_grid[i, j], var, da)
                    else
                        setfield!(aes_grid[i, j], var,
                                  convert(typeof(vals), copy(staging[i, j])))
                    end
                end
            end
        else
            # scalar (or length-1) aesthetics are shared by every cell
            for i in 1:n, j in 1:m
                setfield!(aes_grid[i, j], var, vals)
            end
        end
    end

    aes_grid
end
# Non-mutating variant of `inherit!`: returns a copy of `a` with unset fields
# filled in from `b` (fields listed in `clobber` are always overwritten).
function inherit(a::Aesthetics, b::Aesthetics;
                 clobber=[])
    result = copy(a)
    inherit!(result, b, clobber=clobber)
    return result
end
# Fill unset fields of `a` from `b`, in place.
#
# Fields listed in `clobber` are always overwritten; view extents combine so
# that minima keep the smaller and maxima the larger defined value; Dict
# fields merge (entries of `b` win). Returns nothing; modifies `a`.
function inherit!(a::Aesthetics, b::Aesthetics;
                  clobber=[])
    clobber_set = Set{Symbol}(clobber)
    for field in valid_aesthetics
        aval = getfield(a, field)
        bval = getfield(b, field)
        if field in clobber_set
            setfield!(a, field, bval)
        elseif aval === missing || aval === nothing || aval === string || aval == showoff
            # unset fields (or defaulted labelers) are inherited from `b`
            setfield!(a, field, bval)
        elseif field == :xviewmin || field == :yviewmin
            # `===`/`!==` replace the original's `==`/`!=` nothing comparisons
            bval !== nothing && (aval === nothing || aval > bval) && setfield!(a, field, bval)
        elseif field == :xviewmax || field == :yviewmax
            bval !== nothing && (aval === nothing || aval < bval) && setfield!(a, field, bval)
        elseif typeof(aval) <: Dict && typeof(bval) <: Dict
            merge!(aval, getfield(b, field))
        end
    end
    nothing
end
| [
27,
7856,
261,
480,
29,
69,
12,
926,
482,
14,
38,
324,
12254,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
45040,
198,
198,
9979,
327,
2397,
12409,
32,
37531,
796,
198,
220,
220,
220,
4479,
90,
18465,
11,
1423,
1060,
19182,
... | 2.07558 | 6,986 |
<reponame>Fypsilonn/MPIMeasurements.jl
export RobotBasedMagneticFieldStaticProtocolParams, RobotBasedMagneticFieldStaticProtocol, measurement, filename
Base.@kwdef struct RobotBasedMagneticFieldStaticProtocolParams <: RobotBasedProtocolParams
positions::Union{Positions, Missing} = missing
postMoveWaitTime::typeof(1.0u"s") = 0.5u"s"
numCooldowns::Integer = 0
robotVelocity::typeof(1.0u"m/s") = 0.01u"m/s"
switchBrakes::Bool = false
end
RobotBasedMagneticFieldStaticProtocolParams(dict::Dict) = createRobotBasedProtocolParams(RobotBasedMagneticFieldStaticProtocolParams, dict)
Base.@kwdef mutable struct RobotBasedMagneticFieldStaticProtocol <: RobotBasedProtocol
name::AbstractString
description::AbstractString
scanner::MPIScanner
params::RobotBasedMagneticFieldStaticProtocolParams
biChannel::BidirectionalChannel{ProtocolEvent}
done::Bool = false
cancelled::Bool = false
finishAcknowledged::Bool = false
executeTask::Union{Task, Nothing} = nothing
measurement::Union{MagneticFieldMeasurement, Missing} = missing
filename::Union{AbstractString, Missing} = missing
safetyTask::Union{Task, Nothing} = nothing
safetyChannel::Union{Channel{ProtocolEvent}, Nothing} = nothing
end
measurement(protocol::RobotBasedMagneticFieldStaticProtocol) = protocol.measurement
measurement(protocol::RobotBasedMagneticFieldStaticProtocol, measurement::Union{MagneticFieldMeasurement, Missing}) = protocol.measurement = measurement
filename(protocol::RobotBasedMagneticFieldStaticProtocol) = protocol.filename
filename(protocol::RobotBasedMagneticFieldStaticProtocol, filename::String) = protocol.filename = filename
function init(protocol::RobotBasedMagneticFieldStaticProtocol)
measurement_ = MagneticFieldMeasurement()
MPIFiles.description(measurement_, "Generated by protocol $(name(protocol)) with the following description: $(description(protocol))")
MPIFiles.positions(measurement_, positions(protocol))
measurement(protocol, measurement_)
# For inner protocol communication
protocol.safetyChannel = Channel{ProtocolEvent}(32)
# I'd prefer to only start task during execution and also close it in cleanup
protocol.safetyTask = @tspawnat protocol.scanner.generalParams.protocolThreadID watchTemperature(protocol.scanner, protocol.safetyChannel)
# For user I/O
return BidirectionalChannel{ProtocolEvent}(protocol.biChannel)
end
function preMoveAction(protocol::RobotBasedMagneticFieldStaticProtocol, pos::Vector{<:Unitful.Length})
if isready(protocol.safetyChannel)
# Shut down system appropiately and then end execution
close(protocol.safetyChannel)
throw(IllegalStateException(take!(protocol.safetyChannel).message))
end
@info "moving to position" pos
end
function postMoveAction(protocol::RobotBasedMagneticFieldStaticProtocol, pos::Vector{<:Unitful.Length})
gaussmeter = getGaussMeter(scanner(protocol))
field_ = getXYZValues(gaussmeter)
fieldError_ = calculateFieldError(gaussmeter, field_)
fieldFrequency_ = getFrequency(gaussmeter)
timestamp_ = now()
temperature_ = getTemperature(gaussmeter)
addMeasuredPosition(measurement(protocol), pos, field=field_, fieldError=fieldError_, fieldFrequency=fieldFrequency_, timestamp=timestamp_, temperature=temperature_)
end
function cleanup(protocol::RobotBasedMagneticFieldStaticProtocol)
saveMagneticFieldAsHDF5(measurement(protocol), filename(protocol))
close(protocol.scanner)
end
# Here I'd like to also dispatch on protocol and not only scanner
function watchTemperature(scanner::MPIScanner, channel::Channel)
while isopen(channel)
temp = getTemperature(scanner)
if temp > 1.0
put!(channel, IllegaleStateEvent("Temperature is too high. Aborting protocol"))
end
sleep(0.05)
end
end
function handleEvent(protocol::RobotBasedMagneticFieldStaticProtocol, event::FinishedAckEvent)
# End SafetyTask
close(protocol.safetyChannel)
end
| [
27,
7856,
261,
480,
29,
37,
88,
862,
33576,
77,
14,
7378,
3955,
68,
5015,
902,
13,
20362,
198,
39344,
16071,
15001,
13436,
9833,
15878,
45442,
19703,
4668,
10044,
4105,
11,
16071,
15001,
13436,
9833,
15878,
45442,
19703,
4668,
11,
15558... | 3.354256 | 1,163 |
<reponame>UnofficialJuliaMirror/Merlin.jl-80f3d04f-b880-5e6d-8e06-6a7e799169ac
doc"""
tanh(x::Var)
Hyperbolic tangent function.
"""
Base.tanh(x::Var) = Var(tanh(x.data), ∇tanh!, (x,))
Base.tanh(x::Array) = tanh.(x)
Base.tanh(x::CuArray) = CUDNN.tanh(x)
Base.tanh(x::Node) = Node(tanh, (x,))
function ∇tanh!(y::Var, x::Var)
isnothing(x.grad) && return
∇tanh!(y.data, y.grad, x.data, x.grad)
end
function ∇tanh!(y::Array{T}, gy::Array{T}, x::Array{T}, gx::Array{T}) where T
@inbounds for i = 1:length(gx)
gx[i] += gy[i] * (T(1) - y[i] * y[i])
end
end
∇tanh!(y::CuArray, gy::CuArray, x::CuArray, gx::CuArray) = CUDNN.∇tanh!(y, gy, x, gx)
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
14,
13102,
2815,
13,
20362,
12,
1795,
69,
18,
67,
3023,
69,
12,
65,
41655,
12,
20,
68,
21,
67,
12,
23,
68,
3312,
12,
21,
64,
22,
68,
45455,
22172,
330,
198,
15390,
... | 1.868347 | 357 |
"""
static_analysis(assembly; kwargs...)
Perform a static analysis of the system of nonlinear beams contained in
`assembly`. Return the resulting system and a flag indicating whether the
iteration procedure converged.
# Keyword Arguments
- `prescribed_conditions = Dict{Int,PrescribedConditions{Float64}}()`:
A dictionary with keys corresponding to the points at
which prescribed conditions are applied and elements of type
[`PrescribedConditions`](@ref) which describe the prescribed conditions
at those points. If time varying, this input may be provided as a
function of time.
- `distributed_loads = Dict{Int,DistributedLoads{Float64}}()`: A dictionary
with keys corresponding to the elements to which distributed loads are
applied and elements of type [`DistributedLoads`](@ref) which describe
the distributed loads at those points. If time varying, this input may
be provided as a function of time.
- `linear = false`: Set to `true` for a linear analysis
- `linearization_state`: Linearization state variables. Defaults to zeros.
- `update_linearization_state`: Flag indicating whether to update the linearization state
variables for a linear analysis with the instantaneous state variables.
- `method = :newton`: Method (as defined in NLsolve) to solve nonlinear system of equations
- `linesearch = LineSearches.BackTracking(maxstep=1e6)`: Line search used to solve nonlinear system of equations
- `ftol = 1e-9`: tolerance for solving nonlinear system of equations
- `iterations = 1000`: maximum iterations for solving the nonlinear system of equations
- `tvec = 0`: Time vector/value. May be used in conjunction with time varying
prescribed conditions and distributed loads to gradually increase
displacements/loads.
- `reset_state = true`: Flag indicating whether the state variables should be
reset prior to performing the analysis. This keyword argument is only valid
for the pre-allocated version of this function.
"""
function static_analysis(assembly;
prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
linear=false,
linearization_state=nothing,
update_linearization_state=false,
method=:newton,
linesearch=LineSearches.BackTracking(maxstep=1e6),
ftol=1e-9,
iterations=1000,
tvec=0.0,
)
static = true
pc = typeof(prescribed_conditions) <: AbstractDict ? prescribed_conditions : prescribed_conditions(tvec[1])
system = System(assembly, static; prescribed_points=keys(pc))
return static_analysis!(system, assembly;
prescribed_conditions=prescribed_conditions,
distributed_loads=distributed_loads,
linear=linear,
linearization_state=linearization_state,
update_linearization_state=update_linearization_state,
method=method,
linesearch=linesearch,
ftol=ftol,
iterations=iterations,
tvec=tvec,
reset_state=false)
end
"""
static_analysis!(system, assembly; kwargs...)
Pre-allocated version of `static_analysis`.
"""
function static_analysis!(system, assembly;
prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
linear=false,
linearization_state=nothing,
update_linearization_state=false,
method=:newton,
linesearch=LineSearches.BackTracking(maxstep=1e6),
ftol=1e-9,
iterations=1000,
tvec=0.0,
reset_state=true)
# check to make sure system is static
@assert system.static == true
# reset state, if specified
if reset_state
reset_state!(system)
end
# unpack pre-allocated storage and pointers
x = system.x
F = system.r
J = system.K
force_scaling = system.force_scaling
mass_scaling = system.mass_scaling
irow_point = system.irow_point
irow_elem = system.irow_elem
irow_elem1 = system.irow_elem1
irow_elem2 = system.irow_elem2
icol_point = system.icol_point
icol_elem = system.icol_elem
converged = true
for t in tvec
# update stored time
system.t = t
# current parameters
pcond = typeof(prescribed_conditions) <: AbstractDict ?
prescribed_conditions : prescribed_conditions(t)
dload = typeof(distributed_loads) <: AbstractDict ?
distributed_loads : distributed_loads(t)
# solve the system of equations
f! = (F, x) -> system_residual!(F, x, assembly, pcond, dload, force_scaling,
mass_scaling, irow_point, irow_elem, irow_elem1, irow_elem2, icol_point, icol_elem)
j! = (J, x) -> system_jacobian!(J, x, assembly, pcond, dload, force_scaling,
mass_scaling, irow_point, irow_elem, irow_elem1, irow_elem2, icol_point, icol_elem)
if linear
# linear analysis
if !update_linearization_state
if isnothing(linearization_state)
x .= 0
else
x .= linearization_state
end
end
f!(F, x)
j!(J, x)
x .-= safe_lu(J) \ F
else
# nonlinear analysis
df = NLsolve.OnceDifferentiable(f!, j!, x, F, J)
result = NLsolve.nlsolve(df, x,
linsolve=(x, A, b) -> ldiv!(x, safe_lu(A), b),
method=method,
linesearch=linesearch,
ftol=ftol,
iterations=iterations)
# update the solution
x .= result.zero
J .= df.DF
# update convergence flag
converged = result.f_converged
end
end
return system, converged
end
"""
steady_state_analysis(assembly; kwargs...)
Perform a steady-state analysis for the system of nonlinear beams contained in
`assembly`. Return the resulting system and a flag indicating whether the
iteration procedure converged.
# Keyword Arguments
- `prescribed_conditions = Dict{Int,PrescribedConditions{Float64}}()`:
A dictionary with keys corresponding to the points at
which prescribed conditions are applied and elements of type
[`PrescribedConditions`](@ref) which describe the prescribed conditions
at those points. If time varying, this input may be provided as a
function of time.
- `distributed_loads = Dict{Int,DistributedLoads{Float64}}()`: A dictionary
with keys corresponding to the elements to which distributed loads are
applied and elements of type [`DistributedLoads`](@ref) which describe
the distributed loads at those points. If time varying, this input may
be provided as a function of time.
- `linear = false`: Set to `true` for a linear analysis
- `linearization_state`: Linearization state variables. Defaults to zeros.
- `update_linearization_state`: Flag indicating whether to update the linearization state
variables for a linear analysis with the current state variables.
- `method = :newton`: Method (as defined in NLsolve) to solve nonlinear system of equations
- `linesearch = LineSearches.LineSearches.BackTracking(maxstep=1e6)`: Line search used to solve nonlinear system of equations
- `ftol = 1e-9`: tolerance for solving nonlinear system of equations
- `iterations = 1000`: maximum iterations for solving the nonlinear system of equations
- `origin = zeros(3)`: Global frame origin vector. If time varying, this input
may be provided as a function of time.
- `linear_velocity = zeros(3)`: Global frame linear velocity vector. If time
varying, this vector may be provided as a function of time.
- `angular_velocity = zeros(3)`: Global frame angular velocity vector. If time
varying, this vector may be provided as a function of time.
- `tvec = 0.0`: Time vector/value. May be used in conjunction with time varying
prescribed conditions, distributed loads, and global motion to gradually
increase displacements/loads.
- `reset_state = true`: Flag indicating whether the state variables should be
reset prior to performing the analysis. This keyword argument is only valid
for the pre-allocated version of this function.
"""
function steady_state_analysis(assembly;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    linear=false,
    linearization_state=nothing,
    update_linearization_state=false,
    method=:newton,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    tvec=0.0,
    )
    # prescribed conditions may be supplied as a function of time; evaluate
    # them at the first time step to determine which points are prescribed
    pc0 = prescribed_conditions isa AbstractDict ?
        prescribed_conditions : prescribed_conditions(tvec[1])
    # a steady-state analysis requires a dynamic (non-static) system
    system = System(assembly, false; prescribed_points=keys(pc0))
    # delegate to the pre-allocated version; the freshly created system is
    # already zeroed, so no additional state reset is needed
    return steady_state_analysis!(system, assembly;
        prescribed_conditions=prescribed_conditions,
        distributed_loads=distributed_loads,
        linear=linear,
        linearization_state=linearization_state,
        update_linearization_state=update_linearization_state,
        method=method,
        linesearch=linesearch,
        ftol=ftol,
        iterations=iterations,
        origin=origin,
        linear_velocity=linear_velocity,
        angular_velocity=angular_velocity,
        tvec=tvec,
        reset_state=false,
        )
end
"""
steady_state_analysis!(system, assembly; kwargs...)
Pre-allocated version of `steady_state_analysis`.
"""
function steady_state_analysis!(system, assembly;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    linear=false,
    linearization_state=nothing,
    update_linearization_state=false,
    method=:newton,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    tvec=0.0,
    reset_state=true,
    )
    # a steady-state analysis requires a dynamic (non-static) system
    @assert system.static == false
    # reset state, if specified
    if reset_state
        reset_state!(system)
    end
    # unpack pointers to pre-allocated storage
    x = system.x                          # state vector
    F = system.r                          # residual vector
    J = system.K                          # system jacobian
    force_scaling = system.force_scaling
    mass_scaling = system.mass_scaling
    irow_point = system.irow_point
    irow_elem = system.irow_elem
    irow_elem1 = system.irow_elem1
    irow_elem2 = system.irow_elem2
    icol_point = system.icol_point
    icol_elem = system.icol_elem
    # assume convergence until a nonlinear solve reports otherwise
    converged = true
    for t in tvec
        # update stored time
        system.t = t
        # evaluate possibly time-varying parameters at the current time
        pcond = typeof(prescribed_conditions) <: AbstractDict ?
            prescribed_conditions : prescribed_conditions(t)
        dload = typeof(distributed_loads) <: AbstractDict ?
            distributed_loads : distributed_loads(t)
        x0 = typeof(origin) <: AbstractVector ? SVector{3}(origin) : SVector{3}(origin(t))
        v0 = typeof(linear_velocity) <: AbstractVector ? SVector{3}(linear_velocity) : SVector{3}(linear_velocity(t))
        ω0 = typeof(angular_velocity) <: AbstractVector ? SVector{3}(angular_velocity) : SVector{3}(angular_velocity(t))
        # residual and jacobian functions for the current parameters
        f! = (F, x) -> system_residual!(F, x, assembly, pcond,
            dload, force_scaling, mass_scaling, irow_point, irow_elem, irow_elem1,
            irow_elem2, icol_point, icol_elem, x0, v0, ω0)
        j! = (J, x) -> system_jacobian!(J, x, assembly, pcond,
            dload, force_scaling, mass_scaling, irow_point, irow_elem, irow_elem1,
            irow_elem2, icol_point, icol_elem, x0, v0, ω0)
        # solve the system of equations
        if linear
            # linear analysis: a single Newton step about the linearization state
            if !update_linearization_state
                if isnothing(linearization_state)
                    x .= 0
                else
                    x .= linearization_state
                end
            end
            f!(F, x)
            j!(J, x)
            x .-= safe_lu(J) \ F
        else
            # nonlinear analysis
            df = NLsolve.OnceDifferentiable(f!, j!, x, F, J)
            result = NLsolve.nlsolve(df, x,
                linsolve=(x, A, b) -> ldiv!(x, safe_lu(A), b),
                method=method,
                linesearch=linesearch,
                ftol=ftol,
                iterations=iterations)
            # update the solution
            x .= result.zero
            J .= df.DF
            # update the convergence flag
            # (fix: was assigned to a new variable `convergence`, so the
            # returned `converged` flag was always `true`)
            converged = result.f_converged
        end
    end
    return system, converged
end
"""
eigenvalue_analysis(assembly; kwargs...)
Compute the eigenvalues and eigenvectors of the system of nonlinear beams
contained in `assembly`. Return the modified system, eigenvalues, eigenvectors,
and a convergence flag indicating whether the corresponding steady-state analysis
converged.
# Keyword Arguments
- `prescribed_conditions = Dict{Int,PrescribedConditions{Float64}}()`:
A dictionary with keys corresponding to the points at
which prescribed conditions are applied and elements of type
[`PrescribedConditions`](@ref) which describe the prescribed conditions
at those points. If time varying, this input may be provided as a
function of time.
- `distributed_loads = Dict{Int,DistributedLoads{Float64}}()`: A dictionary
with keys corresponding to the elements to which distributed loads are
applied and elements of type [`DistributedLoads`](@ref) which describe
the distributed loads at those points. If time varying, this input may
be provided as a function of time.
- `linear = false`: Set to `true` for a linear analysis
- `linearization_state`: Linearization state variables. Defaults to zeros.
- `update_linearization_state`: Flag indicating whether to update the linearization state
variables for a linear analysis with the current state variables.
- `method = :newton`: Method (as defined in NLsolve) to solve nonlinear system of equations
- `linesearch = LineSearches.BackTracking(maxstep=1e6)`: Line search used to solve nonlinear system of equations
- `ftol = 1e-9`: tolerance for solving nonlinear system of equations
- `iterations = 1000`: maximum iterations for solving the nonlinear system of equations
- `reset_state = true`: Flag indicating whether the state variables should be
reset prior to performing the steady-state analysis. This keyword argument
is only valid for the pre-allocated version of this function.
- `find_steady_state = reset_state && !linear`: Flag indicating whether the
steady state solution should be found prior to performing the eigenvalue analysis.
- `origin = zeros(3)`: Global frame origin.
If time varying, this vector may be provided as a function of time.
- `linear_velocity = zeros(3)`: Global frame linear velocity vector.
If time varying, this vector may be provided as a function of time.
May be provided either as a constant or as a function of time.
- `angular_velocity = zeros(3)`: Global frame angular velocity vector.
If time varying, this vector may be provided as a function of time.
- `tvec`: Time vector. May be used in conjunction with time varying
prescribed conditions, distributed loads, and global motion to gradually
increase displacements/loads during the steady-state analysis.
- `nev = 6`: Number of eigenvalues to compute
"""
function eigenvalue_analysis(assembly;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    method=:newton,
    linear=false,
    linearization_state=nothing,
    update_linearization_state=false,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    find_steady_state=!linear,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    tvec=0.0,
    nev=6
    )
    # prescribed conditions may be supplied as a function of time; evaluate
    # them at the first time step to determine which points are prescribed
    pc0 = prescribed_conditions isa AbstractDict ?
        prescribed_conditions : prescribed_conditions(tvec[1])
    # an eigenvalue analysis requires a dynamic (non-static) system
    system = System(assembly, false; prescribed_points=keys(pc0))
    # delegate to the pre-allocated version; the new system is already zeroed
    return eigenvalue_analysis!(system, assembly;
        prescribed_conditions=prescribed_conditions,
        distributed_loads=distributed_loads,
        linear=linear,
        linearization_state=linearization_state,
        update_linearization_state=update_linearization_state,
        method=method,
        linesearch=linesearch,
        ftol=ftol,
        iterations=iterations,
        reset_state=false,
        find_steady_state=find_steady_state,
        origin=origin,
        linear_velocity=linear_velocity,
        angular_velocity=angular_velocity,
        tvec=tvec,
        nev=nev,
        )
end
"""
eigenvalue_analysis!(system, assembly; kwargs...)
Pre-allocated version of `eigenvalue_analysis`. Uses the state variables stored in
`system` as an initial guess for iterating to find the steady state solution.
"""
function eigenvalue_analysis!(system, assembly;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    linear=false,
    linearization_state=nothing,
    update_linearization_state=false,
    method=:newton,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    reset_state=true,
    find_steady_state=!linear && reset_state,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    tvec=0.0,
    nev=6,
    )
    # reset the stored state variables, if requested
    if reset_state
        reset_state!(system)
    end
    # perform steady state analysis (if nonlinear); the eigenvalue problem is
    # linearized about the resulting state
    if find_steady_state
        system, converged = steady_state_analysis!(system, assembly;
            prescribed_conditions=prescribed_conditions,
            distributed_loads=distributed_loads,
            linear=linear,
            linearization_state=linearization_state,
            update_linearization_state=update_linearization_state,
            method=method,
            linesearch=linesearch,
            ftol=ftol,
            iterations=iterations,
            origin=origin,
            linear_velocity=linear_velocity,
            angular_velocity=angular_velocity,
            tvec=tvec,
            )
    else
        # set linearization state variables
        if linear && !update_linearization_state
            if isnothing(linearization_state)
                system.x .= 0
            else
                system.x .= linearization_state
            end
        end
        # converged by default (no iterative solve was performed)
        converged = true
    end
    # unpack state vector, stiffness, and mass matrices
    x = system.x # populated during steady state solution
    K = system.K # needs to be updated
    M = system.M # still needs to be populated
    # unpack scaling parameters
    force_scaling = system.force_scaling
    mass_scaling = system.mass_scaling
    # also unpack system indices
    irow_point = system.irow_point
    irow_elem = system.irow_elem
    irow_elem1 = system.irow_elem1
    irow_elem2 = system.irow_elem2
    icol_point = system.icol_point
    icol_elem = system.icol_elem
    # current time
    t = system.t
    # evaluate possibly time-varying parameters at the current time
    pcond = typeof(prescribed_conditions) <: AbstractDict ?
        prescribed_conditions : prescribed_conditions(t)
    dload = typeof(distributed_loads) <: AbstractDict ?
        distributed_loads : distributed_loads(t)
    x0 = typeof(origin) <: AbstractVector ? SVector{3}(origin) : SVector{3}(origin(t))
    v0 = typeof(linear_velocity) <: AbstractVector ? SVector{3}(linear_velocity) : SVector{3}(linear_velocity(t))
    ω0 = typeof(angular_velocity) <: AbstractVector ? SVector{3}(angular_velocity) : SVector{3}(angular_velocity(t))
    # solve for the system stiffness matrix (jacobian about the current state)
    K = system_jacobian!(K, x, assembly, pcond, dload, force_scaling, mass_scaling,
        irow_point, irow_elem, irow_elem1, irow_elem2, icol_point, icol_elem, x0, v0, ω0)
    # solve for the system mass matrix
    M = system_mass_matrix!(M, x, assembly, force_scaling, mass_scaling, irow_point, irow_elem,
        irow_elem1, irow_elem2, icol_point, icol_elem)
    # construct a matrix-free linear map representing K \ M (and its adjoint),
    # so the Arnoldi iteration only needs matrix-vector products
    T = eltype(system)
    nx = length(x)
    Kfact = safe_lu(K)
    f! = (b, x) -> ldiv!(b, Kfact, M * x)
    fc! = (b, x) -> mul!(b, M', Kfact' \ x)
    A = LinearMap{T}(f!, fc!, nx, nx; ismutating=true)
    # compute eigenvalues and eigenvectors of the shifted/inverted problem
    λ, V = partialeigen(partialschur(A; nev=min(nx, nev), which=LM())[1])
    # sort eigenvalues by (descending) magnitude, breaking ties by imaginary part
    perm = sortperm(λ, by=(λ) -> (abs(λ), imag(λ)), rev=true)
    λ .= λ[perm]
    V .= V[:,perm]
    # eigenvalues are actually -1/λ, no modification necessary for eigenvectors
    λ .= -1 ./ λ
    return system, λ, V, converged
end
"""
initial_condition_analysis(assembly, t0; kwargs...)
Perform an analysis to obtain a consistent set of initial conditions. Return the
final system with the new initial conditions.
# Keyword Arguments
- `prescribed_conditions`: A dictionary with keys corresponding to the points at
which prescribed conditions are applied and elements of type
[`PrescribedConditions`](@ref) which describe the prescribed conditions
at those points. If time varying, this input may be provided as a
function of time.
- `distributed_loads`: A dictionary with keys corresponding to the elements to
which distributed loads are applied and elements of type
[`DistributedLoads`](@ref) which describe the distributed loads at those
points. If time varying, this input may be provided as a function of
time.
- `linear = false`: Set to `true` for a linear analysis
- `linearization_state`: Linearization state variables. Defaults to zeros.
- `method = :newton`: Method (as defined in NLsolve) to solve nonlinear system of equations
- `linesearch = LineSearches.BackTracking(maxstep=1e6)`: Line search used to solve nonlinear system of equations
- `ftol = 1e-9`: tolerance for solving nonlinear system of equations
- `iterations = 1000`: maximum iterations for solving the nonlinear system of equations
- `reset_state = true`: Flag indicating whether the state variables should be
reset prior to performing the analysis. This keyword argument is only valid
for the pre-allocated version of this function.
- `origin = zeros(3)`: Global frame origin.
If time varying, this vector may be provided as a function of time.
- `linear_velocity = zeros(3)`: Global frame linear velocity vector.
If time varying, this vector may be provided as a function of time.
May be provided either as a constant or as a function of time.
- `angular_velocity = zeros(3)`: Global frame angular velocity vector.
If time varying, this vector may be provided as a function of time.
- `u0=fill(zeros(3), length(assembly.elements))`: Initial displacement of each beam element,
- `theta0=fill(zeros(3), length(assembly.elements))`: Initial angular displacement of each beam element,
- `udot0=fill(zeros(3), length(assembly.elements))`: Initial time derivative with respect to `u`
- `thetadot0=fill(zeros(3), length(assembly.elements))`: Initial time derivative with respect to `theta`
- `save=1:length(tvec)`: Steps at which to save the time history
"""
function initial_condition_analysis(assembly, t0;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    linear=false,
    linearization_state=nothing,
    method=:newton,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    u0=fill((@SVector zeros(3)), length(assembly.elements)),
    theta0=fill((@SVector zeros(3)), length(assembly.elements)),
    udot0=fill((@SVector zeros(3)), length(assembly.elements)),
    thetadot0=fill((@SVector zeros(3)), length(assembly.elements)),
    )
    # prescribed conditions may be supplied as a function of time; evaluate
    # them at the initial time to determine which points are prescribed
    pc0 = prescribed_conditions isa AbstractDict ?
        prescribed_conditions : prescribed_conditions(t0)
    # an initial condition analysis requires a dynamic (non-static) system
    system = System(assembly, false; prescribed_points=keys(pc0))
    # delegate to the pre-allocated version; the new system is already zeroed
    return initial_condition_analysis!(system, assembly, t0;
        prescribed_conditions=prescribed_conditions,
        distributed_loads=distributed_loads,
        linear=linear,
        linearization_state=linearization_state,
        method=method,
        linesearch=linesearch,
        ftol=ftol,
        iterations=iterations,
        reset_state=false,
        origin=origin,
        linear_velocity=linear_velocity,
        angular_velocity=angular_velocity,
        u0=u0,
        theta0=theta0,
        udot0=udot0,
        thetadot0=thetadot0,
        )
end
"""
initial_condition_analysis!(system, assembly, t0; kwargs...)
Pre-allocated version of `initial_condition_analysis`.
"""
function initial_condition_analysis!(system, assembly, t0;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    linear=false,
    linearization_state=nothing,
    method=:newton,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    reset_state=true,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    u0=fill((@SVector zeros(3)), length(assembly.elements)),
    theta0=fill((@SVector zeros(3)), length(assembly.elements)),
    udot0=fill((@SVector zeros(3)), length(assembly.elements)),
    thetadot0=fill((@SVector zeros(3)), length(assembly.elements)),
    )
    # an initial condition analysis requires a dynamic (non-static) system
    @assert system.static == false
    if reset_state
        reset_state!(system)
    end
    # unpack pre-allocated storage and pointers for system
    x = system.x                          # state vector
    F = system.r                          # residual vector
    J = system.K                          # system jacobian
    force_scaling = system.force_scaling
    mass_scaling = system.mass_scaling
    irow_point = system.irow_point
    irow_elem = system.irow_elem
    irow_elem1 = system.irow_elem1
    irow_elem2 = system.irow_elem2
    icol_point = system.icol_point
    icol_elem = system.icol_elem
    udot = system.udot
    θdot = system.θdot
    Pdot = system.Pdot
    Hdot = system.Hdot
    nelem = length(assembly.elements)
    # set current time step
    system.t = t0
    # evaluate possibly time-varying parameters at `t0`
    pcond = typeof(prescribed_conditions) <: AbstractDict ?
        prescribed_conditions : prescribed_conditions(t0)
    dload = typeof(distributed_loads) <: AbstractDict ?
        distributed_loads : distributed_loads(t0)
    x0 = typeof(origin) <: AbstractVector ? SVector{3}(origin) : SVector{3}(origin(t0))
    v0 = typeof(linear_velocity) <: AbstractVector ? SVector{3}(linear_velocity) : SVector{3}(linear_velocity(t0))
    ω0 = typeof(angular_velocity) <: AbstractVector ? SVector{3}(angular_velocity) : SVector{3}(angular_velocity(t0))
    # construct residual and jacobian functions
    f! = (F, x) -> system_residual!(F, x, assembly, pcond, dload, force_scaling,
        mass_scaling, irow_point, irow_elem, irow_elem1, irow_elem2, icol_point, icol_elem,
        x0, v0, ω0, u0, theta0, udot0, thetadot0)
    j! = (J, x) -> system_jacobian!(J, x, assembly, pcond, dload, force_scaling,
        mass_scaling, irow_point, irow_elem, irow_elem1, irow_elem2, icol_point, icol_elem,
        x0, v0, ω0, u0, theta0, udot0, thetadot0)
    # solve system of equations
    if linear
        # linear analysis: a single Newton step about the linearization state
        if isnothing(linearization_state)
            x .= 0
        else
            x .= linearization_state
        end
        f!(F, x)
        j!(J, x)
        x .-= safe_lu(J) \ F
        # a single linear solve is always considered converged
        converged = true
    else
        # nonlinear analysis
        df = OnceDifferentiable(f!, j!, x, F, J)
        result = NLsolve.nlsolve(df, x,
            linsolve=(x, A, b) -> ldiv!(x, safe_lu(A), b),
            method=method,
            linesearch=linesearch,
            ftol=ftol,
            iterations=iterations)
        x .= result.zero
        J .= df.DF
        # get convergence flag
        # (fix: `result` only exists in this branch; reading it after the
        # if/else raised `UndefVarError` for linear analyses)
        converged = result.f_converged
    end
    # save states and state rates
    for ielem = 1:nelem
        icol = icol_elem[ielem]
        # extract rotation parameters for this beam element
        C = get_C(theta0[ielem])
        Cab = assembly.elements[ielem].Cab
        CtCab = C' * Cab
        # save states and state rates
        udot[ielem] = udot0[ielem]
        θdot[ielem] = thetadot0[ielem]
        Pdot[ielem] = CtCab' * SVector(x[icol], x[icol + 1], x[icol + 2]) .* mass_scaling
        Hdot[ielem] = CtCab' * SVector(x[icol + 3], x[icol + 4], x[icol + 5]) .* mass_scaling
        # restore original state vector
        x[icol:icol + 2] .= u0[ielem]
        x[icol + 3:icol + 5] .= theta0[ielem]
    end
    return system, converged
end
"""
time_domain_analysis(assembly, tvec; kwargs...)
Perform a time-domain analysis for the system of nonlinear beams contained in
`assembly` using the time vector `tvec`. Return the final system, a post-processed
solution history, and a convergence flag indicating whether the iterations
converged for each time step.
# Keyword Arguments
- `prescribed_conditions`: A dictionary with keys corresponding to the points at
which prescribed conditions are applied and elements of type
[`PrescribedConditions`](@ref) which describe the prescribed conditions
at those points. If time varying, this input may be provided as a
function of time.
- `distributed_loads`: A dictionary with keys corresponding to the elements to
which distributed loads are applied and elements of type
[`DistributedLoads`](@ref) which describe the distributed loads at those
points. If time varying, this input may be provided as a function of
time.
- `linear = false`: Set to `true` for a linear analysis
- `linearization_state`: Linearization state variables. Defaults to zeros.
- `update_linearization_state`: Flag indicating whether to update the linearization state
variables for a linear analysis with the current state variables.
- `method = :newton`: Method (as defined in NLsolve) to solve nonlinear system of equations
- `linesearch = LineSearches.BackTracking(maxstep=1e6)`: Line search used to solve nonlinear system of equations
- `ftol = 1e-9`: tolerance for solving nonlinear system of equations
- `iterations = 1000`: maximum iterations for solving the nonlinear system of equations
- `reset_state = true`: Flag indicating whether the state variables should be
reset prior to performing the analysis. This keyword argument is only valid
for the pre-allocated version of this function.
- `initialize = true`: Flag indicating whether a consistent set of initial
conditions should be found using [`initial_condition_analysis`](@ref). If
`false`, the keyword arguments `u0`, `theta0`, `udot0` and `thetadot0` will
be ignored and the system state vector will be used as the initial state
variables.
- `origin`: Global frame origin vector. If time varying, this input
may be provided as a function of time.
- `linear_velocity`: Global frame linear velocity vector. If time
varying, this vector may be provided as a function of time.
- `angular_velocity`: Global frame angular velocity vector. If time
varying, this vector may be provided as a function of time.
- `u0=fill(zeros(3), length(assembly.elements))`: Initial displacement of each beam element,
- `theta0=fill(zeros(3), length(assembly.elements))`: Initial angular displacement of each beam element,
- `udot0=fill(zeros(3), length(assembly.elements))`: Initial time derivative with respect to `u`
- `thetadot0=fill(zeros(3), length(assembly.elements))`: Initial time derivative with respect to `theta`
- `save=1:length(tvec)`: Steps at which to save the time history
"""
function time_domain_analysis(assembly, tvec;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    linear=false,
    linearization_state=nothing,
    update_linearization_state=false,
    method=:newton,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    initialize=true,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    u0=fill((@SVector zeros(3)), length(assembly.elements)),
    theta0=fill((@SVector zeros(3)), length(assembly.elements)),
    udot0=fill((@SVector zeros(3)), length(assembly.elements)),
    thetadot0=fill((@SVector zeros(3)), length(assembly.elements)),
    save=1:length(tvec)
    )
    # prescribed conditions may be supplied as a function of time; evaluate
    # them at the first time step to determine which points are prescribed
    pc0 = prescribed_conditions isa AbstractDict ?
        prescribed_conditions : prescribed_conditions(tvec[1])
    # a time-domain analysis requires a dynamic (non-static) system
    system = System(assembly, false; prescribed_points=keys(pc0))
    # delegate to the pre-allocated version; the new system is already zeroed
    return time_domain_analysis!(system, assembly, tvec;
        prescribed_conditions=prescribed_conditions,
        distributed_loads=distributed_loads,
        linear=linear,
        linearization_state=linearization_state,
        update_linearization_state=update_linearization_state,
        method=method,
        linesearch=linesearch,
        ftol=ftol,
        iterations=iterations,
        reset_state=false,
        initialize=initialize,
        origin=origin,
        linear_velocity=linear_velocity,
        angular_velocity=angular_velocity,
        u0=u0,
        theta0=theta0,
        udot0=udot0,
        thetadot0=thetadot0,
        save=save,
        )
end
"""
time_domain_analysis!(system, assembly, tvec; kwargs...)
Pre-allocated version of `time_domain_analysis`.
"""
function time_domain_analysis!(system, assembly, tvec;
    prescribed_conditions=Dict{Int,PrescribedConditions{Float64}}(),
    distributed_loads=Dict{Int,DistributedLoads{Float64}}(),
    linear=false,
    linearization_state=nothing,
    update_linearization_state=false,
    method=:newton,
    linesearch=LineSearches.BackTracking(maxstep=1e6),
    ftol=1e-9,
    iterations=1000,
    reset_state=true,
    initialize=true,
    origin=(@SVector zeros(3)),
    linear_velocity=(@SVector zeros(3)),
    angular_velocity=(@SVector zeros(3)),
    u0=fill((@SVector zeros(3)), length(assembly.elements)),
    theta0=fill((@SVector zeros(3)), length(assembly.elements)),
    udot0=fill((@SVector zeros(3)), length(assembly.elements)),
    thetadot0=fill((@SVector zeros(3)), length(assembly.elements)),
    save=1:length(tvec)
    )
    # a time-domain analysis requires a dynamic (non-static) system
    @assert system.static == false
    if reset_state
        reset_state!(system)
    end
    # find a consistent set of initial conditions, if requested
    if initialize
        system, converged = initial_condition_analysis!(system, assembly, tvec[1];
            prescribed_conditions=prescribed_conditions,
            distributed_loads=distributed_loads,
            linear=linear,
            linearization_state=linearization_state,
            method=method,
            linesearch=linesearch,
            ftol=ftol,
            iterations=iterations,
            reset_state=false,
            origin=origin,
            linear_velocity=linear_velocity,
            angular_velocity=angular_velocity,
            u0=u0,
            theta0=theta0,
            udot0=udot0,
            thetadot0=thetadot0,
            )
    else
        # converged by default
        converged = true
    end
    # unpack pre-allocated storage and pointers for system
    x = system.x                          # state vector
    F = system.r                          # residual vector
    J = system.K                          # system jacobian
    force_scaling = system.force_scaling
    mass_scaling = system.mass_scaling
    irow_point = system.irow_point
    irow_elem = system.irow_elem
    irow_elem1 = system.irow_elem1
    irow_elem2 = system.irow_elem2
    icol_point = system.icol_point
    icol_elem = system.icol_elem
    udot = system.udot
    θdot = system.θdot
    Pdot = system.Pdot
    Hdot = system.Hdot
    # number of beam elements
    nelem = length(assembly.elements)
    # initialize storage for each saved time step
    isave = 1
    history = Vector{AssemblyState{eltype(system)}}(undef, length(save))
    # add initial state to the solution history
    if isave in save
        pcond = typeof(prescribed_conditions) <: AbstractDict ?
            prescribed_conditions : prescribed_conditions(tvec[1])
        history[isave] = AssemblyState(system, assembly, prescribed_conditions=pcond)
        isave += 1
    end
    # --- Begin Time Domain Simulation --- #
    for it = 2:length(tvec)
        # update current time
        system.t = tvec[it]
        # current time step size
        dt = tvec[it] - tvec[it - 1]
        # evaluate possibly time-varying parameters at the current time
        pcond = typeof(prescribed_conditions) <: AbstractDict ?
            prescribed_conditions : prescribed_conditions(tvec[it])
        dload = typeof(distributed_loads) <: AbstractDict ?
            distributed_loads : distributed_loads(tvec[it])
        x0 = typeof(origin) <: AbstractVector ? SVector{3}(origin) : SVector{3}(origin(tvec[it]))
        v0 = typeof(linear_velocity) <: AbstractVector ? SVector{3}(linear_velocity) : SVector{3}(linear_velocity(tvec[it]))
        ω0 = typeof(angular_velocity) <: AbstractVector ? SVector{3}(angular_velocity) : SVector{3}(angular_velocity(tvec[it]))
        # current initialization parameters: store the `*_init` quantities for
        # this step in the state-rate arrays (they are reconstructed below)
        for ielem = 1:nelem
            icol = icol_elem[ielem]
            # get beam element states
            u = SVector(x[icol], x[icol + 1], x[icol + 2])
            θ = SVector(x[icol + 3], x[icol + 4], x[icol + 5])
            P = SVector(x[icol + 12], x[icol + 13], x[icol + 14]) .* mass_scaling
            H = SVector(x[icol + 15], x[icol + 16], x[icol + 17]) .* mass_scaling
            # extract rotation parameters
            C = get_C(θ)
            Cab = assembly.elements[ielem].Cab
            CtCab = C' * Cab
            # store `udot_init` in `udot`
            udot[ielem] = 2 / dt * u + udot[ielem]
            # store `θdot_init` in `θdot`
            θdot[ielem] = 2 / dt * θ + θdot[ielem]
            # store `CtCabPdot_init` in `Pdot`
            Pdot[ielem] = 2 / dt * CtCab * P + CtCab * Pdot[ielem]
            # store `CtCabHdot_init` in `Hdot`
            Hdot[ielem] = 2 / dt * CtCab * H + CtCab * Hdot[ielem]
        end
        # solve for the state variables at the next time step
        f! = (F, x) -> system_residual!(F, x, assembly, pcond, dload, force_scaling,
            mass_scaling, irow_point, irow_elem, irow_elem1, irow_elem2, icol_point, icol_elem,
            x0, v0, ω0, udot, θdot, Pdot, Hdot, dt)
        j! = (J, x) -> system_jacobian!(J, x, assembly, pcond, dload, force_scaling,
            mass_scaling, irow_point, irow_elem, irow_elem1, irow_elem2, icol_point, icol_elem,
            x0, v0, ω0, udot, θdot, Pdot, Hdot, dt)
        # solve system of equations
        if linear
            # linear analysis: a single Newton step about the linearization state
            if !update_linearization_state
                if isnothing(linearization_state)
                    x .= 0
                else
                    x .= linearization_state
                end
            end
            f!(F, x)
            j!(J, x)
            x .-= safe_lu(J) \ F
        else
            # nonlinear analysis
            df = OnceDifferentiable(f!, j!, x, F, J)
            result = NLsolve.nlsolve(df, x,
                linsolve=(x, A, b) -> ldiv!(x, safe_lu(A), b),
                method=method,
                linesearch=linesearch,
                ftol=ftol,
                iterations=iterations)
            x .= result.zero
            J .= df.DF
        end
        # set new state rates
        for ielem = 1:nelem
            icol = icol_elem[ielem]
            # get beam element states
            u = SVector(x[icol], x[icol + 1], x[icol + 2])
            θ = SVector(x[icol + 3], x[icol + 4], x[icol + 5])
            P = SVector(x[icol + 12], x[icol + 13], x[icol + 14]) .* mass_scaling
            H = SVector(x[icol + 15], x[icol + 16], x[icol + 17]) .* mass_scaling
            # extract rotation parameters
            C = get_C(θ)
            Cab = assembly.elements[ielem].Cab
            CtCab = C' * Cab
            # save state rates
            udot[ielem] = 2 / dt * u - udot[ielem]
            θdot[ielem] = 2 / dt * θ - θdot[ielem]
            Pdot[ielem] = 2 / dt * P - CtCab' * Pdot[ielem]
            Hdot[ielem] = 2 / dt * H - CtCab' * Hdot[ielem]
        end
        # add state to history
        if it in save
            # fix: pass the prescribed conditions evaluated at the current time
            # (`pcond`) — the raw `prescribed_conditions` input may be a
            # function of time rather than the dictionary AssemblyState expects
            history[isave] = AssemblyState(system, assembly, prescribed_conditions=pcond)
            isave += 1
        end
        # stop early if unconverged (only meaningful for nonlinear analyses;
        # the `!linear` guard also protects the branch-local `result`)
        if !linear && !result.f_converged
            converged = false
            break
        end
    end
    return system, history, converged
end
| [
37811,
198,
220,
220,
220,
9037,
62,
20930,
7,
41873,
26,
479,
86,
22046,
23029,
198,
198,
5990,
687,
257,
9037,
3781,
286,
262,
1080,
286,
1729,
29127,
26741,
7763,
287,
198,
63,
41873,
44646,
8229,
262,
7186,
1080,
290,
257,
6056,
... | 2.498696 | 16,489 |
# Multidimensional arrays — a short tour of construction and indexing.
z = zeros(Float64, 2, 3)
println(typeof(z))  # Matrix{Float64} (i.e. Array{Float64,2})
# Declare array of dimension n x m
n, m = 2, 4
# `undef` allocates without initializing: the printed contents are whatever
# garbage happened to be in memory, so this output is nondeterministic.
arr = Array{Int}(undef, 2, 4)
println(arr)
println(size(arr))
arr2 = Array{Int}(undef, 3, 2, 2)
println(arr2)
println(size(arr2))
# `one(String)` is the empty string (the identity for concatenation),
# so this produces a 1×3 matrix of "".
s = ones(String, 1, 3)
println(s)
# Note that s is considered a "row matrix / vector".
# The typical array is considered a "column matrix / vector".
# A 1×3 Matrix is never `==` to a 3-element Vector, so this prints false.
println(s == ["", "", ""])
a = [1 2 3; 4 5 6]
println(a) # 2 rows, 3 columns
println(a[1, 2])
a[2, 3] = 10
println(a)
# Slicing with `[:, 2:end]` copies columns 2..3 into a new matrix.
u = a[:,2:end]
# Broadcasting Euler's formula over the matrix: e^(i*u) element-wise.
println(ℯ.^(im*u))
println(ℯ.^(im*u)) | [
2,
7854,
312,
16198,
26515,
198,
89,
796,
1976,
27498,
7,
43879,
2414,
11,
362,
11,
513,
8,
198,
35235,
7,
4906,
1659,
7,
89,
4008,
198,
198,
2,
16691,
533,
7177,
286,
15793,
299,
2124,
285,
198,
77,
11,
285,
796,
362,
11,
604,
... | 2.369099 | 233 |
# Return a hardware-generated random `UInt32` using the x86 RDRAND
# instruction, injected via an inline LLVM module.
# NOTE(review): only works on x86 CPUs with the `rdrnd` feature; the target
# attributes baked into the IR ("core2", "+rdrnd", ...) should be confirmed
# against the deployment target. The RDRAND success flag (the second field of
# the returned struct) is discarded — presumably failure is considered rare
# enough to ignore; verify this is acceptable for the use case.
function randuint() :: UInt32
    # First string: module-level IR declaring the intrinsic and a wrapper
    # function; second string: the call-site IR executed by `llvmcall`.
    Base.llvmcall((
"""
define i32 @randuint() #0 {
  %1 = tail call { i32, i32 } @llvm.x86.rdrand.32() #1
  %2 = extractvalue { i32, i32 } %1, 0
  ret i32 %2
}
; Function Attrs: nounwind
declare { i32, i32 } @llvm.x86.rdrand.32() #1
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+ssse3,+cx16,+rdrnd,+sse,+sse2,+sse3" "unsafe-fp-math"="true" "use-soft-float"="false" }
attributes #1 = { nounwind }
"""
,
"""
%1 = call i32 @randuint()
ret i32 %1
"""),
    # return type and (empty) argument tuple for the llvmcall
    UInt32, Tuple{}
    )
end
| [
198,
8818,
43720,
28611,
3419,
7904,
471,
5317,
2624,
198,
220,
220,
220,
7308,
13,
297,
14761,
13345,
19510,
198,
37811,
198,
13086,
1312,
2624,
2488,
25192,
28611,
3419,
1303,
15,
1391,
198,
220,
4064,
16,
796,
7894,
869,
1391,
1312,
... | 2.127907 | 344 |
using LazyTaylorSeries
using Test
# Exercises lazy Taylor-series construction and arithmetic in one variable.
@testset "Basic usage" begin
    # `t` is the identity series (coefficient 1 at order 1, 0 elsewhere);
    # the two constructions differ only in the coefficient cache used
    # (a Vector vs a Dict).
    t  = Taylor1((t, i) -> (i == 1) ? 1.0 : 0.0, Float64[]) # define variable
    t2 = Taylor1((t, i) -> (i == 1) ? 1.0 : 0.0, Dict{Int,Float64}()) # define variable
    # coefficients are generated lazily on indexed access, including far
    # beyond anything computed so far (order 100)
    @test t[0] == 0 == t2[0]
    @test t[1] == 1 == t2[1]
    @test t[2] == 0 == t2[2]
    @test t[100] == 0 == t2[100]
    # scalar addition: 1 + t
    s = 1 + t
    @test (s[0], s[1], s[2], s[3]) == (1, 1, 0, 0)
    # series addition: t + t = 2t
    s = t + t
    @test (s[0], s[1], s[2], s[3]) == (0, 2, 0, 0)
    # series multiplication: t * t = t^2
    s = t * t
    @test (s[0], s[1], s[2], s[3]) == (0, 0, 1, 0)
    # integer power
    s = t^2
    @test (s[0], s[1], s[2], s[3]) == (0, 0, 1, 0)
    # binomial expansion: (1 + t)^2 = 1 + 2t + t^2
    s = (1 + t)^2
    @test (s[0], s[1], s[2], s[3]) == (1, 2, 1, 0)
end
| [
3500,
406,
12582,
29907,
27996,
198,
3500,
6208,
628,
198,
31,
9288,
2617,
366,
26416,
8748,
1,
2221,
198,
220,
220,
220,
256,
220,
796,
8121,
16,
19510,
83,
11,
1312,
8,
4613,
357,
72,
6624,
352,
8,
5633,
352,
13,
15,
1058,
657,
... | 1.78553 | 387 |
using Documenter, MyPkg1
# Build the package documentation with Documenter.
makedocs(;
    modules=[MyPkg1],
    format=Documenter.HTML(),
    pages=[
        "Home" => "index.md",
    ],
    # URL template used for "source" links on docstrings
    repo="https://github.com/XiaodongMa-MRI/MyPkg1.jl/blob/{commit}{path}#L{line}",
    sitename="MyPkg1.jl",
    authors="<NAME>",
    # NOTE(review): in recent Documenter versions `assets` is a keyword of
    # `Documenter.HTML()` rather than `makedocs` — confirm against the pinned
    # Documenter version.
    assets=String[],
)
# Deploy the built documentation (e.g. to the gh-pages branch) from CI.
deploydocs(;
    repo="github.com/XiaodongMa-MRI/MyPkg1.jl",
)
| [
3500,
16854,
263,
11,
2011,
47,
10025,
16,
198,
198,
76,
4335,
420,
82,
7,
26,
198,
220,
220,
220,
13103,
41888,
3666,
47,
10025,
16,
4357,
198,
220,
220,
220,
5794,
28,
24941,
263,
13,
28656,
22784,
198,
220,
220,
220,
5468,
4188... | 2.016949 | 177 |
<filename>src/grammar.jl
import Base
include("production.jl")
"""
A grammar, represented as a tuple ``G=(N,T,P,S)``
"""
# NOTE(review): the abstractly-typed fields (`Set`, `Array`) prevent
# specialization; consider parametrizing the struct — left unchanged here.
struct Grammar
    "The nonterminal symbols Set"
    N::Set
    "The terminal symbols Set"
    T::Set
    "The productions Array"
    P::Array
    "The starting symbol"
    S::AbstractString
    # true when every production has a single nonterminal on its left-hand
    # side; computed by the validating outer constructor below
    iscontextfree::Bool
end
### Constructors ###
"""
Grammar(N::Set, T::Set, P::Array, S::AbstractString)::Grammar
Builds a [`Grammar`](@ref) obtained from the given string of productions.
"""
function Grammar(N, T, P, S)
cf = all(x -> isa(x.left, AbstractString), P)
if (N ∩ T) ≠ Set()
throw(ArgumentError("The set of terminals and nonterminals are not disjoint, but have " * string(collect(N ∩ T)) * " in common"))
end
if S ∉ N
throw(ArgumentError("The start symbol is not a nonterminal."))
end
if cf
badprods = [p for p ∈ P if p.left ∉ N]
if !isempty(badprods)
throw(ArgumentError("The following productions have a left-hand side that is not a nonterminal: " * string(badprods)))
end
end
badprods = [p for p ∈ P if (Set(astype0(p).left) ∪ Set(p.right)) ⊈ (N ∪ T ∪ Set(["ε"]))]
if ~isempty(badprods)
throw(ArgumentError("The following productions contain symbols that are neither terminals or nonterminals: " * string(badprods)))
end
Grammar(N, T, P, S, cf)
end
"""
Grammar(prods::AbstractString, iscontextfree = true)::Grammar
Builds a [`Grammar`](@ref) obtained from the given string of productions.
"""
function Grammar(prods::AbstractString, iscontextfree = true)::Grammar
P = parseproduction(prods, iscontextfree)
S = nothing
N = nothing
T = nothing
if iscontextfree
S = P[1].left
N = Set(map(x -> x.left, P))
T = Set(vcat(map(x -> x.right, P)...)) - N - "ε"
else
S = P[1].left[1]
symbols = Set(vcat(map(x -> x.left, P)...)) ∪ Set(vcat(map(x -> x.right, P)...))
N = Set(filter(x -> isuppercase(x[1]), symbols))
T = symbols - N - "ε"
end
G = Grammar(N, T, P, S)
if iscontextfree
if ~G.iscontextfree
throw(ArgumentError("The resulting grammar is not context-free, even if so requested."))
end
end
return G
end
### Functions ###
"""
alternatives(g::Grammar, N::Array)::Array
Returns all the right-hand sides alternatives matching the given nonterminal.
"""
alternatives(g::Grammar, N::Union{AbstractString, Iterable})::Array = [P.right for P ∈ g.P if P.left == (isa(N,Iterable) ? collect(N) : N)]
"""
restrict(g::Grammar, symbols::Set)
Returns a [`Grammar`](@ref) using only the given symbols.
"""
restrict(g::Grammar, symbols::Set) = if g.S ∉ symbols throw(ArgumentError("The start symbol must be present among the symbols to keep.")) else Grammar(g.N ∩ symbols, g.T ∩ symbols, [P for P ∈ g.P if (Set([P.left]) ∪ Set(P.right)) ≤ symbols], g.S) end
### Operators ###
Base.:(==)(x::Grammar, y::Grammar) = (x.N, x.T, sort(x.P), x.S) == (y.N, y.T, sort(y.P), y.S)
Base.hash(g::Grammar) = Base.hash((g.N, g.T, sort(g.P), g.S))
Base.show(io::IO, g::Grammar) = Base.show(io, string("Grammar(N=", g.N, ", T=", g.T, ", P=", g.P, "S=", g.S, ")"))
| [
27,
34345,
29,
10677,
14,
4546,
3876,
13,
20362,
198,
11748,
7308,
198,
198,
17256,
7203,
25493,
13,
20362,
4943,
198,
198,
37811,
198,
32,
23491,
11,
7997,
355,
257,
46545,
7559,
38,
16193,
45,
11,
51,
11,
47,
11,
50,
8,
15506,
1... | 2.367918 | 1,359 |
<gh_stars>10-100
const ALWB_URI = URI(scheme="http", host="www.bom.gov.au", path="/jsp/awra/thredds/fileServer/AWRACMS")
abstract type DataMode end
"""
Values <: DataMode
Get the dataset as regular measured values.
"""
struct Values <: DataMode end
"""
Deciles <: DataMode
Get the dataset in relative deciles.
"""
struct Deciles <: DataMode end
# Docs below
struct ALWB{M<:DataMode,D<:Union{Day,Month,Year}} <: RasterDataSource end
layers(::Type{<:ALWB}) = (
:rain_day, :s0_pct, :ss_pct, :sd_pct, :sm_pct, :qtot, :etot,
:e0, :ma_wet, :pen_pet, :fao_pet, :asce_pet, :msl_wet, :dd
)
# Days are in 1 year nc files
date_step(::Type{<:ALWB{<:Any,Day}}) = Year(1)
# Months and years are in single files
date_step(::Type{<:ALWB{<:Any,Month}}) = Year(100)
date_step(::Type{<:ALWB{<:Any,Year}}) = Year(100)
has_constant_dims(::Type{<:ALWB}) = false
@doc """
ALWB{Union{Deciles,Values},Union{Day,Month,Year}} <: RasterDataSource
Data from the Australian Landscape Water Balance (ALWB) data source.
See: [www.bom.gov.au/water/landscape](http://www.bom.gov.au/water/landscape)
The dataset contains NetCDF files. They have a time dimension so that multiple
dates are stored in each file.
The available layers are: `$(layers(ALWB))`, available in daily, monthly and
annual resolutions, and as `Values` or relative `Deciles`.
`getraster` for `ALWB` must use a `date` keyword to specify the date to download.
See the [`getraster`](@ref) docs for implementation details.
""" ALWB
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/rain_day_2017.nc
# Precipiation = "rain_day"
# SoilMoisture_Upper = "s0_pct"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/ss_pct_2017.nc
# SoilMoisture_Lower = "ss_pct"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/sd_pct_2017.nc
# SoilMoisture_Deep = "sd_pct"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/sm_pct_2017.nc
# SoilMoisture_RootZone = "sm_pct" # http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/qtot_2017.nc # Runoff = "qtot"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/etot_2017.nc
# Evapotrans_Actual = "etot"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/e0_2017.nc
# Evapotrans_Potential_Landscape = "e0"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/ma_wet_2017.nc
# Evapotrans_Potential_Areal = "ma_wet"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/pen_pet_2017.nc
# Evapotrans_Potential_SyntheticPan = "pen_pet"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/fao_pet_2017.nc
# Evapotrans_RefCrop_Short = "fao_pet"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/asce_pet_2017.nc
# Evapotrans_RefCrop_Tall = "asce_pet"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/etot_2017.nc
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/msl_wet_2017.nc
# Evaporation_OpenWater = "msl_wet"
# http://www.bom.gov.au/jsp/awra/thredds/fileServer/AWRACMS/values/day/dd_2017.nc
# DeepDrainage = "dd"
"""
getraster(source::Type{<:ALWB{Union{Deciles,Values},Union{Day,Month,Year}}}, [layer]; date)
Download [`ALWB`](@ref) weather data from
[www.bom.gov.au/water/landscape](http://www.bom.gov.au/water/landscape) as values or
deciles with timesteps of `Day`, `Month` or `Year`.
# Arguments
- `layer`: `Symbol` or `Tuple` of `Symbol` from `$(layers(ALWB))`. Without a
`layer` argument, all layers will be downloaded, and a `NamedTuple` of paths returned.
# Keywords
- `date`: a `DateTime`, `AbstractVector` of `DateTime` or a `Tuple` of start and end dates.
For multiple dates, a `Vector` of multiple filenames will be returned.
ALWB is available with a daily, monthly, and yearly, timestep.
# Example
This will return the file containing annual averages, including your date:
```julia
julia> getraster(ALWB{Values,Year}, :ss_pct; date=Date(2001, 2))
"/your/RASTERDATASOURCES_PATH/ALWB/values/month/ss_pct.nc"
```
Returns the filepath/s of the downloaded or pre-existing files.
"""
function getraster(T::Type{<:ALWB}, layers::Union{Tuple,Symbol}; date)
_getraster(T, layers, date)
end
function _getraster(T::Type{<:ALWB{M,P}}, layers, dates::Tuple) where {M,P}
_getraster(T, layers, date_sequence(T, dates))
end
function _getraster(T::Type{<:ALWB}, layers, dates::AbstractArray)
_getraster.(T, Ref(layers), dates)
end
function _getraster(T::Type{<:ALWB}, layers::Tuple, date::Dates.TimeType)
_map_layers(T, layers, date)
end
function _getraster(T::Type{<:ALWB}, layer::Symbol, date::Dates.TimeType)
_check_layer(T, layer)
mkpath(rasterpath(T))
url = rasterurl(T, layer; date=date)
path = rasterpath(T, layer; date=date)
_maybe_download(url, path)
path
end
rastername(T::Type{<:ALWB{M,P}}, layer; date) where {M,P} =
string(layer, _pathsegment(P, date), ".nc")
rasterpath(::Type{ALWB}) = joinpath(rasterpath(), "ALWB")
rasterpath(::Type{ALWB{M,P}}) where {M,P} =
joinpath(joinpath(rasterpath(), "ALWB"), map(_pathsegment, (M, P))...)
rasterpath(T::Type{<:ALWB}, layer; date=nothing) =
joinpath(rasterpath(T), rastername(T, layer; date))
rasterurl(T::Type{<:ALWB{M,P}}, layer; date) where {M,P} =
joinpath(ALWB_URI, _pathsegments(T)..., rastername(T, layer; date))
# Utility methods
_pathsegments(::Type{ALWB{M,P}}) where {M,P} = _pathsegment(M), _pathsegment(P)
_pathsegment(::Type{Values}) = "values"
_pathsegment(::Type{Deciles}) = "deciles"
_pathsegment(::Type{Day}) = "day"
_pathsegment(::Type{Month}) = "month"
_pathsegment(::Type{Year}) = "year"
# Days are in whole-year files
_pathsegment(::Type{Day}, date) = "_" * string(year(date))
# Months and years are all in one file
_pathsegment(::Type{<:Union{Year,Month}}, date) = ""
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
198,
9979,
8355,
45607,
62,
47269,
796,
43975,
7,
15952,
1326,
2625,
4023,
1600,
2583,
2625,
2503,
13,
65,
296,
13,
9567,
13,
559,
1600,
3108,
35922,
73,
2777,
14,
707,
430,
14,
400,
445... | 2.376613 | 2,480 |
<gh_stars>1-10
"""
scattering_field(args)
Returns a function which gives the average scattering coefficients for any vector `x` inside the material. This field is defined by Equation (3.13) in [<NAME> and <NAME>, "Effective waves for random three-dimensional particulate materials", (2021)](https://arxiv.org/pdf/2010.00934.pdf)
"""
scattering_field
"Calculates the effective wavenumbers and return Vector{EffectivePlaneWaveMode}."
function WaveModes(ω::T, source::AbstractSource, material::Material{Dim,S,Sps}; kws...) where {T,Dim,S<:Shape{Dim},Sps<:Species{T,Dim}} # without the parametric types we get a "Unreachable reached" error
# The wavenumbers are calculated without knowledge of the materail symmetry. This is because the plane-wave symmetry leads to all possible wavenumbers and is simple to calculate.
k_effs = wavenumbers(ω, source.medium, material.species; numberofparticles = material.numberofparticles, kws... )
# The wavemodes need to know the material symmetry as the eigenvectors do depend on material shape and symetry.
wave_effs = [
WaveMode(ω, k_eff, source, material; kws...)
for k_eff in k_effs]
return wave_effs
end
"""
WaveMode(ω::T, wavenumber::Complex{T}, eigenvectors::Array{Complex{T}}, ::SetupSymmetry; kws...)
Returns a concrete subtype of AbstractWaveMode depending on the SetupSymmetry. The returned type should have all the necessary fields to calculate scattered waves (currently not true for EffectivePlanarWaves).
"""
function WaveMode(ω::T, wavenumber::Complex{T}, source::AbstractSource, material::Material{Dim}; kws...) where {T,Dim}
eigvectors = eigenvectors(ω, wavenumber, source, material; kws...)
α = solve_boundary_condition(ω, wavenumber, eigvectors, source, material; kws...)
# After this normalisation, sum(eigvectors, dims = 3) will satisfy the boundary conditions
eigvectors = [eigvectors[i] * α[i[3]] for i in CartesianIndices(eigvectors)]
return EffectiveRegularWaveMode(ω, wavenumber, source, material, eigvectors; kws...)
end
function WaveMode(ω::T, wavenumber::Complex{T}, psource::PlaneSource{T,Dim,1}, material::Material{Dim,Halfspace{T,Dim}};
tol::T = 1e-6, kws...) where {T,Dim}
direction = transmission_direction(wavenumber, (ω / psource.medium.c) * psource.direction, material.shape.normal)
eigvectors = eigenvectors(ω, wavenumber, psource, material; direction_eff = direction, kws...)
α = solve_boundary_condition(ω, wavenumber, eigvectors, psource, material; kws...)
# After this normalisation, sum(eigvectors, dims = 3) will satisfy the boundary conditions
eigvectors = [eigvectors[i] * α[i[3]] for i in CartesianIndices(eigvectors)]
return EffectivePlaneWaveMode(ω, wavenumber, direction, eigvectors)
end
function WaveMode(ω::T, wavenumber::Complex{T}, psource::PlaneSource{T,Dim,1}, material::Material{Dim,Plate{T,Dim}}; kws...) where {T,Dim}
# First we calculate the outward pointing normal
n = material.shape.normal;
n = - n .* sign(real(dot(n,psource.direction)));
k = ω / psource.medium.c
direction1 = transmission_direction(wavenumber, k * psource.direction, n)
eigvectors1 = eigenvectors(ω, wavenumber, psource, material; direction_eff = direction1, kws...)
# we choose direction2 so that k2 .* direction2 = - k1 .* direction1, where k1 = wavenumber, and k2 = - wavenumber
direction2 = direction1
eigvectors2 = eigenvectors(ω, - wavenumber, psource, material; direction_eff = direction2, kws...)
α = solve_boundary_condition(ω, wavenumber, eigvectors1, eigvectors2, psource, material; kws...)
# apply normalisation
eigvectors1 = eigvectors1 .* α[1]
eigvectors2 = eigvectors2 .* α[2]
mode1 = EffectivePlaneWaveMode(ω, wavenumber, direction1, eigvectors1)
mode2 = EffectivePlaneWaveMode(ω, - wavenumber, direction2, eigvectors2)
return [mode1,mode2]
end
# eigensystem(ω::T, source::AbstractSource, material::Material; kws...) where T<:AbstractFloat = eigensystem(ω, source.medium, material.species, Symmetry(source,material); numberofparticles = material.numberofparticles, kws...)
function solve_boundary_condition(ω::T, wavenumber::Complex{T}, eigvectors::Array, source::AbstractSource, material::Material; kws...) where T
return solve_boundary_condition(ω, wavenumber, eigvectors, source, material, Symmetry(source,material); kws...)
end
function solve_boundary_condition(ω::T, wavenumber::Complex{T}, eigvectors1::Array, eigvectors2::Array, source::AbstractSource, material::Material; kws...) where T
return solve_boundary_condition(ω, wavenumber, eigvectors1, eigvectors2, source, material, Symmetry(source,material); kws...)
end
"""
convert_eigenvector_basis(medium::PhysicalMedium,sym::AbstractSymmetry,eigvecs::Array)
The eigenvectors from high symmetric scenarios are smaller then the more general scenarios. This function pads with zeros the more symmetric cases to match more general cases, so that we can use the functions for both.
"""
convert_eigenvector_basis(medium::PhysicalMedium,sym::AbstractSymmetry,eigvecs::Array) = eigvecs
eigenvectors(ω::T, k_eff::Complex{T}, source::AbstractSource, material::Material; kws...) where T<:AbstractFloat = eigenvectors(ω, k_eff::Complex{T}, source.medium, material.species, Symmetry(source,material); numberofparticles = material.numberofparticles, kws...)
# For plane waves, it is simpler to write all cases in the format for the most general case. For example, for PlanarAzimuthalSymmetry the eignvectors are much smaller. So we will turn these into the more general eigvector case by padding it with zeros.
# function eigenvectors(ω::T, k_eff::Complex{T}, source::PlaneSource{T}, material::Material{Dim,S}; kws...) where {T<:AbstractFloat,Dim,S<:Union{Plate,Halfspace}}
#
# eigvecs = eigenvectors(ω, k_eff, source.medium, material.species, Symmetry(source,material); kws...)
#
# if Symmetry(source,material) == PlanarAzimuthalSymmetry{Dim}()
# eigvecs = azimuthal_to_planar_eigenvector(typeof(source.medium),eigvecs)
# end
#
# return eigvecs
#
# end
function eigenvectors(ω::T, k_eff::Complex{T}, medium::PhysicalMedium, species::Vector{Sp}, symmetry::AbstractSymmetry;
tol::T = 1e-4, kws...
) where {T<:AbstractFloat, Sp<:Specie{T}}
MM = eigensystem(ω, medium, species, symmetry; kws...)
# calculate eigenvectors
MM_svd = svd(MM(k_eff))
inds = findall(MM_svd.S .< tol)
if isempty(inds)
@warn("No eigenvectors found with the tolerance tol = $tol. Will use only one eigenvector with the eigenvalue $(MM_svd.S[end]), which should be less than tol.")
inds = [length(MM_svd.S)]
end
#NOTE: MM(k_eff) ≈ MM_svd.U * diagm(0 => MM_svd.S) * MM_svd.Vt
eigvectors = MM_svd.V[:,inds]
# Reshape to separate different species and eigenvectors
S = length(species)
# pads with zeros if necessary to match the more general case with less symmetry
eigvectors = convert_eigenvector_basis(medium,symmetry,reshape(eigvectors,(:,S,size(eigvectors,2))))
return eigvectors
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
198,
37811,
198,
220,
220,
220,
45765,
62,
3245,
7,
22046,
8,
198,
198,
35561,
257,
2163,
543,
3607,
262,
2811,
45765,
44036,
329,
597,
15879,
4600,
87,
63,
2641,
262,
2587,
13,
770,
2214,... | 2.875557 | 2,467 |
##############
# Owner Type #
##############
@ghdef mutable struct Owner
typ::Union{String, Nothing}
email::Union{String, Nothing}
name::Union{String, Nothing}
login::Union{String, Nothing}
bio::Union{String, Nothing}
company::Union{String, Nothing}
location::Union{String, Nothing}
avatar_url::Union{HTTP.URI, Nothing}
gravatar_id::Union{String, Nothing}
id::Union{Int, Nothing}
public_repos::Union{Int, Nothing}
owned_private_repos::Union{Int, Nothing}
total_private_repos::Union{Int, Nothing}
public_gists::Union{Int, Nothing}
private_gists::Union{Int, Nothing}
followers::Union{Int, Nothing}
following::Union{Int, Nothing}
collaborators::Union{Int, Nothing}
blog::Union{HTTP.URI, Nothing}
url::Union{HTTP.URI, Nothing}
html_url::Union{HTTP.URI, Nothing}
updated_at::Union{Dates.DateTime, Nothing}
created_at::Union{Dates.DateTime, Nothing}
date::Union{Dates.DateTime, Nothing}
hireable::Union{Bool, Nothing}
site_admin::Union{Bool, Nothing}
end
Owner(login::AbstractString, isorg = false) = Owner(Dict("login" => login, "type" => isorg ? "Organization" : "User"))
namefield(owner::Owner) = owner.login
typprefix(isorg) = isorg ? "orgs" : "users"
#############
# Owner API #
#############
isorg(owner::Owner) = something(owner.typ, "") == "Organization"
@api_default function whoami(api::GitHubAPI; options...)
result = gh_get_json(api, "/user"; options...)
return Owner(result)
end
@api_default owner(api::GitHubAPI, owner_obj::Owner; options...) = owner(api, name(owner_obj), isorg(owner_obj); options...)
@api_default function owner(api::GitHubAPI, owner_obj, isorg = false; options...)
result = gh_get_json(api, "/$(typprefix(isorg))/$(name(owner_obj))"; options...)
return Owner(result)
end
@api_default function users(api::GitHubAPI; options...)
results, page_data = gh_get_paged_json(api, "/users"; options...)
return map(Owner, results), page_data
end
@api_default function check_membership(api::GitHubAPI, org, user; public_only = false, options...)
scope = public_only ? "public_members" : "members"
resp = gh_get(api, "/orgs/$(name(org))/$scope/$(name(user))"; handle_error = false, allowredirects = false, options...)
if resp.status == 204
return true
elseif resp.status == 404
return false
elseif resp.status == 302
# For convenience, still check public membership. Otherwise, we don't know, so error
@assert !public_only
is_public_member = check_membership(org, user; public_only = true, options...)
is_public_member && return true
error("Enquiring about an Organization to which you do not have access.\n"*
"Set `public_only=true` or provide authentication.")
else
handle_response_error(resp)
end
end
@api_default function orgs(api::GitHubAPI, owner; options...)
results, page_data = gh_get_paged_json(api, "/users/$(name(owner))/orgs"; options...)
return map(Owner, results), page_data
end
@api_default function followers(api::GitHubAPI, owner; options...)
results, page_data = gh_get_paged_json(api, "/users/$(name(owner))/followers"; options...)
return map(Owner, results), page_data
end
@api_default function following(api::GitHubAPI, owner; options...)
results, page_data = gh_get_paged_json(api, "/users/$(name(owner))/following"; options...)
return map(Owner, results), page_data
end
@api_default function pubkeys(api::GitHubAPI, owner; options...)
Base.depwarn("`pubkeys` is deprecated in favor of `sshkeys`, " *
"which return a vector of keys, instead of a Dict from key-id to key.", :pubkeys)
results, page_data = sshkeys(api, owner; options...)
output = Dict{Int,String}([(key["id"], key["key"]) for key in results])
return output, page_data
end
@api_default function sshkeys(api::GitHubAPI, owner; options...)
results, page_data = gh_get_paged_json(api, "/users/$(name(owner))/keys"; options...)
output = convert(Vector{Dict{String,Any}}, results)
return output, page_data
end
@api_default function gpgkeys(api::GitHubAPI, owner; options...)
results, page_data = gh_get_paged_json(api, "/users/$(name(owner))/gpg_keys"; options...)
output = convert(Vector{Dict{String,Any}}, results)
return output, page_data
end
repos(api::GitHubAPI, owner::Owner; options...) = repos(api, name(owner), isorg(owner); options...)
@api_default function repos(api::GitHubAPI, owner, isorg = false; options...)
results, page_data = gh_get_paged_json(api, "/$(typprefix(isorg))/$(name(owner))/repos"; options...)
return map(Repo, results), page_data
end
@api_default function teams(api::GitHubAPI, owner; options...)
results, page_data = gh_get_paged_json(api, "/orgs/$(name(owner))/teams"; options...)
return map(Team, results), page_data
end
| [
7804,
4242,
2235,
198,
2,
23853,
5994,
1303,
198,
7804,
4242,
2235,
198,
198,
31,
456,
4299,
4517,
540,
2878,
23853,
198,
220,
220,
220,
2170,
3712,
38176,
90,
10100,
11,
10528,
92,
198,
220,
220,
220,
3053,
3712,
38176,
90,
10100,
... | 2.7671 | 1,769 |
################################
## Generic DataFile interface ##
################################
# This provides common methods that could be applicable to any
# interface for reading variables out of a file, e.g. HDF5,
# JLD, or MAT files. This is the super class of HDF5File, HDF5Group,
# JldFile, JldGroup, Matlabv5File, and MatlabHDF5File.
#
# Types inheriting from DataFile should have names, read, and write
# methods
abstract DataFile
import Base: read, write
# Convenience macros
macro read(fid, sym)
if !isa(sym, Symbol)
error("Second input to @read must be a symbol (i.e., a variable)")
end
esc(:($sym = read($fid, $(string(sym)))))
end
macro write(fid, sym)
if !isa(sym, Symbol)
error("Second input to @write must be a symbol (i.e., a variable)")
end
esc(:(write($fid, $(string(sym)), $sym)))
end
# Read a list of variables, read(parent, "A", "B", "x", ...)
read(parent::DataFile, name::ASCIIString...) =
tuple([read(parent, x) for x in name]...)
# Read one or more variables and pass them to a function. This is
# convenient for avoiding type inference pitfalls with the usual
# read syntax.
read(f::Base.Callable, parent::DataFile, name::ASCIIString...) =
f(read(parent, name...)...)
# Read every variable in the file
if VERSION < v"0.4.0-dev+980"
function read(f::DataFile)
vars = names(f)
vals = Array(Any, length(vars))
for i = 1:length(vars)
vals[i] = read(f, vars[i])
end
Dict(vars, vals)
end
else
function read(f::DataFile)
vars = names(f)
vals = Array(Any, length(vars))
for i = 1:length(vars)
vals[i] = read(f, vars[i])
end
Dict(zip(vars, vals))
end
end
| [
29113,
198,
2235,
42044,
6060,
8979,
7071,
22492,
198,
29113,
198,
2,
770,
3769,
2219,
5050,
326,
714,
307,
9723,
284,
597,
198,
2,
7071,
329,
3555,
9633,
503,
286,
257,
2393,
11,
304,
13,
70,
13,
5572,
37,
20,
11,
198,
2,
449,
... | 2.536337 | 688 |
<filename>src/umfpack_lu.jl
mutable struct LUFactorization{Tv,Ti} <: AbstractLUFactorization{Tv,Ti}
A::Union{Nothing,ExtendableSparseMatrix{Tv,Ti}}
fact::Union{Nothing,SuiteSparse.UMFPACK.UmfpackLU{Tv,Ti}}
phash::UInt64
end
"""
```
LUFactorization(;valuetype=Float64, indextype=Int64)
LUFactorization(matrix)
```
Default Julia LU Factorization based on umfpack.
"""
LUFactorization(;valuetype::Type=Float64,indextype::Type=Int64)=LUFactorization{valuetype,indextype}(nothing,nothing,0)
function update!(lufact::LUFactorization)
flush!(lufact.A)
if lufact.A.phash!=lufact.phash
lufact.fact=lu(lufact.A.cscmatrix)
lufact.phash=lufact.A.phash
else
lufact.fact=lu!(lufact.fact,lufact.A.cscmatrix)
end
lufact
end
| [
27,
34345,
29,
10677,
14,
388,
69,
8002,
62,
2290,
13,
20362,
198,
76,
18187,
2878,
406,
36820,
11218,
1634,
90,
51,
85,
11,
40533,
92,
1279,
25,
27741,
43,
36820,
11218,
1634,
90,
51,
85,
11,
40533,
92,
198,
220,
220,
220,
317,
... | 2.203966 | 353 |
<gh_stars>0
# CEP
"""
setup_cep_opt_sets(ts_data::ClustData,opt_data::CEPData,opt_config::Dict{String,Any})
fetching sets from the time series (ts_data) and capacity expansion model data (opt_data) and returning Dictionary with Sets as Symbols
"""
function setup_opt_cep_set(ts_data::ClustData,
opt_data::OptDataCEP,
opt_config::Dict{String,Any})
#`costs::OptVariable`: costs[tech,node,year,account,impact] - annulized costs [USD in USD/MW_el, CO2 in kg-CO₂-eq./MW_el]`
costs = opt_data.costs
#`techs::OptVariable`: techs[tech] - OptDataCEPTech
techs = opt_data.techs
#`nodes::OptVariable`: nodes[tech,node] - OptDataCEPNode
nodes = opt_data.nodes
#`lines::OptVarible`: lines[tech,line] - OptDataCEPLine
lines = opt_data.lines
set=Dict{String,Array}()
set["nodes"]=axes(nodes,"node")
#Seperate sets for fossil and renewable technology
set["tech"]=Array{String,1}()
for categ in unique(getfield.(techs[:],:categ))
if opt_config[categ]
set["tech_"*categ]=axes(techs,"tech")[getfield.(techs[:], :categ).==categ]
set["tech"]=[set["tech"];set["tech_"*categ]]
end
end
#Compose a set of technologies without transmission
set["tech_cap"]=deepcopy(set["tech"])
set["tech_trans"]=Array{String,1}()
set["tech_power"]=deepcopy(set["tech"])
set["tech_energy"]=Array{String,1}()
for (k,v) in set
if occursin("tech",k) && occursin("_transmission",k)
setdiff!(set["tech_cap"],v)
set["tech_trans"]=[set["tech_trans"];v]
end
if occursin("tech",k) && String(k[end-1:end])=="_e"
setdiff!(set["tech_power"],v)
set["tech_energy"]=[set["tech_energy"];v]
end
end
set["impact"]=axes(costs,"impact")
set["impact_mon"]=[set["impact"][1]]
set["impact_env"]=set["impact"][2:end]
set["year"]=axes(costs,"year")
set["account"]=axes(costs,"account")
if opt_config["transmission"]
set["lines"]=axes(opt_data.lines,"line")
set["dir_transmission"]=["uniform","opposite"]
end
if opt_config["existing_infrastructure"]
set["infrastruct"]=["new","ex"]
else
set["infrastruct"]=["new"]
end
set["sector"]=unique(getfield.(techs[:],:sector))
set["time_K"]=1:ts_data.K
set["time_T"]=1:ts_data.T
set["time_T_e"]=0:ts_data.T
if opt_config["seasonalstorage"]
set["time_I_e"]=0:length(ts_data.k_ids)
set["time_I"]=1:length(ts_data.k_ids)
end
return set
end
"""
setup_cep_opt_basic(ts_data::ClustData,opt_data::CEPData,opt_config::Dict{String,Any},optimizer::DataType,optimizer_config::Dict{Symbol,Any})
setting up the basic core elements for a CEP-model
- a JuMP Model is setup and the optimizer is configured. The optimizer itself is passed on as a `optimizer`. It's configuration with `optimizer_config` - Each Symbol and the corresponding value in the Dictionary is passed on to the `with_optimizer` function in addition to the optimizer. For Gurobi an example Dictionary could look like `Dict{Symbol,Any}(:Method => 2, :OutputFlag => 0, :Threads => 2)`
- the sets are setup
"""
function setup_opt_cep_basic(ts_data::ClustData,
opt_data::OptDataCEP,
opt_config::Dict{String,Any},
optimizer::DataType,
optimizer_config::Dict{Symbol,Any})
## MODEL CEP ##
# Initialize model
model = JuMP.Model(with_optimizer(optimizer;optimizer_config...))
# Initialize info
info=[opt_config["descriptor"]]
# Setup set
set=setup_opt_cep_set(ts_data, opt_data, opt_config)
# Setup Model CEP
return OptModelCEP(model,info,set)
end
"""
setup_opt_cep_basic_variables!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
Adding basic variables COST, CAP and GEN based on set
"""
function setup_opt_cep_basic_variables!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
## VARIABLES ##
# Cost
push!(cep.info,"Variable COST[account, impact, tech] in $(set["impact"].*" "...)")
@variable(cep.model, COST[account=set["account"],impact=set["impact"],tech=set["tech"]])
# Capacity
push!(cep.info,"Variable CAP[tech_cap, infrastruct, nodes] ≥ 0 in MW]")
@variable(cep.model, CAP[tech=set["tech_cap"],infrastruct=set["infrastruct"] ,node=set["nodes"]]>=0)
# Generation #
push!(cep.info,"Variable GEN[sector, tech_power, t, k, node] in MW")
@variable(cep.model, GEN[sector=set["sector"], tech=set["tech_power"], t=set["time_T"], k=set["time_K"], node=set["nodes"]])
#end
return cep
end
"""
setup_opt_cep_lost_load!(cep::OptModelCEP, ts_data::ClustData, opt_data::OptDataCEP) set::Dict)
Adding variable SLACK, LL (LostLoad - if demand cannot be met with installed capacity -> Lost Load can be "purchased" to meet demand)
"""
function setup_opt_cep_lost_load!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
#ts_weights: k - weight of each period:
ts_weights=ts_data.weights
#ts_deltas: t x k - Δt of each segment x period
ts_deltas=ts_data.delta_t
## LOST LOAD ##
# Slack variable #
push!(cep.info,"Variable SLACK[sector, t, k, node] ≥ 0 in MW")
@variable(cep.model, SLACK[sector=set["sector"], t=set["time_T"], k=set["time_K"], node=set["nodes"]] >=0)
# Lost Load variable #
push!(cep.info,"Variable LL[sector, node] ≥ 0 in MWh")
@variable(cep.model, LL[sector=set["sector"], node=set["nodes"]] >=0)
# Calculation of Lost Load
push!(cep.info,"LL[sector, node] = Σ SLACK[sector, t, k, node] ⋅ ts_weights[k] ⋅ Δt[t,k] ∀ sector, node")
@constraint(cep.model, [sector=set["sector"], node=set["nodes"]], cep.model[:LL][sector, node]==sum(cep.model[:SLACK][sector, t, k, node]*ts_weights[k]*ts_deltas[t,k] for t=set["time_T"], k=set["time_K"]))
return cep
end
"""
setup_opt_cep_lost_emission!(cep::OptModelCEP, ts_data::ClustData, opt_data::OptDataCEP)
Adding variable LE (LostEmission - if demand cannot be met without breaking Emission-constraint -> Lost Emission can be "purchased" to meet demand with "dirty" production)
"""
function setup_opt_cep_lost_emission!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
## LOST EMISSION ##
# Lost Emission variable #
push!(cep.info,"Variable LE[impact_{environment}] ≥ 0 in kg")
@variable(cep.model, LE[impact=set["impact"][2:end]] >=0)
return cep
end
"""
setup_opt_cep_fix_design_variables!(cep::OptModelCEP,ts_data::ClustData, opt_data::OptDataCEP,fixed_design_variables::Dict{String,Any})
Fixing variables CAP based on first stage vars
"""
function setup_opt_cep_fix_design_variables!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP,
fixed_design_variables::Dict{String,Any})
## DATA ##
set=cep.set
cap=fixed_design_variables["CAP"]
## VARIABLES ##
# Transmission
if "tech_transmission" in keys(set)
trans=fixed_design_variables["TRANS"]
push!(cep.info,"TRANS[tech, 'new', line] = existing infrastructure ∀ tech_trans, line")
@constraint(cep.model, [line=set["lines"], tech=set["tech_trans"]], cep.model[:TRANS][tech,"new",line]==trans[tech, "new", line])
end
# Capacity
push!(cep.info,"CAP[tech, 'new', node] = existing infrastructure ∀ tech_cap, node")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_cap"]], cep.model[:CAP][tech,"new",node]==cap[tech, "new", node])
return cep
end
"""
setup_opt_cep_generation_el!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
add variable and fixed Costs and limit generation to installed capacity (and limiting time_series, if dependency in techs defined) for fossil and renewable power plants
"""
function setup_opt_cep_generation_el!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
#`costs::OptVariable`: costs[tech,node,year,account,impact] - annulized costs [USD in USD/MW_el, CO2 in kg-CO₂-eq./MW_el]`
costs = opt_data.costs
#`techs::OptVariable`: techs[tech] - OptDataCEPTech
techs = opt_data.techs
#`nodes::OptVariable`: nodes[tech,node] - OptDataCEPNode
nodes = opt_data.nodes
#ts Dict( tech-node ): t x k
ts=ts_data.data
#ts_weights: k - weight of each period:
ts_weights=ts_data.weights
#ts_deltas: t x k - Δt of each segment x period
ts_deltas=ts_data.delta_t
## GENERATION ELECTRICITY ##
# Calculate Variable Costs
push!(cep.info,"COST['var',impact,tech] = Σ_{t,k,node}GEN['el',t,k,node]⋅ ts_weights[k] ⋅ ts_deltas[t,k]⋅ var_costs[tech,impact] ∀ impact, tech_generation")
@constraint(cep.model, [impact=set["impact"], tech=set["tech_generation"]], cep.model[:COST]["var",impact,tech]==sum(cep.model[:GEN]["el",tech,t,k,node]*ts_weights[k]*ts_deltas[t,k]*costs[tech,node,set["year"][1],"var",impact] for node=set["nodes"], t=set["time_T"], k=set["time_K"]))
# Calculate Fixed Costs
push!(cep.info,"COST['cap_fix',impact,tech] = Σ_{t,k}(ts_weights ⋅ ts_deltas[t,k])/8760h ⋅ Σ_{node}CAP[tech,'new',node] ⋅ cap_costs[tech,impact] ∀ impact, tech_generation")
@constraint(cep.model, [impact=set["impact"], tech=set["tech_generation"]], cep.model[:COST]["cap_fix",impact,tech]==sum(ts_weights[k]*ts_deltas[t,k] for t=set["time_T"], k=set["time_K"])/8760* sum(cep.model[:CAP][tech,"new",node] *costs[tech,node,set["year"][1],"cap_fix",impact] for node=set["nodes"]))
# Limit the generation of dispathables to the infrastructing capacity of dispachable power plants
push!(cep.info,"0 ≤ GEN['el',tech, t, k, node] ≤ Σ_{infrastruct} CAP[tech,infrastruct,node] ∀ node, tech_generation{dispatchable}, t, k")
# Limit the generation of dispathables to the infrastructing capacity of non-dispachable power plants
push!(cep.info,"0 ≤ GEN['el',tech, t, k, node] ≤ Σ_{infrastruct}CAP[tech,infrastruct,node]*ts[tech-node,t,k] ∀ node, tech_generation{non_dispatchable}, t, k")
for tech in set["tech_generation"]
# Limit the generation of dispathables to the infrastructing capacity of dispachable power plants
if techs[tech].time_series=="none"
@constraint(cep.model, [node=set["nodes"], t=set["time_T"], k=set["time_K"]], 0 <=cep.model[:GEN]["el",tech, t, k, node])
@constraint(cep.model, [node=set["nodes"], t=set["time_T"], k=set["time_K"]], cep.model[:GEN]["el",tech, t, k, node] <=sum(cep.model[:CAP][tech,infrastruct,node] for infrastruct=set["infrastruct"]))
else
# Limit the generation of dispathables to the infrastructing capacity of non-dispachable power plants
@constraint(cep.model, [node=set["nodes"], t=set["time_T"], k=set["time_K"]], 0 <=cep.model[:GEN]["el",tech, t, k, node])
@constraint(cep.model, [node=set["nodes"], t=set["time_T"], k=set["time_K"]], cep.model[:GEN]["el",tech,t,k,node] <= sum(cep.model[:CAP][tech,infrastruct,node] for infrastruct=set["infrastruct"])*ts[techs[tech].time_series*"-"*node][t,k])
end
end
return cep
end
"""
setup_opt_cep_storage!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
add variables INTRASTORGEN and INTRASTOR, variable and fixed Costs, limit generation to installed power-capacity, connect simple-storage levels (within period) with generation
basis for either simplestorage or seasonalstorage
"""
function setup_opt_cep_storage!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
#`costs::OptVariable`: costs[tech,node,year,account,impact] - annulized costs [USD in USD/MW_el, CO2 in kg-CO₂-eq./MW_el]`
costs = opt_data.costs
#`techs::OptVariable`: techs[tech] - OptDataCEPTech
techs = opt_data.techs
#ts_weights: k - weight of each period:
ts_weights=ts_data.weights
#ts_deltas: t x k - Δt of each segment x period
ts_deltas=ts_data.delta_t
## VARIABLE ##existing_infrastructure
# Storage has additional element 0 for storage at hour 0 of day
push!(cep.info,"Variable INTRASTOR[sector, tech_storage_e, t, k, node] ≥ 0 in MWh")
@variable(cep.model, INTRASTOR[sector=set["sector"], tech=set["tech_storage_e"], t=set["time_T_e"], k=set["time_K"], node=set["nodes"]] >=0)
# Storage generation is necessary for the efficiency
#push!(cep.info,"Variable INTRASTORGEN[sector, dir, tech, t, k, node] ≥ 0 in MW")
#@variable(cep.model, INTRASTORGEN[sector=set["sector"], dir=set["dir_storage"], tech=set["tech_storage_p"], t=set["time_T"], k=set["time_K"], node=set["nodes"]] >=0)
## STORAGE ##
# Calculate Variable Costs
push!(cep.info,"COST['var',impact,tech] = 0 ∀ impact, tech_storage")
@constraint(cep.model, [impact=set["impact"], tech=[set["tech_storage_in"];set["tech_storage_out"];set["tech_storage_e"]]], cep.model[:COST]["var",impact,tech]==0)
# Fix Costs storage
push!(cep.info,"COST['fix',impact,tech] = Σ_{t,k}(ts_weights ⋅ ts_deltas[t,k])/8760h ⋅ Σ_{node}CAP[tech,'new',node] ⋅ costs[tech,node,year,'cap_fix',impact] ∀ impact, tech_storage")
@constraint(cep.model, [tech=[set["tech_storage_in"];set["tech_storage_out"];set["tech_storage_e"]], impact=set["impact"]], cep.model[:COST]["cap_fix",impact,tech]==sum(ts_weights[k]*ts_deltas[t,k] for t=set["time_T"], k=set["time_K"])/8760* sum(cep.model[:CAP][tech,"new",node]*costs[tech,node,set["year"][1],"cap_fix",impact] for node=set["nodes"]))
# Limit the Generation of the theoretical power part of the battery to its installed power
push!(cep.info,"0 ≤ GEN['el',tech, t, k, node] ≤ Σ_{infrastruct} CAP[tech,infrastruct,node] ∀ node, tech_storage_out, t, k")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_out"], t=set["time_T"], k=set["time_K"]], 0 <= cep.model[:GEN]["el",tech,t,k,node])
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_out"], t=set["time_T"], k=set["time_K"]], cep.model[:GEN]["el",tech,t,k,node]<=sum(cep.model[:CAP][tech,infrastruct,node] for infrastruct=set["infrastruct"]))
push!(cep.info,"0 ≥ GEN['el',tech, t, k, node] ≥ (-1) ⋅ Σ_{infrastruct} CAP[tech,infrastruct,node] ∀ node, tech_storage_in, t, k")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_in"], t=set["time_T"], k=set["time_K"]], 0 >= cep.model[:GEN]["el",tech,t,k,node])
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_in"], t=set["time_T"], k=set["time_K"]], cep.model[:GEN]["el",tech,t,k,node]>=(-1)*sum(cep.model[:CAP][tech,infrastruct,node] for infrastruct=set["infrastruct"]))
# Connect the previous storage level and the integral of the flows with the new storage level
push!(cep.info,"INTRASTOR['el',tech, t, k, node] = INTRASTOR['el',tech, t-1, k, node] η[tech]^(ts_deltas[t,k]/732h) + ts_deltas[t,k] ⋅ (-1) ⋅ (GEN['el',tech_{in}, t, k, node] ⋅ η[tech_{in}] + GEN['el',tech_{out}, t, k, node] / η[tech_{out}]) ∀ node, tech_storage_e, t, k")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"], t in set["time_T"], k=set["time_K"]], cep.model[:INTRASTOR]["el",tech,t,k,node]==cep.model[:INTRASTOR]["el",tech,t-1,k,node]*(techs[tech].eff)^(ts_deltas[t,k]/732) - ts_deltas[t,k] * (cep.model[:GEN]["el",split(tech,"_")[1]*"_in",t,k,node] * techs[split(tech,"_")[1]*"_in"].eff + cep.model[:GEN]["el",split(tech,"_")[1]*"_out",t,k,node] / techs[split(tech,"_")[1]*"_out"].eff))
push!(cep.info,"CAP[tech_{out}, 'new', node] = CAP[tech_{in}, 'new', node] ∀ node, tech_{EUR-Cap-Cost out/in==0}")
for tech in set["tech_storage_out"]
for node in set["nodes"]
if costs[tech,node,set["year"][1],"cap_fix",set["impact"][1]]==0 || costs[split(tech,"_")[1]*"_in",node,set["year"][1],"cap_fix",set["impact"][1]]==0
@constraint(cep.model, cep.model[:CAP][tech,"new",node]==cep.model[:CAP][split(tech,"_")[1]*"_in","new",node])
end
end
end
return cep
end
"""
setup_opt_cep_simplestorage!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
Adding only intra-day storage:
Looping constraint for each period (same start and end level for all periods) and limit storage to installed energy-capacity
"""
function setup_opt_cep_simplestorage!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
## INTRASTORAGE ##
# Limit the storage of the theoretical energy part of the battery to its installed power
push!(cep.info,"INTRASTOR['el',tech, t, k, node] ≤ Σ_{infrastruct} CAP[tech,infrastruct,node] ∀ node, tech_storage, t, k")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"], t=set["time_T"], k=set["time_K"]], cep.model[:INTRASTOR]["el",tech,t,k,node]<=sum(cep.model[:CAP][tech,infrastruct,node] for infrastruct=set["infrastruct"]))
# Set storage level at beginning and end of day equal
push!(cep.info,"INTRASTOR['el',tech, '0', k, node] = INTRASTOR['el',tech, 't[end]', k, node] ∀ node, tech_storage_e, k")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"], k=set["time_K"]], cep.model[:INTRASTOR]["el",tech,0,k,node]== cep.model[:INTRASTOR]["el",tech,set["time_T_e"][end],k,node])
# Set the storage level at the beginning of each representative day to the same
push!(cep.info,"INTRASTOR['el',tech, '0', k, node] = INTRASTOR['el',tech, '0', k, node] ∀ node, tech_storage_e, k")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"], k=set["time_K"]], cep.model[:INTRASTOR]["el",tech,0,k,node]== cep.model[:INTRASTOR]["el",tech,0,1,node])
return cep
end
"""
setup_opt_cep_seasonalstorage!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
Adding inter-day storage:
add variable INTERSTOR, calculate seasonal-storage-level and limit total storage to installed energy-capacity
"""
function setup_opt_cep_seasonalstorage!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
#K identification numbers
k_ids=ts_data.k_ids
## VARIABLE ##
# Storage
push!(cep.info,"Variable INTERSTOR[sector, tech, i, node] ≥ 0 in MWh")
@variable(cep.model, INTERSTOR[sector=set["sector"], tech=set["tech_storage_e"], i=set["time_I_e"], node=set["nodes"]]>=0)
## INTERSTORAGE ##
# Set storage level at the beginning of the year equal to the end of the year
push!(cep.info,"INTERSTOR['el',tech, '0', node] = INTERSTOR['el',tech, 'end', node] ∀ node, tech_storage, t, k")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"]], cep.model[:INTERSTOR]["el",tech,0,node]== cep.model[:INTERSTOR]["el",tech,set["time_I_e"][end],node])
# Connect the previous seasonal-storage level and the daily difference of the corresponding simple-storage with the new seasonal-storage level
push!(cep.info,"INTERSTOR['el',tech, i+1, node] = INTERSTOR['el',tech, i, node] + INTRASTOR['el',tech, 'k[i]', 't[end]', node] - INTRASTOR['el',tech, 'k[i]', '0', node] ∀ node, tech_storage_e, i")
# Limit the total storage (seasonal and simple) to be greater than zero and less than total storage cap
push!(cep.info,"0 ≤ INTERSTOR['el',tech, i, node] + INTRASTOR['el',tech, t, k[i], node] ≤ Σ_{infrastruct} CAP[tech,infrastruct,node] ∀ node, tech_storage_e, i, t")
push!(cep.info,"0 ≤ INTERSTOR['el',tech, i, node] + INTRASTOR['el',tech, t, k[i], node] ≤ Σ_{infrastruct} CAP[tech,infrastruct,node] ∀ node, tech_storage_e, i, t")
for i in set["time_I"]
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"]], cep.model[:INTERSTOR]["el",tech,i,node] == cep.model[:INTERSTOR]["el",tech,i-1,node] + cep.model[:INTRASTOR]["el",tech,set["time_T"][end],k_ids[i],node] - cep.model[:INTRASTOR]["el",tech,0,k_ids[i],node])
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"], t=set["time_T_e"]], 0 <= cep.model[:INTERSTOR]["el",tech,i,node]+cep.model[:INTRASTOR]["el",tech,t,k_ids[i],node])
@constraint(cep.model, [node=set["nodes"], tech=set["tech_storage_e"], t=set["time_T_e"]], cep.model[:INTERSTOR]["el",tech,i,node]+cep.model[:INTRASTOR]["el",tech,t,k_ids[i],node] <= sum(cep.model[:CAP][tech,infrastruct,node] for infrastruct=set["infrastruct"]))
end
return cep
end
"""
setup_opt_cep_transmission!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
Setup variable FLOW and TRANS, calculate fixed and variable COSTs, set CAP-trans to zero, limit FLOW with TRANS, calculate GEN-trans for each node
"""
function setup_opt_cep_transmission!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
#`costs::OptVariable`: costs[tech,node,year,account,impact] - annulized costs [USD in USD/MW_el, CO2 in kg-CO₂-eq./MW_el]`
costs = opt_data.costs
#`lines::OptVarible`: lines[tech,line] - OptDataCEPLine
lines = opt_data.lines
#ts_weights: k - weight of each period:
ts_weights=ts_data.weights
#ts_deltas: t x k - Δt of each segment x period
ts_deltas=ts_data.delta_t
## VARIABLE ##
# Add varibale FLOW
push!(cep.info,"Variable FLOW[sector, dir, tech_transmission, t, k, line] ≥ 0 in MW")
@variable(cep.model, FLOW[sector=set["sector"], dir=set["dir_transmission"], tech=set["tech_transmission"], t=set["time_T"], k=set["time_K"], line=set["lines"]] >= 0)
# Add variable TRANS
push!(cep.info,"Variable TRANS[tech_transmission, infrastruct, lines] ≥ 0 in MW")
@variable(cep.model, TRANS[tech=set["tech_transmission"], infrastruct=set["infrastruct"], line=set["lines"]] >= 0)
## TRANSMISSION ##
# Calculate Variable Costs
push!(cep.info,"COST['var',impact,tech] = 0 ∀ impact, tech_transmission")
@constraint(cep.model, [impact=set["impact"], tech=set["tech_transmission"]], cep.model[:COST]["var",impact,tech] == 0)
# Calculate Fixed Costs
push!(cep.info,"COST['cap-fix',impact,tech] = Σ_{t,k}(ts_weights ⋅ ts_deltas[t,k])/8760h ⋅ Σ_{node}(TRANS[tech,'new',line] ⋅ length[line]) ⋅ (cap_costs[tech,impact]+fix_costs[tech,impact]) ∀ impact, tech_transmission")
@constraint(cep.model, [impact=set["impact"], tech=set["tech_transmission"]], cep.model[:COST]["cap_fix",impact,tech] == sum(ts_weights[k]*ts_deltas[t,k] for t=set["time_T"], k=set["time_K"])/8760* sum(cep.model[:TRANS][tech,"new",line]*lines[tech,line].length *(costs[tech,lines[tech,line].node_start,set["year"][1],"cap_fix",impact]) for line=set["lines"]))
# Limit the flow per line to the existing infrastructure
push!(cep.info,"| FLOW['el', dir, tech, t, k, line] | ≤ Σ_{infrastruct}TRANS[tech,infrastruct,line] ∀ line, tech_transmission, t, k")
@constraint(cep.model, [line=set["lines"], dir=set["dir_transmission"], tech=set["tech_transmission"], t=set["time_T"], k=set["time_K"]], cep.model[:FLOW]["el",dir, tech, t, k, line] <= sum(cep.model[:TRANS][tech,infrastruct,line] for infrastruct=set["infrastruct"]))
# Calculate the sum of the flows for each node
push!(cep.info,"GEN['el',tech, t, k, node] = Σ_{line-end(node)} FLOW['el','uniform',tech, t, k, line] - Σ_{line_pos} FLOW['el','opposite',tech, t, k, line] / (η[tech]⋅length[line]) + Σ_{line-start(node)} Σ_{line_pos} FLOW['el','opposite',tech, t, k, line] - FLOW['el','uniform',tech, t, k, line] / (η[tech]⋅length[line])∀ tech_transmission, t, k")
for node in set["nodes"]
@constraint(cep.model, [tech=set["tech_transmission"], t=set["time_T"], k=set["time_K"]], cep.model[:GEN]["el",tech, t, k, node] == sum(cep.model[:FLOW]["el","uniform",tech, t, k, line_end] - cep.model[:FLOW]["el","opposite",tech, t, k, line_end]/lines[tech,line_end].eff for line_end=set["lines"][getfield.(lines[tech,:], :node_end).==node]) + sum(cep.model[:FLOW]["el","opposite",tech, t, k, line_start] - cep.model[:FLOW]["el","uniform",tech, t, k, line_start]/lines[tech,line_start].eff for line_start=set["lines"][getfield.(lines[tech,:], :node_start).==node]))
end
return cep
end
"""
setup_opt_cep_demand!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP,lost_load_cost::Dict{String,Number}=Dict{String,Number}("el"=>Inf))
Add demand which shall be matched by the generation (GEN)
"""
function setup_opt_cep_demand!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP;
lost_load_cost::Dict{String,Number}=Dict{String,Number}("el"=>Inf))
## DATA ##
set=cep.set
#ts Dict( tech-node ): t x k
ts=ts_data.data
## DEMAND ##
if "tech_transmission" in keys(set) && lost_load_cost["el"]!=Inf
# Force the demand and slack to match the generation either with transmission
push!(cep.info,"Σ_{tech}GEN['el',tech,t,k,node] = ts[el_demand-node,t,k]-SLACK['el',t,k,node] ∀ node,t,k")
@constraint(cep.model, [node=set["nodes"], t=set["time_T"], k=set["time_K"]], sum(cep.model[:GEN]["el",tech,t,k,node] for tech=set["tech_power"]) == ts["el_demand-"*node][t,k]-cep.model[:SLACK]["el",t,k,node])
elseif !("tech_transmission" in keys(set)) && lost_load_cost["el"]!=Inf
# or on copperplate
push!(cep.info,"Σ_{tech,node}GEN['el',tech,t,k,node]= Σ_{node}ts[el_demand-node,t,k]-SLACK['el',t,k,node] ∀ t,k")
@constraint(cep.model, [t=set["time_T"], k=set["time_K"]], sum(cep.model[:GEN]["el",tech,t,k,node] for node=set["nodes"], tech=set["tech_power"]) == sum(ts["el_demand-"*node][t,k]-cep.model[:SLACK]["el",t,k,node] for node=set["nodes"]))
elseif "tech_transmission" in keys(set) && lost_load_cost["el"]==Inf
# Force the demand without slack to match the generation either with transmission
push!(cep.info,"Σ_{tech}GEN['el',tech,t,k,node] = ts[el_demand-node,t,k] ∀ node,t,k")
@constraint(cep.model, [node=set["nodes"], t=set["time_T"], k=set["time_K"]], sum(cep.model[:GEN]["el",tech,t,k,node] for tech=set["tech_power"]) == ts["el_demand-"*node][t,k])
else
# or on copperplate
push!(cep.info,"Σ_{tech,node}GEN['el',tech,t,k,node]= Σ_{node}ts[el_demand-node,t,k]∀ t,k")
@constraint(cep.model, [t=set["time_T"], k=set["time_K"]], sum(cep.model[:GEN]["el",tech,t,k,node] for node=set["nodes"], tech=set["tech_power"]) == sum(ts["el_demand-"*node][t,k] for node=set["nodes"]))
end
return cep
end
"""
setup_opt_cep_co2_limit!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP;co2_limit::Number=Inf,lost_emission_cost::Dict{String,Number}=Dict{String,Number}("CO2"=>Inf))
Add co2 emission constraint
"""
function setup_opt_cep_co2_limit!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP;
co2_limit::Number=Inf,
lost_emission_cost::Dict{String,Number}=Dict{String,Number}("CO2"=>Inf))
## DATA ##
set=cep.set
#ts Dict( tech-node ): t x k
ts=ts_data.data
#ts_weights: k - weight of each period:
ts_weights=ts_data.weights
#ts_deltas: t x k - Δt of each segment x period
ts_deltas=ts_data.delta_t
## EMISSIONS ##
if lost_emission_cost["CO2"]!=Inf
# Limit the Emissions with co2_limit if it exists
push!(cep.info,"ΣCOST_{account,tech}[account,'$(set["impact"][1])',tech] ≤ LE['CO2'] + co2_limit Σ_{node,t,k} ts[el_demand-node,t,k] ⋅ ts_weights[k] ⋅ ts_deltas[t,k]")
@constraint(cep.model, sum(cep.model[:COST][account,"CO2",tech] for account=set["account"], tech=set["tech"])<= cep.model[:LE]["CO2"] + co2_limit*sum(ts["el_demand-"*node][t,k]*ts_deltas[t,k]*ts_weights[k] for t=set["time_T"], k=set["time_K"], node=set["nodes"]))
else
# Limit the Emissions with co2_limit if it exists
# Total demand can also be determined with the function get_total_demand() edit both in case of changes of e.g. ts_deltas
push!(cep.info,"ΣCOST_{account,tech}[account,'$(set["impact"][1])',tech] ≤ co2_limit ⋅ Σ_{node,t,k} ts[el_demand-node,t,k] ⋅ ts_weights[k] ⋅ ts_deltas[t,k]")
@constraint(cep.model, sum(cep.model[:COST][account,"CO2",tech] for account=set["account"], tech=set["tech"])<= co2_limit*sum(ts["el_demand-$node"][t,k]*ts_weights[k]*ts_deltas[t,k] for node=set["nodes"], t=set["time_T"], k=set["time_K"]))
end
return cep
end
"""
setup_opt_cep_existing_infrastructure!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
fixing existing infrastructure to CAP[tech, 'ex', node]
"""
function setup_opt_cep_existing_infrastructure!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
#`nodes::OptVariable`: nodes[tech,node] - OptDataCEPNode
nodes = opt_data.nodes
#`lines::OptVarible`: lines[tech,line] - OptDataCEPLine
lines = opt_data.lines
## ASSIGN VALUES ##
# Assign the existing capacity from the nodes table
push!(cep.info,"CAP[tech, 'ex', node] = existing infrastructure ∀ node, tech")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_cap"]], cep.model[:CAP][tech,"ex",node]==nodes[tech,node].power_ex)
if "transmission" in keys(set)
push!(cep.info,"TRANS[tech, 'ex', line] = existing infrastructure ∀ tech, line")
@constraint(cep.model, [line=set["lines"], tech=set["tech_trans"]], cep.model[:TRANS][tech,"ex",line]==lines[tech,line].power_ex)
end
return cep
end
"""
setup_opt_cep_limit_infrastructure!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
limit infrastructure setup of CAP[tech, sum(infrastuct), node]
NOTE just for CAP not for TRANS implemented
"""
function setup_opt_cep_limit_infrastructure!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP)
## DATA ##
set=cep.set
#`nodes::OptVariable`: nodes[tech,node] - OptDataCEPNode
nodes = opt_data.nodes
#`lines::OptVarible`: lines[tech,line] - OptDataCEPLine
lines = opt_data.lines
## ASSIGN VALUES ##
# Limit the capacity for each tech at each node with the limit provided in nodes table in column infrastruct
push!(cep.info,"∑_{infrastuct} CAP[tech, infrastruct, node] <= limit infrastructure ∀ tech_cap, node")
@constraint(cep.model, [node=set["nodes"], tech=set["tech_cap"]], sum(cep.model[:CAP][tech,infrastruct,node] for infrastruct=set["infrastruct"]) <= nodes[tech,node].power_lim)
if "transmission" in keys(set)
push!(cep.info,"∑_{infrastuct} TRANS[tech, infrastruct, line] <= limit infrastructure ∀ tech_trans, line")
@constraint(cep.model, [line=set["lines"], tech=set["tech_trans"]], sum(cep.model[:TRANS][tech,infrastruct,line] for infrastruct=set["infrastruct"]) <= lines[tech,line].power_lim)
end
return cep
end
"""
setup_opt_cep_objective!(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP)
Calculate total system costs and set as objective
"""
function setup_opt_cep_objective!(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP;
lost_load_cost::Dict{String,Number}=Dict{String,Number}("el"=>Inf),
lost_emission_cost::Dict{String,Number}=Dict{String,Number}("CO2"=>Inf))
## DATA ##
set=cep.set
## OBJECTIVE ##
# Minimize the total €-Costs s.t. the Constraints introduced above
if lost_load_cost["el"]==Inf && lost_emission_cost["CO2"]==Inf
push!(cep.info,"min Σ_{account,tech}COST[account,'$(set["impact"][1])',tech] st. above")
@objective(cep.model, Min, sum(cep.model[:COST][account,set["impact"][1],tech] for account=set["account"], tech=set["tech"]))
elseif lost_load_cost["el"]!=Inf && lost_emission_cost["CO2"]==Inf
push!(cep.info,"min Σ_{account,tech}COST[account,'$(set["impact"][1])',tech] + Σ_{node} LL['el'] ⋅ $(lost_load_cost["el"]) st. above")
@objective(cep.model, Min, sum(cep.model[:COST][account,set["impact"][1],tech] for account=set["account"], tech=set["tech"]) + sum(cep.model[:LL]["el",node] for node=set["nodes"])*lost_load_cost["el"])
elseif lost_load_cost["el"]==Inf && lost_emission_cost["CO2"]!=Inf
push!(cep.info,"min Σ_{account,tech}COST[account,'$(set["impact"][1])',tech] + LE['CO2'] ⋅ $(lost_emission_cost["CO2"]) st. above")
@objective(cep.model, Min, sum(cep.model[:COST][account,set["impact"][1],tech] for account=set["account"], tech=set["tech"]) + cep.model[:LE]["CO2"]*lost_emission_cost["CO2"])
else
push!(cep.info,"min Σ_{account,tech}COST[account,'$(set["impact"][1])',tech] + Σ_{node} LL['el'] ⋅ $(lost_load_cost["el"]) + LE['CO2'] ⋅ $(lost_emission_cost["CO2"]) st. above")
@objective(cep.model, Min, sum(cep.model[:COST][account,set["impact"][1],tech] for account=set["account"], tech=set["tech"]) + sum(cep.model[:LL]["el",node] for node=set["nodes"])*lost_load_cost["el"] + cep.model[:LE]["CO2"]*lost_emission_cost["CO2"])
end
return cep
end
"""
solve_opt_cep(cep::OptModelCEP,ts_data::ClustData,opt_data::OptDataCEP,opt_config::Dict{String,Any})
solving the cep model and writing it's results and `co2_limit` into an OptResult-Struct
"""
function solve_opt_cep(cep::OptModelCEP,
ts_data::ClustData,
opt_data::OptDataCEP,
opt_config::Dict{String,Any})
optimize!(cep.model)
status=Symbol(termination_status(cep.model))
objective=objective_value(cep.model)
total_demand=get_total_demand(cep,ts_data)
variables=Dict{String,Any}()
# cv - Cost variable, dv - design variable, which is used to fix variables in a dispatch model, ov - operational variable
variables["COST"]=OptVariable(cep,:COST,"cv")
variables["CAP"]=OptVariable(cep,:CAP,"dv")
variables["GEN"]=OptVariable(cep,:GEN,"ov")
lost_load=0
lost_emission=0
if opt_config["lost_load_cost"]["el"]!=Inf
variables["SLACK"]=OptVariable(cep,:SLACK,"sv")
variables["LL"]=OptVariable(cep,:LL,"sv")
lost_load=sum(variables["LL"].data)
end
if opt_config["lost_emission_cost"]["CO2"]!=Inf
variables["LE"]=OptVariable(cep,:LE,"sv")
lost_emission=sum(variables["LE"].data)
end
if opt_config["storage_in"] && opt_config["storage_out"] && opt_config["storage_e"]
variables["INTRASTOR"]=OptVariable(cep,:INTRASTOR,"ov")
if opt_config["seasonalstorage"]
variables["INTERSTOR"]=OptVariable(cep,:INTERSTOR,"ov")
end
end
if opt_config["transmission"]
variables["TRANS"]=OptVariable(cep,:TRANS,"dv")
variables["FLOW"]=OptVariable(cep,:FLOW,"ov")
end
get_met_cap_limit(cep, opt_data, variables)
currency=variables["COST"].axes[2][1]
if lost_load==0 && lost_emission==0
opt_config["print_flag"] && @info("Solved Scenario $(opt_config["descriptor"]): "*String(status)*" min COST: $(round(objective,sigdigits=4)) [$currency] ⇨ $(round(objective/total_demand,sigdigits=4)) [$currency per MWh] s.t. Emissions ≤ $(opt_config["co2_limit"]) [kg-CO₂-eq. per MWh]")
else
cost=variables["COST"]
opt_config["print_flag"] && @info("Solved Scenario $(opt_config["descriptor"]): "*String(status)*" min COST: $(round(sum(cost[:,axes(cost,"impact")[1],:]),sigdigits=4)) [$currency] ⇨ $(round(sum(cost[:,axes(cost,"impact")[1],:])/total_demand,sigdigits=4)) [$currency per MWh] with LL: $lost_load [MWh] s.t. Emissions ≤ $(opt_config["co2_limit"]) + $(round(lost_emission/total_demand,sigdigits=4)) (violation) [kg-CO₂-eq. per MWh]")
end
opt_info=Dict{String,Any}("total_demand"=>total_demand,"model"=>cep.info,)
return OptResult(status,objective,variables,cep.set,opt_config,opt_info)
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
327,
8905,
198,
37811,
198,
220,
220,
220,
9058,
62,
344,
79,
62,
8738,
62,
28709,
7,
912,
62,
7890,
3712,
2601,
436,
6601,
11,
8738,
62,
7890,
3712,
5222,
5760,
1045,
11,
8738,
62,
11250,
371... | 2.313041 | 15,474 |
<reponame>SV-97/ares
# requires LinearAlgebra, Images, ImageView
include("GrahamScan.jl")
using ImageView, Images
using SparseArrays
using Random
using LinearAlgebra
using Statistics
function FindRoughEdge(A)
    # Locate rough coordinates of the non-horizontal edges of shapes in A by
    # thresholding the horizontal finite differences of the pixel values.
    pixels = Float64.(A)
    # difference of each column with its left neighbour
    diffs = abs.(pixels[:, 2:end] - pixels[:, 1:end - 1])
    return findall(d -> abs(d - 1.0) < 0.8, diffs)
end
"""
    FitLinear(x, y, deg)

Use linear least squares to approximate f(x) = y with a polynomial of degree
`deg`; returns the polynomial coefficients, highest power first.
"""
function FitLinear(x, y, deg)
    # Vandermonde-style design matrix, highest power first
    vs = hcat(map(p -> x.^p, (deg:-1:0))...);
    # `\` on a rectangular system solves the least-squares problem via a pivoted
    # QR factorization — replaces the previous hand-rolled Q/R zero-padding,
    # which computed the same solution with extra code and allocations.
    vs \ y
end
function FitLinearModel(x, y, deg)
    # Fit a degree-`deg` polynomial to (x, y) via linear regression and return
    # it as a ready-to-use callable t -> p(t).
    coeffs = FitLinear(x, y, deg)
    return t -> (t.^(deg:-1:0))' * coeffs
end
function CleanUpEdges(sensitivity, contour)
    # Try to remove unnecessary points from contour by merging maximal runs of
    # points that a single line approximates to within `sensitivity` pixels.
    # `contour` is a matrix where the rows are x,y coordinate pairs.
    reduced_contour = [];
    current_xs = Array{eltype(contour),1}();
    current_ys = Array{eltype(contour),1}();
    for point in eachrow(contour) # the first element is always equal to GrahamScan.EdgePoint(contour)
        x, y = point[1], point[2]
        if isempty(current_xs)
            push!(current_xs, x);
            push!(current_ys, y);
        else
            # candidate segment: current point prepended to the running segment
            next_xs = [x; current_xs];
            next_ys = [y; current_ys];
            # approximate all points in current segment by a line
            approx = FitLinearModel(next_xs, next_ys, 1);
            # maximum absolute deviation between the fitted line and the points
            md = vcat(approx.(next_xs)...) - next_ys |>
                ds -> abs.(ds) |>
                maximum;
            if md <= sensitivity # if the maximum deviation doesn't exceed `sensitivity` pixels
                # accept the point into the running segment and try the next one
                current_xs = next_xs;
                current_ys = next_ys;
                continue
            else
                # otherwise save ends of the last line to the reduced contour
                push!(reduced_contour, [current_xs[1], current_ys[1]]);
                push!(reduced_contour, [current_xs[end], current_ys[end]]);
                # and begin a new segment starting at the current point
                current_xs = [x];
                current_ys = [y];
            end
        end
    end
    # BUGFIX: flush the trailing segment. Previously the final run of points was
    # silently dropped — and when the whole contour fit a single line,
    # reduced_contour stayed empty and hcat() below failed.
    if !isempty(current_xs)
        push!(reduced_contour, [current_xs[1], current_ys[1]]);
        push!(reduced_contour, [current_xs[end], current_ys[end]]);
    end
    reduced_contour |> unique |>
        x -> hcat(x...)'
end
function Center(C)
    # "Center" of a point cloud: the column-wise mean, returned as a row vector.
    [mean(col) for col in eachcol(C)]'
end
function Triangulate(center, points)
    # Lazily yield fan triangles (as row-vector triples) from `center` to each
    # pair of consecutive contour points, closing back at the first point.
    first_point = points[1, :]
    previous = first_point
    Channel() do ch
        for pt in eachrow(points[2:end, :])
            put!(ch, (previous', center, pt'))
            previous = pt
        end
        # closing triangle back to the starting point
        put!(ch, (previous', center, first_point'))
    end
end
function CalculateArea(C)
    # Area of the star domain around Center(C): sum of the areas of the fan
    # triangles produced by Triangulate, each via the determinant formula.
    tri_area((a, b, c)) = abs(det([a 1; b 1; c 1])) / 2
    sum(tri_area, Triangulate(Center(C), C))
end
function CartesianToCoords(idxs)
    # Convert a CartesianIndex array to a (number of pixels) x 2 matrix
    rows = [idx[1] for idx in idxs]
    cols = [idx[2] for idx in idxs]
    [rows cols]
end
function CoordsToImage(A)
    # Rasterise an n×2 coordinate matrix into a dense matrix with ones at the
    # given pixel positions, shifted so that indices start at 1.
    S = sparse(A[:, 1], A[:, 2], ones(size(A, 1)))
    rows, cols, vals = findnz(S)
    rows = rows .- (minimum(rows) - 1)
    cols = cols .- (minimum(cols) - 1)
    Matrix(sparse(rows, cols, vals))
end
function Show(A)
    # Display the coordinate matrix A as an image (side effect) and return A
    # unchanged so the call can sit inside a |> pipeline.
    imshow(CoordsToImage(A))
    return A
end
# Pick the calibration and input image paths: from the command line
# (ARGS[1] = calibration image, ARGS[2] = image to measure) or fall back to defaults.
if isempty(ARGS)
    norm_path = "norm.bmp";
    img_path = "in.bmp";
else
    norm_path = ARGS[1];
    img_path = ARGS[2];
end
# calculate area of a square we know to have an area of 1mm²
# (this could of course be replaced with an arbitrary shape of known area)
# Result is the calibration square's area in pixels², used below as the
# px → mm conversion factor.
area_square = load(norm_path) |>
    img -> Gray.(img) |>
    FindRoughEdge |>
    CartesianToCoords |>
    GrahamScan.FindConvexHull |> Show |>
    CalculateArea
# Rescale pixel lengths to millimetres: the calibration square covers 1 mm², so
# sqrt(area_square) is the pixels-per-millimetre factor.
function pixelsToMillimeters(x)
    return x / sqrt(area_square)
end
# Measure the area of the shape in the image at `path`: extract and simplify its
# convex hull, convert coordinates from pixels to mm, compute the area and print
# it. Note the final pipeline stage's lambda parameter `area` shadows this
# function's own name; the function returns the `println` result (nothing).
area(path) = load(path) |>
img -> Gray.(img) |>
FindRoughEdge |>
CartesianToCoords |>
GrahamScan.FindConvexHull |> Show |>
hull -> CleanUpEdges(10, hull) |> Show |>
pixelsToMillimeters |>
CalculateArea |>
area ->
println("The area is $(area) mm². ~$(Int64(round(sqrt(area_square)))) pixels are 1 mm.")
# Measure the requested image, then block on user input so the image windows
# opened by Show stay visible until the user is done.
area(img_path)
println("Press any button to continue...")
readline()
| [
27,
7856,
261,
480,
29,
50,
53,
12,
5607,
14,
3565,
198,
2,
4433,
44800,
2348,
29230,
11,
5382,
11,
7412,
7680,
198,
198,
17256,
7203,
45821,
33351,
13,
20362,
4943,
198,
198,
3500,
7412,
7680,
11,
5382,
198,
3500,
1338,
17208,
3163... | 2.296334 | 1,991 |
# Glue module combining named dimensions, axis indices, image metadata and field
# properties into a single array interface.
module NamedIndicesMeta

using NamedDims, ImageCore, ImageMetadata, ImageAxes, FieldProperties,
    AxisIndices, LightGraphs, SimpleWeightedGraphs, Reexport, MappedArrays

using Base: tail

# Extended with methods for the named-axis array types defined here
import ImageAxes: timeaxis, timedim, colordim, checknames, isstreamedaxis

export
    NamedDimsArray,
    AxisIndicesArray,
    NIMArray,
    NIArray,
    IMArray,
    MetaArray,
    properties,
    dimnames,
    getdim,
    named_axes,
    @defdim

# Submodule sources; each `using .X` pulls in the submodule defined by the
# preceding include, so the include/using order below should be preserved.
include("types.jl")
include("properties.jl")
include("data_format.jl")
include("defdim.jl")

include("time.jl")
using .TimeData

include("color.jl")
using .ColorData

include("observation.jl")
using .ObservationData

include("spatial.jl")
using .SpatialData

include("graphs.jl")
using .AxisIndicesGraphs

include("deprecations.jl")

@reexport using NamedIndicesMeta.TimeData
@reexport using NamedIndicesMeta.ColorData
@reexport using NamedIndicesMeta.ObservationData
@reexport using NamedIndicesMeta.SpatialData
@reexport using NamedIndicesMeta.AxisIndicesGraphs

end # module
| [
21412,
34441,
5497,
1063,
48526,
198,
198,
3500,
34441,
35,
12078,
11,
7412,
14055,
11,
7412,
9171,
14706,
11,
7412,
31554,
274,
11,
7663,
2964,
18200,
11,
198,
220,
220,
220,
220,
220,
38349,
5497,
1063,
11,
4401,
37065,
82,
11,
1742... | 2.782609 | 368 |
<gh_stars>10-100
### A Pluto.jl notebook ###
# v0.14.4
using Markdown
using InteractiveUtils
# ╔═╡ d01a9b4f-8b55-4607-abb6-717d227fcd48
begin
using PlutoUI, LinearAlgebra
PlutoUI.TableOfContents(aside=true)
end
# ╔═╡ 479a40d9-b81e-442b-9962-f972b110a4dd
# Pkg.checkout("SpecialMatrices")
using SpecialMatrices
# ╔═╡ 574e86bb-159c-4141-8d8a-21bdcc9b5304
# Plot the eigenvalues (singular values) and left singular vectors
using Plots
# ╔═╡ 5eb73af5-f78a-4811-83a5-ac39063a4516
# pkg> add Arrowhead#master
using Arrowhead
# ╔═╡ 5d95dc2c-bf94-4b13-b9d5-b7b261e86cf6
md"""
# Algorithms for Structured Matrices
For matrices with some special structure, it is possible to derive versions of algorithms which are faster and/or more accurate than the standard algorithms.
__Prerequisites__
The reader should be familiar with concepts of eigenvalues and eigen vectors, singular values and singular vectors, related perturbation theory, and algorithms.
__Competences__
The reader should be able to recognise matrices which have rank-revealing decomposition and apply adequate algorithms, and to apply forward stable algorithms to arrowhead and diagonal-plus-rank-one matrices.
"""
# ╔═╡ e3e43840-d0a3-4cde-9b1e-5785759912b2
md"""
# Rank revealing decompositions
For more details, see [<NAME>, Computing Eigenvalues and Singular Values to High Relative Accuracy](https://www.routledge.com/Handbook-of-Linear-Algebra/Hogben/p/book/9781138199897) and [<NAME> et al, Computing the singular value decomposition with high relative accuracy](http://www.sciencedirect.com/science/article/pii/S0024379599001342), and the references therein.
Let $A\in\mathbb{R}^{m\times n}$ with $\mathop{\mathrm{rank}}(A)=n$ (therefore, $m\geq n$) and $A=U\Sigma V^T$ its thin SVD.
## Definitions
Let $A\in\mathbb{R}^{m\times n}$.
The singular values of $A$ are (__perfectly__) __well determined to high relative accuracy__ if changing any entry $A_{kl}$ to $\theta A_{kl}$, $\theta \neq 0$, causes perturbations in singular values bounded by
$$
\min\{|\theta|,1/|\theta|\}\sigma_j \leq\tilde \sigma_j \leq
\max\{|\theta|,1/|\theta|\}\sigma_j,\quad \forall j.$$
The __sparsity pattern__ of $A$, $Struct(A)$, is the set of indices for which $A_{kl}$ is permitted to be non-zero.
The __bipartite graph__ of the sparsity pattern $S$, $\mathcal{G}(S)$, is the graph with vertices partitioned into row vertices $r_1,\ldots,r_m$ and column vertices $c_1,\ldots,c_n$, where $r_k$ and $c_l$ are connected if and only if $(k,l)\in S$.
If $\mathcal{G}(S)$ is acyclic, matrices with sparsity pattern $S$ are __biacyclic__.
A decomposition $A=XDY^T$ with diagonal matrix $D$ is called a __rank revealing decomposition__ (RRD) if $X$ and $Y$ are full-column rank well-conditioned matrices.
__Hilbert matrix__ is a square matrix $H$ with elements $H_{ij}=\displaystyle\frac{1}{i+j-1}$.
__Hankel matrix__ is a square matrix with constant elements along skew-diagonals.
__Cauchy matrix__ is an $m\times n$ matrix $C$ with elements $C_{ij}=\displaystyle\frac{1}{x_i+y_j}$ with $x_i+y_j\neq 0$ for all $i,j$.
"""
# ╔═╡ 03f797ac-8764-4688-9e7e-e144cafb3b4c
md"""
## Facts
1. The singular values of $A$ are perfectly well determined to high relative accuracy if and only if the bipartite graph $\mathcal{G}(S)$ is acyclic (forest of trees). Examples are bidiagonal and arrowhead matrices. Sparsity pattern $S$ of acyclic bipartite graph allows at most $m+n-1$ nonzero entries. A bisection algorithm computes all singular values of biacyclic matrices to high relative accuracy.
2. An RRD of $A$ can be given or computed to high accuracy by some method. Typical methods are Gaussian elimination with complete pivoting or QR factorization with complete pivoting.
3. Let $\hat X \hat D \hat Y^T$ be the computed RRD of $A$ satisfying $|D_{jj}-\hat D_{jj}| \leq O(\varepsilon)|D_{jj}|$, $\| X-\hat X\|\leq O(\varepsilon) \|X\|$, and $\| Y-\hat Y\|\leq O(\varepsilon) \|Y\|$. The following algorithm computes the EVD of $A$ with high relative accuracy:
1. Perform QR factorization with pivoting to get $\hat X\hat D=QRP$, where $P$ is a permutation matrix. Thus $A=QRP\hat Y^T$.
2. Multiply $W=RP\hat Y^T$ (_NOT_ Strassen's multiplication). Thus $A=QW$ and $W$ is well-scaled from the left.
3. Compute the SVD of $W^T=V\Sigma^T \bar U^T$ using one-sided Jacobi method. Thus $A=Q\bar U \Sigma V^T$.
4. Multiply $U=Q\bar U$. Thus $A=U\Sigma V^T$ is the computed SVD of $A$.
4. Let $R=D'R'$, where $D'$ is such that the _rows_ of $R'$ have unit norms. Then the following error bounds hold:
$$
\frac{|\sigma_j-\tilde\sigma_j|}{\sigma_j}\leq O(\varepsilon \kappa(R')\cdot \max\{\kappa(X),\kappa(Y)\})\leq
O(\varepsilon n^{3/2}\kappa(X)\cdot \max\{\kappa(X),\kappa(Y)\}).$$
5. Hilbert matrix is Hankel matrix and Cauchy matrix, it is symmetric positive definite and _very_ ill-conditioned.
6. Every submatrix of a Cauchy matrix is itself a Cauchy matrix.
7. Determinant of a square Cauchy matrix is
$$
\det(C)=\frac{\prod_{1\leq i<j\leq n}(x_j-x_i)(y_j-y_i)}
{\prod_{1\leq i,j\leq n} (x_i+y_j)}.$$
It is computed with elementwise high relative accuracy.
8. Let $A$ be square and nonsingular and let $A=LDR$ be its decomposition with diagonal $D$, lower unit-triangular $L$, and upper unit-triangular $R$. The closed formulas using quotients of minors are (see [A. S. Householder, The Theory of Matrices in Numerical Analysis](https://books.google.hr/books?id=hCre109IpRcC&printsec=frontcover&hl=hr&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false)):
$$
\begin{aligned}
D_{11}&=A_{11}, \\
D_{jj}&=\frac{\det(A_{1:j,1:j})}{\det(A_{1:j-1,1:j-1})}, \quad j=2,\ldots,n, \\
L_{jj}&=1, \\
L_{ij}&=\frac{\det(A_{[1,2,\ldots,j-1,i],[1:j]}\, )}
{\det(A_{1:j,1:j})}, \quad j < i, \\
R_{jj}&=1, \\
R_{ji}&=\frac{\det(A_{[1,2,\ldots,j],[1,2, \ldots,j-1,i]}\, )}
{\det(A_{1:j,1:j})}, \quad i > j,
\end{aligned}$$
"""
# ╔═╡ f280b119-76a7-4ee8-b6fd-608d977af0c6
md"""
## Examples
### Positive definite matrix
Let $A=DA_S D$ be strongly scaled symmetric positive definite matrix. Then Cholesky factorization with complete (diagonal) pivoting is an RRD. Consider the following three step algorithm:
1. Compute $P^T A P=LL^T$ (_Cholesky factorization with complete pivoting_).
2. Compute the $L=\bar U\Sigma V^T$ (_one-sided Jacobi, V is not needed_).
3. Set $\Lambda=\Sigma^2$ and $U=P\bar U$. Thus $A=U\Lambda U^T$ is an EVD of $A$.
The Cholesky factorization with pivoting can be implemented very fast with block algorithm (see [C. Lucas, LAPack-Style Codes for Level 2 and 3 Pivoted Cholesky Factorizations](http://www.netlib.org/lapack/lawnspdf/lawn161.pdf)).
The eigenvalues $\tilde \lambda_j$ computed using the above algorithm satisfy relative error bounds:
$$
\frac{|\lambda_j-\tilde\lambda_j|}{\lambda_j} \leq O(n\varepsilon \|A_S\|_2^{-1}).$$
"""
# ╔═╡ 28c1a9e7-6c65-4184-b41b-b5cfd17645b5
"""
    JacobiR(A₁::AbstractMatrix)

Compute the thin SVD of `A₁` with the one-sided (right-handed) Jacobi
method using a row-cyclic sweep strategy. Returns an `SVD` factorization
object, so `F.U * Diagonal(F.S) * F.Vt ≈ A₁`. The input matrix is not
modified (it is deep-copied first).

The rotation criterion is relative — a pair of columns is rotated only when
`|F[1,2]| > tol*√(F[1,1]*F[2,2])` for the 2×2 Gram block `F` — which is the
criterion used for high relative accuracy. Iteration stops after 30 sweeps
or once a full pass over all column pairs performs no rotation.
"""
function JacobiR(A₁::AbstractMatrix)
    B = deepcopy(A₁)                 # working copy; overwritten by rotations
    n = size(B, 2)
    T = typeof(B[1, 1])
    Vacc = Matrix{T}(I, n, n)        # accumulates the right singular vectors
    thresh = √n * eps(T)             # relative rotation tolerance
    npairs = n * (n - 1) / 2         # column pairs per sweep
    sweep = 0
    quiet = 0                        # consecutive pairs needing no rotation
    while sweep < 30 && quiet < npairs
        sweep += 1
        # Row-cyclic strategy over all column pairs (i, j) with i < j.
        for i in 1:n-1, j in i+1:n
            # 2×2 Gram block of columns i and j, i.e. the (i,j) block of B'*B.
            cols = view(B, :, [i, j])
            F = cols' * cols
            if abs(F[1, 2]) > thresh * √(F[1, 1] * F[2, 2])
                # Jacobi rotation annihilating the off-diagonal of F.
                τ = (F[2, 2] - F[1, 1]) / (2 * F[1, 2])
                t = sign(τ) / (abs(τ) + √(1 + τ^2))
                c = 1 / √(1 + t^2)
                G = LinearAlgebra.Givens(i, j, c, c * t)
                rmul!(B, G)          # B    *= G'
                rmul!(Vacc, G)       # Vacc *= G'
                quiet = 0
            else
                quiet += 1
            end
        end
    end
    # Singular values are the column norms; normalizing columns yields U.
    σ = [norm(view(B, :, k)) for k in 1:n]
    for k in 1:n
        B[:, k] ./= σ[k]
    end
    return SVD(B, σ, adjoint(Vacc))
end
# ╔═╡ bc8b94b7-7e20-4cd3-be68-7e9152fe6d7b
begin
# Build a badly scaled symmetric positive definite test matrix A = D₀*As*D₀.
n=20
import Random
Random.seed!(421)
B=randn(n,n)
# Well-conditioned symmetric positive definite "core" matrix.
As=Matrix(Symmetric(B'*B))
# Diagonal scaling with entries spanning many orders of magnitude (e^±25).
D₀=exp.(50*(rand(n).-0.5))
# Parentheses are necessary! (D₀[i]*D₀[j] must be formed first so each
# entry is computed as a single well-scaled product.)
A=[As[i,j]*(D₀[i]*D₀[j]) for i=1:n, j=1:n]
issymmetric(A), cond(As), cond(A)
end
# ╔═╡ 66dedda0-24a0-48a0-9286-4ce688e5da72
# ?cholesky;
# ╔═╡ cc924252-137a-4241-b178-2eabf653ff71
md"""
We will not use the Cholesky factorization with complete pivoting. Instead, we will just sort the diagonal of $A$ in advance, which is sufficient for this example.
_Write the function for Cholesky factorization with complete pivoting as an exercise._
"""
# ╔═╡ c41e4534-6b29-4cbe-9b17-2c69c89e5570
# ?sortperm;
# ╔═╡ bcee7b90-2dd4-47b0-a781-67d35962d5f2
begin
# Sort the diagonal in decreasing order as a stand-in for complete
# (diagonal) pivoting, then factor the symmetrically permuted matrix.
p=sortperm(diag(A), rev=true)
L=cholesky(A[p,p])
end
# ╔═╡ f471d60f-1c70-4bd5-bb67-2bb17c18f3f8
U,σ,V=JacobiR(Matrix(L.L));
# ╔═╡ c474702a-8af8-4640-9f45-bca748ab3952
begin
U₁=U[invperm(p),:]
λ=σ.^2
end
# ╔═╡ 25143fe5-3965-468a-8cb1-7c3e8e8027ea
U'*A[p,p]*U
# ╔═╡ 86dc734d-f45d-491f-9f80-7d958e642fbd
# Due to large condition number, this is not
# as accurate as expected
Ξ=U₁'*A*U₁
# ╔═╡ 001f801a-06d2-48be-a5ad-4c07f7a92890
# Orthogonality
norm(U₁'*U₁-I)
# ╔═╡ 5196b508-5aad-4811-bfc0-1c1fa00eec37
begin
DΞ=sqrt.(diag(Ξ))
Ξs=[Ξ[i,j]/(DΞ[i]*DΞ[j]) for i=1:n, j=1:n]
end
# ╔═╡ bcfdae20-040f-492e-bc67-60cf5f420cc0
begin
K=U₁*Diagonal(σ)
K'*K
end
# ╔═╡ 3ad5b60e-3fb9-443e-90eb-40b2fd78d9a3
# Explain why the residual is so large.
norm(A*U₁-U₁*Diagonal(λ))
# ╔═╡ 3e863b09-dc68-4560-adeb-e56ab22d9afd
# Relative residual is perfect
norm(A*U₁-U₁*Diagonal(λ))/norm(A)
# ╔═╡ a5555731-c229-43de-9d88-cc3b91d7db67
[λ sort(eigvals(A),rev=true)]
# ╔═╡ c3fcbaf1-b98f-401f-9244-aad632805ecb
md"""
### Hilbert matrix
We need the newest version of the package
[SpecialMatrices.jl](https://github.com/JuliaMatrices/SpecialMatrices.jl).
"""
# ╔═╡ 8d7e4960-9b04-4f5e-b2e6-acce80a6812f
varinfo(SpecialMatrices)
# ╔═╡ 80d64fc6-33e7-4069-a6f4-8e85369dad9f
C=Cauchy([1,2,3,4,5],[0,1,2,3,4])
# ╔═╡ 804c4972-bf3d-4284-8426-b50b3ea5bb1b
H=Hilbert(5)
# ╔═╡ e9bf2b02-a60d-495b-bbdb-d87e31d8d9e0
Hf=Matrix(H)
# ╔═╡ c113b7fe-87b9-454c-aeb9-166af79bbe61
begin
# Exact formula for the determinant of a Cauchy matrix from Fact 7:
#   det(C) = prod_{i<j}(x_j-x_i)(y_j-y_i) / prod_{i,j}(x_i+y_j).
# Every factor is formed directly (no subtraction of computed products),
# giving elementwise high relative accuracy.
# NOTE(review): this adds a LinearAlgebra.det method for a type owned by
# SpecialMatrices (type piracy) — acceptable in a notebook, but confirm it
# does not clash with a det method defined by SpecialMatrices itself.
import LinearAlgebra.det
function det(C::Cauchy{T}) where T
n=length(C.x)
# Strict upper triangle of pairwise-difference products (i < j).
F=triu([(C.x[j]-C.x[i])*(C.y[j]-C.y[i]) for i=1:n, j=1:n],1)
# Multiply only the nonzero entries: this skips the structural zeros of
# triu(...,1). NOTE(review): a *genuine* zero factor (repeated x or y
# node) would also be skipped, so this assumes distinct nodes — confirm.
num=prod(F[findall(!iszero,F)])
den=prod([(C.x[i]+C.y[j]) for i=1:n, j=1:n])
# Integer node data → exact Rational result (//); otherwise float divide.
if all(isinteger,C.x)&all(isinteger,C.y)
return num//den
else
return num/den
end
end
end
# ╔═╡ c6c0e7b2-400c-478e-90af-496659fed9c4
# This is exact
det(Hf)
# ╔═╡ d057146d-4a5c-415f-9332-9b3bb7e82fc4
det(C)
# ╔═╡ 66115e0a-a5c8-4a66-958b-27d9a7865855
md"""
Compute componentwise highly accurate $A=LDL ^T$ factorization of a Hilbert (Cauchy) matrix. Using `Rational` numbers gives high accuracy.
"""
# ╔═╡ 43656da7-93db-4de3-b420-098f64dfba85
# Exact LDLT factorization from Fact 8, no pivoting.
"""
    LDLT(C::Cauchy)

Exact `L*D*L'` factorization of the symmetric Cauchy matrix `C`, with no
pivoting, using the closed quotient-of-minors formulas (Fact 8). Returns
the unit lower-triangular `L` and the vector `D` of diagonal entries, both
with `Rational` elements so the computation is exact for integer node data.
Relies on the exact `det(::Cauchy)` method defined in this notebook.
"""
function LDLT(C::Cauchy)
    n = length(C.x)
    T = typeof(C.x[1])
    diagD = Array{Rational{T}}(undef, n)
    unitL = Matrix{Rational{T}}(I, n, n)
    # Leading principal minors det(C[1:j, 1:j]), computed exactly.
    minors = [det(Cauchy(C.x[1:j], C.y[1:j])) for j = 1:n]
    diagD[1] = map(Rational{T}, C[1, 1])
    # D[j] = det(C[1:j,1:j]) / det(C[1:j-1,1:j-1]) for j ≥ 2.
    diagD[2:n] = minors[2:n] ./ minors[1:n-1]
    for row = 2:n, col = 1:row-1
        # L[row,col] = det(C[[1..col-1, row], 1:col]) / det(C[1:col, 1:col]).
        unitL[row, col] = det(Cauchy(C.x[[1:col-1; row]], C.y[1:col])) / minors[col]
    end
    return unitL, diagD
end
# ╔═╡ d0b35d9b-8b87-4831-846c-b63fe8912ec7
L₁,D₁=LDLT(C)
# ╔═╡ d25dcaa1-2448-49ec-b863-b224edc4ec2c
L₁*Diagonal(D₁)*L₁' # -Matrix(H)
# ╔═╡ d668a5a2-8060-48ef-adf0-6be0b1c3859e
# L*D*L' is an RRD
cond(L₁)
# ╔═╡ a4a40d52-94fc-4ccc-8e35-09c75c5de755
cond(C)
# ╔═╡ dbfbbb00-ab00-4c85-a4ce-32468f18088a
md"""
We now compute the accurate EVD of the Hilbert matrix of order $n=100$. We cannot use the function `LDLT()` since the _computation of determinant causes overflow_ and _there is no pivoting_. Instead, we use Algorithm 3 from [<NAME>, Computing the singular value decomposition with high relative accuracy, SIAM J. Matrix Anal. Appl, 21 (1999) 562-580](http://www.netlib.org/lapack/lawnspdf/lawn130.pdf).
"""
# ╔═╡ 0176739c-2846-4b07-a8d3-68454c1251a6
# Gaussian elimination with complete pivoting, specialized to a Cauchy
# matrix (Algorithm 3 of the Demmel 1999 reference cited in the text above).
# Each Schur-complement entry is updated by an exact quotient formula in the
# node values x, y, avoiding subtractive cancellation.
# Returns (X, D, Y, pr, pc) such that C[pr,pc] = X*Diagonal(D)*Y', where X is
# unit lower triangular, Y' is unit upper triangular, and pr/pc are the row
# and column permutations chosen by the pivoting.
function GECP(C::Cauchy)
n=length(C.x)
G=Matrix(C)
# Local copies of the Cauchy nodes; they are permuted alongside G.
x=copy(C.x)
y=copy(C.y)
pr=collect(1:n)
pc=collect(1:n)
# Find the maximal element
for k=1:n-1
# Pivot: absolutely largest entry of the trailing block G[k:n, k:n].
i,j=Tuple(argmax(abs.(G[k:n,k:n])))
i+=k-1
j+=k-1
if i!=k || j!=k
# Swap rows i<->k and columns j<->k, keeping the node vectors and the
# permutation records consistent with G.
G[[i,k],:]=G[[k,i],:]
G[:, [j,k]]=G[:, [k,j]]
x[[k,i]]=x[[i,k]]
y[[k,j]]=y[[j,k]]
pr[[i,k]]=pr[[k,i]]
pc[[j,k]]=pc[[k,j]]
end
for r=k+1:n
for s=k+1:n
# Exact Cauchy Schur-complement update: multiply by the quotient of
# node differences/sums instead of subtracting an outer product.
G[r,s]=G[r,s]*(x[r]-x[k])*(y[s]-y[k])/
((x[k]+y[s])*(x[r]+y[k]))
end
end
# Re-symmetrize G after the update — presumably to keep the trailing
# block exactly symmetric for the symmetric C used here; TODO confirm
# this is intended for non-symmetric Cauchy inputs as well.
G=Matrix(Symmetric(G))
end
# After elimination, G holds D on the diagonal and the scaled triangular
# factors below/above it; unscale them into unit-triangular X and Y'.
D=diag(G)
X=tril(G,-1)*Diagonal(1.0./D)+I
Y=Diagonal(1.0./D)*triu(G,1)+I
X,D,Y', pr,pc
end
# ╔═╡ 8b844cf7-c574-45e9-b682-fa9cc6e9cb73
X,D,Y,pr,pc=GECP(C)
# ╔═╡ b3877ea1-d479-4d08-af8c-26e17de77106
# Check
norm(X*Diagonal(D)*Y'-Matrix(C)[pr,pc]),
norm(X[invperm(pr),:]*Diagonal(D)*Y[invperm(pc),:]'-C)
# ╔═╡ 9bff72d8-68b6-41b6-a954-ee39f90ec7b0
begin
# Now the big test.
n₂=100
H₂=Hilbert(n₂)
C₂=Cauchy(collect(1:n₂), collect(0:n₂-1))
end
# ╔═╡ 72390bf0-c5d6-46de-b8d0-0bee2cbb0af7
md"""
We need a function to compute RRD from `GECP()`
"""
# ╔═╡ 46656cee-2202-4eb9-9725-1e3e3af4df42
"""
    RRD(C::Cauchy)

Rank-revealing decomposition of the Cauchy matrix `C`: returns `(X, D, Y)`
with `C ≈ X*Diagonal(D)*Y'`, obtained by applying the inverse row/column
permutations to the factors produced by `GECP`.
"""
function RRD(C::Cauchy)
    Xp, diagD, Yp, rowperm, colperm = GECP(C)
    X = Xp[invperm(rowperm), :]
    Y = Yp[invperm(colperm), :]
    return X, diagD, Y
end
# ╔═╡ 6b544eaf-858a-40e5-b59b-75cfa2237a6f
X₂,D₂,Y₂=RRD(C₂);
# ╔═╡ 8eb2d15e-53c0-40ab-98ce-7b0bbf4d6cd1
# Check
norm((X₂*Diagonal(D₂)*Y₂')-C₂)
# ╔═╡ c7fd604c-9e63-4ac0-8839-82371f968ba7
cond(C₂)
# ╔═╡ 23df1242-1b43-4d52-a8ed-1b12b0d5d4b9
# Is this RRD? here X=Y
cond(X₂), cond(Y₂)
# ╔═╡ 14374962-2eea-4c8f-9390-73ef886c25d9
# Algorithm from Fact 3
"""
    RRDSVD(X, D, Y)

Compute the SVD of `A = X*Diagonal(D)*Y'` to high relative accuracy from a
rank-revealing decomposition (algorithm from Fact 3). Returns `(U, σ, V)`
with `A = U*Diagonal(σ)*V'`.
"""
function RRDSVD(X, D, Y)
    # 1. QR with column pivoting of X*D, so A = Q * R[:,piv] * Y'.
    Q, R, piv = qr(X * Diagonal(D), Val(true))
    # 2. Form W explicitly (NOT Strassen); W is well-scaled from the left.
    W = R[:, piv] * Y'
    # 3. One-sided Jacobi on Wᵀ; the SVD object destructures as (U, S, V),
    #    so Vw are A's right singular vectors and Ubar satisfies W = Ubar'…
    Vw, σ, Ubar = JacobiR(W')
    # 4. Left singular vectors of A.
    return Q * Ubar, σ, Vw
end
# ╔═╡ d4a86d03-0afe-4b1d-8828-52eae055aa9f
U₂,σ₂,V₂=RRDSVD(X₂,D₂,Y₂);
# ╔═╡ 1dfe86bf-7553-444b-a94e-340acccf5375
# Residual and orthogonality
norm(Matrix(C₂)*V₂-U₂*Diagonal(σ₂)), norm(U₂'*U₂-I), norm(V₂'*V₂-I)
# ╔═╡ cc123376-b657-4d8f-88e0-0f7577058c2b
# Observe the differences!!
[sort(σ₂) sort(svdvals(C₂)) sort(eigvals(Matrix(C₂)))]
# ╔═╡ 337ade70-accb-417b-b655-9640ed61b375
plot(σ₂,yscale = :log10,legend=false, title="Singular values of Hilbert matrix")
# ╔═╡ f7556414-693d-4d46-889b-ed6b091a235e
begin
# Replace Plots.spy with a heatmap-based variant: yflip puts row 1 at the
# top (matrix orientation), :bluesreds is a diverging palette so sign is
# visible, and aspectratio=1 keeps cells square.
# Other useful options: clim=(-1.0, 1.0) to fix the color limits.
import Plots.spy
spy(A)=heatmap(A, yflip=true, color=:bluesreds, aspectratio=1)
end
# ╔═╡ 9ee70a5d-59f7-460c-92af-8872966cec40
spy(U₂)
# ╔═╡ abb64f41-6817-49e9-9d44-81ef0ce32934
md"""
# Symmetric arrowhead and DPR1 matrices
For more details, see
[<NAME>, <NAME> and <NAME>, Accurate eigenvalue decomposition of real symmetric arrowhead matrices and applications](https://arxiv.org/abs/1302.7203) and [<NAME>, <NAME> and <NAME>, Forward stable eigenvalue decomposition of rank-one modifications of diagonal matrices](https://arxiv.org/abs/1405.7537).
"""
# ╔═╡ b24b1c96-c73e-4c8e-83af-f35e2b9df304
md"""
## Definitions
An __arrowhead matrix__ is a real symmetric matrix of order $n$ of the form $A=\begin{bmatrix} D & z \\ z^{T} & \alpha \end{bmatrix}$, where $D=\mathop{\mathrm{diag}}(d_{1},d_{2},\ldots ,d_{n-1})$, $z=\begin{bmatrix} \zeta _{1} & \zeta _{2} & \cdots & \zeta _{n-1} \end{bmatrix}^T$ is a vector, and $\alpha$ is a scalar.
An arrowhead matrix is __irreducible__ if $\zeta _{i}\neq 0$ for all $i$ and $d_{i}\neq d_{j}$ for all $i\neq j$.
A __diagonal-plus-rank-one matrix__ (DPR1 matrix) is a real symmetric matrix of order $n$ of the form $A= D +\rho z z^T$, where $D=\mathop{\mathrm{diag}}(d_{1},d_{2},\ldots ,d_{n})$, $z=\begin{bmatrix} \zeta _{1} & \zeta _{2} & \cdots & \zeta _{n} \end{bmatrix}^T$ is a vector, and $\rho \neq 0$ is a scalar.
A DPR1 matrix is __irreducible__ if $\zeta _{i}\neq 0$ for all $i$ and $d_{i}\neq d_{j}$ for all $i\neq j$.
"""
# ╔═╡ 5e068291-cb05-4fd5-acaa-8e5d766e375a
md"""
## Facts on arrowhead matrices
Let $A$ be an arrowhead matrix of order $n$ and let $A=U\Lambda U^T$ be its EVD.
1. If $d_i$ and $\lambda_i$ are nonincreasingy ordered, the Cauchy Interlace Theorem implies
$$\lambda _{1}\geq d_{1}\geq \lambda _{2}\geq d_{2}\geq \cdots \geq d_{n-2}\geq\lambda
_{n-1}\geq d_{n-1}\geq \lambda _{n}.$$
2. If $\zeta _{i}=0$ for some $i$, then $d_{i}$ is an eigenvalue whose corresponding eigenvector is the $i$-th unit vector, and we can reduce the size of the problem by deleting the $i$-th row and column of the matrix. If $d_{i}=d_{j}$, then $d_{i}$ is an eigenvalue of $A$ (this follows from the interlacing property) and we can reduce the size of the problem by annihilating $\zeta_j$ with a Givens rotation in the $(i,j)$-plane.
3. If $A$ is irreducible, the interlacing property holds with strict inequalities.
4. The eigenvalues of $A$ are the zeros of the __Pick function__ (also, _secular equation_)
$$
f(\lambda )=\alpha -\lambda -\sum_{i=1}^{n-1}\frac{\zeta _{i}^{2}}{%
d_{i}-\lambda }=\alpha -\lambda -z^{T}(D-\lambda I)^{-1}z,$$
and the corresponding eigenvectors are
$$
U_{:,i}=\frac{x_{i}}{\left\Vert x_{i}\right\Vert _{2}},\quad
x_{i}=\begin{bmatrix}
\left( D-\lambda _{i}I\right) ^{-1}z \\
-1%
\end{bmatrix},
\quad i=1,\ldots ,n.$$
5. Let $A$ be irreducible and nonsingular. If $d_i\neq 0$ for all $i$, then $A^{-1}$ is a DPR1 matrix
$$
A^{-1}=\begin{bmatrix} D^{-1} & \\ & 0 \end{bmatrix} + \rho uu^{T},$$
where $u=\begin{bmatrix} D^{-1}z \\ -1 \end{bmatrix}$, and $\rho =\displaystyle\frac{1}{\alpha-z^{T}D^{-1}z}$. If $d_i=0$, then $A^{-1}$ is a permuted arrowhead matrix,
$$
A^{-1}\equiv
\begin{bmatrix}
D_{1} & 0 & 0 & z_{1} \\
0 & 0 & 0 & \zeta _{i} \\
0 & 0 & D_{2} & z_{2} \\
z_{1}^{T} & \zeta _{i} & z_{2}^{T} & \alpha
\end{bmatrix}^{-1}
= \begin{bmatrix}
D_{1}^{-1} & w_{1} & 0 & 0 \\
w_{1}^{T} & b & w_{2}^{T} & 1/\zeta _{i} \\
0 & w_{2} & D_{2}^{-1} & 0 \\
0 & 1/\zeta _{i} & 0 & 0
\end{bmatrix},$$
where
$$
\begin{aligned}
w_{1}&=-D_{1}^{-1}z_{1}\displaystyle\frac{1}{\zeta _{i}},\\
w_{2}&=-D_{2}^{-1}z_{2}\displaystyle\frac{1}{\zeta _{i}},\\
b&= \displaystyle\frac{1}{\zeta _{i}^{2}}\left(-\alpha +z_{1}^{T}D_{1}^{-1}z_{1}+z_{2}^{T}D_{2}^{-1}z_{2}\right).
\end{aligned}$$
6. The algorithm based on the following approach computes all eigenvalues and _all components_ of the corresponding eigenvectors in a forward stable manner to almost full accuracy in $O(n)$ operations per eigenpair:
1. Shift the irreducible $A$ to $d_i$ which is closer to $\lambda_i$ (one step of bisection on $f(\lambda)$).
2. Invert the shifted matrix.
3. Compute the absolutely largest eigenvalue of the inverted shifted matrix and the corresponding eigenvector.
7. The algorithm is implemented in the package [Arrowhead.jl](https://github.com/ivanslapnicar/Arrowhead.jl). In certain cases, $b$ or $\rho$ need to be computed with extended precision. For this, we use the functions from file [DoubleDouble.jl](https://github.com/ivanslapnicar/Arrowhead.jl/blob/master/src/DoubleDouble.jl), originally from the the package [DoubleDouble.jl](https://github.com/simonbyrne/DoubleDouble.jl).
"""
# ╔═╡ 6d7f330f-3c88-44bd-aff9-65daa7f1ea1c
md"
## Examples
### Extended precision arithmetic
"
# ╔═╡ eea30449-2410-448d-885f-e27b7aa657c0
begin
# Extended precision arithmetic
a=2.0
b=3.0
√a
end
# ╔═╡ 21f8e11a-09fe-49a9-91cd-b118876910a8
√BigFloat(a)
# ╔═╡ 6d484719-e545-4742-9e46-515525af8244
# Double numbers according to Dekker, 1971
ad=Arrowhead.Double(a)
# ╔═╡ 310ace86-6193-43ee-bdac-fe1a79a2cb26
bd=Arrowhead.Double(b)
# ╔═╡ be91a6f1-2594-4084-8b23-312c51f35553
roota=√ad
# ╔═╡ 5762ccae-2a28-4041-8a02-4bec918e192a
rootb=√bd
# ╔═╡ 4c1a73a1-bdfe-4939-8282-54b7fa193e5a
# 30 digits should match
BigFloat(roota.hi)+BigFloat(roota.lo)
# ╔═╡ f5b83025-64e7-44d1-a841-f9a7fc8614fb
√BigFloat(a)*√BigFloat(b)
# ╔═╡ 89e578d9-7608-42f2-a2f0-0ec2c89bd510
rootab=roota*rootb
# ╔═╡ 24f1cb57-ed20-4399-996f-80294c7b1bb2
BigFloat(rootab.hi)+BigFloat(rootab.lo)
# ╔═╡ 8339adb3-b0fa-45e4-b107-62954547345b
md"""
### Random arrowhead matrix
"""
# ╔═╡ 321070d6-c0c9-482d-9a5f-0e5a4f7b522f
varinfo(Arrowhead)
# ╔═╡ 9c207955-174d-474f-be95-061c71761023
methods(GenSymArrow)
# ╔═╡ bd8ea662-f4c2-4f2d-943b-b1a5f59cbe74
begin
n₃=8
A₃=GenSymArrow(n₃,n₃)
end
# ╔═╡ 9fe07467-4033-4374-b4f2-b0ceecf91a03
# Elements of the type SymArrow
A₃.D, A₃.z, A₃.a, A₃.i
# ╔═╡ a3ba9c15-1d55-4e2b-b32e-4202fbeca671
E₃,info₃=eigen(A₃)
# ╔═╡ 34d1bc3f-b703-4b31-887e-36d036e5c3f9
@which eigen(A₃)
# ╔═╡ 8677d8b8-f68b-4013-864c-89902ffff8fd
# Residual and orthogonality
norm(A₃*E₃.vectors-E₃.vectors*Diagonal(E₃.values)),
norm(E₃.vectors'*E₃.vectors-I)
# ╔═╡ 360829ce-2790-4d70-8699-687650fc51b4
begin
# Timings - notice the O(n^2)
@time eigen(GenSymArrow(1000,1000))
@time eigen(GenSymArrow(2000,2000))
1
end
# ╔═╡ 9f2a5e5d-a705-4d65-8ebf-6b6d9d548999
md"""
### Numerically demanding matrix
"""
# ╔═╡ 19d95a91-8c03-4b12-a8d3-46a6861991a1
A₄=SymArrow( [ 1e10+1.0/3.0, 4.0, 3.0, 2.0, 1.0 ],
[ 1e10 - 1.0/3.0, 1.0, 1.0, 1.0, 1.0 ], 1e10, 6 )
# ╔═╡ 4a86007c-1137-4964-be3c-da51ab7fca3c
begin
E₄,info₄=eigen(A₄)
[sort(E₄.values) sort(eigvals(Matrix(A₄))) sort(E₄.values)-sort(eigvals(Matrix(A₄)))]
end
# ╔═╡ 33e70165-1fab-4996-99e6-a5d27bf976c5
# Residual and orthogonality
norm(A₄*E₄.vectors-E₄.vectors*Diagonal(E₄.values)),
norm(E₄.vectors'*E₄.vectors-I)
# ╔═╡ 65e00890-d4d7-4d36-a359-4606378401b7
md"""
## Facts on DPR1 matrices
The properties of DPR1 matrices are very similar to those of arrowhead matrices. Let $A$ be a DPR1 matrix of order $n$ and let $A=U\Lambda U^T$ be its EVD.
1. If $d_i$ and $\lambda_i$ are nonincreasingy ordered and $\rho>0$, then
$$\lambda _{1}\geq d_{1}\geq \lambda _{2}\geq d_{2}\geq \cdots \geq d_{n-2}\geq\lambda
_{n-1}\geq d_{n-1}\geq \lambda _{n}\geq d_n.$$
If $A$ is irreducible, the inequalities are strict.
2. Fact 2 on arrowhead matrices holds.
3. The eigenvalues of $A$ are the zeros of the __secular equation__
$$
f(\lambda )=1+\rho\sum_{i=1}^{n}\frac{\zeta _{i}^{2}}{d_{i}-\lambda }
=1 +\rho z^{T}(D-\lambda I)^{-1}z=0,$$
and the corresponding eigenvectors are
$$
U_{:,i}=\frac{x_{i}}{\left\Vert x_{i}\right\Vert _{2}},\quad
x_{i}=( D-\lambda _{i}I) ^{-1}z.$$
4. Let $A$ be irreducible and nonsingular. If $d_i\neq 0$ for all $i$, then
$$
A^{-1}=D^{-1} +\gamma uu^{T},\quad u=D^{-1}z, \quad \gamma =-\frac{\rho}{1+\rho z^{T}D^{-1}z},$$
is also a DPR1 matrix. If $d_i=0$, then $A^{-1}$ is a permuted arrowhead matrix,
$$
A^{-1}\equiv \left(\begin{bmatrix} D_{1} & 0 & 0 \\ 0 & 0 & 0 \\ 0 & 0 & D_{2} \end{bmatrix}
+\rho \begin{bmatrix} z_{1} \\ \zeta _{i} \\ z_{2}
\end{bmatrix}
\begin{bmatrix}
z_{1}^{T} & \zeta _{i} & z_{2}^{T}
\end{bmatrix}\right)^{-1}=
\begin{bmatrix}
D_{1}^{-1} & w_{1} & 0 \\
w_{1}^{T} & b & w_{2}^{T} \\
0 & w_{2} & D_{2}^{-1}
\end{bmatrix},$$
where
$$
\begin{aligned}
w_{1}&=-D_{1}^{-1}z_{1}\displaystyle\frac{1}{\zeta _{i}},\\
w_{2}&=-D_{2}^{-1}z_{2}\displaystyle\frac{1}{\zeta _{i}},\\
b &=\displaystyle\frac{1}{\zeta _{i}^{2}}\left(
\frac{1}{\rho}+z_{1}^{T}D_{1}^{-1}z_{1}+z_{2}^{T}D_{2}^{-1}z_{2}\right).
\end{aligned}$$
5. The algorithm based on the same approach as above, computes all eigenvalues and all components of the corresponding eigenvectors in a forward stable manner to almost full accuracy in $O(n)$ operations per eigenpair. The algorithm is implemented in the package `Arrowhead.jl`. In certain cases, $b$ or $\gamma$ need to be computed with extended precision.
"""
# ╔═╡ 6b3c7336-f1fc-428b-8b1e-94546b6622e8
md"""
## Examples
### Random DPR1 matrix
"""
# ╔═╡ 71d0022d-3cd8-488c-80b8-4c3c958ed8fa
begin
n₅=8
A₅=GenSymDPR1(n₅)
end
# ╔═╡ 4655eb27-0ae8-46db-9deb-575c89d08e7e
# Elements of the type SymDPR1
A₅.D, A₅.u, A₅.r
# ╔═╡ 6642fa72-8b1b-40b2-87f2-91199a96d7f9
begin
E₅,info₅=eigen(A₅)
norm(A₅*E₅.vectors-E₅.vectors*Diagonal(E₅.values)),
norm(E₅.vectors'*E₅.vectors-I)
end
# ╔═╡ 827bb865-c361-4a98-a6d6-90dca425eddc
md"""
### Numerically demanding DPR1 matrix
"""
# ╔═╡ 56c0c2ec-220f-4e04-a0cf-a144d89dd225
# Choose one
A₆=SymDPR1( [ 1e10, 5.0, 4e-3, 0.0, -4e-3,-5.0 ], [ 1e10, 1.0, 1.0, 1e-7, 1.0,1.0 ], 1.0 )
# ╔═╡ 31114bf7-450b-458d-9a1d-c22123f5f291
begin
E₆,info₆=eigen(A₆)
[sort(E₆.values) sort(eigvals(Matrix(A₆)))]
end
# ╔═╡ ad305a98-6cdc-424e-8f06-386b801b53e5
# Residual and orthogonality
norm(A₆*E₆.vectors-E₆.vectors*Diagonal(E₆.values)),
norm(E₆.vectors'*E₆.vectors-I)
# ╔═╡ Cell order:
# ╟─d01a9b4f-8b55-4607-abb6-717d227fcd48
# ╟─5d95dc2c-bf94-4b13-b9d5-b7b261e86cf6
# ╟─e3e43840-d0a3-4cde-9b1e-5785759912b2
# ╟─03f797ac-8764-4688-9e7e-e144cafb3b4c
# ╟─f280b119-76a7-4ee8-b6fd-608d977af0c6
# ╠═28c1a9e7-6c65-4184-b41b-b5cfd17645b5
# ╠═bc8b94b7-7e20-4cd3-be68-7e9152fe6d7b
# ╠═66dedda0-24a0-48a0-9286-4ce688e5da72
# ╟─cc924252-137a-4241-b178-2eabf653ff71
# ╠═c41e4534-6b29-4cbe-9b17-2c69c89e5570
# ╠═bcee7b90-2dd4-47b0-a781-67d35962d5f2
# ╠═f471d60f-1c70-4bd5-bb67-2bb17c18f3f8
# ╠═c474702a-8af8-4640-9f45-bca748ab3952
# ╠═25143fe5-3965-468a-8cb1-7c3e8e8027ea
# ╠═86dc734d-f45d-491f-9f80-7d958e642fbd
# ╠═001f801a-06d2-48be-a5ad-4c07f7a92890
# ╠═5196b508-5aad-4811-bfc0-1c1fa00eec37
# ╠═bcfdae20-040f-492e-bc67-60cf5f420cc0
# ╠═3ad5b60e-3fb9-443e-90eb-40b2fd78d9a3
# ╠═3e863b09-dc68-4560-adeb-e56ab22d9afd
# ╠═a5555731-c229-43de-9d88-cc3b91d7db67
# ╟─c3fcbaf1-b98f-401f-9244-aad632805ecb
# ╠═479a40d9-b81e-442b-9962-f972b110a4dd
# ╠═8d7e4960-9b04-4f5e-b2e6-acce80a6812f
# ╠═80d64fc6-33e7-4069-a6f4-8e85369dad9f
# ╠═804c4972-bf3d-4284-8426-b50b3ea5bb1b
# ╠═e9bf2b02-a60d-495b-bbdb-d87e31d8d9e0
# ╠═c6c0e7b2-400c-478e-90af-496659fed9c4
# ╠═c113b7fe-87b9-454c-aeb9-166af79bbe61
# ╠═d057146d-4a5c-415f-9332-9b3bb7e82fc4
# ╟─66115e0a-a5c8-4a66-958b-27d9a7865855
# ╠═43656da7-93db-4de3-b420-098f64dfba85
# ╠═d0b35d9b-8b87-4831-846c-b63fe8912ec7
# ╠═d25dcaa1-2448-49ec-b863-b224edc4ec2c
# ╠═d668a5a2-8060-48ef-adf0-6be0b1c3859e
# ╠═a4a40d52-94fc-4ccc-8e35-09c75c5de755
# ╟─dbfbbb00-ab00-4c85-a4ce-32468f18088a
# ╠═0176739c-2846-4b07-a8d3-68454c1251a6
# ╠═8b844cf7-c574-45e9-b682-fa9cc6e9cb73
# ╠═b3877ea1-d479-4d08-af8c-26e17de77106
# ╠═9bff72d8-68b6-41b6-a954-ee39f90ec7b0
# ╟─72390bf0-c5d6-46de-b8d0-0bee2cbb0af7
# ╠═46656cee-2202-4eb9-9725-1e3e3af4df42
# ╠═6b544eaf-858a-40e5-b59b-75cfa2237a6f
# ╠═8eb2d15e-53c0-40ab-98ce-7b0bbf4d6cd1
# ╠═c7fd604c-9e63-4ac0-8839-82371f968ba7
# ╠═23df1242-1b43-4d52-a8ed-1b12b0d5d4b9
# ╠═14374962-2eea-4c8f-9390-73ef886c25d9
# ╠═d4a86d03-0afe-4b1d-8828-52eae055aa9f
# ╠═1dfe86bf-7553-444b-a94e-340acccf5375
# ╠═cc123376-b657-4d8f-88e0-0f7577058c2b
# ╠═574e86bb-159c-4141-8d8a-21bdcc9b5304
# ╠═337ade70-accb-417b-b655-9640ed61b375
# ╠═f7556414-693d-4d46-889b-ed6b091a235e
# ╠═9ee70a5d-59f7-460c-92af-8872966cec40
# ╟─abb64f41-6817-49e9-9d44-81ef0ce32934
# ╟─b24b1c96-c73e-4c8e-83af-f35e2b9df304
# ╟─5e068291-cb05-4fd5-acaa-8e5d766e375a
# ╟─6d7f330f-3c88-44bd-aff9-65daa7f1ea1c
# ╠═5eb73af5-f78a-4811-83a5-ac39063a4516
# ╠═eea30449-2410-448d-885f-e27b7aa657c0
# ╠═21f8e11a-09fe-49a9-91cd-b118876910a8
# ╠═6d484719-e545-4742-9e46-515525af8244
# ╠═310ace86-6193-43ee-bdac-fe1a79a2cb26
# ╠═be91a6f1-2594-4084-8b23-312c51f35553
# ╠═5762ccae-2a28-4041-8a02-4bec918e192a
# ╠═4c1a73a1-bdfe-4939-8282-54b7fa193e5a
# ╠═f5b83025-64e7-44d1-a841-f9a7fc8614fb
# ╠═89e578d9-7608-42f2-a2f0-0ec2c89bd510
# ╠═24f1cb57-ed20-4399-996f-80294c7b1bb2
# ╟─8339adb3-b0fa-45e4-b107-62954547345b
# ╠═321070d6-c0c9-482d-9a5f-0e5a4f7b522f
# ╠═9c207955-174d-474f-be95-061c71761023
# ╠═bd8ea662-f4c2-4f2d-943b-b1a5f59cbe74
# ╠═9fe07467-4033-4374-b4f2-b0ceecf91a03
# ╠═a3ba9c15-1d55-4e2b-b32e-4202fbeca671
# ╠═34d1bc3f-b703-4b31-887e-36d036e5c3f9
# ╠═8677d8b8-f68b-4013-864c-89902ffff8fd
# ╠═360829ce-2790-4d70-8699-687650fc51b4
# ╟─9f2a5e5d-a705-4d65-8ebf-6b6d9d548999
# ╠═19d95a91-8c03-4b12-a8d3-46a6861991a1
# ╠═4a86007c-1137-4964-be3c-da51ab7fca3c
# ╠═33e70165-1fab-4996-99e6-a5d27bf976c5
# ╟─65e00890-d4d7-4d36-a359-4606378401b7
# ╟─6b3c7336-f1fc-428b-8b1e-94546b6622e8
# ╠═71d0022d-3cd8-488c-80b8-4c3c958ed8fa
# ╠═4655eb27-0ae8-46db-9deb-575c89d08e7e
# ╠═6642fa72-8b1b-40b2-87f2-91199a96d7f9
# ╟─827bb865-c361-4a98-a6d6-90dca425eddc
# ╠═56c0c2ec-220f-4e04-a0cf-a144d89dd225
# ╠═31114bf7-450b-458d-9a1d-c22123f5f291
# ╠═ad305a98-6cdc-424e-8f06-386b801b53e5
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
21017,
317,
32217,
13,
20362,
20922,
44386,
198,
2,
410,
15,
13,
1415,
13,
19,
198,
198,
3500,
2940,
2902,
198,
3500,
21365,
18274,
4487,
198,
198,
2,
2343,
243,
242,
28670,
22880,
94,
2... | 1.829519 | 15,773 |
# This file is a part of Julia. License is MIT: https://julialang.org/license
using Test
# code_native / code_llvm (issue #8239)
# It's hard to really test these, but just running them should be
# sufficient to catch segfault bugs.
# Smoke tests for the code-reflection entry points. These are hard to test
# precisely, so we only check that each reflection call runs and produces
# non-empty output; the checks execute at module load time.
module ReflectionTest
using Test, Random
# Assert that an IR-returning reflection (code_lowered/code_typed) yields a
# non-empty result for f at the given signature.
function test_ir_reflection(freflect, f, types)
@test !isempty(freflect(f, types))
nothing
end
# Assert that an IO-writing reflection (code_native/code_llvm style) writes
# non-empty text for f at the given signature.
function test_bin_reflection(freflect, f, types)
iob = IOBuffer()
freflect(iob, f, types)
str = String(take!(iob))
@test !isempty(str)
nothing
end
# Run `tester` with the signature given both as a Tuple type and as a tuple
# of its parameters, since reflection accepts either form.
function test_code_reflection(freflect, f, types, tester)
tester(freflect, f, types)
tester(freflect, f, (types.parameters...,))
nothing
end
# Exercise one reflection function across signatures chosen to cover the
# interesting cases (see the per-line notes).
function test_code_reflections(tester, freflect)
test_code_reflection(freflect, occursin,
Tuple{Regex, AbstractString}, tester) # abstract type
test_code_reflection(freflect, +, Tuple{Int, Int}, tester) # leaftype signature
test_code_reflection(freflect, +,
Tuple{Array{Float32}, Array{Float32}}, tester) # incomplete types
test_code_reflection(freflect, Module, Tuple{}, tester) # Module() constructor (transforms to call)
test_code_reflection(freflect, Array{Int64}, Tuple{Array{Int32}}, tester) # with incomplete types
test_code_reflection(freflect, muladd, Tuple{Float64, Float64, Float64}, tester)
end
test_code_reflections(test_ir_reflection, code_lowered)
test_code_reflections(test_ir_reflection, code_typed)
# print_statement_costs: check that known fragments appear in its output.
io = IOBuffer()
Base.print_statement_costs(io, map, (typeof(sqrt), Tuple{Int}))
str = String(take!(io))
@test occursin("map(f, t::Tuple{Any})", str)
@test occursin("sitofp", str)
@test occursin(r"20 .*sqrt_llvm.*::Float64", str)
end # module ReflectionTest
# isbits, isbitstype
# isbitstype operates on types; isbits on values.
@test !isbitstype(Array{Int})
@test isbitstype(Float32)
@test isbitstype(Int)
@test !isbitstype(AbstractString)
@test isbitstype(Tuple{Int, Vararg{Int, 2}})
@test !isbitstype(Tuple{Int, Vararg{Int}})
@test !isbitstype(Tuple{Integer, Vararg{Int, 2}})
@test isbitstype(Tuple{Int, Vararg{Any, 0}})
@test isbitstype(Tuple{Vararg{Any, 0}})
@test isbits(1)
@test isbits((1,2))
@test !isbits([1])
@test isbits(nothing)
# issue #16670
@test isconcretetype(Int)
@test isconcretetype(Vector{Int})
@test isconcretetype(Tuple{Int, Vararg{Int, 2}})
@test !isconcretetype(Tuple{Any})
@test !isconcretetype(Tuple{Integer, Vararg{Int, 2}})
@test !isconcretetype(Tuple{Int, Vararg{Int}})
@test !isconcretetype(Type{Tuple{Integer, Vararg{Int}}})
@test !isconcretetype(Type{Vector})
@test !isconcretetype(Type{Int})
@test !isconcretetype(Tuple{Type{Int}})
@test isconcretetype(DataType)
@test isconcretetype(Union)
@test !isconcretetype(Union{})
@test isconcretetype(Tuple{Union{}})
@test !isconcretetype(Complex)
@test !isconcretetype(Complex.body)
@test !isconcretetype(AbstractArray{Int,1})
# Parametric type with an untyped field: the UnionAll and its body are not
# concrete, but any instantiation (e.g. AlwaysHasLayout{Any}) is.
struct AlwaysHasLayout{T}
x
end
@test !isconcretetype(AlwaysHasLayout) && !isconcretetype(AlwaysHasLayout.body)
@test isconcretetype(AlwaysHasLayout{Any})
@test isconcretetype(Ptr{Cvoid})
@test !isconcretetype(Ptr) && !isconcretetype(Ptr.body)
# issue #10165
# Dispatch check: the UnionAll `AbstractArray{Int,n} where n` must select the
# generic ::Type method, not the method matching a fully applied type.
i10165(::Type) = 0
i10165(::Type{AbstractArray{T,n}}) where {T,n} = 1
@test i10165(AbstractArray{Int,n} where n) == 0
@test which(i10165, Tuple{Type{AbstractArray{Int,n} where n},}).sig == Tuple{typeof(i10165),Type}
# fullname
@test fullname(Base) == (:Base,)
@test fullname(Base.Iterators) == (:Base, :Iterators)
# isconst / ismutable / ismutabletype behavior, including names that do not
# exist in the queried module (:is_not_defined → false, not an error).
const a_const = 1
not_const = 1
@test isconst(@__MODULE__, :a_const) == true
@test isconst(Base, :pi) == true
@test isconst(@__MODULE__, :pi) == true
@test isconst(@__MODULE__, :not_const) == false
@test isconst(@__MODULE__, :is_not_defined) == false
@test ismutable(1) == false
@test ismutable([]) == true
@test ismutabletype(Int) == false
@test ismutabletype(Vector{Any}) == true
@test ismutabletype(Union{Int, Vector{Any}}) == false
## find bindings tests
@test ccall(:jl_get_module_of_binding, Any, (Any, Any), Base, :sin)==Base
# For curmod_*
include("testenv.jl")
# Fixture module for binding_module/nameof/fullname/parentmodule tests: a
# module with a nested submodule, exported and unexported bindings, const and
# non-const globals, a zero-method function, and a mutable struct.
module TestMod7648
using Test
import Base.convert
import ..curmod_name, ..curmod
export a9475, foo9475, c7648, foo7648, foo7648_nomethods, Foo7648
const c7648 = 8
d7648 = 9
const f7648 = 10
foo7648(x) = x
function foo7648_nomethods end
mutable struct Foo7648 end
# Nested submodule; a9475/foo9475 are defined here but exported from both.
module TestModSub9475
using Test
using ..TestMod7648
import ..curmod_name
export a9475, foo9475
a9475 = 5
b9475 = 7
foo9475(x) = x
let
# From inside the submodule: locally defined names bind here, names from
# `using ..TestMod7648` bind in the parent.
@test Base.binding_module(@__MODULE__, :a9475) == @__MODULE__
@test Base.binding_module(@__MODULE__, :c7648) == TestMod7648
@test Base.nameof(@__MODULE__) == :TestModSub9475
@test Base.fullname(@__MODULE__) == (curmod_name..., :TestMod7648, :TestModSub9475)
@test Base.parentmodule(@__MODULE__) == TestMod7648
end
end # module TestModSub9475
using .TestModSub9475
let
# From the parent: d7648 binds locally, a9475 resolves to the submodule.
@test Base.binding_module(@__MODULE__, :d7648) == @__MODULE__
@test Base.binding_module(@__MODULE__, :a9475) == TestModSub9475
@test Base.nameof(@__MODULE__) == :TestMod7648
@test Base.parentmodule(@__MODULE__) == curmod
end
end # module TestMod7648
let
@test Base.binding_module(TestMod7648, :d7648) == TestMod7648
@test Base.binding_module(TestMod7648, :a9475) == TestMod7648.TestModSub9475
@test Base.binding_module(TestMod7648.TestModSub9475, :b9475) == TestMod7648.TestModSub9475
@test Set(names(TestMod7648))==Set([:TestMod7648, :a9475, :foo9475, :c7648, :foo7648, :foo7648_nomethods, :Foo7648])
@test Set(names(TestMod7648, all = true)) == Set([:TestMod7648, :TestModSub9475, :a9475, :foo9475, :c7648, :d7648, :f7648,
:foo7648, Symbol("#foo7648"), :foo7648_nomethods, Symbol("#foo7648_nomethods"),
:Foo7648, :eval, Symbol("#eval"), :include, Symbol("#include")])
@test Set(names(TestMod7648, all = true, imported = true)) == Set([:TestMod7648, :TestModSub9475, :a9475, :foo9475, :c7648, :d7648, :f7648,
:foo7648, Symbol("#foo7648"), :foo7648_nomethods, Symbol("#foo7648_nomethods"),
:Foo7648, :eval, Symbol("#eval"), :include, Symbol("#include"),
:convert, :curmod_name, :curmod])
@test isconst(TestMod7648, :c7648)
@test !isconst(TestMod7648, :d7648)
end
let
using .TestMod7648
@test Base.binding_module(@__MODULE__, :a9475) == TestMod7648.TestModSub9475
@test Base.binding_module(@__MODULE__, :c7648) == TestMod7648
@test nameof(foo7648) == :foo7648
@test parentmodule(foo7648, (Any,)) == TestMod7648
@test parentmodule(foo7648) == TestMod7648
@test parentmodule(foo7648_nomethods) == TestMod7648
@test parentmodule(foo9475, (Any,)) == TestMod7648.TestModSub9475
@test parentmodule(foo9475) == TestMod7648.TestModSub9475
@test parentmodule(Foo7648) == TestMod7648
@test nameof(Foo7648) == :Foo7648
@test basename(functionloc(foo7648, (Any,))[1]) == "reflection.jl"
@test first(methods(TestMod7648.TestModSub9475.foo7648)) == which(foo7648, (Int,))
@test TestMod7648 == which(@__MODULE__, :foo7648)
@test TestMod7648.TestModSub9475 == which(@__MODULE__, :a9475)
end
@test which(===, Tuple{Int, Int}) isa Method
@test length(code_typed(===, Tuple{Int, Int})) === 1
@test only(Base.return_types(===, Tuple{Int, Int})) === Any
module TestingExported
using Test
include("testenv.jl") # for curmod_str
import Base.isexported
global this_is_not_defined
export this_is_not_defined
@test_throws ErrorException("\"this_is_not_defined\" is not defined in module Main") which(Main, :this_is_not_defined)
@test_throws ErrorException("\"this_is_not_exported\" is not defined in module Main") which(Main, :this_is_not_exported)
@test isexported(@__MODULE__, :this_is_not_defined)
@test !isexported(@__MODULE__, :this_is_not_exported)
const a_value = 1
@test which(@__MODULE__, :a_value) === @__MODULE__
@test_throws ErrorException("\"a_value\" is not defined in module Main") which(Main, :a_value)
@test which(Main, :Core) === Main
@test !isexported(@__MODULE__, :a_value)
end
# PR 13825
let ex = :(a + b)
@test string(ex) == "a + b"
end
foo13825(::Array{T, N}, ::Array, ::Vector) where {T, N} = nothing
@test startswith(string(first(methods(foo13825))),
"foo13825(::Array{T, N}, ::Array, ::Vector) where {T, N} in")
mutable struct TLayout
x::Int8
y::Int16
z::Int32
end
tlayout = TLayout(5,7,11)
@test fieldnames(TLayout) == (:x, :y, :z) == Base.propertynames(tlayout)
@test hasfield(TLayout, :y)
@test !hasfield(TLayout, :a)
@test hasfield(Complex, :re)
@test !hasfield(Complex, :qxq)
@test hasproperty(tlayout, :x)
@test !hasproperty(tlayout, :p)
@test [(fieldoffset(TLayout,i), fieldname(TLayout,i), fieldtype(TLayout,i)) for i = 1:fieldcount(TLayout)] ==
[(0, :x, Int8), (2, :y, Int16), (4, :z, Int32)]
@test fieldnames(Complex) === (:re, :im)
@test_throws BoundsError fieldtype(TLayout, 0)
@test_throws ArgumentError fieldname(TLayout, 0)
@test_throws BoundsError fieldoffset(TLayout, 0)
@test_throws BoundsError fieldtype(TLayout, 4)
@test_throws ArgumentError fieldname(TLayout, 4)
@test_throws BoundsError fieldoffset(TLayout, 4)
@test fieldtype(Tuple{Vararg{Int8}}, 1) === Int8
@test fieldtype(Tuple{Vararg{Int8}}, 10) === Int8
@test_throws BoundsError fieldtype(Tuple{Vararg{Int8}}, 0)
# issue #30505
@test fieldtype(Union{Tuple{Char},Tuple{Char,Char}},2) === Char
@test_throws BoundsError fieldtype(Union{Tuple{Char},Tuple{Char,Char}},3)
@test fieldnames(NTuple{3, Int}) == ntuple(i -> fieldname(NTuple{3, Int}, i), 3) == (1, 2, 3)
@test_throws ArgumentError fieldnames(Union{})
@test_throws BoundsError fieldname(NTuple{3, Int}, 0)
@test_throws BoundsError fieldname(NTuple{3, Int}, 4)
@test fieldnames(NamedTuple{(:z,:a)}) === (:z,:a)
@test fieldname(NamedTuple{(:z,:a)}, 1) === :z
@test fieldname(NamedTuple{(:z,:a)}, 2) === :a
@test_throws ArgumentError fieldname(NamedTuple{(:z,:a)}, 3)
@test_throws ArgumentError fieldnames(NamedTuple)
@test_throws ArgumentError fieldnames(NamedTuple{T,Tuple{Int,Int}} where T)
@test_throws ArgumentError fieldnames(Real)
@test_throws ArgumentError fieldnames(AbstractArray)
@test fieldtype((NamedTuple{T,Tuple{Int,String}} where T), 1) === Int
@test fieldtype((NamedTuple{T,Tuple{Int,String}} where T), 2) === String
@test_throws BoundsError fieldtype((NamedTuple{T,Tuple{Int,String}} where T), 3)
@test fieldtype(NamedTuple, 42) === Any
@test_throws BoundsError fieldtype(NamedTuple, 0)
@test_throws BoundsError fieldtype(NamedTuple, -1)
@test fieldtype(NamedTuple{(:a,:b)}, 1) === Any
@test fieldtype(NamedTuple{(:a,:b)}, 2) === Any
@test fieldtype((NamedTuple{(:a,:b),T} where T<:Tuple{Vararg{Integer}}), 2) === Integer
@test_throws BoundsError fieldtype(NamedTuple{(:a,:b)}, 3)
# issue #32697
@test fieldtype(NamedTuple{(:x,:y), T} where T <: Tuple{Int, Union{Float64, Missing}}, :x) == Int
@test fieldtype(NamedTuple{(:x,:y), T} where T <: Tuple{Int, Union{Float64, Missing}}, :y) == Union{Float64, Missing}
@test fieldtypes(NamedTuple{(:a,:b)}) == (Any, Any)
@test fieldtypes((NamedTuple{T,Tuple{Int,String}} where T)) === (Int, String)
@test fieldtypes(TLayout) === (Int8, Int16, Int32)
import Base: datatype_alignment, return_types
@test datatype_alignment(UInt16) == 2
@test datatype_alignment(TLayout) == 4
let rts = return_types(TLayout)
@test length(rts) == 2 # general constructor and specific constructor
@test all(rts .== TLayout)
end
# issue #15447
f15447_line = @__LINE__() + 1
@noinline function f15447(s, a)
if s
return a
else
nb = 0
return nb
end
end
@test functionloc(f15447)[2] == f15447_line
# issue #14346
@noinline function f14346(id, mask, limit)
if id <= limit && mask[id]
return true
end
end
@test functionloc(f14346)[2] == @__LINE__() - 5
# issue #15714
# show variable names for slots and suppress spurious type warnings
function f15714(array_var15714)
for index_var15714 in eachindex(array_var15714)
array_var15714[index_var15714] += 0
end
end
function g15714(array_var15714)
for index_var15714 in eachindex(array_var15714)
array_var15714[index_var15714] += 0
end
let index_var15714
for outer index_var15714 in eachindex(array_var15714)
array_var15714[index_var15714] += 0
end
index_var15714
end
let index_var15714
for outer index_var15714 in eachindex(array_var15714)
array_var15714[index_var15714] += 0
end
index_var15714
end
end
import InteractiveUtils.code_warntype
used_dup_var_tested15714 = false
used_unique_var_tested15714 = false
function test_typed_ir_printing(Base.@nospecialize(f), Base.@nospecialize(types), must_used_vars)
src, rettype = code_typed(f, types, optimize=false)[1]
dupnames = Set()
slotnames = Set()
for name in src.slotnames
if name in slotnames || name === Symbol("")
push!(dupnames, name)
else
push!(slotnames, name)
end
end
# Make sure must_used_vars are in slotnames
for name in must_used_vars
@test name in slotnames
end
must_used_checked = Dict{Symbol,Bool}()
for sym in must_used_vars
must_used_checked[sym] = false
end
for str in (sprint(io -> code_warntype(io, f, types, optimize=false)),
repr("text/plain", src))
for var in must_used_vars
@test occursin(string(var), str)
end
# Check that we are not printing the bare slot numbers
for i in 1:length(src.slotnames)
name = src.slotnames[i]
if name in dupnames
if name in must_used_vars && occursin(Regex("_$i\\b"), str)
must_used_checked[name] = true
global used_dup_var_tested15714 = true
end
else
@test !occursin(Regex("_$i\\b"), str)
if name in must_used_vars
global used_unique_var_tested15714 = true
end
end
end
end
for sym in must_used_vars
if sym in dupnames
@test must_used_checked[sym]
end
must_used_checked[sym] = false
end
# Make sure printing an AST outside CodeInfo still works.
str = sprint(show, src.code)
# Check that we are printing the slot numbers when we don't have the context
# Use the variable names that we know should be present in the optimized AST
for i in 2:length(src.slotnames)
name = src.slotnames[i]
if name in must_used_vars && occursin(Regex("_$i\\b"), str)
must_used_checked[name] = true
end
end
for sym in must_used_vars
@test must_used_checked[sym]
end
end
test_typed_ir_printing(f15714, Tuple{Vector{Float32}},
[:array_var15714, :index_var15714])
test_typed_ir_printing(g15714, Tuple{Vector{Float32}},
[:array_var15714, :index_var15714])
#This test doesn't work with the new optimizer because we drop slotnames
#We may want to test it against debug info eventually
#@test used_dup_var_tested15715
@test used_unique_var_tested15714
let li = typeof(fieldtype).name.mt.cache.func::Core.MethodInstance,
lrepr = string(li),
mrepr = string(li.def),
lmime = repr("text/plain", li),
mmime = repr("text/plain", li.def)
@test lrepr == lmime == "MethodInstance for fieldtype(...)"
@test mrepr == mmime == "fieldtype(...) in Core"
end
# Linfo Tracing test
function tracefoo end
# Method Tracing test
methtracer(x::Ptr{Cvoid}) = (@test isa(unsafe_pointer_to_objref(x), Method); global didtrace = true; nothing)
let cmethtracer = @cfunction(methtracer, Cvoid, (Ptr{Cvoid},))
ccall(:jl_register_newmeth_tracer, Cvoid, (Ptr{Cvoid},), cmethtracer)
end
didtrace = false
tracefoo2(x, y) = x*y
@test didtrace
didtrace = false
tracefoo(x::Int64, y::Int64) = x*y
@test didtrace
didtrace = false
ccall(:jl_register_newmeth_tracer, Cvoid, (Ptr{Cvoid},), C_NULL)
# test for reflection over large method tables
for i = 1:100; @eval fLargeTable(::Val{$i}, ::Any) = 1; end
for i = 1:100; @eval fLargeTable(::Any, ::Val{$i}) = 2; end
fLargeTable(::Any...) = 3
@test length(methods(fLargeTable, Tuple{})) == 1
fLargeTable(::Complex, ::Complex) = 4
fLargeTable(::Union{ComplexF32, ComplexF64}...) = 5
@test length(methods(fLargeTable, Tuple{})) == 1
fLargeTable() = 4
@test length(methods(fLargeTable)) == 204
@test length(methods(fLargeTable, Tuple{})) == 1
@test fLargeTable(1im, 2im) == 4
@test fLargeTable(1.0im, 2.0im) == 5
@test_throws MethodError fLargeTable(Val(1), Val(1))
@test fLargeTable(Val(1), 1) == 1
@test fLargeTable(1, Val(1)) == 2
fLargeTable(::Union, ::Union) = "a"
@test fLargeTable(Union{Int, Missing}, Union{Int, Missing}) == "a"
fLargeTable(::Union, ::Union) = "b"
@test length(methods(fLargeTable)) == 205
@test fLargeTable(Union{Int, Missing}, Union{Int, Missing}) == "b"
# issue #15280
function f15280(x) end
@test functionloc(f15280)[2] > 0
# bug found in #16850, Base.url with backslashes on Windows
function module_depth(from::Module, to::Module)
if from === to || parentmodule(to) === to
return 0
else
return 1 + module_depth(from, parentmodule(to))
end
end
function has_backslashes(mod::Module)
for n in names(mod, all = true, imported = true)
isdefined(mod, n) || continue
Base.isdeprecated(mod, n) && continue
f = getfield(mod, n)
if isa(f, Module) && module_depth(Main, f) <= module_depth(Main, mod)
continue
end
h = has_backslashes(f)
h === nothing || return h
end
return nothing
end
function has_backslashes(f::Function)
for m in methods(f)
h = has_backslashes(m)
h === nothing || return h
end
return nothing
end
function has_backslashes(meth::Method)
if '\\' in string(meth.file)
return meth
else
return nothing
end
end
has_backslashes(x) = nothing
h16850 = has_backslashes(Base)
if Sys.iswindows()
if h16850 === nothing
@warn """No methods found in Base with backslashes in file name,
skipping test for `Base.url`"""
else
@test !('\\' in Base.url(h16850))
end
else
@test h16850 === nothing
end
# PR #18888: code_typed shouldn't cache, return_types should
f18888() = nothing
let
world = Core.Compiler.get_world_counter()
m = first(methods(f18888, Tuple{}))
ft = typeof(f18888)
code_typed(f18888, Tuple{}; optimize=false)
@test !isempty(m.specializations) # uncached, but creates the specializations entry
mi = Core.Compiler.specialize_method(m, Tuple{ft}, Core.svec())
interp = Core.Compiler.NativeInterpreter(world)
@test !Core.Compiler.haskey(Core.Compiler.code_cache(interp), mi)
@test !isdefined(mi, :cache)
code_typed(f18888, Tuple{}; optimize=true)
@test !isdefined(mi, :cache)
Base.return_types(f18888, Tuple{})
@test Core.Compiler.getindex(Core.Compiler.code_cache(interp), mi) === mi.cache
@test mi.cache isa Core.CodeInstance
@test !isdefined(mi.cache, :next)
end
# code_typed_by_type
@test Base.code_typed_by_type(Tuple{Type{<:Val}})[1][2] == Val
@test Base.code_typed_by_type(Tuple{typeof(sin), Float64})[1][2] === Float64
# New reflection methods in 0.6
struct ReflectionExample{T<:AbstractFloat, N}
x::Tuple{T, N}
end
@test !isabstracttype(Union{})
@test !isabstracttype(Union{Int,Float64})
@test isabstracttype(AbstractArray)
@test isabstracttype(AbstractSet{Int})
@test !isabstracttype(ReflectionExample)
@test !isabstracttype(Int)
@test !isabstracttype(TLayout)
@test !isprimitivetype(Union{})
@test !isprimitivetype(Union{Int,Float64})
@test !isprimitivetype(AbstractArray)
@test !isprimitivetype(AbstractSet{Int})
@test !isprimitivetype(ReflectionExample)
@test isprimitivetype(Int)
@test !isprimitivetype(TLayout)
@test !isstructtype(Union{})
@test !isstructtype(Union{Int,Float64})
@test !isstructtype(AbstractArray)
@test !isstructtype(AbstractSet{Int})
@test isstructtype(ReflectionExample)
@test !isstructtype(Int)
@test isstructtype(TLayout)
let
wrapperT(T) = Base.typename(T).wrapper
@test @inferred wrapperT(ReflectionExample{Float64, Int64}) == ReflectionExample
@test @inferred wrapperT(ReflectionExample{Float64, N} where N) == ReflectionExample
@test @inferred wrapperT(ReflectionExample{T, Int64} where T) == ReflectionExample
@test @inferred wrapperT(ReflectionExample) == ReflectionExample
@test @inferred wrapperT(Union{ReflectionExample{Union{},1},ReflectionExample{Float64,1}}) == ReflectionExample
@test_throws(ErrorException("typename does not apply to unions whose components have different typenames"),
Base.typename(Union{Int, Float64}))
end
# sizeof and nfields
@test sizeof(Int16) == 2
@test sizeof(ComplexF64) == 16
primitive type ParameterizedByte__{A,B} 8 end
@test sizeof(ParameterizedByte__) == 1
@test sizeof(nothing) == 0
@test sizeof(()) == 0
struct TypeWithIrrelevantParameter{T}
x::Int32
end
@test sizeof(TypeWithIrrelevantParameter) == sizeof(Int32)
@test sizeof(TypeWithIrrelevantParameter{Int8}) == sizeof(Int32)
@test sizeof(:abc) == 3
@test sizeof(Symbol("")) == 0
@test_throws(ErrorException("Abstract type Real does not have a definite size."),
sizeof(Real))
@test sizeof(Union{ComplexF32,ComplexF64}) == 16
@test sizeof(Union{Int8,UInt8}) == 1
@test_throws ErrorException sizeof(AbstractArray)
@test_throws ErrorException sizeof(Tuple)
@test_throws ErrorException sizeof(Tuple{Any,Any})
@test_throws ErrorException sizeof(String)
@test_throws ErrorException sizeof(Vector{Int})
@test_throws ErrorException sizeof(Symbol)
@test_throws ErrorException sizeof(Core.SimpleVector)
@test_throws ErrorException sizeof(Union{})
@test nfields((1,2)) == 2
@test nfields(()) == 0
@test nfields(nothing) == fieldcount(Nothing) == 0
@test nfields(1) == 0
@test_throws ArgumentError fieldcount(Union{})
@test fieldcount(Tuple{Any,Any,T} where T) == 3
@test fieldcount(Complex) == fieldcount(ComplexF32) == 2
@test fieldcount(Union{ComplexF32,ComplexF64}) == 2
@test fieldcount(Int) == 0
@test_throws(ArgumentError("type does not have a definite number of fields"),
fieldcount(Union{Complex,Pair}))
@test_throws ArgumentError fieldcount(Real)
@test_throws ArgumentError fieldcount(AbstractArray)
@test_throws ArgumentError fieldcount(Tuple{Any,Vararg{Any}})
# PR #22979
function test_similar_codeinfo(a, b)
@test a.code == b.code
@test a.slotnames == b.slotnames
@test a.slotflags == b.slotflags
end
@generated f22979(x...) = (y = 1; :(x[1] + x[2]))
let
x22979 = (1, 2.0, 3.0 + im)
T22979 = Tuple{typeof(f22979), typeof.(x22979)...}
world = Core.Compiler.get_world_counter()
match = Base._methods_by_ftype(T22979, -1, world)[1]
instance = Core.Compiler.specialize_method(match)
cinfo_generated = Core.Compiler.get_staged(instance)
@test_throws ErrorException Base.uncompressed_ir(match.method)
test_similar_codeinfo(code_lowered(f22979, typeof(x22979))[1], cinfo_generated)
cinfos = code_lowered(f22979, typeof.(x22979), generated=true)
@test length(cinfos) == 1
cinfo = cinfos[1]
test_similar_codeinfo(cinfo, cinfo_generated)
@test_throws ErrorException code_lowered(f22979, typeof.(x22979), generated=false)
end
module MethodDeletion
using Test, Random
# Deletion after compiling top-level call
bar1(x) = 1
bar1(x::Int) = 2
foo1(x) = bar1(x)
faz1(x) = foo1(x)
@test faz1(1) == 2
@test faz1(1.0) == 1
m = first(methods(bar1, Tuple{Int}))
Base.delete_method(m)
@test bar1(1) == 1
@test bar1(1.0) == 1
@test foo1(1) == 1
@test foo1(1.0) == 1
@test faz1(1) == 1
@test faz1(1.0) == 1
# Deletion after compiling middle-level call
bar2(x) = 1
bar2(x::Int) = 2
foo2(x) = bar2(x)
faz2(x) = foo2(x)
@test foo2(1) == 2
@test foo2(1.0) == 1
m = first(methods(bar2, Tuple{Int}))
Base.delete_method(m)
@test bar2(1.0) == 1
@test bar2(1) == 1
@test foo2(1) == 1
@test foo2(1.0) == 1
@test faz2(1) == 1
@test faz2(1.0) == 1
# Deletion after compiling low-level call
bar3(x) = 1
bar3(x::Int) = 2
foo3(x) = bar3(x)
faz3(x) = foo3(x)
@test bar3(1) == 2
@test bar3(1.0) == 1
m = first(methods(bar3, Tuple{Int}))
Base.delete_method(m)
@test bar3(1) == 1
@test bar3(1.0) == 1
@test foo3(1) == 1
@test foo3(1.0) == 1
@test faz3(1) == 1
@test faz3(1.0) == 1
# Deletion before any compilation
bar4(x) = 1
bar4(x::Int) = 2
foo4(x) = bar4(x)
faz4(x) = foo4(x)
m = first(methods(bar4, Tuple{Int}))
Base.delete_method(m)
@test bar4(1) == 1
@test bar4(1.0) == 1
@test foo4(1) == 1
@test foo4(1.0) == 1
@test faz4(1) == 1
@test faz4(1.0) == 1
# Methods with keyword arguments
fookw(x; direction=:up) = direction
fookw(y::Int) = 2
@test fookw("string") == :up
@test fookw(1) == 2
m = collect(methods(fookw))[2]
Base.delete_method(m)
@test fookw(1) == 2
@test_throws MethodError fookw("string")
# functions with many methods
types = (Float64, Int32, String)
for T1 in types, T2 in types, T3 in types
@eval foomany(x::$T1, y::$T2, z::$T3) = y
end
@test foomany(Int32(5), "hello", 3.2) == "hello"
m = first(methods(foomany, Tuple{Int32, String, Float64}))
Base.delete_method(m)
@test_throws MethodError foomany(Int32(5), "hello", 3.2)
struct EmptyType end
Base.convert(::Type{EmptyType}, x::Integer) = EmptyType()
m = first(methods(convert, Tuple{Type{EmptyType}, Integer}))
Base.delete_method(m)
@test_throws MethodError convert(EmptyType, 1)
# parametric methods
parametric(A::Array{T,N}, i::Vararg{Int,N}) where {T,N} = N
@test parametric(rand(2,2), 1, 1) == 2
m = first(methods(parametric))
Base.delete_method(m)
@test_throws MethodError parametric(rand(2,2), 1, 1)
# Deletion and ambiguity detection
foo(::Int, ::Int) = 1
foo(::Real, ::Int) = 2
foo(::Int, ::Real) = 3
Base.delete_method(first(methods(foo)))
@test_throws MethodError foo(1, 1)
foo(::Int, ::Int) = 1
foo(1, 1)
Base.delete_method(first(methods(foo)))
@test_throws MethodError foo(1, 1)
# multiple deletions and ambiguities
typeparam(::Type{T}, a::Array{T}) where T<:AbstractFloat = 1
typeparam(::Type{T}, a::Array{T}) where T = 2
for mth in collect(methods(typeparam))
Base.delete_method(mth)
end
typeparam(::Type{T}, a::AbstractArray{T}) where T<:AbstractFloat = 1
typeparam(::Type{T}, a::AbstractArray{T}) where T = 2
@test typeparam(Float64, rand(2)) == 1
@test typeparam(Int, rand(Int, 2)) == 2
# prior ambiguities (issue #28899)
uambig(::Union{Int,Nothing}) = 1
uambig(::Union{Float64,Nothing}) = 2
@test uambig(1) == 1
@test uambig(1.0) == 2
@test_throws MethodError uambig(nothing)
m = which(uambig, Tuple{Int})
Base.delete_method(m)
@test_throws MethodError uambig(1)
@test uambig(1.0) == 2
@test uambig(nothing) == 2
end
module HasmethodKwargs
using Test
f(x::Int; y=3) = x + y
@test hasmethod(f, Tuple{Int})
@test hasmethod(f, Tuple{Int}, ())
@test hasmethod(f, Tuple{Int}, (:y,))
@test !hasmethod(f, Tuple{Int}, (:jeff,))
@test !hasmethod(f, Tuple{Int}, (:y,), world=typemin(UInt))
g(; b, c, a) = a + b + c
h(; kwargs...) = 4
for gh = (g, h)
@test hasmethod(gh, Tuple{})
@test hasmethod(gh, Tuple{}, ())
@test hasmethod(gh, Tuple{}, (:a,))
@test hasmethod(gh, Tuple{}, (:a, :b))
@test hasmethod(gh, Tuple{}, (:a, :b, :c))
end
@test !hasmethod(g, Tuple{}, (:a, :b, :c, :d))
@test hasmethod(h, Tuple{}, (:a, :b, :c, :d))
end
# issue #31353
function f31353(f, x::Array{<:Dict})
end
@test hasmethod(f31353, Tuple{Any, Array{D}} where D<:Dict)
@test !hasmethod(f31353, Tuple{Any, Array{D}} where D<:AbstractDict)
# issue #26267
module M26267
import Test
foo(x) = x
end
@test !(:Test in names(M26267, all=true, imported=false))
@test :Test in names(M26267, all=true, imported=true)
@test :Test in names(M26267, all=false, imported=true)
# issue #20872
f20872(::Val{N}, ::Val{N}) where {N} = true
f20872(::Val, ::Val) = false
@test which(f20872, Tuple{Val{N},Val{N}} where N).sig == Tuple{typeof(f20872), Val{N}, Val{N}} where N
@test which(f20872, Tuple{Val,Val}).sig == Tuple{typeof(f20872), Val, Val}
@test which(f20872, Tuple{Val,Val{N}} where N).sig == Tuple{typeof(f20872), Val, Val}
@test_throws ErrorException which(f20872, Tuple{Any,Val{N}} where N)
@test which(Tuple{typeof(f20872), Val{1}, Val{2}}).sig == Tuple{typeof(f20872), Val, Val}
module M29962 end
# make sure checking if a binding is deprecated does not resolve it
@test !Base.isdeprecated(M29962, :sin) && !Base.isbindingresolved(M29962, :sin)
# @locals
using Base: @locals
let
local x, y
global z
@test isempty(keys(@locals))
x = 1
@test @locals() == Dict{Symbol,Any}(:x=>1)
y = ""
@test @locals() == Dict{Symbol,Any}(:x=>1,:y=>"")
for i = 8:8
@test @locals() == Dict{Symbol,Any}(:x=>1,:y=>"",:i=>8)
end
for i = 42:42
local x
@test @locals() == Dict{Symbol,Any}(:y=>"",:i=>42)
end
@test @locals() == Dict{Symbol,Any}(:x=>1,:y=>"")
x = (y,)
@test @locals() == Dict{Symbol,Any}(:x=>("",),:y=>"")
end
function _test_at_locals1(::Any, ::Any)
x = 1
@test @locals() == Dict{Symbol,Any}(:x=>1)
end
_test_at_locals1(1,1)
function _test_at_locals2(a::Any, ::Any, c::T) where T
x = 2
@test @locals() == Dict{Symbol,Any}(:x=>2,:a=>a,:c=>c,:T=>typeof(c))
end
_test_at_locals2(1,1,"")
_test_at_locals2(1,1,0.5f0)
@testset "issue #31687" begin
import InteractiveUtils._dump_function
@noinline f31687_child(i) = f31687_nonexistent(i)
f31687_parent() = f31687_child(0)
params = Base.CodegenParams()
_dump_function(f31687_parent, Tuple{},
#=native=#false, #=wrapper=#false, #=strip=#false,
#=dump_module=#true, #=syntax=#:att, #=optimize=#false, :none,
#=binary=#false,
params)
end
@test nameof(Any) === :Any
@test nameof(:) === :Colon
@test nameof(Core.Intrinsics.mul_int) === :mul_int
@test nameof(Core.Intrinsics.arraylen) === :arraylen
module TestMod33403
f(x) = 1
f(x::Int) = 2
g() = 3
module Sub
import ..TestMod33403: f
f(x::Char) = 3
end
end
@testset "methods with module" begin
using .TestMod33403: f, g
@test length(methods(f)) == 3
@test length(methods(f, (Int,))) == 1
@test length(methods(f, TestMod33403)) == 2
@test length(methods(f, [TestMod33403])) == 2
@test length(methods(f, (Int,), TestMod33403)) == 1
@test length(methods(f, (Int,), [TestMod33403])) == 1
@test length(methods(f, TestMod33403.Sub)) == 1
@test length(methods(f, [TestMod33403.Sub])) == 1
@test length(methods(f, (Char,), TestMod33403.Sub)) == 1
@test length(methods(f, (Int,), TestMod33403.Sub)) == 0
@test length(methods(g, ())) == 1
end
module BodyFunctionLookup
f1(x, y; a=1) = error("oops")
f2(f::Function, args...; kwargs...) = f1(args...; kwargs...)
end
@testset "bodyfunction" begin
m = first(methods(BodyFunctionLookup.f1))
f = Base.bodyfunction(m)
@test occursin("f1#", String(nameof(f)))
m = first(methods(BodyFunctionLookup.f2))
f = Base.bodyfunction(m)
@test f !== Core._apply_iterate
@test f !== Core._apply
@test occursin("f2#", String(nameof(f)))
end
@testset "code_typed(; world)" begin
mod = @eval module $(gensym()) end
@eval mod foo() = 1
world1 = Base.get_world_counter()
@test only(code_typed(mod.foo, ())).second == Int
@test only(code_typed(mod.foo, (); world=world1)).second == Int
@eval mod foo() = 2.
world2 = Base.get_world_counter()
@test only(code_typed(mod.foo, ())).second == Float64
@test only(code_typed(mod.foo, (); world=world1)).second == Int
@test only(code_typed(mod.foo, (); world=world2)).second == Float64
end
@testset "default_tt" begin
m = Module()
@eval m f1() = return
@test Base.default_tt(m.f1) == Tuple{}
@eval m f2(a) = return
@test Base.default_tt(m.f2) == Tuple{Any}
@eval m f3(a::Integer) = return
@test Base.default_tt(m.f3) == Tuple{Integer}
@eval m f4() = return
@eval m f4(a) = return
@test Base.default_tt(m.f4) == Tuple
end
| [
2,
770,
2393,
318,
257,
636,
286,
22300,
13,
13789,
318,
17168,
25,
3740,
1378,
73,
377,
498,
648,
13,
2398,
14,
43085,
198,
198,
3500,
6208,
198,
198,
2,
2438,
62,
30191,
1220,
2438,
62,
297,
14761,
357,
21949,
1303,
23,
23516,
8... | 2.341793 | 13,751 |
<reponame>nveldt/SparseCardDSFM
using Plots
include("src/pwl_approx.jl")
## Define a submodular cardinality based function via Combined gadget
k = 2
w = random_scb_function(k::Int64)
w = 10*w/maximum(w)
pl = scatter(0:k,w,legend = false, xticks = 0:k)
epsi = 10.0
z0, zk, a, b, cgf = SubCardFun_to_CGF_weights(w,epsi,false)
Jtrue = length(a) + 1
J = Jtrue
xs = 0:0.01:k
ys = cgf(xs)
plot!(pl,xs,ys,color= :blue)
# Refine to get better approximation
z0l, zkl, al, bl, cgfl, best_eps = Refined_SCB_to_CGF(w,epsi)
xs = 0:0.01:k
ys = cgfl(xs)
plot!(pl,xs,ys,color= :red)
| [
27,
7856,
261,
480,
29,
77,
303,
335,
83,
14,
50,
29572,
16962,
5258,
23264,
198,
3500,
1345,
1747,
198,
198,
17256,
7203,
10677,
14,
79,
40989,
62,
1324,
13907,
13,
20362,
4943,
198,
198,
2235,
2896,
500,
257,
850,
4666,
934,
38691... | 2.091575 | 273 |
@enum Shape CIRCLE RECTANGLE ROUNDED_RECTANGLE DISTANCEFIELD TRIANGLE
@enum CubeSides TOP BOTTOM FRONT BACK RIGHT LEFT
struct Grid{N, T <: AbstractRange}
dims::NTuple{N, T}
end
Base.ndims(::Grid{N,T}) where {N,T} = N
Grid(ranges::AbstractRange...) = Grid(ranges)
function Grid(a::Array{T, N}) where {N, T}
s = Vec{N, Float32}(size(a))
smax = maximum(s)
s = s./smax
Grid(ntuple(Val{N}) do i
range(0, stop=s[i], length=size(a, i))
end)
end
Grid(a::AbstractArray, ranges...) = Grid(a, ranges)
"""
This constructor constructs a grid from ranges given as a tuple.
Due to the approach, the tuple `ranges` can consist of NTuple(2, T)
and all kind of range types. The constructor will make sure that all ranges match
the size of the dimension of the array `a`.
"""
function Grid(a::AbstractArray{T, N}, ranges::Tuple) where {T, N}
length(ranges) =! N && throw(ArgumentError(
"You need to supply a range for every dimension of the array. Given: $ranges
given Array: $(typeof(a))"
))
Grid(ntuple(Val(N)) do i
range(first(ranges[i]), stop=last(ranges[i]), length=size(a, i))
end)
end
Base.length(p::Grid) = prod(size(p))
Base.size(p::Grid) = map(length, p.dims)
function Base.getindex(p::Grid{N,T}, i) where {N,T}
inds = ind2sub(size(p), i)
return Point{N, eltype(T)}(ntuple(Val(N)) do i
p.dims[i][inds[i]]
end)
end
Base.iterate(g::Grid, i = 1) = i <= length(g) ? (g[i], i + 1) : nothing
GLAbstraction.isa_gl_struct(x::Grid) = true
GLAbstraction.toglsltype_string(t::Grid{N,T}) where {N,T} = "uniform Grid$(N)D"
function GLAbstraction.gl_convert_struct(g::Grid{N, T}, uniform_name::Symbol) where {N,T}
return Dict{Symbol, Any}(
Symbol("$uniform_name.start") => Vec{N, Float32}(minimum.(g.dims)),
Symbol("$uniform_name.stop") => Vec{N, Float32}(maximum.(g.dims)),
Symbol("$uniform_name.lendiv") => Vec{N, Cint}(length.(g.dims) .- 1),
Symbol("$uniform_name.dims") => Vec{N, Cint}(map(length, g.dims))
)
end
function GLAbstraction.gl_convert_struct(g::Grid{1, T}, uniform_name::Symbol) where T
x = g.dims[1]
return Dict{Symbol, Any}(
Symbol("$uniform_name.start") => Float32(minimum(x)),
Symbol("$uniform_name.stop") => Float32(maximum(x)),
Symbol("$uniform_name.lendiv") => Cint(length(x) - 1),
Symbol("$uniform_name.dims") => Cint(length(x))
)
end
import Base: getindex, length, iterate, ndims, setindex!, eltype
struct GridZRepeat{G, T, N} <: AbstractArray{Point{3, T}, N}
grid::G
z::Array{T, N}
end
Base.size(g::GridZRepeat) = size(g.z)
Base.size(g::GridZRepeat, i) = size(g.z, i)
Base.IndexStyle(::Type{<:GridZRepeat}) = Base.IndexLinear()
function Base.getindex(g::GridZRepeat{G, T}, i) where {G,T}
pxy = g.grid[i]
Point{3, T}(pxy[1], pxy[2], g.z[i])
end
struct GLVisualizeShader <: AbstractLazyShader
paths::Tuple
kw_args::Dict{Symbol, Any}
function GLVisualizeShader(paths::String...; view = Dict{String, String}(), kw_args...)
# TODO properly check what extensions are available
@static if !Sys.isapple()
view["GLSL_EXTENSIONS"] = "#extension GL_ARB_conservative_depth: enable"
view["SUPPORTED_EXTENSIONS"] = "#define DETPH_LAYOUT"
end
args = Dict{Symbol, Any}(kw_args)
args[:view] = view
args[:fragdatalocation] = [(0, "fragment_color"), (1, "fragment_groupid")]
new(map(x-> assetpath("shader", x), paths), args)
end
end
| [
31,
44709,
25959,
327,
4663,
29931,
371,
9782,
15567,
2538,
371,
15919,
1961,
62,
23988,
15567,
2538,
360,
8808,
19240,
44603,
37679,
15567,
2538,
198,
31,
44709,
23315,
50,
1460,
28662,
347,
29089,
2662,
8782,
35830,
28767,
33621,
12509,
... | 2.286923 | 1,537 |
<filename>src/nodes/variable.jl
# Variable reference.
mutable struct VariableNode <: AbstractSQLNode
name::Symbol
VariableNode(; name::Union{Symbol, AbstractString}) =
new(Symbol(name))
end
VariableNode(name) =
VariableNode(name = name)
"""
Var(; name)
Var(name)
Var.name Var."name" Var[name] Var["name"]
A reference to a query parameter.
# Examples
```jldoctest
julia> person = SQLTable(:person, columns = [:person_id, :year_of_birth]);
julia> q = From(person) |>
Where(Get.year_of_birth .> Var.year);
julia> print(render(q))
SELECT "person_1"."person_id", "person_1"."year_of_birth"
FROM "person" AS "person_1"
WHERE ("person_1"."year_of_birth" > :year)
```
"""
Var(args...; kws...) =
VariableNode(args...; kws...) |> SQLNode
dissect(scr::Symbol, ::typeof(Var), pats::Vector{Any}) =
dissect(scr, VariableNode, pats)
Base.getproperty(::typeof(Var), name::Symbol) =
Var(name)
Base.getproperty(::typeof(Var), name::AbstractString) =
Var(name)
Base.getindex(::typeof(Var), name::Union{Symbol, AbstractString}) =
Var(name)
PrettyPrinting.quoteof(n::VariableNode, ctx::QuoteContext) =
Expr(:., nameof(Var), quoteof(n.name))
label(n::VariableNode) =
n.name
| [
27,
34345,
29,
10677,
14,
77,
4147,
14,
45286,
13,
20362,
198,
2,
35748,
4941,
13,
198,
198,
76,
18187,
2878,
35748,
19667,
1279,
25,
27741,
17861,
19667,
198,
220,
220,
220,
1438,
3712,
13940,
23650,
628,
220,
220,
220,
35748,
19667,... | 2.426641 | 518 |
<reponame>Pooksoft/PooksoftOptionsKit.jl
# packages -
using Dates
using Optim
using JSON
using DataFrames
using Statistics
using LsqFit
using Reexport
@reexport using PooksoftBase
# include my code -
include("./base/Types.jl")
include("./base/Checks.jl")
include("./base/Intrinsic.jl")
include("./base/Binary.jl")
include("./base/Ternary.jl")
include("./base/Greeks.jl")
include("./base/Compute.jl")
include("./base/Factory.jl")
include("./base/Volatility.jl")
include("./base/Utility.jl")
include("./base/Longstaff.jl")
include("./base/Breakeven.jl") | [
27,
7856,
261,
480,
29,
47,
566,
4215,
14,
47,
566,
4215,
29046,
20827,
13,
20362,
198,
2,
10392,
532,
198,
3500,
44712,
198,
3500,
30011,
198,
3500,
19449,
198,
3500,
6060,
35439,
198,
3500,
14370,
198,
3500,
406,
31166,
31805,
198,
... | 2.641148 | 209 |
<reponame>Jagirhussan/ModiaMath.jl<gh_stars>0
module test_Variables
import ModiaMath
# Desired:
# using Test
#
# In order that Test needs not to be defined in the user environment, it is included via ModiaMath:
@static if VERSION < v"0.7.0-DEV.2005"
using Base.Test
else
using ModiaMath.Test
end
mutable struct Revolute <: ModiaMath.AbstractComponentWithVariables
_internal::ModiaMath.ComponentInternal
phi::ModiaMath.RealScalar
w::ModiaMath.RealScalar
a::ModiaMath.RealScalar
tau::ModiaMath.RealScalar
drive::Bool
function Revolute(;phi0::Float64=0.0, w0::Float64=0.0, drive::Bool=false)
this = new(ModiaMath.ComponentInternal(:Revolute))
phi = ModiaMath.RealScalar(:phi, this, start=phi0, unit="rad", fixed=true, info="Relative rotation angle", numericType=drive ? ModiaMath.WR : ModiaMath.XD_EXP)
w = ModiaMath.RealScalar("w", this, start=w0, unit="rad/s", fixed=true, info="Relative angular velocity", integral=phi, numericType=drive ? ModiaMath.WR : ModiaMath.XD_EXP, analysis=ModiaMath.OnlyDynamicAnalysis)
a = ModiaMath.RealScalar("a", this, start=0.0, unit="rad/s^2", info="Relative angular acceleration", integral=w, numericType=drive ? ModiaMath.WR : ModiaMath.DER_XD_EXP, analysis=ModiaMath.OnlyDynamicAnalysis)
tau = ModiaMath.RealScalar(:tau, this, start=0.0, unit="N*m", info="Driving torque", numericType=ModiaMath.WR, analysis=ModiaMath.QuasiStaticAndDynamicAnalysis)
return this
end
end
function Base.show(io::IO, rev::Revolute)
print("Revolute(",
"\n phi = ", rev.phi,
"\n w = ", rev.w,
"\n a = ", rev.a,
"\n tau = ", rev.tau,
"\n )")
end
mutable struct Frame <: ModiaMath.AbstractComponentWithVariables
_internal::ModiaMath.ComponentInternal
r::ModiaMath.RealSVector3
q::ModiaMath.RealSVector{4}
derq::ModiaMath.RealSVector{4}
v::ModiaMath.RealSVector3
w::ModiaMath.RealSVector3
a::ModiaMath.RealSVector3
z::ModiaMath.RealSVector3
f::ModiaMath.RealSVector3
t::ModiaMath.RealSVector3
residue_w::ModiaMath.RealSVector3
residue_f::ModiaMath.RealSVector3
residue_t::ModiaMath.RealSVector3
residue_q::ModiaMath.RealScalar
drive::Bool
function Frame(;r0=zeros(3), q0=[0,0,0,1], v0=zeros(3), w0=zeros(3), drive::Bool=false)
this = new(ModiaMath.ComponentInternal(:Frame))
r = ModiaMath.RealSVector3(:r, this, start=r0, unit="m", fixed=true, info="Relative position", numericType=drive ? ModiaMath.WR : ModiaMath.XD_EXP)
q = ModiaMath.RealSVector{4}(:q, this, start=q0, fixed=true, info="Relative quaternion", numericType=drive ? ModiaMath.WR : ModiaMath.XD_IMP)
derq = ModiaMath.RealSVector{4}(:derq, this, unit="1/s", info="der(q)", integral=q, numericType=drive ? ModiaMath.WR : ModiaMath.DER_XD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
v = ModiaMath.RealSVector3(:v, this, start=v0, unit="m/s", fixed=true, info="Relative velocity", integral=r, numericType=drive ? ModiaMath.WR : ModiaMath.XD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
w = ModiaMath.RealSVector3(:w, this, start=w0, unit="rad/s", fixed=true, info="Relative angular velocity", numericType=drive ? ModiaMath.WR : ModiaMath.XD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
a = ModiaMath.RealSVector3(:a, this, unit="m/s^2", info="Relative acceleration", integral=v, numericType=drive ? ModiaMath.WR : ModiaMath.DER_XD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
z = ModiaMath.RealSVector3(:z, this, unit="rad/s^2", info="Relative angular acceleration", integral=w, numericType=drive ? ModiaMath.WR : ModiaMath.DER_XD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
f = ModiaMath.RealSVector3(:f, this, unit="N", info="Driving force", numericType=ModiaMath.WR, analysis=ModiaMath.QuasiStaticAndDynamicAnalysis)
t = ModiaMath.RealSVector3(:t, this, unit="N*m", info="Driving torque", numericType=ModiaMath.WR, analysis=ModiaMath.QuasiStaticAndDynamicAnalysis)
residue_w = ModiaMath.RealSVector3(:residue_w, this, info="Angular velocity residue", numericType=ModiaMath.FD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
residue_f = ModiaMath.RealSVector3(:residue_f, this, info="Momentum equation residue", numericType=ModiaMath.FD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
residue_t = ModiaMath.RealSVector3(:residue_t, this, info="Angular momentum equation residue", numericType=ModiaMath.FD_IMP, analysis=ModiaMath.OnlyDynamicAnalysis)
residue_q = ModiaMath.RealScalar(:residue_q, this, info="Quaternion constraint residue", numericType=ModiaMath.FC, analysis=ModiaMath.OnlyDynamicAnalysis)
this.drive = drive
return this
end
end
function Base.show(io::IO, frame::Frame)
print("Revolute(",
"\n r = ", frame.r,
"\n q = ", frame.q,
"\n v = ", frame.v,
"\n w = ", frame.w,
"\n a = ", frame.a,
"\n z = ", frame.z,
"\n f = ", frame.f,
"\n t = ", frame.t,
"\n )")
end
ModiaMath.@component Robot(;phi10=0.0, phi20=0.0, var10=0.0, r0=zeros(3), q0=zeros(4)) begin
rev1 = Revolute(phi0=phi10)
rev2 = Revolute(phi0=phi20)
var1 = ModiaMath.RealScalar(start=var10, numericType=ModiaMath.XA)
res1 = ModiaMath.RealScalar(numericType=ModiaMath.FD_IMP)
frame = Frame(r0=r0, q0=q0)
end
ModiaMath.@component Robot2(;phi10=0.0, phi20=0.0, r0=zeros(3), q0=[0.0, 0.0, 0.0, 1.0]) begin
rev1 = Revolute(phi0=phi10, drive=true)
rev2 = Revolute(phi0=phi20, drive=true)
frame = Frame(r0=r0, q0=q0, drive=true)
end
ModiaMath.@component Robot3(;phi10=0.0, phi20=0.0, phi30=-2.0) begin
rev1 = Revolute(phi0=phi10, drive=true)
rev2 = Revolute(phi0=phi20, drive=true)
rev3 = Revolute(phi0=phi30)
res1 = ModiaMath.RealScalar(numericType=ModiaMath.FC)
end
@testset "ModiaMath: test_Variables.jl" begin
@testset "Dynamic analysis" begin # ------------------------------ Dynamic analysis
r0 = [1.0,2.0,3.0]
q0 = [0.5,0.5,0.0,sqrt(0.5^2 + 0.5^2)]
robot = Robot(phi10=1.0, phi20=2.0, var10=3.0, r0=r0, q0=q0)
robot.rev1.a.value = 10 * 2.22
robot.rev2.a.value = 10 * 4.44
println("\n... robot = ", robot)
println("\n... Print variables of robot")
m = ModiaMath.ModelVariables(robot)
ModiaMath.print_ModelVariables(m)
println("\n... Copy start values to x")
x = zeros(5 + 7 + 6)
x_fixed = fill(false, 5 + 7 + 6)
ModiaMath.copy_start_to_x!(m, x, x_fixed)
x0 = [1.0, 0.0, 2.0, 0.0, 1.0, 2.0, 3.0, 0.5, 0.5, 0.0, sqrt(0.5^2 + 0.5^2), 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0 ]
x0_fixed = fill(true, 17)
append!(x0_fixed, false)
@test isapprox(x, x0)
@test x_fixed == x0_fixed
println("\n... Copy x and der_x to variables")
x = [1.11, 2.22 , 3.33, 4.44, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6 , 5.55]
derx = [2.22, 22.2, 4.44, 44.4, 1.1, 2.2, 3.3, 4.44, 5.55, 6.66, 7.77, 1.11, 2.22, 3.33, 4.44, 5.55, 6.66, 0.0]
ModiaMath.copy_x_and_derx_to_variables!(0.5, x, derx, m)
@test isapprox(x, [robot.rev1.phi.value, robot.rev1.w.value, robot.rev2.phi.value, robot.rev2.w.value,
robot.frame.r.value[1], robot.frame.r.value[2], robot.frame.r.value[3],
robot.frame.q.value[1], robot.frame.q.value[2], robot.frame.q.value[3], robot.frame.q.value[4],
robot.frame.v.value[1], robot.frame.v.value[2], robot.frame.v.value[3],
robot.frame.w.value[1], robot.frame.w.value[2], robot.frame.w.value[3],
robot.var1.value], rtol=1e-15)
@test isapprox(derx[2], robot.rev1.a.value, rtol=1e-15)
@test isapprox(derx[4], robot.rev2.a.value, rtol=1e-15)
println("\n... Copy variables to residues")
residues = zeros(5 + 7 + 6)
ModiaMath.copy_variables_to_residue!(m, x, derx, residues)
println("residue = ", residues)
@test isapprox(residues, zeros(5 + 7 + 6), atol=1e-12)
end
@testset "Kinematic analysis 1" begin # ---------------------------- Kinematic analysis 1
robot2 = Robot2(phi10=1.0, phi20=2.0)
println("\n... robot2 = ", robot2)
println("\n... Print variables of robot2")
m = ModiaMath.ModelVariables(robot2, analysis=ModiaMath.KinematicAnalysis)
ModiaMath.print_ModelVariables(m)
println("\n... Copy start values to x")
x = [1.11]
x_fixed = [false]
ModiaMath.copy_start_to_x!(m, x, x_fixed)
@test isapprox(x, [0.0])
@test x_fixed == [true]
println("\n... Copy x and der_x to variables")
x = fill(2.1, 1)
derx = fill(3.2, 1)
ModiaMath.copy_x_and_derx_to_variables!(0.5, x, derx, m)
@test isapprox(x, [m.var[2].value], rtol=1e-15) # var[2] = _dummy_x
println("\n... Copy variables to residues")
residues = zeros(m.nx)
ModiaMath.copy_variables_to_residue!(m, x, derx, residues)
@test isapprox(residues, [3.2 - (-2.1)], atol=1e-12)
end
@testset "Kinematic analysis 2" begin # ---------------------------- Kinematic analysis 2
robot3 = Robot3(phi10=1.0, phi20=2.0, phi30=-2.0)
println("\n... robot3 = ", robot3)
println("\n... Print variables of robot3")
m = ModiaMath.ModelVariables(robot3, analysis=ModiaMath.KinematicAnalysis)
ModiaMath.print_ModelVariables(m)
println("\n... Copy start values to x")
x = zeros(m.nx)
x_fixed = fill(false, m.nx)
ModiaMath.copy_start_to_x!(m, x, x_fixed)
@test isapprox(x, [-2.0])
@test x_fixed == [true]
println("\n... Copy x and der_x to variables")
x = [1.11]
derx = [2.22]
ModiaMath.copy_x_and_derx_to_variables!(0.5, x, derx, m)
@test isapprox(x, [robot3.rev3.phi.value], rtol=1e-15)
println("\n... Copy variables to residues")
residues = zeros(m.nx)
ModiaMath.copy_variables_to_residue!(m, x, derx, residues)
@test isapprox(residues, zeros(1), atol=1e-12)
end
end
end | [
27,
7856,
261,
480,
29,
41,
363,
343,
71,
1046,
272,
14,
5841,
544,
37372,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
21412,
1332,
62,
23907,
2977,
198,
198,
11748,
3401,
544,
37372,
198,
198,
2,
2935,
1202,
25,
198,
2,
220,
22... | 1.942311 | 5,807 |
<gh_stars>0
# Note that this script can accept some limited command-line arguments, run
# `julia build_tarballs.jl --help` to see a usage message.
using BinaryBuilder
name = "iso_codes"
version = v"4.3"
# Collection of sources required to build iso-codes
sources = [
"https://salsa.debian.org/iso-codes-team/iso-codes/-/archive/iso-codes-$(version.major).$(version.minor)/iso-codes-iso-codes-$(version.major).$(version.minor).tar.bz2" =>
"6b539f915d02c957c45fce8133670811f1c36a1f1535d5af3dd95dc519d3c386"
]
# Bash recipe for building across all platforms
script = raw"""
cd $WORKSPACE/srcdir/iso-codes-*/
apk add gettext
./configure --prefix=${prefix} --host=${target}
make -j${nproc}
make install
"""
# These are the platforms we will build for by default, unless further
# platforms are passed in on the command line
platforms = supported_platforms()
# The products that we will ensure are always built
products = Product[
FileProduct("share/iso-codes", :iso_codes_dir),
]
# Dependencies that must be installed before this package can be built
dependencies = [
]
# Build the tarballs, and possibly a `build.jl` as well.
build_tarballs(ARGS, name, version, sources, script, platforms, products, dependencies)
| [
27,
456,
62,
30783,
29,
15,
198,
2,
5740,
326,
428,
4226,
460,
2453,
617,
3614,
3141,
12,
1370,
7159,
11,
1057,
198,
2,
4600,
73,
43640,
1382,
62,
18870,
21591,
13,
20362,
1377,
16794,
63,
284,
766,
257,
8748,
3275,
13,
198,
3500,... | 3.042079 | 404 |
<reponame>typedb-osi/TypeDBClient.jl<gh_stars>1-10
# This file is a part of TypeDBClient. License is MIT: https://github.com/Humans-of-Julia/TypeDBClient.jl/blob/main/LICENSE
# ---------------------------------------------------------------------------------
module DatabaseManagerRequestBuilder
using ..TypeDBClient: Proto
create_req(name::String) = Proto.CoreDatabaseManager_Create_Req(; name)
contains_req(name::String) = Proto.CoreDatabaseManager_Contains_Req(; name)
all_req() = Proto.CoreDatabaseManager_All_Req()
end
# ---------------------------------------------------------------------------------
module DatabaseRequestBuilder
using ..TypeDBClient: Proto
schema_req(name::String) = Proto.CoreDatabase_Schema_Req(; name)
delete_req(name::String) = Proto.CoreDatabase_Delete_Req(; name)
end
# ---------------------------------------------------------------------------------
module SessionRequestBuilder
using ..TypeDBClient: Proto, EnumType, Bytes
function open_req(database::String, _type::EnumType, options::Proto.Options)
return Proto.Session_Open_Req(; database, _type, options)
end
close_req(session_id::Bytes) = Proto.Session_Close_Req(; session_id)
pulse_req(session_id::Bytes) = Proto.Session_Pulse_Req(; session_id)
end
# ---------------------------------------------------------------------------------
module TransactionRequestBuilder
using ..TypeDBClient: Proto, EnumType, Bytes
using UUIDs: UUID
function client_msg(reqs::AbstractVector{Proto.Transaction_Req})
return Proto.Transaction_Client(; reqs)
end
function stream_req(req_id::Bytes)
stream_req = Proto.Transaction_Stream_Req()
return Proto.Transaction_Req(; req_id, stream_req)
end
function open_req(
session_id::Bytes,
_type::EnumType,
options::Proto.Options,
network_latency_millis::Int
)
open_req = Proto.Transaction_Open_Req(;
session_id, _type, options, network_latency_millis
)
return Proto.Transaction_Req(; open_req)
end
function commit_req()
# metadata = tracing_data()
commit_req = Proto.Transaction_Commit_Req()
return Proto.Transaction_Req(; commit_req)
end
end
# ---------------------------------------------------------------------------------
module QueryManagerRequestBuilder
using ..TypeDBClient: Proto
for (f, t) in (
(:define_req, :Define_Req),
(:undefine_req, :Undefine_Req),
(:match_req, :Match_Req),
(:match_aggregate_req, :MatchAggregate_Req),
(:match_group_req, :MatchGroup_Req),
(:match_group_aggregate_req, :MatchGroupAggregate_Req),
(:insert_req, :Insert_Req),
(:delete_req, :Delete_Req),
(:update_req, :Update_Req),
)
func = Symbol("$f")
type = Symbol("QueryManager_$t")
@eval begin
function $func(query::String, options::Proto.Options = Proto.Options())
$f = Proto.$type(; query)
query_manager_req = Proto.QueryManager_Req(; $f, options)
return Proto.Transaction_Req(; query_manager_req)
end
end
end
end
# ---------------------------------------------------------------------------------
module ConceptManagerRequestBuilder
using ..TypeDBClient: Proto, EnumType, bytes
function _treq(; kwargs...)
return Proto.Transaction_Req(
concept_manager_req = Proto.ConceptManager_Req(; kwargs...)
)
end
function put_entity_type_req(label::String)
return _treq(
put_entity_type_req = Proto.ConceptManager_PutEntityType_Req(; label)
)
end
function put_relation_type_req(label::String)
return _treq(
put_relation_type_req = Proto.ConceptManager_PutRelationType_Req(; label)
)
end
function put_attribute_type_req(label::String, value_type::EnumType)
return _treq(
put_attribute_type_req =
Proto.ConceptManager_PutAttributeType_Req(; label, value_type)
)
end
function get_thing_type_req(label::String)
return _treq(
get_thing_type_req = Proto.ConceptManager_GetThingType_Req(; label)
)
end
function get_thing_req(iid::String)
return _treq(
get_thing_req = Proto.ConceptManager_GetThing_Req(; iid = bytes(iid))
)
end
end
# ---------------------------------------------------------------------------------
module LogicManagerRequestBuilder
using ..TypeDBClient: Proto
function _treq(; kwargs...)
return Proto.Transaction_Req(
logic_manager_req = Proto.LogicManager_Req(
; kwargs...
)
)
end
function put_rule_req(label::String, when::String, then::String)
return _treq(
put_rule_req = Proto.LogicManager_PutRule_Req(; label, when, then)
)
end
function get_rule_req(label::String)
return _treq(
get_rule_req = Proto.LogicManager_GetRule_Req(; label)
)
end
function get_rules_req()
return _treq(
get_rules_req = Proto.LogicManager_GetRules_Req()
)
end
end
# ---------------------------------------------------------------------------------
module TypeRequestBuilder
using ..TypeDBClient: Proto, Label
# Ignore linter error here
function _treq(label, scope; kwargs...)
return Proto.Transaction_Req(
type_req = Proto.Type_Req(; label, scope, kwargs...)
)
end
function is_abstract_req(label::Label)
return _treq(label.name, label.scope;
type_is_abstract_req = Proto.Type_IsAbstract_Req()
)
end
function set_label_req(label::Label, new_label::String)
return _treq(label.name, label.scope;
type_set_label_req = Proto.Type_SetLabel_Req(
label = new_label
)
)
end
function get_supertypes_req(label::Label)
return _treq(label.name, label.scope;
type_get_supertypes_req = Proto.Type_GetSupertypes_Req()
)
end
function get_subtypes_req(label::Label)
return _treq(label.name, label.scope;
type_get_subtypes_req = Proto.Type_GetSubtypes_Req()
)
end
function get_supertype_req(label::Label)
return _treq(label.name, label.scope;
type_get_supertype_req = Proto.Type_GetSupertype_Req()
)
end
function delete_req(label::Label)
return _treq(label.name, label.scope;
type_delete_req = Proto.Type_Delete_Req()
)
end
end
# ---------------------------------------------------------------------------------
module RoleTypeRequestBuilder
using ..TypeDBClient: Proto, EnumType, Label
using ..TypeRequestBuilder: _treq
# TODO to be deprecated, see porting note at RoleType.jl
function proto_role_type(label::Label, encoding::EnumType)
@assert label.scope !== nothing
return Proto._Type(
scope = label.scope,
label = label.name,
encoding = encoding,
)
end
function get_relation_types_req(label::Label)
return _treq(label.name, label.scope;
role_type_get_relation_types_req = Proto.RoleType_GetRelationTypes_Req()
)
end
function get_players_req(label::Label)
return _treq(label.name, label.scope;
role_type_get_players_req = Proto.RoleType_GetPlayers_Req()
)
end
end
# ---------------------------------------------------------------------------------
module ThingTypeRequestBuilder
using ..TypeDBClient: Proto, EnumType, Label, Optional
using ..TypeRequestBuilder: _treq
# TODO to be deprecated, see porting note at RoleType.jl
function proto_thing_type(label::Label, encoding::EnumType)
return Proto._Type(
label = label.name,
encoding = encoding
)
end
function set_abstract_req(label::Label)
return _treq(label.name, label.scope;
thing_type_set_abstract_req = Proto.ThingType_SetAbstract_Req()
)
end
function unset_abstract_req(label::Label)
return _treq(label.name, label.scope;
thing_type_unset_abstract_req = Proto.ThingType_UnsetAbstract_Req()
)
end
function set_supertype_req(label::Label)
return _treq(label.name, label.scope;
type_set_supertype_req = Proto.Type_SetSupertype_Req()
)
end
function get_plays_req(label::Label)
return _treq(label.name, label.scope;
thing_type_get_plays_req = Proto.ThingType_GetPlays_Req()
)
end
function set_plays_req(
label::Label,
role_type::Proto.RoleType,
overridden_role_type::Optional{Proto.RoleType} = nothing
)
return _treq(label.name, label.scope;
thing_type_set_plays_req = Proto.ThingType_SetPlays_Req(
role = role_type,
overridden_role = overridden_role_type
)
)
end
function unset_plays_req(
label::Label, role_type::Proto.RoleType
)
return _treq(label.name, label.scope;
thing_type_unset_plays_req = Proto.ThingType_UnsetPlays_Req(
role = role_type,
)
)
end
# Porting note: keys_only is defaulted to false
function get_owns_req(
label::Label,
value_type::Optional{EnumType} = nothing,
keys_only::Bool = false
)
# TODO this code can be simplified later (ProtoBuf PR#77)
thing_type_get_owns_req = value_type === nothing ?
Proto.ThingType_GetOwns_Req(; keys_only) :
Proto.ThingType_GetOwns_Req(; keys_only, value_type)
return _treq(label.name, label.scope; thing_type_get_owns_req)
end
# Porting note: the order of `is_key` is moved upfront
function set_owns_req(
label::Label,
is_key::Bool,
attribute_type::Proto.AttributeType,
overridden_type::Optional{Proto.AttributeType} = nothing
)
# TODO this code can be simplified later (ProtoBuf PR#77)
thing_type_set_owns_req = overridden_type === nothing ?
Proto.ThingType_SetOwns_Req(; is_key, attribute_type) :
Proto.ThingType_SetOwns_Req(; is_key, attribute_type, overridden_type)
return _treq(label.name, label.scope; thing_type_set_owns_req)
# return _treq(label.name, label.scope;
# thing_type_set_owns_req = Proto.ThingType_SetOwns_Req(;
# is_key, attribute_type, overridden_type
# )
# )
end
function unset_owns_req(label::Label, attribute_type::Proto.AttributeType)
return _treq(label.name, label.scope;
thing_type_unset_owns_req = Proto.ThingType_UnsetOwns_Req(; attribute_type)
)
end
function get_instances_req(label::Label)
return _treq(label.name, label.scope;
thing_type_get_instances_req = Proto.ThingType_GetInstances_Req()
)
end
end
# ---------------------------------------------------------------------------------
module EntityTypeRequestBuilder
using ..TypeDBClient: Proto, Label
using ..TypeRequestBuilder: _treq
function create_req(label::Label)
return _treq(label.name, label.scope;
entity_type_create_req = Proto.EntityType_Create_Req()
)
end
end
# ---------------------------------------------------------------------------------
module RelationTypeRequestBuilder
using ..TypeDBClient: Proto, Label, Optional
using ..TypeRequestBuilder: _treq
function create_req(label::Label)
return _treq(label.name, label.scope;
relation_type_create_req = Proto.RelationType_Create_Req()
)
end
function get_relates_req(label::Label, role_label::Optional{String})
return _treq(label.name, label.scope;
relation_type_get_relates_req = Proto.RelationType_GetRelates_Req(;
label = role_label
)
)
end
function set_relates_req(
label::Label, role_label::String, overridden_label::Optional{String}
)
return _treq(label.name, label.scope;
relation_type_set_relates_req = Proto.RelationType_SetRelates_Req(;
label = role_label,
overridden_label
)
)
end
function unset_relates_req(label::Label, role_label::Optional{String})
return _treq(label.name, label.scope;
relation_type_unset_relates_req = Proto.RelationType_UnsetRelates_Req(;
label = role_label
)
)
end
end
# ---------------------------------------------------------------------------------
module AttributeTypeRequestBuilder
using ..TypeDBClient: Proto, Label
using ..TypeRequestBuilder: _treq
function get_owners_req(label::Label, only_key::Bool)
return _treq(label.name, label.scope;
attribute_type_get_owners_req = Proto.AttributeType_GetOwners_Req(; only_key)
)
end
function put_req(label::Label, value::Proto.Attribute_Value)
return _treq(label.name, label.scope;
attribute_type_put_req = Proto.AttributeType_Put_Req(; value)
)
end
function get_req(label::Label, value::Proto.Attribute_Value)
return _treq(label.name, label.scope;
attribute_type_get_req = Proto.AttributeType_Get_Req(; value)
)
end
function get_regex_req(label::Label)
return _treq(label.name, label.scope;
attribute_type_get_regex_req = Proto.AttributeType_GetRegex_Req()
)
end
function set_regex_req(label::Label, regex::AbstractString)
return _treq(label.name, label.scope;
attribute_type_set_regex_req = Proto.AttributeType_SetRegex_Req(; regex)
)
end
end
# ---------------------------------------------------------------------------------
module ThingRequestBuilder
using ..TypeDBClient: Proto, Label, Bytes, bytes
proto_thing(iid::Bytes) = Proto.Thing(; iid)
proto_thing(iid::String) = proto_thing(bytes(iid))
function _thing_req(iid::String; kwargs...)
return Proto.Transaction_Req(
thing_req = Proto.Thing_Req(
; iid = bytes(iid), kwargs...
)
)
end
function is_inferred_req(iid::String)
return _thing_req(iid;
thing_is_inferred_req = Proto.Thing_IsInferred_Req()
)
end
function get_has_req(iid::String, attribute_types::AbstractVector{Proto.Type})
return _thing_req(iid;
thing_get_has_req = Proto.Thing_GetHas_Req(; attribute_types)
)
end
function get_has_req(iid::String, only_key::Bool)
return _thing_req(iid;
thing_get_has_req = Proto.Thing_GetHas_Req(; only_key)
)
end
function set_has_req(iid::String, attribute::Proto.Thing)
return _thing_req(iid;
thing_set_has_req = Proto.Thing_SetHas_Req(; attribute)
)
end
function unset_has_req(iid::String, attribute::Proto.Thing)
return _thing_req(iid;
thing_unset_has_req = Proto.Thing_UnsetHas_Req(; attribute)
)
end
function get_playing_req(iid::String)
return _thing_req(iid;
thing_get_playing_req = Proto.Thing_GetPlaying_Req()
)
end
function get_relations_req(iid::String, role_types::AbstractVector{Proto._Type})
return _thing_req(iid;
thing_get_relations_req = Proto.Thing_GetRelations_Req(; role_types)
)
end
function delete_req(iid::String)
return _thing_req(iid;
thing_delete_req = Proto.Thing_Delete_Req()
)
end
end
# ---------------------------------------------------------------------------------
module RelationRequestBuilder
using ..TypeDBClient: Proto
using ..ThingRequestBuilder: _thing_req
function add_player_req(iid::String, role_type::Proto._Type, player::Proto.Thing)
return _thing_req(iid;
relation_add_player_req = Proto.Relation_AddPlayer_Req(;
role_type,
player
)
)
end
function remove_player_req(iid::String, role_type::Proto._Type, player::Proto.Thing)
return _thing_req(iid;
relation_remove_player_req = Proto.Relation_RemovePlayer_Req(;
role_type,
player
)
)
end
function get_players_req(iid::String, role_types::AbstractVector{Proto._Type})
return _thing_req(iid;
relation_get_players_req = Proto.Relation_GetPlayers_Req(; role_types)
)
end
function get_players_by_role_type_req(iid::String)
return _thing_req(iid;
relation_get_players_by_role_type_req = Proto.Relation_GetPlayersByRoleType_Req()
)
end
function get_relating_req(iid::String)
return _thing_req(iid;
relation_get_players_req = Proto.Relation_GetRelating_Req()
)
end
end
# ---------------------------------------------------------------------------------
module AttributeRequestBuilder
using ..TypeDBClient: Proto, Optional
using ..ThingRequestBuilder: _thing_req
using TimeZones: ZonedDateTime
function get_owners_req(iid::String, owner_type::Optional{Proto._Type})
return _thing_req(iid;
relation_get_owners_req = Proto.Relation_GetOwners_Req(),
thing_type = owner_type
)
end
proto_boolean_attribute_value(value::Bool) = Proto.Attribute_Value(; boolean = value)
proto_long_attribute_value(value::Int64) = Proto.Attribute_Value(; long = value)
proto_double_attribute_value(value::Float64) = Proto.Attribute_Value(; double = value)
proto_string_attribute_value(value::String) = Proto.Attribute_Value(; string = value)
function proto_date_time_attribute_value(value::ZonedDateTime)
epoch_millis = value.utc_datetime.instant
Proto.Attribute_Value(; date_time = epoch_millis)
end
end
# ---------------------------------------------------------------------------------
module RuleRequestBuilder
using ..TypeDBClient: Proto
function set_label_req(current_label::String, new_label::String)
return Proto.Transaction_Req(
rule_req = Proto.Rule_Req(
label = current_label,
rule_set_label_req = Proto.Rule_SetLabel_Req(
label = new_label
)
)
)
end
function delete_req(label::String)
return Proto.Transaction_Req(
rule_req = Proto.Rule_Req(
rule_delete_req = Proto.Rule_Delete_Req()
)
)
end
end
| [
27,
7856,
261,
480,
29,
774,
9124,
65,
12,
21707,
14,
6030,
11012,
11792,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
770,
2393,
318,
257,
636,
286,
5994,
11012,
11792,
13,
220,
13789,
318,
17168,
25,
3740,
1378,
12567... | 2.587025 | 6,705 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.