query
stringlengths 8
6.75k
| document
stringlengths 9
1.89M
| negatives
listlengths 19
19
| metadata
dict |
---|---|---|---|
GetVestedOnly implements the exported.ClawbackVestingAccountI interface. It returns the coins vested as of blockTime according to the vesting schedule. Like GetVestedCoins, but considers only the vesting (in the clawback sense) component.
|
// GetVestedOnly implements the exported.ClawbackVestingAccountI interface.
// It reads the vesting (clawback-sense) schedule as of blockTime and returns
// the coins vested by that instant.
func (va ClawbackVestingAccount) GetVestedOnly(blockTime time.Time) sdk.Coins {
	// Evaluate the vesting schedule at the block's Unix timestamp.
	atTime := blockTime.Unix()
	return ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, atTime)
}
|
[
"func (va ClawbackVestingAccount) GetVestingCoins(blockTime time.Time) sdk.Coins {\n\treturn va.OriginalVesting.Sub(va.GetVestedCoins(blockTime))\n}",
"func (dva DelayedVestingAccount) GetVestedCoins(blockTime time.Time) sdk.Coins {\n\tif blockTime.Unix() >= dva.EndTime {\n\t\treturn dva.OriginalVesting\n\t}\n\n\treturn nil\n}",
"func (pva PeriodicVestingAccount) GetVestedCoins(blockTime time.Time) sdk.Coins {\n\tcoins := ReadSchedule(pva.StartTime, pva.EndTime, pva.VestingPeriods, pva.OriginalVesting, blockTime.Unix())\n\tif coins.IsZero() {\n\t\treturn nil\n\t}\n\treturn coins\n}",
"func (dva DelayedVestingAccount) GetVestingCoins(blockTime time.Time) sdk.Coins {\n\treturn dva.OriginalVesting.Sub(dva.GetVestedCoins(blockTime))\n}",
"func (plva PermanentLockedAccount) GetVestedCoins(_ time.Time) sdk.Coins {\n\treturn nil\n}",
"func (vva ValidatorVestingAccount) GetVestingCoins(blockTime time.Time) sdk.Coins {\n\treturn vva.OriginalVesting.Sub(vva.GetVestedCoins(blockTime))\n}",
"func (plva PermanentLockedAccount) GetVestingCoins(_ time.Time) sdk.Coins {\n\treturn plva.OriginalVesting\n}",
"func (cva ContinuousVestingAccount) GetVestedCoins(blockTime time.Time) sdk.Coins {\n\tvar vestedCoins sdk.Coins\n\n\t// We must handle the case where the start time for a vesting account has\n\t// been set into the future or when the start of the chain is not exactly\n\t// known.\n\tif blockTime.Unix() <= cva.StartTime {\n\t\treturn vestedCoins\n\t} else if blockTime.Unix() >= cva.EndTime {\n\t\treturn cva.OriginalVesting\n\t}\n\n\t// calculate the vesting scalar\n\tx := blockTime.Unix() - cva.StartTime\n\ty := cva.EndTime - cva.StartTime\n\ts := sdk.NewDec(x).Quo(sdk.NewDec(y))\n\n\tfor _, ovc := range cva.OriginalVesting {\n\t\tvestedAmt := ovc.Amount.ToDec().Mul(s).RoundInt()\n\t\tvestedCoins = append(vestedCoins, sdk.NewCoin(ovc.Denom, vestedAmt))\n\t}\n\n\treturn vestedCoins\n}",
"func (va ClawbackVestingAccount) GetUnlockedOnly(blockTime time.Time) sdk.Coins {\n\treturn ReadSchedule(va.StartTime, va.EndTime, va.LockupPeriods, va.OriginalVesting, blockTime.Unix())\n}",
"func (cva ContinuousVestingAccount) GetVestingCoins(blockTime time.Time) sdk.Coins {\n\treturn cva.OriginalVesting.Sub(cva.GetVestedCoins(blockTime))\n}",
"func (va ClawbackVestingAccount) GetVestingPeriods() Periods {\n\treturn va.VestingPeriods\n}",
"func (pva PeriodicVestingAccount) GetVestingPeriods() Periods {\n\treturn pva.VestingPeriods\n}",
"func (bva BaseVestingAccount) LockedCoinsFromVesting(vestingCoins sdk.Coins) sdk.Coins {\n\tlockedCoins := vestingCoins.Sub(vestingCoins.Min(bva.DelegatedVesting))\n\tif lockedCoins == nil {\n\t\treturn sdk.Coins{}\n\t}\n\treturn lockedCoins\n}",
"func (sm *StateManager) GetFilVested(ctx context.Context, height abi.ChainEpoch, st *state.StateTree) (abi.TokenAmount, error) {\n\tvf := big.Zero()\n\tif height <= build.UpgradeIgnitionHeight {\n\t\tfor _, v := range sm.preIgnitionGenInfos.genesisMsigs {\n\t\t\tau := big.Sub(v.InitialBalance, v.AmountLocked(height))\n\t\t\tvf = big.Add(vf, au)\n\t\t}\n\t} else {\n\t\tfor _, v := range sm.postIgnitionGenInfos.genesisMsigs {\n\t\t\t// In the pre-ignition logic, we simply called AmountLocked(height), assuming startEpoch was 0.\n\t\t\t// The start epoch changed in the Ignition upgrade.\n\t\t\tau := big.Sub(v.InitialBalance, v.AmountLocked(height-v.StartEpoch))\n\t\t\tvf = big.Add(vf, au)\n\t\t}\n\t}\n\n\t// there should not be any such accounts in testnet (and also none in mainnet?)\n\t// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch\n\tfor _, v := range sm.preIgnitionGenInfos.genesisActors {\n\t\tact, err := st.GetActor(v.addr)\n\t\tif err != nil {\n\t\t\treturn big.Zero(), xerrors.Errorf(\"failed to get actor: %w\", err)\n\t\t}\n\n\t\tdiff := big.Sub(v.initBal, act.Balance)\n\t\tif diff.GreaterThan(big.Zero()) {\n\t\t\tvf = big.Add(vf, diff)\n\t\t}\n\t}\n\n\t// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch\n\tvf = big.Add(vf, sm.preIgnitionGenInfos.genesisPledge)\n\t// continue to use preIgnitionGenInfos, nothing changed at the Ignition epoch\n\tvf = big.Add(vf, sm.preIgnitionGenInfos.genesisMarketFunds)\n\n\treturn vf, nil\n}",
"func (bva BaseVestingAccount) GetOriginalVesting() sdk.Coins {\n\treturn bva.OriginalVesting\n}",
"func (_TokenVesting *TokenVestingCallerSession) VestedAmount(_token common.Address) (*big.Int, error) {\n\treturn _TokenVesting.Contract.VestedAmount(&_TokenVesting.CallOpts, _token)\n}",
"func (o *AllocationList) GetInvested() float64 {\n\tif o == nil {\n\t\tvar ret float64\n\t\treturn ret\n\t}\n\n\treturn o.Invested\n}",
"func (bva BaseVestingAccount) GetDelegatedVesting() sdk.Coins {\n\treturn bva.DelegatedVesting\n}",
"func (pva PeriodicVestingAccount) LockedCoins(ctx sdk.Context) sdk.Coins {\n\treturn pva.BaseVestingAccount.LockedCoinsFromVesting(pva.GetVestingCoins(ctx.BlockTime()))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
computeClawback removes all future vesting events from the account, returns the total sum of these events. When removing the future vesting events, the lockup schedule will also have to be capped to keep the total sums the same. (But future unlocking events might be preserved if they unlock currently vested coins.) If the amount returned is zero, then the returned account should be unchanged. Does not adjust DelegatedVesting
|
// computeClawback removes all vesting events at or after clawbackTime from the
// account's vesting schedule and returns the total amount of those removed
// (unvested) events. The lockup schedule is capped (via ConjunctPeriods with a
// zero-length limiting period) so its total matches the new vested total,
// while unlocking events for already-vested coins may be preserved.
// OriginalVesting, EndTime, LockupPeriods, and VestingPeriods are updated in
// place; DelegatedVesting/DelegatedFree are deliberately NOT adjusted here
// (see updateDelegation). If the returned amount is zero the account state is
// effectively unchanged.
func (va *ClawbackVestingAccount) computeClawback(clawbackTime int64) sdk.Coins {
	// Compute the truncated vesting schedule and amounts.
	// Work with the schedule as the primary data and recompute derived fields, e.g. OriginalVesting.
	vestTime := va.StartTime
	totalVested := sdk.NewCoins()
	totalUnvested := sdk.NewCoins()
	// unvestedIdx ends up as the index of the first period that is clawed back.
	unvestedIdx := 0
	for i, period := range va.VestingPeriods {
		vestTime += period.Length
		// tie in time goes to clawback
		if vestTime < clawbackTime {
			totalVested = totalVested.Add(period.Amount...)
			unvestedIdx = i + 1
		} else {
			totalUnvested = totalUnvested.Add(period.Amount...)
		}
	}
	// vestTime is now the sum of all period lengths, i.e. the original end of vesting.
	lastVestTime := vestTime
	newVestingPeriods := va.VestingPeriods[:unvestedIdx]
	// To cap the unlocking schedule to the new total vested, conjunct with a limiting schedule
	capPeriods := []Period{
		{
			// Zero length: the cap applies from StartTime onward.
			Length: 0,
			Amount: totalVested,
		},
	}
	_, lastLockTime, newLockupPeriods := ConjunctPeriods(va.StartTime, va.StartTime, va.LockupPeriods, capPeriods)
	// Now construct the new account state
	va.OriginalVesting = totalVested
	// EndTime must cover both the (truncated) lockup and the original vesting horizon.
	va.EndTime = max64(lastVestTime, lastLockTime)
	va.LockupPeriods = newLockupPeriods
	va.VestingPeriods = newVestingPeriods
	// DelegatedVesting and DelegatedFree will be adjusted elsewhere
	return totalUnvested
}
|
[
"func (va *ClawbackVestingAccount) clawback(ctx sdk.Context, dest sdk.AccAddress, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) error {\n\t// Compute the clawback based on the account state only, and update account\n\ttoClawBack := va.computeClawback(ctx.BlockTime().Unix())\n\tif toClawBack.IsZero() {\n\t\treturn nil\n\t}\n\taddr := va.GetAddress()\n\tbondDenom := sk.BondDenom(ctx)\n\n\t// Compute the clawback based on bank balance and delegation, and update account\n\tencumbered := va.GetVestingCoins(ctx.BlockTime())\n\tbondedAmt := sk.GetDelegatorBonded(ctx, addr)\n\tunbondingAmt := sk.GetDelegatorUnbonding(ctx, addr)\n\tbonded := sdk.NewCoins(sdk.NewCoin(bondDenom, bondedAmt))\n\tunbonding := sdk.NewCoins(sdk.NewCoin(bondDenom, unbondingAmt))\n\tunbonded := bk.GetAllBalances(ctx, addr)\n\ttoClawBack = va.updateDelegation(encumbered, toClawBack, bonded, unbonding, unbonded)\n\n\t// Write now now so that the bank module sees unvested tokens are unlocked.\n\t// Note that all store writes are aborted if there is a panic, so there is\n\t// no danger in writing incomplete results.\n\tak.SetAccount(ctx, va)\n\n\t// Now that future vesting events (and associated lockup) are removed,\n\t// the balance of the account is unlocked and can be freely transferred.\n\tspendable := bk.SpendableCoins(ctx, addr)\n\ttoXfer := coinsMin(toClawBack, spendable)\n\terr := bk.SendCoins(ctx, addr, dest, toXfer)\n\tif err != nil {\n\t\treturn err // shouldn't happen, given spendable check\n\t}\n\ttoClawBack = toClawBack.Sub(toXfer)\n\n\t// We need to traverse the staking data structures to update the\n\t// vesting account bookkeeping, and to recover more funds if necessary.\n\t// Staking is the only way unvested tokens should be missing from the bank balance.\n\n\t// If we need more, transfer UnbondingDelegations.\n\twant := toClawBack.AmountOf(bondDenom)\n\tunbondings := sk.GetUnbondingDelegations(ctx, addr, math.MaxUint16)\n\tfor _, unbonding := range unbondings {\n\t\tvalAddr, err 
:= sdk.ValAddressFromBech32(unbonding.ValidatorAddress)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttransferred := sk.TransferUnbonding(ctx, addr, dest, valAddr, want)\n\t\twant = want.Sub(transferred)\n\t\tif !want.IsPositive() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// If we need more, transfer Delegations.\n\tif want.IsPositive() {\n\t\tdelegations := sk.GetDelegatorDelegations(ctx, addr, math.MaxUint16)\n\t\tfor _, delegation := range delegations {\n\t\t\tvalidatorAddr, err := sdk.ValAddressFromBech32(delegation.ValidatorAddress)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) // shouldn't happen\n\t\t\t}\n\t\t\tvalidator, found := sk.GetValidator(ctx, validatorAddr)\n\t\t\tif !found {\n\t\t\t\t// validator has been removed\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twantShares, err := validator.SharesFromTokensTruncated(want)\n\t\t\tif err != nil {\n\t\t\t\t// validator has no tokens\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttransferredShares := sk.TransferDelegation(ctx, addr, dest, delegation.GetValidatorAddr(), wantShares)\n\t\t\t// to be conservative in what we're clawing back, round transferred shares up\n\t\t\ttransferred := validator.TokensFromSharesRoundUp(transferredShares).RoundInt()\n\t\t\twant = want.Sub(transferred)\n\t\t\tif !want.IsPositive() {\n\t\t\t\t// Could be slightly negative, due to rounding?\n\t\t\t\t// Don't think so, due to the precautions above.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we've transferred everything and still haven't transferred the desired clawback amount,\n\t// then the account must have most some unvested tokens from slashing.\n\treturn nil\n}",
"func NewClawbackVestingAccount(baseAcc *authtypes.BaseAccount, funder sdk.AccAddress, originalVesting sdk.Coins, startTime int64, lockupPeriods, vestingPeriods Periods) *ClawbackVestingAccount {\n\t// copy and align schedules to avoid mutating inputs\n\tlp := make(Periods, len(lockupPeriods))\n\tcopy(lp, lockupPeriods)\n\tvp := make(Periods, len(vestingPeriods))\n\tcopy(vp, vestingPeriods)\n\t_, endTime := AlignSchedules(startTime, startTime, lp, vp)\n\tbaseVestingAcc := &BaseVestingAccount{\n\t\tBaseAccount: baseAcc,\n\t\tOriginalVesting: originalVesting,\n\t\tEndTime: endTime,\n\t}\n\n\treturn &ClawbackVestingAccount{\n\t\tBaseVestingAccount: baseVestingAcc,\n\t\tFunderAddress: funder.String(),\n\t\tStartTime: startTime,\n\t\tLockupPeriods: lp,\n\t\tVestingPeriods: vp,\n\t}\n}",
"func (va *ClawbackVestingAccount) updateDelegation(encumbered, toClawBack, bonded, unbonding, unbonded sdk.Coins) sdk.Coins {\n\tdelegated := bonded.Add(unbonding...)\n\toldDelegated := va.DelegatedVesting.Add(va.DelegatedFree...)\n\tslashed := oldDelegated.Sub(coinsMin(delegated, oldDelegated))\n\ttotal := delegated.Add(unbonded...)\n\ttoClawBack = coinsMin(toClawBack, total) // might have been slashed\n\tnewDelegated := coinsMin(delegated, total.Sub(toClawBack)).Add(slashed...)\n\tva.DelegatedVesting = coinsMin(encumbered, newDelegated)\n\tva.DelegatedFree = newDelegated.Sub(va.DelegatedVesting)\n\treturn toClawBack\n}",
"func CalculatePercentageSharesForCycle(delegatedContracts []DelegatedContract, cycle int, rate float64, spillage bool, delegateAddr string) ([]DelegatedContract, error){\n var stakingBalance float64\n //var balance float64\n var err error\n\n spillAlert := false\n\n stakingBalance, err = GetDelegateStakingBalance(delegateAddr, cycle)\n if (err != nil){\n return delegatedContracts, errors.New(\"func CalculateRollSpillage(delegatedContracts []DelegatedContract, delegateAddr string) failed: \" + err.Error())\n }\n\n mod := math.Mod(stakingBalance, 10000)\n sum := stakingBalance - mod\n balanceCheck := stakingBalance - mod\n\n for index, delegation := range delegatedContracts{\n counter := 0\n for i, _ := range delegation.Contracts {\n if (delegatedContracts[index].Contracts[i].Cycle == cycle){\n break\n }\n counter = counter + 1\n }\n balanceCheck = balanceCheck - delegatedContracts[index].Contracts[counter].Amount\n //fmt.Println(stakingBalance)\n if (spillAlert){\n delegatedContracts[index].Contracts[counter].SharePercentage = 0\n delegatedContracts[index].Contracts[counter].RollInclusion = 0\n } else if (balanceCheck < 0 && spillage){\n spillAlert = true\n delegatedContracts[index].Contracts[counter].SharePercentage = (delegatedContracts[index].Contracts[counter].Amount + stakingBalance) / sum\n delegatedContracts[index].Contracts[counter].RollInclusion = delegatedContracts[index].Contracts[counter].Amount + stakingBalance\n } else{\n delegatedContracts[index].Contracts[counter].SharePercentage = delegatedContracts[index].Contracts[counter].Amount / stakingBalance\n delegatedContracts[index].Contracts[counter].RollInclusion = delegatedContracts[index].Contracts[counter].Amount\n }\n delegatedContracts[index].Contracts[counter] = CalculatePayoutForContract(delegatedContracts[index].Contracts[counter], rate, delegatedContracts[index].Delegate)\n delegatedContracts[index].Fee = delegatedContracts[index].Fee + delegatedContracts[index].Contracts[counter].Fee\n }\n\n 
return delegatedContracts, nil\n}",
"func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}",
"func (va ClawbackVestingAccount) GetVestingCoins(blockTime time.Time) sdk.Coins {\n\treturn va.OriginalVesting.Sub(va.GetVestedCoins(blockTime))\n}",
"func (_Cakevault *CakevaultCaller) CalculateTotalPendingCakeRewards(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Cakevault.contract.Call(opts, &out, \"calculateTotalPendingCakeRewards\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func (_Cakevault *CakevaultSession) CalculateTotalPendingCakeRewards() (*big.Int, error) {\n\treturn _Cakevault.Contract.CalculateTotalPendingCakeRewards(&_Cakevault.CallOpts)\n}",
"func (sch *Scheduler) CalClusterBalance(podUsed *[PHYNUM][DIMENSION]float64, podReq []PodRequest) {\n\t//cal the pod sum and used rate\n\tpodLen := len(podReq)\n\tvar podNum [PHYNUM]int\n\tvar podSum int\n\tfor i := 0; i < podLen; i++ {\n\t\tif podReq[i].nodeName != -1 {\n\t\t\tpodSum++\n\t\t\tpodNum[podReq[i].nodeName]++\n\t\t}\n\t}\n\n\tvar podIdle [PHYNUM]float64\n\tvar resIdle [PHYNUM][DIMENSION]float64\n\tvar podVal float64\n\tvar resVal [DIMENSION]float64 // cal the sum and mean value\n\n\tfor i := 0; i < PHYNUM; i++ {\n\t\tpodIdle[i] = 1.0 - (float64)(podNum[i])/(float64)(podSum)\n\t\tpodVal = podVal + podIdle[i]\n\t\tfor j := 0; j < DIMENSION; j++ {\n\t\t\tresIdle[i][j] = (sch.reTotal[j] - podUsed[i][j]) / sch.reTotal[j]\n\t\t\tresVal[j] = resVal[j] + resIdle[i][j]\n\t\t}\n\t}\n\t// cal the balance value\n\tpodMean := podVal / (float64)(podSum)\n\tvar resMean [DIMENSION]float64\n\tfor j := 0; j < DIMENSION; j++ {\n\t\tresMean[j] = resVal[j] / (float64)(PHYNUM)\n\t}\n\tvar baIdle float64\n\tfor i := 0; i < PHYNUM; i++ {\n\t\tfor j := 0; j < DIMENSION; j++ {\n\t\t\tbaIdle = baIdle + math.Pow((resIdle[i][j]-resMean[j]), 2)\n\t\t}\n\t\tbaIdle = baIdle + math.Pow((podIdle[i]-podMean), 2)\n\t}\n\tbaIdle = math.Sqrt(baIdle)\n\tfmt.Printf(\"The balance value is %.3f \\n\", baIdle)\n}",
"func CalculateAllTotalPayout(delegatedContracts []DelegatedContract) []DelegatedContract{\n for index, delegatedContract := range delegatedContracts{\n delegatedContracts[index] = CalculateTotalPayout(delegatedContract)\n }\n\n return delegatedContracts\n}",
"func getBalanceTotal(recordCollection []record) (totalBalance time.Duration) {\n\tfor _, r := range recordCollection {\n\t\t_, balance := getWorkedHours(&r)\n\t\ttotalBalance += balance\n\t}\n\treturn totalBalance\n}",
"func CalculateTotalPayout(delegatedContract DelegatedContract) DelegatedContract{\n for _, contract := range delegatedContract.Contracts{\n delegatedContract.TotalPayout = delegatedContract.TotalPayout + contract.NetPayout\n }\n return delegatedContract\n}",
"func (b *rpcVestingBalance) delegated() (sdk.Coins, sdk.Coins, error) {\n\tdelegatedCoins, err := b.totalDelegated()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tunbondingCoins, err := b.totalUnbondingDelegations()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdelegated := delegatedCoins.AmountOf(stakingDenom)\n\tunbonding := unbondingCoins.AmountOf(stakingDenom)\n\ttotalStaked := delegated.Add(unbonding)\n\tdelegatedFree := b.vacc.GetDelegatedFree().AmountOf(stakingDenom)\n\n\t// total number of staked and unbonding tokens considered to be liquid\n\ttotalFree := sdk.MinInt(totalStaked, delegatedFree)\n\t// any coins that are not considered liquid, are vesting up to a maximum of delegated\n\tstakedVesting := sdk.MinInt(totalStaked.Sub(totalFree), delegated)\n\t// staked free coins are left over\n\tstakedFree := delegated.Sub(stakedVesting)\n\n\tliquidCoins := sdk.NewCoins(newKavaCoin(stakedFree))\n\tvestingCoins := sdk.NewCoins(newKavaCoin(stakedVesting))\n\treturn liquidCoins, vestingCoins, nil\n}",
"func CalculatePayoutForContract(contract Contract, rate float64, delegate bool) Contract{\n ////-------------JUST FOR TESTING -------------////\n totalNodeRewards := 378 //Amount of rewards for my delegation in cycle 11\n ////--------------END TESTING ------------------////\n\n grossRewards := contract.SharePercentage * float64(totalNodeRewards)\n contract.GrossPayout = grossRewards\n fee := rate * grossRewards\n contract.Fee = fee\n var netRewards float64\n if (delegate){\n netRewards = grossRewards\n contract.NetPayout = netRewards\n contract.Fee = 0\n } else {\n netRewards = grossRewards - fee\n contract.NetPayout = contract.NetPayout + netRewards\n }\n\n return contract\n}",
"func (c *Census) DecrNeedCount() {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.needCount -= int64(float64(ScanEvictDuration) / float64(RenewDuration))\n\tc.threshold = int64(float64(c.needCount) * SelfProtectedThreshold)\n}",
"func applyCliff(events []event, cliff time.Time) ([]event, error) {\n\tnewEvents := []event{}\n\tcoins := sdk.NewCoins()\n\tfor _, e := range events {\n\t\tif !e.Time.After(cliff) {\n\t\t\tcoins = coins.Add(e.Coins...)\n\t\t\tcontinue\n\t\t}\n\t\tif !coins.IsZero() {\n\t\t\tcliffEvent := event{Time: cliff, Coins: coins}\n\t\t\tnewEvents = append(newEvents, cliffEvent)\n\t\t\tcoins = sdk.NewCoins()\n\t\t}\n\t\tnewEvents = append(newEvents, e)\n\t}\n\tif !coins.IsZero() {\n\t\t// special case if all events are before the cliff\n\t\tcliffEvent := event{Time: cliff, Coins: coins}\n\t\tnewEvents = append(newEvents, cliffEvent)\n\t}\n\t// integrity check\n\toldTotal := sdk.NewCoins()\n\tfor _, e := range events {\n\t\toldTotal = oldTotal.Add(e.Coins...)\n\t}\n\tnewTotal := sdk.NewCoins()\n\tfor _, e := range newEvents {\n\t\tnewTotal = newTotal.Add(e.Coins...)\n\t}\n\tif !oldTotal.IsEqual(newTotal) {\n\t\treturn nil, fmt.Errorf(\"applying vesting cliff changed total from %s to %s\", oldTotal, newTotal)\n\t}\n\treturn newEvents, nil\n}",
"func (s *Store) Balance(ns walletdb.ReadBucket, minConf int32, syncHeight int32) (btcutil.Amount, error) {\n\tbal, err := fetchMinedBalance(ns)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Subtract the balance for each credit that is spent by an unmined\n\t// transaction.\n\tvar op wire.OutPoint\n\tvar block Block\n\terr = ns.NestedReadBucket(bucketUnspent).ForEach(func(k, v []byte) error {\n\t\terr := readCanonicalOutPoint(k, &op)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = readUnspentBlock(v, &block)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Subtract the output's amount if it's locked.\n\t\t_, _, isLocked := isLockedOutput(ns, op, s.clock.Now())\n\t\tif isLocked {\n\t\t\t_, v := existsCredit(ns, &op.Hash, op.Index, &block)\n\t\t\tamt, err := fetchRawCreditAmount(v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbal -= amt\n\n\t\t\t// To prevent decrementing the balance twice if the\n\t\t\t// output has an unconfirmed spend, return now.\n\t\t\treturn nil\n\t\t}\n\n\t\tif existsRawUnminedInput(ns, k) != nil {\n\t\t\t_, v := existsCredit(ns, &op.Hash, op.Index, &block)\n\t\t\tamt, err := fetchRawCreditAmount(v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbal -= amt\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tif _, ok := err.(Error); ok {\n\t\t\treturn 0, err\n\t\t}\n\t\tstr := \"failed iterating unspent outputs\"\n\t\treturn 0, storeError(ErrDatabase, str, err)\n\t}\n\n\t// Decrement the balance for any unspent credit with less than\n\t// minConf confirmations and any (unspent) immature coinbase credit.\n\tcoinbaseMaturity := int32(s.chainParams.CoinbaseMaturity)\n\tstopConf := minConf\n\tif coinbaseMaturity > stopConf {\n\t\tstopConf = coinbaseMaturity\n\t}\n\tlastHeight := syncHeight - stopConf\n\tblockIt := makeReadReverseBlockIterator(ns)\n\tfor blockIt.prev() {\n\t\tblock := &blockIt.elem\n\n\t\tif block.Height < lastHeight {\n\t\t\tbreak\n\t\t}\n\n\t\tfor i := range block.transactions 
{\n\t\t\ttxHash := &block.transactions[i]\n\t\t\trec, err := fetchTxRecord(ns, txHash, &block.Block)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tnumOuts := uint32(len(rec.MsgTx.TxOut))\n\t\t\tfor i := uint32(0); i < numOuts; i++ {\n\t\t\t\t// Avoid double decrementing the credit amount\n\t\t\t\t// if it was already removed for being spent by\n\t\t\t\t// an unmined tx or being locked.\n\t\t\t\top = wire.OutPoint{Hash: *txHash, Index: i}\n\t\t\t\t_, _, isLocked := isLockedOutput(\n\t\t\t\t\tns, op, s.clock.Now(),\n\t\t\t\t)\n\t\t\t\tif isLocked {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\topKey := canonicalOutPoint(txHash, i)\n\t\t\t\tif existsRawUnminedInput(ns, opKey) != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t_, v := existsCredit(ns, txHash, i, &block.Block)\n\t\t\t\tif v == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tamt, spent, err := fetchRawCreditAmountSpent(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\tif spent {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tconfs := syncHeight - block.Height + 1\n\t\t\t\tif confs < minConf || (blockchain.IsCoinBaseTx(&rec.MsgTx) &&\n\t\t\t\t\tconfs < coinbaseMaturity) {\n\t\t\t\t\tbal -= amt\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif blockIt.err != nil {\n\t\treturn 0, blockIt.err\n\t}\n\n\t// If unmined outputs are included, increment the balance for each\n\t// output that is unspent.\n\tif minConf == 0 {\n\t\terr = ns.NestedReadBucket(bucketUnminedCredits).ForEach(func(k, v []byte) error {\n\t\t\tif err := readCanonicalOutPoint(k, &op); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Skip adding the balance for this output if it's\n\t\t\t// locked.\n\t\t\t_, _, isLocked := isLockedOutput(ns, op, s.clock.Now())\n\t\t\tif isLocked {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif existsRawUnminedInput(ns, k) != nil {\n\t\t\t\t// Output is spent by an unmined transaction.\n\t\t\t\t// Skip to next unmined credit.\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tamount, err := 
fetchRawUnminedCreditAmount(v)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tbal += amount\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tif _, ok := err.(Error); ok {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\tstr := \"failed to iterate over unmined credits bucket\"\n\t\t\treturn 0, storeError(ErrDatabase, str, err)\n\t\t}\n\t}\n\n\treturn bal, nil\n}",
"func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}",
"func balance(w *model.Wal, n int64) int64 {\n\t// Invariant 1\n\t// (this loop will run at most once)\n\tfor w.Cur.Resv < n {\n\t\tm := w.Cur.Resv\n\t\tif needFree(w, m) != m {\n\t\t\tutils.Log.Warnln(\"need free\")\n\t\t\treturn 0\n\t\t}\n\n\t\tmoveResv(w.Tail, w.Cur, m)\n\t\tuseNext(w)\n\t}\n\treturn balanceRest(w, w.Cur, n)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
updateDelegation returns an account with its delegation bookkeeping modified for clawback, given the current disposition of the account's bank and staking state. Also returns the modified amount to claw back. Computation steps: first, compute the total amount in bonded and unbonding states, used for BaseAccount bookkeeping; based on the old bookkeeping, determine the amount lost to slashing since origin; clip the amount to claw back to be at most the full funds in the account; first claw back the unbonded funds, then go after what's delegated; to the remaining delegated amount, add what's slashed; the "encumbered" (locked up and/or vesting) amount of this goes in DV; the remainder of the new delegated amount goes in DF.
|
// updateDelegation modifies the account's delegation bookkeeping
// (DelegatedVesting / DelegatedFree) for a clawback, given the current
// disposition of the account's bank and staking state, and returns the
// clawback amount clipped to the funds actually present in the account.
//
// Parameters:
//   encumbered - the amount currently locked up and/or vesting
//   toClawBack - the requested clawback amount
//   bonded     - coins currently bonded with validators
//   unbonding  - coins currently in the unbonding state
//   unbonded   - coins held outside of staking
func (va *ClawbackVestingAccount) updateDelegation(encumbered, toClawBack, bonded, unbonding, unbonded sdk.Coins) sdk.Coins {
	// Total in bonded and unbonding states, used for BaseAccount bookkeeping.
	delegated := bonded.Add(unbonding...)
	// Compare the old bookkeeping against current delegations to find the
	// amount lost to slashing since origin (old minus min avoids a negative Sub).
	oldDelegated := va.DelegatedVesting.Add(va.DelegatedFree...)
	slashed := oldDelegated.Sub(coinsMin(delegated, oldDelegated))
	// Clip the clawback to at most the full funds in the account.
	total := delegated.Add(unbonded...)
	toClawBack = coinsMin(toClawBack, total) // might have been slashed
	// Claw back unbonded funds first, then go after what's delegated:
	// what remains delegated is min(delegated, total - toClawBack).
	// Add the slashed amount back so the bookkeeping continues to track it.
	newDelegated := coinsMin(delegated, total.Sub(toClawBack)).Add(slashed...)
	// The encumbered (locked up and/or vesting) part of the new delegated
	// amount goes in DelegatedVesting; the remainder goes in DelegatedFree.
	va.DelegatedVesting = coinsMin(encumbered, newDelegated)
	va.DelegatedFree = newDelegated.Sub(va.DelegatedVesting)
	return toClawBack
}
|
[
"func (bva *BaseVestingAccount) trackDelegation(vestingCoins, amount sdk.Coins) {\n\tbc := bva.GetCoins()\n\n\tfor _, coin := range amount {\n\t\t// zip/lineup all coins by their denomination to provide O(n) time\n\n\t\tbaseAmt := bc.AmountOf(coin.Denom)\n\t\tvestingAmt := vestingCoins.AmountOf(coin.Denom)\n\t\tdelVestingAmt := bva.DelegatedVesting.AmountOf(coin.Denom)\n\n\t\t// Panic if the delegation amount is zero or if the base coins does not\n\t\t// exceed the desired delegation amount.\n\t\tif coin.Amount.IsZero() || baseAmt.LT(coin.Amount) {\n\t\t\tpanic(\"delegation attempt with zero coins or insufficient funds\")\n\t\t}\n\n\t\t// compute modules and y per the specification, where:\n\t\t// X := min(max(V - DV, 0), D)\n\t\t// Y := D - X\n\t\tx := sdk.MinInt(sdk.MaxInt(vestingAmt.Sub(delVestingAmt), sdk.ZeroInt()), coin.Amount)\n\t\ty := coin.Amount.Sub(x)\n\n\t\tif !x.IsZero() {\n\t\t\txCoin := sdk.NewCoin(coin.Denom, x)\n\t\t\tbva.DelegatedVesting = bva.DelegatedVesting.Add(sdk.Coins{xCoin})\n\t\t}\n\n\t\tif !y.IsZero() {\n\t\t\tyCoin := sdk.NewCoin(coin.Denom, y)\n\t\t\tbva.DelegatedFree = bva.DelegatedFree.Add(sdk.Coins{yCoin})\n\t\t}\n\n\t\tbva.Coins = bva.Coins.Sub(sdk.Coins{coin})\n\t}\n}",
"func (k Keeper) TransferDelegation(ctx sdk.Context, valAddr sdk.ValAddress, fromDelegator, toDelegator sdk.AccAddress, shares sdk.Dec) (sdk.Dec, error) {\n\t// Redelegations link a delegation to it's previous validator so slashes are propagated to the new validator.\n\t// If the delegation is transferred to a new owner, the redelegation object must be updated.\n\t// For expediency all transfers with redelegations are blocked.\n\tif k.stakingKeeper.HasReceivingRedelegation(ctx, fromDelegator, valAddr) {\n\t\treturn sdk.Dec{}, types.ErrRedelegationsNotCompleted\n\t}\n\n\tif shares.IsNil() || shares.LT(sdk.ZeroDec()) {\n\t\treturn sdk.Dec{}, errorsmod.Wrap(types.ErrUntransferableShares, \"nil or negative shares\")\n\t}\n\tif shares.Equal(sdk.ZeroDec()) {\n\t\t// Block 0 transfers to reduce edge cases.\n\t\treturn sdk.Dec{}, errorsmod.Wrap(types.ErrUntransferableShares, \"zero shares\")\n\t}\n\n\tfromDelegation, found := k.stakingKeeper.GetDelegation(ctx, fromDelegator, valAddr)\n\tif !found {\n\t\treturn sdk.Dec{}, types.ErrNoDelegatorForAddress\n\t}\n\tvalidator, found := k.stakingKeeper.GetValidator(ctx, valAddr)\n\tif !found {\n\t\treturn sdk.Dec{}, types.ErrNoValidatorFound\n\t}\n\t// Prevent validators from reducing their self delegation below the min.\n\tisValidatorOperator := fromDelegator.Equals(valAddr)\n\tif isValidatorOperator {\n\t\tif isBelowMinSelfDelegation(validator, fromDelegation.Shares.Sub(shares)) {\n\t\t\treturn sdk.Dec{}, types.ErrSelfDelegationBelowMinimum\n\t\t}\n\t}\n\n\treturnAmount, err := k.fastUndelegate(ctx, valAddr, fromDelegator, shares)\n\tif err != nil {\n\t\treturn sdk.Dec{}, err\n\t}\n\treturnCoins := sdk.NewCoins(sdk.NewCoin(k.stakingKeeper.BondDenom(ctx), returnAmount))\n\n\tif err := k.bankKeeper.SendCoins(ctx, fromDelegator, toDelegator, returnCoins); err != nil {\n\t\treturn sdk.Dec{}, err\n\t}\n\treceivedShares, err := k.delegateFromAccount(ctx, valAddr, toDelegator, returnAmount)\n\tif err != nil {\n\t\treturn sdk.Dec{}, 
err\n\t}\n\n\treturn receivedShares, nil\n}",
"func consolidateDelegations(ctx contract.Context, validator, delegator *types.Address) (*Delegation, []*Delegation, int, error) {\n\t// cycle through all delegations and delete those which are BONDED and\n\t// unlocked while accumulating their amounts\n\tdelegations, err := returnMatchingDelegations(ctx, validator, delegator)\n\tif err != nil {\n\t\treturn nil, nil, -1, err\n\t}\n\n\tunconsolidatedDelegationsCount := 0\n\ttotalDelegationAmount := common.BigZero()\n\tvar consolidatedDelegations []*Delegation\n\tfor _, delegation := range delegations {\n\t\tif delegation.LockTime > uint64(ctx.Now().Unix()) || delegation.State != BONDED {\n\t\t\tunconsolidatedDelegationsCount++\n\t\t\tcontinue\n\t\t}\n\n\t\ttotalDelegationAmount.Add(totalDelegationAmount, &delegation.Amount.Value)\n\t\tconsolidatedDelegations = append(consolidatedDelegations, delegation)\n\n\t\tif err = DeleteDelegation(ctx, delegation); err != nil {\n\t\t\treturn nil, nil, -1, err\n\t\t}\n\t}\n\n\tindex, err := GetNextDelegationIndex(ctx, *validator, *delegator)\n\tif err != nil {\n\t\treturn nil, nil, -1, err\n\t}\n\n\t// create new conolidated delegation\n\tdelegation := &Delegation{\n\t\tValidator: validator,\n\t\tDelegator: delegator,\n\t\tAmount: &types.BigUInt{Value: *totalDelegationAmount},\n\t\tUpdateAmount: loom.BigZeroPB(),\n\t\tLocktimeTier: 0,\n\t\tLockTime: 0,\n\t\tState: BONDED,\n\t\tIndex: index,\n\t}\n\tif err := SetDelegation(ctx, delegation); err != nil {\n\t\treturn nil, nil, -1, err\n\t}\n\treturn delegation, consolidatedDelegations, unconsolidatedDelegationsCount, nil\n}",
"func (bva *BaseVestingAccount) TrackDelegation(balance, vestingCoins, amount sdk.Coins) {\n\tfor _, coin := range amount {\n\t\tbaseAmt := balance.AmountOf(coin.Denom)\n\t\tvestingAmt := vestingCoins.AmountOf(coin.Denom)\n\t\tdelVestingAmt := bva.DelegatedVesting.AmountOf(coin.Denom)\n\n\t\t// Panic if the delegation amount is zero or if the base coins does not\n\t\t// exceed the desired delegation amount.\n\t\tif coin.Amount.IsZero() || baseAmt.LT(coin.Amount) {\n\t\t\tpanic(\"delegation attempt with zero coins or insufficient funds\")\n\t\t}\n\n\t\t// compute x and y per the specification, where:\n\t\t// X := min(max(V - DV, 0), D)\n\t\t// Y := D - X\n\t\tx := sdk.MinInt(sdk.MaxInt(vestingAmt.Sub(delVestingAmt), sdk.ZeroInt()), coin.Amount)\n\t\ty := coin.Amount.Sub(x)\n\n\t\tif !x.IsZero() {\n\t\t\txCoin := sdk.NewCoin(coin.Denom, x)\n\t\t\tbva.DelegatedVesting = bva.DelegatedVesting.Add(xCoin)\n\t\t}\n\n\t\tif !y.IsZero() {\n\t\t\tyCoin := sdk.NewCoin(coin.Denom, y)\n\t\t\tbva.DelegatedFree = bva.DelegatedFree.Add(yCoin)\n\t\t}\n\t}\n}",
"func (_TokensNetwork *TokensNetworkTransactor) UpdateBalanceProofDelegate(opts *bind.TransactOpts, token common.Address, partner common.Address, participant common.Address, transferred_amount *big.Int, locksroot [32]byte, nonce uint64, additional_hash [32]byte, partner_signature []byte, participant_signature []byte) (*types.Transaction, error) {\n\treturn _TokensNetwork.contract.Transact(opts, \"updateBalanceProofDelegate\", token, partner, participant, transferred_amount, locksroot, nonce, additional_hash, partner_signature, participant_signature)\n}",
"func (_TokensNetwork *TokensNetworkTransactorSession) UpdateBalanceProofDelegate(token common.Address, partner common.Address, participant common.Address, transferred_amount *big.Int, locksroot [32]byte, nonce uint64, additional_hash [32]byte, partner_signature []byte, participant_signature []byte) (*types.Transaction, error) {\n\treturn _TokensNetwork.Contract.UpdateBalanceProofDelegate(&_TokensNetwork.TransactOpts, token, partner, participant, transferred_amount, locksroot, nonce, additional_hash, partner_signature, participant_signature)\n}",
"func (dva *DelayedVestingAccount) TrackDelegation(blockTime time.Time, balance, amount sdk.Coins) {\n\tdva.BaseVestingAccount.TrackDelegation(balance, dva.GetVestingCoins(blockTime), amount)\n}",
"func (_DelegationController *DelegationControllerTransactor) GetAndUpdateDelegatedAmount(opts *bind.TransactOpts, holder common.Address) (*types.Transaction, error) {\n\treturn _DelegationController.contract.Transact(opts, \"getAndUpdateDelegatedAmount\", holder)\n}",
"func (k msgServer) CancelUnbondingDelegation(goCtx context.Context, msg *types.MsgCancelUnbondingDelegation) (*types.MsgCancelUnbondingDelegationResponse, error) {\n\tctx := sdk.UnwrapSDKContext(goCtx)\n\n\tvalAddr, err := sdk.ValAddressFromBech32(msg.ValidatorAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdelegatorAddress, err := sdk.AccAddressFromBech32(msg.DelegatorAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbondDenom := k.BondDenom(ctx)\n\tif msg.Amount.Denom != bondDenom {\n\t\treturn nil, sdkerrors.Wrapf(\n\t\t\tsdkerrors.ErrInvalidRequest, \"invalid coin denomination: got %s, expected %s\", msg.Amount.Denom, bondDenom,\n\t\t)\n\t}\n\n\tvalidator, found := k.GetValidator(ctx, valAddr)\n\tif !found {\n\t\treturn nil, types.ErrNoValidatorFound\n\t}\n\n\t// In some situations, the exchange rate becomes invalid, e.g. if\n\t// Validator loses all tokens due to slashing. In this case,\n\t// make all future delegations invalid.\n\tif validator.InvalidExRate() {\n\t\treturn nil, types.ErrDelegatorShareExRateInvalid\n\t}\n\n\tif validator.IsJailed() {\n\t\treturn nil, types.ErrValidatorJailed\n\t}\n\n\tubd, found := k.GetUnbondingDelegation(ctx, delegatorAddress, valAddr)\n\tif !found {\n\t\treturn nil, status.Errorf(\n\t\t\tcodes.NotFound,\n\t\t\t\"unbonding delegation with delegator %s not found for validator %s\",\n\t\t\tmsg.DelegatorAddress, msg.ValidatorAddress,\n\t\t)\n\t}\n\n\t// if this undelegation was from a liquid staking provider (identified if the delegator\n\t// is an ICA account), the global and validator liquid totals should be incremented\n\ttokens := msg.Amount.Amount\n\tshares, err := validator.SharesFromTokens(tokens)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif k.DelegatorIsLiquidStaker(delegatorAddress) {\n\t\tif err := k.SafelyIncreaseTotalLiquidStakedTokens(ctx, tokens, false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := k.SafelyIncreaseValidatorLiquidShares(ctx, &validator, shares); err != nil 
{\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar (\n\t\tunbondEntry types.UnbondingDelegationEntry\n\t\tunbondEntryIndex int64 = -1\n\t)\n\n\tfor i, entry := range ubd.Entries {\n\t\tif entry.CreationHeight == msg.CreationHeight {\n\t\t\tunbondEntry = entry\n\t\t\tunbondEntryIndex = int64(i)\n\t\t\tbreak\n\t\t}\n\t}\n\tif unbondEntryIndex == -1 {\n\t\treturn nil, sdkerrors.ErrNotFound.Wrapf(\"unbonding delegation entry is not found at block height %d\", msg.CreationHeight)\n\t}\n\n\tif unbondEntry.Balance.LT(msg.Amount.Amount) {\n\t\treturn nil, sdkerrors.ErrInvalidRequest.Wrap(\"amount is greater than the unbonding delegation entry balance\")\n\t}\n\n\tif unbondEntry.CompletionTime.Before(ctx.BlockTime()) {\n\t\treturn nil, sdkerrors.ErrInvalidRequest.Wrap(\"unbonding delegation is already processed\")\n\t}\n\n\t// delegate back the unbonding delegation amount to the validator\n\t_, err = k.Keeper.Delegate(ctx, delegatorAddress, msg.Amount.Amount, types.Unbonding, validator, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tamount := unbondEntry.Balance.Sub(msg.Amount.Amount)\n\tif amount.IsZero() {\n\t\tubd.RemoveEntry(unbondEntryIndex)\n\t} else {\n\t\t// update the unbondingDelegationEntryBalance and InitialBalance for ubd entry\n\t\tunbondEntry.Balance = amount\n\t\tunbondEntry.InitialBalance = unbondEntry.InitialBalance.Sub(msg.Amount.Amount)\n\t\tubd.Entries[unbondEntryIndex] = unbondEntry\n\t}\n\n\t// set the unbonding delegation or remove it if there are no more entries\n\tif len(ubd.Entries) == 0 {\n\t\tk.RemoveUnbondingDelegation(ctx, ubd)\n\t} else {\n\t\tk.SetUnbondingDelegation(ctx, ubd)\n\t}\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeCancelUnbondingDelegation,\n\t\t\tsdk.NewAttribute(sdk.AttributeKeyAmount, msg.Amount.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyValidator, msg.ValidatorAddress),\n\t\t\tsdk.NewAttribute(types.AttributeKeyDelegator, 
msg.DelegatorAddress),\n\t\t\tsdk.NewAttribute(types.AttributeKeyCreationHeight, strconv.FormatInt(msg.CreationHeight, 10)),\n\t\t),\n\t)\n\n\treturn &types.MsgCancelUnbondingDelegationResponse{}, nil\n}",
"func (_DelegationController *DelegationControllerTransactorSession) GetAndUpdateDelegatedAmount(holder common.Address) (*types.Transaction, error) {\n\treturn _DelegationController.Contract.GetAndUpdateDelegatedAmount(&_DelegationController.TransactOpts, holder)\n}",
"func (dva *DelayedVestingAccount) TrackDelegation(blockTime time.Time, amount sdk.Coins) {\n\tdva.trackDelegation(dva.GetVestingCoins(blockTime), amount)\n}",
"func (_Genesis *GenesisTransactor) WithdrawDelegatedStake(opts *bind.TransactOpts, amount *big.Int) (*types.Transaction, error) {\n\treturn _Genesis.contract.Transact(opts, \"withdrawDelegatedStake\", amount)\n}",
"func (acc *Account) delegationsTotal() (amount *big.Int, inWithdraw *big.Int, rewards *big.Int, err error) {\n\t// pull all the delegations of the account\n\tlist, err := repository.R().DelegationsByAddressAll(&acc.Address)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\n\t// prep containers for calculation and loop all delegations found\n\tamount = new(big.Int)\n\trewards = new(big.Int)\n\tinWithdraw = new(big.Int)\n\tfor _, dlg := range list {\n\t\t// any active delegated amount?\n\t\tif 0 < dlg.AmountDelegated.ToInt().Uint64() {\n\t\t\tamount = new(big.Int).Add(amount, dlg.AmountDelegated.ToInt())\n\t\t}\n\n\t\t// get pending rewards for this delegation (can be stashed)\n\t\trw, err := repository.R().PendingRewards(&acc.Address, dlg.ToStakerId)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\t// any rewards?\n\t\tif 0 < rw.Amount.ToInt().Uint64() {\n\t\t\trewards = new(big.Int).Add(rewards, rw.Amount.ToInt())\n\t\t}\n\n\t\t// get pending withdrawals\n\t\twd, err := repository.R().WithdrawRequestsPendingTotal(&acc.Address, dlg.ToStakerId)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\n\t\t// add pending withdrawals value\n\t\tif 0 < wd.Uint64() {\n\t\t\tinWithdraw = new(big.Int).Add(inWithdraw, wd)\n\t\t}\n\t}\n\n\treturn amount, rewards, inWithdraw, nil\n}",
"func distributeDelegatorRewards(ctx contract.Context, cachedDelegations *CachedDposStorage, formerValidatorTotals map[string]loom.BigUInt, delegatorRewards map[string]*loom.BigUInt, distributedRewards *loom.BigUInt) (map[string]*loom.BigUInt, error) {\n\tnewDelegationTotals := make(map[string]*loom.BigUInt)\n\n\tcandidates, err := LoadCandidateList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Initialize delegation totals with whitelist amounts\n\tfor _, candidate := range candidates {\n\t\tstatistic, _ := GetStatistic(ctx, loom.UnmarshalAddressPB(candidate.Address))\n\n\t\tif statistic != nil && statistic.WhitelistAmount != nil && !common.IsZero(statistic.WhitelistAmount.Value) {\n\t\t\tvalidatorKey := loom.UnmarshalAddressPB(statistic.Address).String()\n\t\t\tamount := calculateWeightedWhitelistAmount(*statistic)\n\t\t\tnewDelegationTotals[validatorKey] = &amount\n\t\t}\n\t}\n\n\tdelegations, err := cachedDelegations.loadDelegationList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar currentDelegations = make(DelegationList, len(delegations))\n\tcopy(currentDelegations, delegations)\n\tfor _, d := range currentDelegations {\n\t\tdelegation, err := GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\tif err == contract.ErrNotFound {\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalidatorKey := loom.UnmarshalAddressPB(delegation.Validator).String()\n\n\t\t// Do not distribute rewards to delegators of the Limbo validator\n\t\t// NOTE: because all delegations are sorted in reverse index order, the\n\t\t// 0-index delegation (for rewards) is handled last. Therefore, all\n\t\t// increases to reward delegations will be reflected in newDelegation\n\t\t// totals that are computed at the end of this for loop. 
(We do this to\n\t\t// avoid looping over all delegations twice)\n\t\tif loom.UnmarshalAddressPB(delegation.Validator).Compare(LimboValidatorAddress(ctx)) != 0 {\n\t\t\t// allocating validator distributions to delegators\n\t\t\t// based on former validator delegation totals\n\t\t\tdelegationTotal := formerValidatorTotals[validatorKey]\n\t\t\trewardsTotal := delegatorRewards[validatorKey]\n\t\t\tif rewardsTotal != nil {\n\t\t\t\tweightedDelegation := calculateWeightedDelegationAmount(*delegation)\n\t\t\t\tdelegatorDistribution := calculateShare(weightedDelegation, delegationTotal, *rewardsTotal)\n\t\t\t\t// increase a delegator's distribution\n\t\t\t\tdistributedRewards.Add(distributedRewards, &delegatorDistribution)\n\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, delegation.Validator, delegation.Delegator, delegatorDistribution)\n\n\t\t\t\t// If the reward delegation is updated by the\n\t\t\t\t// IncreaseRewardDelegation command, we must be sure to use this\n\t\t\t\t// updated version in the rest of the loop. 
No other delegations\n\t\t\t\t// (non-rewards) have the possibility of being updated outside\n\t\t\t\t// of this loop.\n\t\t\t\tif ctx.FeatureEnabled(features.DPOSVersion3_1, false) && d.Index == REWARD_DELEGATION_INDEX {\n\t\t\t\t\tdelegation, err = GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\t\t\t\tif err == contract.ErrNotFound {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tupdatedAmount := common.BigZero()\n\t\tif delegation.State == BONDING {\n\t\t\tupdatedAmount.Add(&delegation.Amount.Value, &delegation.UpdateAmount.Value)\n\t\t\tdelegation.Amount = &types.BigUInt{Value: *updatedAmount}\n\t\t} else if delegation.State == UNBONDING {\n\t\t\tupdatedAmount.Sub(&delegation.Amount.Value, &delegation.UpdateAmount.Value)\n\t\t\tdelegation.Amount = &types.BigUInt{Value: *updatedAmount}\n\t\t\tcoin, err := loadCoin(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = coin.Transfer(loom.UnmarshalAddressPB(delegation.Delegator), &delegation.UpdateAmount.Value)\n\t\t\tif err != nil {\n\t\t\t\ttransferFromErr := fmt.Sprintf(\"Failed coin Transfer - distributeDelegatorRewards, %v, %s\", delegation.Delegator.String(), delegation.UpdateAmount.Value.String())\n\t\t\t\treturn nil, logDposError(ctx, err, transferFromErr)\n\t\t\t}\n\t\t} else if delegation.State == REDELEGATING {\n\t\t\tif err = cachedDelegations.DeleteDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdelegation.Validator = delegation.UpdateValidator\n\t\t\tdelegation.Amount = delegation.UpdateAmount\n\t\t\tdelegation.LocktimeTier = delegation.UpdateLocktimeTier\n\n\t\t\tindex, err := GetNextDelegationIndex(ctx, *delegation.Validator, *delegation.Delegator)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdelegation.Index = index\n\n\t\t\tvalidatorKey = loom.UnmarshalAddressPB(delegation.Validator).String()\n\t\t}\n\n\t\t// Delete any 
delegation whose full amount has been unbonded. In all\n\t\t// other cases, update the delegation state to BONDED and reset its\n\t\t// UpdateAmount\n\t\tif common.IsZero(delegation.Amount.Value) && delegation.State == UNBONDING {\n\t\t\tif err := cachedDelegations.DeleteDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\t// After a delegation update, zero out UpdateAmount\n\t\t\tdelegation.UpdateAmount = loom.BigZeroPB()\n\t\t\tdelegation.State = BONDED\n\n\t\t\tresetDelegationIfExpired(ctx, delegation)\n\t\t\tif err := cachedDelegations.SetDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// Calculate delegation totals for all validators except the Limbo\n\t\t// validator\n\t\tif loom.UnmarshalAddressPB(delegation.Validator).Compare(LimboValidatorAddress(ctx)) != 0 {\n\t\t\tnewTotal := common.BigZero()\n\t\t\tweightedDelegation := calculateWeightedDelegationAmount(*delegation)\n\t\t\tnewTotal.Add(newTotal, &weightedDelegation)\n\t\t\tif newDelegationTotals[validatorKey] != nil {\n\t\t\t\tnewTotal.Add(newTotal, newDelegationTotals[validatorKey])\n\t\t\t}\n\t\t\tnewDelegationTotals[validatorKey] = newTotal\n\t\t}\n\t}\n\n\treturn newDelegationTotals, nil\n}",
"func sendDelegation() {\n\t// get the address\n\taddress := getTestAddress()\n\t// get the keyname and password\n\tkeyname, password := getKeynameAndPassword()\n\n\taddrFrom, err := sdk.AccAddressFromBech32(address) // validator\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// helper methods for transactions\n\tcdc := app.MakeCodec() // make codec for the app\n\n\t// get the keybase\n\tkeybase := getKeybase()\n\n\t// get the validator address for delegation\n\tvalAddr, err := sdk.ValAddressFromBech32(\"jpyvaloper1ffv7nhd3z6sych2qpqkk03ec6hzkmufyz4scd0\") // **FAUCET**\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// create delegation amount\n\tdelAmount := sdk.NewInt64Coin(sdk.DefaultBondDenom, 1000000)\n\tdelegation := staking.NewMsgDelegate(addrFrom, valAddr, delAmount)\n\tdelegationToSend := []sdk.Msg{delegation}\n\n\t// send the delegation to the blockchain\n\tsendMsgToBlockchain(cdc, address, keyname, password, delegationToSend, keybase)\n}",
"func CalculatePayoutForContract(contract Contract, rate float64, delegate bool) Contract{\n ////-------------JUST FOR TESTING -------------////\n totalNodeRewards := 378 //Amount of rewards for my delegation in cycle 11\n ////--------------END TESTING ------------------////\n\n grossRewards := contract.SharePercentage * float64(totalNodeRewards)\n contract.GrossPayout = grossRewards\n fee := rate * grossRewards\n contract.Fee = fee\n var netRewards float64\n if (delegate){\n netRewards = grossRewards\n contract.NetPayout = netRewards\n contract.Fee = 0\n } else {\n netRewards = grossRewards - fee\n contract.NetPayout = contract.NetPayout + netRewards\n }\n\n return contract\n}",
"func (va *ClawbackVestingAccount) addGrant(ctx sdk.Context, sk StakingKeeper, grantStartTime int64, grantLockupPeriods, grantVestingPeriods []Period, grantCoins sdk.Coins) {\n\t// how much is really delegated?\n\tbondedAmt := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbondingAmt := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegatedAmt := bondedAmt.Add(unbondingAmt)\n\tdelegated := sdk.NewCoins(sdk.NewCoin(sk.BondDenom(ctx), delegatedAmt))\n\n\t// discover what has been slashed\n\toldDelegated := va.DelegatedVesting.Add(va.DelegatedFree...)\n\tslashed := oldDelegated.Sub(coinsMin(oldDelegated, delegated))\n\n\t// rebase the DV + DF by capping slashed at the current unvested amount\n\tunvested := va.OriginalVesting.Sub(va.GetVestedOnly(ctx.BlockTime()))\n\tnewSlashed := coinsMin(slashed, unvested)\n\tnewDelegated := delegated.Add(newSlashed...)\n\n\t// modify schedules for the new grant\n\tnewLockupStart, newLockupEnd, newLockupPeriods := DisjunctPeriods(va.StartTime, grantStartTime, va.LockupPeriods, grantLockupPeriods)\n\tnewVestingStart, newVestingEnd, newVestingPeriods := DisjunctPeriods(va.StartTime, grantStartTime,\n\t\tva.GetVestingPeriods(), grantVestingPeriods)\n\tif newLockupStart != newVestingStart {\n\t\tpanic(\"bad start time calculation\")\n\t}\n\tva.StartTime = newLockupStart\n\tva.EndTime = max64(newLockupEnd, newVestingEnd)\n\tva.LockupPeriods = newLockupPeriods\n\tva.VestingPeriods = newVestingPeriods\n\tva.OriginalVesting = va.OriginalVesting.Add(grantCoins...)\n\n\t// cap DV at the current unvested amount, DF rounds out to newDelegated\n\tunvested2 := va.GetVestingCoins(ctx.BlockTime())\n\tva.DelegatedVesting = coinsMin(newDelegated, unvested2)\n\tva.DelegatedFree = newDelegated.Sub(va.DelegatedVesting)\n}",
"func (k Keeper) SetUnbondingDelegation(ctx sdk.Context, ubd types.UnbondingDelegation) {\n\tstore := ctx.KVStore(k.storeKey)\n\tbz := types.MustMarshalUBD(k.cdc, ubd)\n\tkey := GetUBDKey(ubd.DelegatorAddr, ubd.ValidatorAddr)\n\tstore.Set(key, bz)\n\tstore.Set(GetUBDByValIndexKey(ubd.DelegatorAddr, ubd.ValidatorAddr), []byte{}) // index, store empty bytes\n}",
"func (_Genesis *GenesisCaller) DelegationDeposit(opts *bind.CallOpts, who common.Address) (*big.Int, [32]byte, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new([32]byte)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t}\n\terr := _Genesis.contract.Call(opts, out, \"delegationDeposit\", who)\n\treturn *ret0, *ret1, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewClawbackAction returns an exported.ClawbackAction for ClawbackVestingAccount.
|
// NewClawbackAction builds an exported.ClawbackAction for a
// ClawbackVestingAccount. The returned action, when applied, claws back
// unvested funds on behalf of requestor and sends them to dest, using the
// supplied account, bank, and staking keepers.
func NewClawbackAction(requestor, dest sdk.AccAddress, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) exported.ClawbackAction {
	action := clawbackAction{
		requestor: requestor,
		dest:      dest,
		ak:        ak,
		bk:        bk,
		sk:        sk,
	}
	return action
}
|
[
"func NewClawbackGrantAction(\n\tfunderAddress string,\n\tsk StakingKeeper,\n\tgrantStartTime int64,\n\tgrantLockupPeriods, grantVestingPeriods []Period,\n\tgrantCoins sdk.Coins,\n) exported.AddGrantAction {\n\treturn clawbackGrantAction{\n\t\tfunderAddress: funderAddress,\n\t\tsk: sk,\n\t\tgrantStartTime: grantStartTime,\n\t\tgrantLockupPeriods: grantLockupPeriods,\n\t\tgrantVestingPeriods: grantVestingPeriods,\n\t\tgrantCoins: grantCoins,\n\t}\n}",
"func NewClawbackRewardAction(ak AccountKeeper, bk BankKeeper, sk StakingKeeper) exported.RewardAction {\n\treturn clawbackRewardAction{\n\t\tak: ak,\n\t\tbk: bk,\n\t\tsk: sk,\n\t}\n}",
"func NewClawbackVestingAccount(baseAcc *authtypes.BaseAccount, funder sdk.AccAddress, originalVesting sdk.Coins, startTime int64, lockupPeriods, vestingPeriods Periods) *ClawbackVestingAccount {\n\t// copy and align schedules to avoid mutating inputs\n\tlp := make(Periods, len(lockupPeriods))\n\tcopy(lp, lockupPeriods)\n\tvp := make(Periods, len(vestingPeriods))\n\tcopy(vp, vestingPeriods)\n\t_, endTime := AlignSchedules(startTime, startTime, lp, vp)\n\tbaseVestingAcc := &BaseVestingAccount{\n\t\tBaseAccount: baseAcc,\n\t\tOriginalVesting: originalVesting,\n\t\tEndTime: endTime,\n\t}\n\n\treturn &ClawbackVestingAccount{\n\t\tBaseVestingAccount: baseVestingAcc,\n\t\tFunderAddress: funder.String(),\n\t\tStartTime: startTime,\n\t\tLockupPeriods: lp,\n\t\tVestingPeriods: vp,\n\t}\n}",
"func NewCollateralizeAction(c *Collateralize, tx *types.Transaction, index int) *Action {\n\thash := tx.Hash()\n\tfromaddr := tx.From()\n\tcfg := c.GetAPI().GetConfig()\n\ttokenDb, err := account.NewAccountDB(cfg, tokenE.GetName(), pty.CCNYTokenName, c.GetStateDB())\n\tif err != nil {\n\t\tclog.Error(\"NewCollateralizeAction\", \"Get Account DB error\", \"error\", err)\n\t\treturn nil\n\t}\n\n\treturn &Action{\n\t\tcoinsAccount: c.GetCoinsAccount(), tokenAccount: tokenDb, db: c.GetStateDB(), localDB: c.GetLocalDB(),\n\t\ttxhash: hash, fromaddr: fromaddr, blocktime: c.GetBlockTime(), height: c.GetHeight(),\n\t\texecaddr: dapp.ExecAddress(string(tx.Execer)), difficulty: c.GetDifficulty(), index: index, Collateralize: c}\n}",
"func (va *ClawbackVestingAccount) clawback(ctx sdk.Context, dest sdk.AccAddress, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) error {\n\t// Compute the clawback based on the account state only, and update account\n\ttoClawBack := va.computeClawback(ctx.BlockTime().Unix())\n\tif toClawBack.IsZero() {\n\t\treturn nil\n\t}\n\taddr := va.GetAddress()\n\tbondDenom := sk.BondDenom(ctx)\n\n\t// Compute the clawback based on bank balance and delegation, and update account\n\tencumbered := va.GetVestingCoins(ctx.BlockTime())\n\tbondedAmt := sk.GetDelegatorBonded(ctx, addr)\n\tunbondingAmt := sk.GetDelegatorUnbonding(ctx, addr)\n\tbonded := sdk.NewCoins(sdk.NewCoin(bondDenom, bondedAmt))\n\tunbonding := sdk.NewCoins(sdk.NewCoin(bondDenom, unbondingAmt))\n\tunbonded := bk.GetAllBalances(ctx, addr)\n\ttoClawBack = va.updateDelegation(encumbered, toClawBack, bonded, unbonding, unbonded)\n\n\t// Write now now so that the bank module sees unvested tokens are unlocked.\n\t// Note that all store writes are aborted if there is a panic, so there is\n\t// no danger in writing incomplete results.\n\tak.SetAccount(ctx, va)\n\n\t// Now that future vesting events (and associated lockup) are removed,\n\t// the balance of the account is unlocked and can be freely transferred.\n\tspendable := bk.SpendableCoins(ctx, addr)\n\ttoXfer := coinsMin(toClawBack, spendable)\n\terr := bk.SendCoins(ctx, addr, dest, toXfer)\n\tif err != nil {\n\t\treturn err // shouldn't happen, given spendable check\n\t}\n\ttoClawBack = toClawBack.Sub(toXfer)\n\n\t// We need to traverse the staking data structures to update the\n\t// vesting account bookkeeping, and to recover more funds if necessary.\n\t// Staking is the only way unvested tokens should be missing from the bank balance.\n\n\t// If we need more, transfer UnbondingDelegations.\n\twant := toClawBack.AmountOf(bondDenom)\n\tunbondings := sk.GetUnbondingDelegations(ctx, addr, math.MaxUint16)\n\tfor _, unbonding := range unbondings {\n\t\tvalAddr, err 
:= sdk.ValAddressFromBech32(unbonding.ValidatorAddress)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttransferred := sk.TransferUnbonding(ctx, addr, dest, valAddr, want)\n\t\twant = want.Sub(transferred)\n\t\tif !want.IsPositive() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// If we need more, transfer Delegations.\n\tif want.IsPositive() {\n\t\tdelegations := sk.GetDelegatorDelegations(ctx, addr, math.MaxUint16)\n\t\tfor _, delegation := range delegations {\n\t\t\tvalidatorAddr, err := sdk.ValAddressFromBech32(delegation.ValidatorAddress)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) // shouldn't happen\n\t\t\t}\n\t\t\tvalidator, found := sk.GetValidator(ctx, validatorAddr)\n\t\t\tif !found {\n\t\t\t\t// validator has been removed\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twantShares, err := validator.SharesFromTokensTruncated(want)\n\t\t\tif err != nil {\n\t\t\t\t// validator has no tokens\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttransferredShares := sk.TransferDelegation(ctx, addr, dest, delegation.GetValidatorAddr(), wantShares)\n\t\t\t// to be conservative in what we're clawing back, round transferred shares up\n\t\t\ttransferred := validator.TokensFromSharesRoundUp(transferredShares).RoundInt()\n\t\t\twant = want.Sub(transferred)\n\t\t\tif !want.IsPositive() {\n\t\t\t\t// Could be slightly negative, due to rounding?\n\t\t\t\t// Don't think so, due to the precautions above.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we've transferred everything and still haven't transferred the desired clawback amount,\n\t// then the account must have most some unvested tokens from slashing.\n\treturn nil\n}",
"func NewBcBotAction(j *bot.Jobs) *BcBotAction {\n\t// client := resty.New()\n\t// client.\n\t// \tSetRetryCount(3).\n\t// \tSetRetryWaitTime(10 * time.Second)\n\treturn &BcBotAction{jobs: j, client: nil, mutex: new(sync.RWMutex)}\n}",
"func (m MessageActionCustomAction) construct() MessageActionClass { return &m }",
"func (va *ClawbackVestingAccount) computeClawback(clawbackTime int64) sdk.Coins {\n\t// Compute the truncated vesting schedule and amounts.\n\t// Work with the schedule as the primary data and recompute derived fields, e.g. OriginalVesting.\n\tvestTime := va.StartTime\n\ttotalVested := sdk.NewCoins()\n\ttotalUnvested := sdk.NewCoins()\n\tunvestedIdx := 0\n\tfor i, period := range va.VestingPeriods {\n\t\tvestTime += period.Length\n\t\t// tie in time goes to clawback\n\t\tif vestTime < clawbackTime {\n\t\t\ttotalVested = totalVested.Add(period.Amount...)\n\t\t\tunvestedIdx = i + 1\n\t\t} else {\n\t\t\ttotalUnvested = totalUnvested.Add(period.Amount...)\n\t\t}\n\t}\n\tlastVestTime := vestTime\n\tnewVestingPeriods := va.VestingPeriods[:unvestedIdx]\n\n\t// To cap the unlocking schedule to the new total vested, conjunct with a limiting schedule\n\tcapPeriods := []Period{\n\t\t{\n\t\t\tLength: 0,\n\t\t\tAmount: totalVested,\n\t\t},\n\t}\n\t_, lastLockTime, newLockupPeriods := ConjunctPeriods(va.StartTime, va.StartTime, va.LockupPeriods, capPeriods)\n\n\t// Now construct the new account state\n\tva.OriginalVesting = totalVested\n\tva.EndTime = max64(lastVestTime, lastLockTime)\n\tva.LockupPeriods = newLockupPeriods\n\tva.VestingPeriods = newVestingPeriods\n\t// DelegatedVesting and DelegatedFree will be adjusted elsewhere\n\n\treturn totalUnvested\n}",
"func newAction() (ab *ActionBuilder) {\n\tab = new(ActionBuilder)\n\tab.action = slackscot.ActionDefinition{Hidden: false}\n\n\tab.action.Match = defaultMatcher\n\tab.action.Answer = defaultAnswerer\n\n\treturn ab\n}",
"func (h *Handler) NewAction(act action.Action, settings map[string]interface{}) *Action {\n\n\tvalue := reflect.ValueOf(act)\n\tvalue = value.Elem()\n\tref := value.Type().PkgPath()\n\n\tnewAct := &Action{ref: ref, settings: settings}\n\th.actions = append(h.actions, newAct)\n\n\treturn newAct\n}",
"func NewRecoverableAction(supervisor *Supervisor) *RecoverableAction {\n\tra := &RecoverableAction{\n\t\tactionChan: make(chan Action),\n\t\treplyChan: make(chan string, 5),\n\t\tsupervisor: supervisor,\n\t}\n\n\tra.heartbeat = NewHeartbeat(ra, 1e8)\n\n\tgo ra.backend()\n\n\treturn ra\n}",
"func (m MessageActionChatCreate) construct() MessageActionClass { return &m }",
"func CreateAction(action func(*cli.Context) error) func(*cli.Context) error {\n\treturn func(c *cli.Context) error {\n\t\terr := action(c)\n\t\tif err != nil {\n\t\t\tiocli.Error(\"%s\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n}",
"func NewAction(app *buffalo.App) *Action {\n\tas := &Action{\n\t\tApp: app,\n\t\tModel: NewModel(),\n\t}\n\treturn as\n}",
"func New() Action {\n\treturn &action{}\n}",
"func NewAction(name string, arg interface{}) {\n\tDefaultActionRegistry.Post(name, arg)\n}",
"func NewRollbackAction(kit kit.Kit, viper *viper.Viper,\n\tauthSvrCli pbauthserver.AuthClient, dataMgrCli pbdatamanager.DataManagerClient,\n\tgseControllerCli pbgsecontroller.GSEControllerClient,\n\treq *pb.RollbackReleaseReq, resp *pb.RollbackReleaseResp) *RollbackAction {\n\n\taction := &RollbackAction{\n\t\tkit: kit,\n\t\tviper: viper,\n\t\tauthSvrCli: authSvrCli,\n\t\tdataMgrCli: dataMgrCli,\n\t\tgseControllerCli: gseControllerCli,\n\t\treq: req,\n\t\tresp: resp,\n\t}\n\n\taction.resp.Result = true\n\taction.resp.Code = pbcommon.ErrCode_E_OK\n\taction.resp.Message = \"OK\"\n\n\treturn action\n}",
"func NewChallengeAction(msg *Message) (*ChallengeAction, error) {\n\taction := &ChallengeAction{*msg}\n\n\treturn action, nil\n}",
"func NewAction(payload interface{}) Action {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: %v\\n\", r)\n\t\t\tfmt.Fprintf(os.Stderr, \"Payload: %v\\n\", payload)\n\t\t}\n\t}()\n\n\tvar a Action\n\ta.payload = payload\n\ta.headers = make(map[string]string)\n\n\tfor k, v := range payload.(map[interface{}]interface{}) {\n\t\tswitch k {\n\t\tcase \"catch\":\n\t\t\ta.catch = v.(string)\n\t\tcase \"warnings\":\n\t\t\t// TODO\n\t\t\tcontinue\n\t\tcase \"allowed_warnings\":\n\t\t\t// TODO\n\t\t\tcontinue\n\t\tcase \"node_selector\":\n\t\t\tcontinue\n\t\tcase \"headers\":\n\t\t\tfor kk, vv := range v.(map[interface{}]interface{}) {\n\t\t\t\ta.headers[kk.(string)] = vv.(string)\n\t\t\t}\n\t\tdefault:\n\t\t\ta.method = k.(string)\n\t\t\ta.params = v.(map[interface{}]interface{})\n\t\t}\n\t}\n\n\treturn a\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
TakeFromAccount implements the exported.ClawbackAction interface. It returns an error if the account is not a ClawbackVestingAccount or if the funder does not match.
|
// TakeFromAccount implements the exported.ClawbackAction interface.
// It verifies that rawAccount is a *ClawbackVestingAccount whose funder
// matches the requestor, then delegates to the account's clawback method,
// which moves unvested tokens to the configured destination.
func (ca clawbackAction) TakeFromAccount(ctx sdk.Context, rawAccount exported.VestingAccount) error {
	account, ok := rawAccount.(*ClawbackVestingAccount)
	if !ok {
		// Only clawback vesting accounts support this action.
		return sdkerrors.Wrapf(sdkerrors.ErrNotSupported, "clawback expects *ClawbackVestingAccount, got %T", rawAccount)
	}
	if account.FunderAddress != ca.requestor.String() {
		// Clawback is a privilege reserved for the original funder.
		return sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, "clawback can only be requested by original funder %s", account.FunderAddress)
	}
	return account.clawback(ctx, ca.dest, ca.ak, ca.bk, ca.sk)
}
|
[
"func (va *ClawbackVestingAccount) clawback(ctx sdk.Context, dest sdk.AccAddress, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) error {\n\t// Compute the clawback based on the account state only, and update account\n\ttoClawBack := va.computeClawback(ctx.BlockTime().Unix())\n\tif toClawBack.IsZero() {\n\t\treturn nil\n\t}\n\taddr := va.GetAddress()\n\tbondDenom := sk.BondDenom(ctx)\n\n\t// Compute the clawback based on bank balance and delegation, and update account\n\tencumbered := va.GetVestingCoins(ctx.BlockTime())\n\tbondedAmt := sk.GetDelegatorBonded(ctx, addr)\n\tunbondingAmt := sk.GetDelegatorUnbonding(ctx, addr)\n\tbonded := sdk.NewCoins(sdk.NewCoin(bondDenom, bondedAmt))\n\tunbonding := sdk.NewCoins(sdk.NewCoin(bondDenom, unbondingAmt))\n\tunbonded := bk.GetAllBalances(ctx, addr)\n\ttoClawBack = va.updateDelegation(encumbered, toClawBack, bonded, unbonding, unbonded)\n\n\t// Write now now so that the bank module sees unvested tokens are unlocked.\n\t// Note that all store writes are aborted if there is a panic, so there is\n\t// no danger in writing incomplete results.\n\tak.SetAccount(ctx, va)\n\n\t// Now that future vesting events (and associated lockup) are removed,\n\t// the balance of the account is unlocked and can be freely transferred.\n\tspendable := bk.SpendableCoins(ctx, addr)\n\ttoXfer := coinsMin(toClawBack, spendable)\n\terr := bk.SendCoins(ctx, addr, dest, toXfer)\n\tif err != nil {\n\t\treturn err // shouldn't happen, given spendable check\n\t}\n\ttoClawBack = toClawBack.Sub(toXfer)\n\n\t// We need to traverse the staking data structures to update the\n\t// vesting account bookkeeping, and to recover more funds if necessary.\n\t// Staking is the only way unvested tokens should be missing from the bank balance.\n\n\t// If we need more, transfer UnbondingDelegations.\n\twant := toClawBack.AmountOf(bondDenom)\n\tunbondings := sk.GetUnbondingDelegations(ctx, addr, math.MaxUint16)\n\tfor _, unbonding := range unbondings {\n\t\tvalAddr, err 
:= sdk.ValAddressFromBech32(unbonding.ValidatorAddress)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttransferred := sk.TransferUnbonding(ctx, addr, dest, valAddr, want)\n\t\twant = want.Sub(transferred)\n\t\tif !want.IsPositive() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// If we need more, transfer Delegations.\n\tif want.IsPositive() {\n\t\tdelegations := sk.GetDelegatorDelegations(ctx, addr, math.MaxUint16)\n\t\tfor _, delegation := range delegations {\n\t\t\tvalidatorAddr, err := sdk.ValAddressFromBech32(delegation.ValidatorAddress)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err) // shouldn't happen\n\t\t\t}\n\t\t\tvalidator, found := sk.GetValidator(ctx, validatorAddr)\n\t\t\tif !found {\n\t\t\t\t// validator has been removed\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\twantShares, err := validator.SharesFromTokensTruncated(want)\n\t\t\tif err != nil {\n\t\t\t\t// validator has no tokens\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttransferredShares := sk.TransferDelegation(ctx, addr, dest, delegation.GetValidatorAddr(), wantShares)\n\t\t\t// to be conservative in what we're clawing back, round transferred shares up\n\t\t\ttransferred := validator.TokensFromSharesRoundUp(transferredShares).RoundInt()\n\t\t\twant = want.Sub(transferred)\n\t\t\tif !want.IsPositive() {\n\t\t\t\t// Could be slightly negative, due to rounding?\n\t\t\t\t// Don't think so, due to the precautions above.\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// If we've transferred everything and still haven't transferred the desired clawback amount,\n\t// then the account must have most some unvested tokens from slashing.\n\treturn nil\n}",
"func (cga clawbackGrantAction) AddToAccount(ctx sdk.Context, rawAccount exported.VestingAccount) error {\n\tcva, ok := rawAccount.(*ClawbackVestingAccount)\n\tif !ok {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrNotSupported,\n\t\t\t\"account %s must be a ClawbackVestingAccount, got %T\",\n\t\t\trawAccount.GetAddress(), rawAccount)\n\t}\n\tif cga.funderAddress != cva.FunderAddress {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, \"account %s can only accept grants from account %s\",\n\t\t\trawAccount.GetAddress(), cva.FunderAddress)\n\t}\n\tcva.addGrant(ctx, cga.sk, cga.grantStartTime, cga.grantLockupPeriods, cga.grantVestingPeriods, cga.grantCoins)\n\treturn nil\n}",
"func NewClawbackVestingAccount(baseAcc *authtypes.BaseAccount, funder sdk.AccAddress, originalVesting sdk.Coins, startTime int64, lockupPeriods, vestingPeriods Periods) *ClawbackVestingAccount {\n\t// copy and align schedules to avoid mutating inputs\n\tlp := make(Periods, len(lockupPeriods))\n\tcopy(lp, lockupPeriods)\n\tvp := make(Periods, len(vestingPeriods))\n\tcopy(vp, vestingPeriods)\n\t_, endTime := AlignSchedules(startTime, startTime, lp, vp)\n\tbaseVestingAcc := &BaseVestingAccount{\n\t\tBaseAccount: baseAcc,\n\t\tOriginalVesting: originalVesting,\n\t\tEndTime: endTime,\n\t}\n\n\treturn &ClawbackVestingAccount{\n\t\tBaseVestingAccount: baseVestingAcc,\n\t\tFunderAddress: funder.String(),\n\t\tStartTime: startTime,\n\t\tLockupPeriods: lp,\n\t\tVestingPeriods: vp,\n\t}\n}",
"func (_Token *TokenTransactorSession) BurnFrom(account common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _Token.Contract.BurnFrom(&_Token.TransactOpts, account, amount)\n}",
"func (_Token *TokenTransactor) BurnFrom(opts *bind.TransactOpts, account common.Address, amount *big.Int) (*types.Transaction, error) {\n\treturn _Token.contract.Transact(opts, \"burnFrom\", account, amount)\n}",
"func (_ERC721Token *ERC721TokenTransactor) TakeOwnership(opts *bind.TransactOpts, _tokenId *big.Int) (*types.Transaction, error) {\n\treturn _ERC721Token.contract.Transact(opts, \"takeOwnership\", _tokenId)\n}",
"func (k Keeper) delegateFromAccount(ctx sdk.Context, valAddr sdk.ValAddress, delegator sdk.AccAddress, amount sdkmath.Int) (sdk.Dec, error) {\n\tvalidator, found := k.stakingKeeper.GetValidator(ctx, valAddr)\n\tif !found {\n\t\treturn sdk.Dec{}, types.ErrNoValidatorFound\n\t}\n\t// source tokens are from an account, so subtractAccount true and tokenSrc unbonded\n\tnewShares, err := k.stakingKeeper.Delegate(ctx, delegator, amount, stakingtypes.Unbonded, validator, true)\n\tif err != nil {\n\t\treturn sdk.Dec{}, err\n\t}\n\treturn newShares, nil\n}",
"func (s *Server) Transfer(ctx context.Context, req *pb.TransferRequest) (rep *pb.TransferReply, err error) {\n\trep = &pb.TransferReply{}\n\n\t// Get originator account and confirm it belongs to this RVASP\n\tvar account Account\n\tif err = LookupAccount(s.db, req.Account).First(&account).Error; err != nil {\n\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\tlog.Info().Str(\"account\", req.Account).Msg(\"not found\")\n\t\t\treturn nil, status.Error(codes.NotFound, \"account not found\")\n\t\t}\n\t\tlog.Error().Err(err).Msg(\"could not lookup account\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not lookup account: %s\", err)\n\t}\n\n\t// Identify the beneficiary either using the demo database or the directory service\n\tvar beneficiary Wallet\n\tif req.ExternalDemo {\n\t\tif req.BeneficiaryVasp == \"\" {\n\t\t\treturn nil, status.Error(codes.InvalidArgument, \"if external demo is true, must specify beneficiary vasp\")\n\t\t}\n\n\t\tbeneficiary = Wallet{\n\t\t\tProvider: VASP{\n\t\t\t\tName: req.BeneficiaryVasp,\n\t\t\t},\n\t\t}\n\t} else {\n\t\t// Lookup beneficiary wallet and confirm it belongs to a remote RVASP\n\t\tif err = LookupBeneficiary(s.db, req.Beneficiary).First(&beneficiary).Error; err != nil {\n\t\t\tif errors.Is(err, gorm.ErrRecordNotFound) {\n\t\t\t\tlog.Info().Str(\"beneficiary\", req.Beneficiary).Msg(\"not found\")\n\t\t\t\treturn nil, status.Error(codes.NotFound, \"beneficiary not found (use external_demo?)\")\n\t\t\t}\n\t\t\tlog.Error().Err(err).Msg(\"could not lookup beneficiary\")\n\t\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not lookup beneficiary: %s\", err)\n\t\t}\n\n\t\tif req.CheckBeneficiary {\n\t\t\tif req.BeneficiaryVasp != beneficiary.Provider.Name {\n\t\t\t\tlog.Warn().\n\t\t\t\t\tStr(\"expected\", req.BeneficiaryVasp).\n\t\t\t\t\tStr(\"actual\", beneficiary.Provider.Name).\n\t\t\t\t\tMsg(\"check beneficiary failed\")\n\t\t\t\treturn nil, status.Error(codes.InvalidArgument, 
\"beneficiary wallet does not match beneficiary VASP\")\n\t\t\t}\n\n\t\t}\n\t}\n\n\t// Conduct a TRISADS lookup if necessary to get the endpoint\n\tvar peer *peers.Peer\n\tif peer, err = s.peers.Search(beneficiary.Provider.Name); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not search peer from directory service\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not search peer from directory service: %s\", err)\n\t}\n\n\t// Ensure that the local RVASP has signing keys for the remote, otherwise perform key exchange\n\tvar signKey *rsa.PublicKey\n\tif signKey, err = peer.ExchangeKeys(true); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not exchange keys with remote peer\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not exchange keys with remote peer: %s\", err)\n\t}\n\n\t// Save the pending transaction and increment the accounts pending field\n\txfer := Transaction{\n\t\tEnvelope: uuid.New().String(),\n\t\tAccount: account,\n\t\tAmount: decimal.NewFromFloat32(req.Amount),\n\t\tDebit: true,\n\t\tCompleted: false,\n\t}\n\n\tif err = s.db.Save(&xfer).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not save transaction: %s\", err)\n\t}\n\n\t// Save the pending transaction on the account\n\t// TODO: remove pending transactions\n\taccount.Pending++\n\tif err = s.db.Save(&account).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save originator account\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not save originator account: %s\", err)\n\t}\n\n\t// Create an identity and transaction payload for TRISA exchange\n\ttransaction := &generic.Transaction{\n\t\tTxid: fmt.Sprintf(\"%d\", xfer.ID),\n\t\tOriginator: account.WalletAddress,\n\t\tBeneficiary: beneficiary.Address,\n\t\tAmount: float64(req.Amount),\n\t\tNetwork: \"TestNet\",\n\t\tTimestamp: xfer.Timestamp.Format(time.RFC3339),\n\t}\n\tidentity := 
&ivms101.IdentityPayload{\n\t\tOriginator: &ivms101.Originator{},\n\t\tOriginatingVasp: &ivms101.OriginatingVasp{},\n\t}\n\tif identity.OriginatingVasp.OriginatingVasp, err = s.vasp.LoadIdentity(); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not load originator vasp\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not load originator vasp: %s\", err)\n\t}\n\n\tidentity.Originator = &ivms101.Originator{\n\t\tOriginatorPersons: make([]*ivms101.Person, 0, 1),\n\t\tAccountNumbers: []string{account.WalletAddress},\n\t}\n\tvar originator *ivms101.Person\n\tif originator, err = account.LoadIdentity(); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not load originator identity\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not load originator identity: %s\", err)\n\t}\n\tidentity.Originator.OriginatorPersons = append(identity.Originator.OriginatorPersons, originator)\n\n\tpayload := &protocol.Payload{}\n\tif payload.Transaction, err = anypb.New(transaction); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not dump payload transaction\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not dump payload transaction: %s\", err)\n\t}\n\tif payload.Identity, err = anypb.New(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not dump payload identity\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not dump payload identity: %s\", err)\n\t}\n\n\t// Secure the envelope with the remote beneficiary's signing keys\n\tvar envelope *protocol.SecureEnvelope\n\tif envelope, err = handler.New(xfer.Envelope, payload, nil).Seal(signKey); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not create or sign secure envelope\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not create or sign secure envelope: %s\", err)\n\t}\n\n\t// Conduct the TRISA transaction, handle errors and send back to user\n\tif envelope, err = peer.Transfer(envelope); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not perform TRISA 
exchange\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not perform TRISA exchange: %s\", err)\n\t}\n\n\t// Open the response envelope with local private keys\n\tvar opened *handler.Envelope\n\tif opened, err = handler.Open(envelope, s.trisa.sign); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unseal TRISA response\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not unseal TRISA response: %s\", err)\n\t}\n\n\t// Verify the contents of the response\n\tpayload = opened.Payload\n\tif payload.Identity == nil || payload.Transaction == nil {\n\t\tlog.Warn().Msg(\"did not receive identity or transaction\")\n\t\treturn nil, status.Error(codes.FailedPrecondition, \"no identity or transaction returned\")\n\t}\n\n\tif payload.Identity.TypeUrl != \"type.googleapis.com/ivms101.IdentityPayload\" {\n\t\tlog.Warn().Str(\"type\", payload.Identity.TypeUrl).Msg(\"unsupported identity type\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"unsupported identity type for rVASP: %q\", payload.Identity.TypeUrl)\n\t}\n\n\tif payload.Transaction.TypeUrl != \"type.googleapis.com/trisa.data.generic.v1beta1.Transaction\" {\n\t\tlog.Warn().Str(\"type\", payload.Transaction.TypeUrl).Msg(\"unsupported transaction type\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"unsupported identity type for rVASP: %q\", payload.Transaction.TypeUrl)\n\t}\n\n\tidentity = &ivms101.IdentityPayload{}\n\ttransaction = &generic.Transaction{}\n\tif err = payload.Identity.UnmarshalTo(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unmarshal identity\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not unmarshal identity: %s\", err)\n\t}\n\tif err = payload.Transaction.UnmarshalTo(transaction); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not unmarshal transaction\")\n\t\treturn nil, status.Errorf(codes.FailedPrecondition, \"could not unmarshal transaction: %s\", err)\n\t}\n\n\t// Update the completed 
transaction and save to disk\n\txfer.Beneficiary = Identity{\n\t\tWalletAddress: transaction.Beneficiary,\n\t}\n\txfer.Completed = true\n\txfer.Timestamp, _ = time.Parse(time.RFC3339, transaction.Timestamp)\n\n\t// Serialize the identity information as JSON data\n\tvar data []byte\n\tif data, err = json.Marshal(identity); err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not marshal IVMS 101 identity\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not marshal IVMS 101 identity: %s\", err)\n\t}\n\txfer.Identity = string(data)\n\n\tif err = s.db.Save(&xfer).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save transaction\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not save transaction: %s\", err)\n\t}\n\n\t// Save the pending transaction on the account\n\t// TODO: remove pending transactions\n\taccount.Pending--\n\taccount.Completed++\n\taccount.Balance.Sub(xfer.Amount)\n\tif err = s.db.Save(&account).Error; err != nil {\n\t\tlog.Error().Err(err).Msg(\"could not save originator account\")\n\t\treturn nil, status.Errorf(codes.Internal, \"could not save originator account: %s\", err)\n\t}\n\n\t// Return the transfer response\n\trep.Transaction = xfer.Proto()\n\treturn rep, nil\n}",
"func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}",
"func (_Registry *RegistryTransactor) SafeTransferAndClaimFrom(opts *bind.TransactOpts, _from common.Address, _to common.Address, _id *big.Int, _value *big.Int, _data []byte, _claimData []byte) (*types.Transaction, error) {\n\treturn _Registry.contract.Transact(opts, \"safeTransferAndClaimFrom\", _from, _to, _id, _value, _data, _claimData)\n}",
"func Transfertobankaccount(v float64) predicate.Bulk {\n\treturn predicate.Bulk(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldTransfertobankaccount), v))\n\t})\n}",
"func (ai *actionIterator) PopAccount() {\n\tif len(ai.heads) != 0 {\n\t\theap.Pop(&ai.heads)\n\t}\n}",
"func (_CraftingI *CraftingISession) SafeTransferFrom(from common.Address, to common.Address, tokenId *big.Int) (*types.Transaction, error) {\n\treturn _CraftingI.Contract.SafeTransferFrom(&_CraftingI.TransactOpts, from, to, tokenId)\n}",
"func (va *ClawbackVestingAccount) computeClawback(clawbackTime int64) sdk.Coins {\n\t// Compute the truncated vesting schedule and amounts.\n\t// Work with the schedule as the primary data and recompute derived fields, e.g. OriginalVesting.\n\tvestTime := va.StartTime\n\ttotalVested := sdk.NewCoins()\n\ttotalUnvested := sdk.NewCoins()\n\tunvestedIdx := 0\n\tfor i, period := range va.VestingPeriods {\n\t\tvestTime += period.Length\n\t\t// tie in time goes to clawback\n\t\tif vestTime < clawbackTime {\n\t\t\ttotalVested = totalVested.Add(period.Amount...)\n\t\t\tunvestedIdx = i + 1\n\t\t} else {\n\t\t\ttotalUnvested = totalUnvested.Add(period.Amount...)\n\t\t}\n\t}\n\tlastVestTime := vestTime\n\tnewVestingPeriods := va.VestingPeriods[:unvestedIdx]\n\n\t// To cap the unlocking schedule to the new total vested, conjunct with a limiting schedule\n\tcapPeriods := []Period{\n\t\t{\n\t\t\tLength: 0,\n\t\t\tAmount: totalVested,\n\t\t},\n\t}\n\t_, lastLockTime, newLockupPeriods := ConjunctPeriods(va.StartTime, va.StartTime, va.LockupPeriods, capPeriods)\n\n\t// Now construct the new account state\n\tva.OriginalVesting = totalVested\n\tva.EndTime = max64(lastVestTime, lastLockTime)\n\tva.LockupPeriods = newLockupPeriods\n\tva.VestingPeriods = newVestingPeriods\n\t// DelegatedVesting and DelegatedFree will be adjusted elsewhere\n\n\treturn totalUnvested\n}",
"func Transfer(fromAcct Account, toAcct Account, amount Money) error {\n\tif err := fromAcct.Withdraw(amount); err == nil {\n\t\tif depErr := toAcct.Deposit(amount); depErr == nil {\n\t\t\tfmt.Printf(\"Transfered %f from %s to %+v\", amount, fromAcct, toAcct)\n\t\t} else {\n\t\t\t//return the root cause\n\t\t\treturn depErr\n\t\t}\n\t} else {\n\t\t//return the root cause\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (_Registry *RegistryTransactorSession) SafeTransferAndClaimFrom(_from common.Address, _to common.Address, _id *big.Int, _value *big.Int, _data []byte, _claimData []byte) (*types.Transaction, error) {\n\treturn _Registry.Contract.SafeTransferAndClaimFrom(&_Registry.TransactOpts, _from, _to, _id, _value, _data, _claimData)\n}",
"func (e *copyS2SMigrationFileEnumerator) addTransferFromAccount(ctx context.Context,\n\tsrcServiceURL azfile.ServiceURL, destBaseURL url.URL,\n\tsharePrefix, fileOrDirectoryPrefix, fileNamePattern string, cca *cookedCopyCmdArgs) error {\n\treturn enumerateSharesInAccount(\n\t\tctx,\n\t\tsrcServiceURL,\n\t\tsharePrefix,\n\t\tfunc(shareItem azfile.ShareItem) error {\n\t\t\t// Whatever the destination type is, it should be equivalent to account level,\n\t\t\t// so directly append share name to it.\n\t\t\ttmpDestURL := urlExtension{URL: destBaseURL}.generateObjectPath(shareItem.Name)\n\t\t\t// create bucket for destination, in case bucket doesn't exist.\n\t\t\tif err := e.createDestBucket(ctx, tmpDestURL, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Two cases for exclude/include which need to match share names in account:\n\t\t\t// a. https://<fileservice>/share*/file*.vhd\n\t\t\t// b. https://<fileservice>/ which equals to https://<fileservice>/*\n\t\t\treturn e.addTransfersFromDirectory(\n\t\t\t\tctx,\n\t\t\t\tsrcServiceURL.NewShareURL(shareItem.Name).NewRootDirectoryURL(),\n\t\t\t\ttmpDestURL,\n\t\t\t\tfileOrDirectoryPrefix,\n\t\t\t\tfileNamePattern,\n\t\t\t\t\"\",\n\t\t\t\ttrue,\n\t\t\t\ttrue,\n\t\t\t\tcca)\n\t\t})\n}",
"func (e *copyS2SMigrationBlobEnumerator) addTransferFromAccount(ctx context.Context,\n\tsrcServiceURL azblob.ServiceURL, destBaseURL url.URL,\n\tcontainerPrefix, blobPrefix, blobNamePattern string, cca *cookedCopyCmdArgs) error {\n\treturn enumerateContainersInAccount(\n\t\tctx,\n\t\tsrcServiceURL,\n\t\tcontainerPrefix,\n\t\tfunc(containerItem azblob.ContainerItem) error {\n\t\t\t// Whatever the destination type is, it should be equivalent to account level,\n\t\t\t// so directly append container name to it.\n\t\t\ttmpDestURL := urlExtension{URL: destBaseURL}.generateObjectPath(containerItem.Name)\n\t\t\t// create bucket for destination, in case bucket doesn't exist.\n\t\t\tif err := e.createDestBucket(ctx, tmpDestURL, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Two cases for exclude/include which need to match container names in account:\n\t\t\t// a. https://<blobservice>/container*/blob*.vhd\n\t\t\t// b. https://<blobservice>/ which equals to https://<blobservice>/*\n\t\t\treturn e.addTransfersFromContainer(\n\t\t\t\tctx,\n\t\t\t\tsrcServiceURL.NewContainerURL(containerItem.Name),\n\t\t\t\ttmpDestURL,\n\t\t\t\tblobPrefix,\n\t\t\t\tblobNamePattern,\n\t\t\t\t\"\",\n\t\t\t\ttrue,\n\t\t\t\ttrue,\n\t\t\t\tcca)\n\t\t})\n}",
"func (controller *Auth) safeRedilectAccount(sendMe string) {\n\tvar safeAddress string\n\tsafeAddress = controller.getSafeURL(sendMe)\n\tcontroller.Redirect(safeAddress, 302)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Clawback transfers unvested tokens in a ClawbackVestingAccount to dest. Future vesting events are removed. Unstaked tokens are simply sent. Unbonding and staked tokens are transferred with their staking state intact. Account state is updated to reflect the removals.
|
// clawback transfers unvested tokens in va to dest, removing future vesting
// events from the schedule. Unstaked tokens are sent directly from the bank
// balance; unbonding and bonded tokens are transferred with their staking
// state intact. The account state is updated to reflect the removals.
func (va *ClawbackVestingAccount) clawback(ctx sdk.Context, dest sdk.AccAddress, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) error {
	// Compute the clawback based on the account state only, and update account
	toClawBack := va.computeClawback(ctx.BlockTime().Unix())
	if toClawBack.IsZero() {
		// Nothing unvested remains to claw back.
		return nil
	}
	addr := va.GetAddress()
	bondDenom := sk.BondDenom(ctx)
	// Compute the clawback based on bank balance and delegation, and update account
	encumbered := va.GetVestingCoins(ctx.BlockTime())
	bondedAmt := sk.GetDelegatorBonded(ctx, addr)
	unbondingAmt := sk.GetDelegatorUnbonding(ctx, addr)
	bonded := sdk.NewCoins(sdk.NewCoin(bondDenom, bondedAmt))
	unbonding := sdk.NewCoins(sdk.NewCoin(bondDenom, unbondingAmt))
	unbonded := bk.GetAllBalances(ctx, addr)
	toClawBack = va.updateDelegation(encumbered, toClawBack, bonded, unbonding, unbonded)
	// Write now so that the bank module sees unvested tokens are unlocked.
	// Note that all store writes are aborted if there is a panic, so there is
	// no danger in writing incomplete results.
	ak.SetAccount(ctx, va)
	// Now that future vesting events (and associated lockup) are removed,
	// the balance of the account is unlocked and can be freely transferred.
	spendable := bk.SpendableCoins(ctx, addr)
	// Transfer as much as possible from the liquid bank balance first.
	toXfer := coinsMin(toClawBack, spendable)
	err := bk.SendCoins(ctx, addr, dest, toXfer)
	if err != nil {
		return err // shouldn't happen, given spendable check
	}
	toClawBack = toClawBack.Sub(toXfer)
	// We need to traverse the staking data structures to update the
	// vesting account bookkeeping, and to recover more funds if necessary.
	// Staking is the only way unvested tokens should be missing from the bank balance.
	// If we need more, transfer UnbondingDelegations.
	want := toClawBack.AmountOf(bondDenom)
	unbondings := sk.GetUnbondingDelegations(ctx, addr, math.MaxUint16)
	for _, unbonding := range unbondings {
		valAddr, err := sdk.ValAddressFromBech32(unbonding.ValidatorAddress)
		if err != nil {
			// A stored unbonding with an unparseable validator address
			// indicates corrupted state, so halt rather than continue.
			panic(err)
		}
		transferred := sk.TransferUnbonding(ctx, addr, dest, valAddr, want)
		want = want.Sub(transferred)
		if !want.IsPositive() {
			break
		}
	}
	// If we need more, transfer Delegations.
	if want.IsPositive() {
		delegations := sk.GetDelegatorDelegations(ctx, addr, math.MaxUint16)
		for _, delegation := range delegations {
			validatorAddr, err := sdk.ValAddressFromBech32(delegation.ValidatorAddress)
			if err != nil {
				panic(err) // shouldn't happen
			}
			validator, found := sk.GetValidator(ctx, validatorAddr)
			if !found {
				// validator has been removed
				continue
			}
			// Convert the remaining wanted tokens into this validator's shares.
			wantShares, err := validator.SharesFromTokensTruncated(want)
			if err != nil {
				// validator has no tokens
				continue
			}
			transferredShares := sk.TransferDelegation(ctx, addr, dest, delegation.GetValidatorAddr(), wantShares)
			// to be conservative in what we're clawing back, round transferred shares up
			transferred := validator.TokensFromSharesRoundUp(transferredShares).RoundInt()
			want = want.Sub(transferred)
			if !want.IsPositive() {
				// Could be slightly negative, due to rounding?
				// Don't think so, due to the precautions above.
				break
			}
		}
	}
	// If we've transferred everything and still haven't transferred the desired clawback amount,
	// then the account must have lost some unvested tokens to slashing; there is
	// nothing left to recover, so report success.
	return nil
}
|
[
"func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}",
"func (_SmartToken *SmartTokenTransactor) Transfer(opts *bind.TransactOpts, to common.Address, tokens *big.Int) (*types.Transaction, error) {\n\treturn _SmartToken.contract.Transact(opts, \"transfer\", to, tokens)\n}",
"func (_BurnableToken *BurnableTokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BurnableToken.contract.Transact(opts, \"transfer\", _to, _value)\n}",
"func (ca clawbackAction) TakeFromAccount(ctx sdk.Context, rawAccount exported.VestingAccount) error {\n\tcva, ok := rawAccount.(*ClawbackVestingAccount)\n\tif !ok {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrNotSupported, \"clawback expects *ClawbackVestingAccount, got %T\", rawAccount)\n\t}\n\tif ca.requestor.String() != cva.FunderAddress {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, \"clawback can only be requested by original funder %s\", cva.FunderAddress)\n\t}\n\treturn cva.clawback(ctx, ca.dest, ca.ak, ca.bk, ca.sk)\n}",
"func (_TokenContractFunctions *TokenContractFunctionsTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _TokenContractFunctions.contract.Transact(opts, \"transfer\", _to, _value)\n}",
"func (s *session) Transfer(cancel <-chan struct{}, t Trust, recipientId uuid.UUID) error {\n\t_, err := s.Invite(cancel, t, recipientId, func(o *InvitationOptions) {\n\t\to.Lvl = Own\n\t\to.Exp = OneHundredYears\n\t})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\treturn errors.WithStack(s.Revoke(cancel, t, s.MyId()))\n}",
"func (_FixedSupplyToken *FixedSupplyTokenTransactor) Transfer(opts *bind.TransactOpts, to common.Address, tokens *big.Int) (*types.Transaction, error) {\n\treturn _FixedSupplyToken.contract.Transact(opts, \"transfer\", to, tokens)\n}",
"func (rpc *Rpc) TransferTokens(fromAddr *models.Account, toAddr *models.Account, amount models.Amount) (*models.Transaction, error) {\n\t// unlock the source account\n\trpc.log.Debugf(\"RPC->TransferTokens(): Sending %s tokens [%d => %d]\", amount.ToHex(), fromAddr.Id, toAddr.Id)\n\n\t// prep transaction details\n\ttx := map[string]interface{}{\n\t\t\"from\": fromAddr.Address,\n\t\t\"to\": toAddr.Address,\n\t\t\"value\": amount.ToHex(),\n\t}\n\n\t// perform the call\n\tvar txHash string\n\terr := rpc.Call(&txHash, \"personal_sendTransaction\", tx, fromAddr.Password)\n\tif err != nil {\n\t\trpc.log.Errorf(\"RPC->TransferTokens(): Error! %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// unlock the source account\n\trpc.log.Debugf(\"RPC->TransferTokens(): Tx [%d => %d] pending %s\", fromAddr.Id, toAddr.Id, txHash)\n\n\t// return a valid\n\treturn &models.Transaction{\n\t\tId: txHash,\n\t\tFromAccount: fromAddr,\n\t\tToAccount: toAddr,\n\t\tAmount: &amount,\n\t\tTimeStamp: &graphql.Time{Time: time.Now()},\n\t}, nil\n}",
"func (_FCToken *FCTokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _FCToken.contract.Transact(opts, \"transfer\", _to, _value)\n}",
"func transferAllToAddress(v *visor.Visor, gb visor.SignedBlock,\n dest coin.Address) (visor.SignedBlock, error) {\n sb := visor.SignedBlock{}\n if gb.Block.Head.BkSeq != uint64(0) {\n return sb, errors.New(\"Must be genesis block\")\n }\n // Send the entire genesis block to dest\n if len(gb.Block.Body.Transactions) != 1 {\n log.Panic(\"Genesis block has only 1 txn\")\n }\n tx := gb.Block.Body.Transactions[0]\n if len(tx.Out) != 1 {\n log.Panic(\"Genesis block has only 1 output\")\n }\n amt := visor.NewBalance(tx.Out[0].Coins, tx.Out[0].Hours)\n tx, err := v.Spend(amt, 0, dest)\n if err != nil {\n return sb, err\n }\n // Add the tx to the unconfirmed pool so it can get picked up\n err, _ = v.RecordTxn(tx)\n if err != nil {\n return sb, err\n }\n // Put the tx in a block and commit\n sb, err = v.CreateBlock(gb.Block.Head.Time + 1)\n if err != nil {\n return sb, err\n }\n err = v.ExecuteSignedBlock(sb)\n if err != nil {\n return sb, err\n }\n return sb, nil\n}",
"func (byb *TokenByb) Transfer(to smc.Address, value big.Int) (smcError smc.Error) {\n\t//The amount of the transfer is less than 0\n\tif Compare(value, Zero()) <= 0 {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsInvalidParameter\n\t\tsmcError.ErrorDesc = \"Invalid value: it cannot be a negative\"\n\t\treturn\n\t}\n\n\t// The receiving address cannot be itself\n\t// And cannot transfer to contract account\n\tif byb.Sender.Addr == to || byb.ContractAcct.Addr == to {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsUnsupportTransToSelf\n\t\treturn\n\t}\n\t// Checking \"to\" address role\n\taddrRole, smcError := byb.checkAddress(to)\n\tif smcError.ErrorCode != bcerrors.ErrCodeOK {\n\t\treturn smcError\n\t}\n\tif addrRole == addressRole_StockHolder ||\n\t\taddrRole == addressRole_Committee {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsInvalidParameter\n\t\tsmcError.ErrorDesc = \"Invalid address, cannot transfer byb to this address\"\n\t\treturn\n\t}\n\n\t// Checking \"Sender\" address role\n\taddrRole, smcError = byb.checkAddress(byb.Sender.Addr)\n\tif smcError.ErrorCode != bcerrors.ErrCodeOK {\n\t\treturn smcError\n\t}\n\tif addrRole == addressRole_StockHolder ||\n\t\taddrRole == addressRole_Committee {\n\t\tsmcError.ErrorCode = bcerrors.ErrCodeInterContractsInvalidParameter\n\t\tsmcError.ErrorDesc = \"Invalid address, cannot transfer byb from this address\"\n\t\treturn\n\t}\n\n\ttransByb, smcError := byb.calcAndSetSenderBalance(value, addrRole)\n\tif smcError.ErrorCode != bcerrors.ErrCodeOK {\n\t\treturn\n\t}\n\t// Pack receipt first\n\tbyb.bybReceipt_onTransfer(byb.Sender.Addr, to, transByb)\n\n\t// Set the balance of the payee\n\tsmcError = byb.setPayeeBalance(to, transByb)\n\tif smcError.ErrorCode != bcerrors.ErrCodeOK {\n\t\treturn\n\t}\n\treturn\n}",
"func (rs *Resolver) Transfer(args *struct{ ToTransfer inputs.TransferInput }) (*types.Transaction, error) {\n\t// get the source id\n\tfid, err := strconv.Atoi(string(args.ToTransfer.FromAccountId))\n\tif err != nil {\n\t\t// log the error and quit\n\t\trs.log.Errorf(\"GQL->Mutation->Transfer(): Invalid source account ID [%s]. %s\", args.ToTransfer.FromAccountId, err.Error())\n\t\treturn nil, err\n\t}\n\n\t// get the source\n\tfrom, err := rs.Db.AccountById(fid)\n\tif err != nil {\n\t\t// log the error and quit\n\t\trs.log.Errorf(\"GQL->Mutation->Transfer(): Source account not found for account id [%s]. %s\", args.ToTransfer.FromAccountId, err.Error())\n\t\treturn nil, err\n\t}\n\n\t// get the target id\n\ttid, err := strconv.Atoi(string(args.ToTransfer.ToAccountId))\n\tif err != nil {\n\t\t// log the error and quit\n\t\trs.log.Errorf(\"GQL->Mutation->Transfer(): Invalid destination account ID [%s]. %s\", args.ToTransfer.ToAccountId, err.Error())\n\t\treturn nil, err\n\t}\n\n\t// get the source\n\tto, err := rs.Db.AccountById(tid)\n\tif err != nil {\n\t\t// log the error and quit\n\t\trs.log.Errorf(\"GQL->Mutation->Transfer(): Destination account not found for account id [%s]. %s\", args.ToTransfer.ToAccountId, err.Error())\n\t\treturn nil, err\n\t}\n\n\t// log the action\n\trs.log.Debugf(\"GQL->Mutation->Transfer(): Sending %s FTM tokens [%s -> %s].\", args.ToTransfer.Amount.ToFTM(), from.Name, to.Name)\n\n\t// do the transfer\n\ttr, err := rs.Rpc.TransferTokens(from, to, args.ToTransfer.Amount)\n\tif err != nil {\n\t\t// log the action\n\t\trs.log.Errorf(\"GQL->Mutation->Transfer(): Can not send tokens. %s\", err.Error())\n\t\treturn nil, err\n\t}\n\n\t// return nothing\n\treturn types.NewTransaction(tr, rs.Repository), nil\n}",
"func (_Sigmacore *SigmacoreTransactorSession) Transfer(dst common.Address, amt *big.Int) (*types.Transaction, error) {\n\treturn _Sigmacore.Contract.Transfer(&_Sigmacore.TransactOpts, dst, amt)\n}",
"func Transfer(from, to string, amount int64) error {\n\tif amount <= 0 {\n\t\treturn fmt.Errorf(\"invalid amount; %d\", amount)\n\t}\n\n\tvar accs []*share.Account\n\terr := client.GetByNames(ctx, share.KindAccount, []string{from, to}, &accs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"get accounts error; %v\", err)\n\t}\n\n\tif accs[0].Balance < amount {\n\t\treturn fmt.Errorf(\"balance of account %s is %d, not enough for withdraw %d\", from, accs[0].Balance, amount)\n\t}\n\n\taccs[0].Balance -= amount\n\taccs[1].Balance += amount\n\ttrans := []*share.Transaction{\n\t\t{Type: share.TransactionTypeWithdraw, AccountID: from, Amount: -amount},\n\t\t{Type: share.TransactionTypeDeposit, AccountID: to, Amount: amount},\n\t}\n\tfor _, tran := range trans {\n\t\ttran.NewKey(share.KindTransaction)\n\t}\n\terr = client.SaveModels(ctx, \"\", []interface{}{accs[0], accs[1], trans[0], trans[1]})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"save models error; %v\", err)\n\t}\n\treturn nil\n}",
"func (_SfcContract *SfcContractTransactor) StashRewards(opts *bind.TransactOpts, delegator common.Address, toValidatorID *big.Int) (*types.Transaction, error) {\n\treturn _SfcContract.contract.Transact(opts, \"stashRewards\", delegator, toValidatorID)\n}",
"func (_StakingPortBacker *StakingPortBackerFilterer) WatchTokensWithdrawn(opts *bind.WatchOpts, sink chan<- *StakingPortBackerTokensWithdrawn) (event.Subscription, error) {\n\n\tlogs, sub, err := _StakingPortBacker.contract.WatchLogs(opts, \"TokensWithdrawn\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase log := <-logs:\n\t\t\t\t// New log arrived, parse the event and forward to the user\n\t\t\t\tevent := new(StakingPortBackerTokensWithdrawn)\n\t\t\t\tif err := _StakingPortBacker.contract.UnpackLog(event, \"TokensWithdrawn\", log); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tevent.Raw = log\n\n\t\t\t\tselect {\n\t\t\t\tcase sink <- event:\n\t\t\t\tcase err := <-sub.Err():\n\t\t\t\t\treturn err\n\t\t\t\tcase <-quit:\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase err := <-sub.Err():\n\t\t\t\treturn err\n\t\t\tcase <-quit:\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}), nil\n}",
"func (b *Bank) Transfer(req *structs.Request) *structs.Request {\n\tvar account string\n\tif req.DestBank != b.Bankid {\n\t\taccount = req.Account\n\t} else {\n\t\taccount = req.DestAccount\n\t}\n\tb.CheckId(account)\n\ta := b.amap[account]\n\tnewTrans := MakeTransaction(req)\n\tresp := b.T.checkTransaction(newTrans)\n\tif resp == \"new\" {\n\t\tresp = \"processed\"\n\t\tb.T.RecordTransaction(newTrans)\n\t\tif req.DestBank != b.Bankid {\n\t\t\tif (a.Balance - req.Amount) < 0 {\n\t\t\t\treturn structs.Makereply(req.Requestid, req.Account, \"insufficientfunds\",\n\t\t\t\t\t\"transfer\", req.DestAccount, req.DestBank, req.Amount, a.getbalance())\n\t\t\t}\n\t\t\ta.Balance = a.Balance - req.Amount\n\t\t} else {\n\t\t\ta.Balance = a.Balance + req.Amount\n\t\t}\n\n\t}\n\treturn structs.Makereply(req.Requestid, req.Account, resp, \"transfer\",\n\t\treq.DestAccount, req.DestBank, req.Amount, a.getbalance())\n}",
"func (_TokenHub *TokenHubTransactor) TransferOut(opts *bind.TransactOpts, contractAddr common.Address, recipient common.Address, amount *big.Int, expireTime uint64) (*types.Transaction, error) {\n\treturn _TokenHub.contract.Transact(opts, \"transferOut\", contractAddr, recipient, amount, expireTime)\n}",
"func (_ProjectWallet *ProjectWalletTransactorSession) Transfer(_receiver common.Address, _amt *big.Int) (*types.Transaction, error) {\n\treturn _ProjectWallet.Contract.Transfer(&_ProjectWallet.TransactOpts, _receiver, _amt)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
distributeReward adds the reward to the future vesting schedule in proportion to the future vesting staking tokens.
|
// distributeReward adds reward to the future (not yet vested) periods of the
// vesting schedule, in proportion to each period's share of the unvested
// staking tokens, then persists the updated account.
func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {
	now := ctx.BlockTime().Unix()
	// First pass: find the index of the first unvested period and the total
	// amount of the staking token still scheduled to vest.
	t := va.StartTime
	firstUnvestedPeriod := 0
	unvestedTokens := sdk.ZeroInt()
	for i, period := range va.VestingPeriods {
		// t is the cumulative end time of period i.
		t += period.Length
		if t <= now {
			// Period i has fully vested; the first unvested period is after it.
			firstUnvestedPeriod = i + 1
			continue
		}
		unvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))
	}
	// Second pass: allocate the reward across unvested periods. Each period
	// receives the difference between the cumulative target (reward scaled by
	// the running fraction of unvested staking tokens seen so far) and what
	// has already been handed out, so rounding residue rolls forward and the
	// final period absorbs it.
	// NOTE(review): assumes unvestedTokens is nonzero when this loop runs
	// (otherwise the Quo below divides by zero) — callers appear to check the
	// unvested amount first; verify at call sites.
	runningTotReward := sdk.NewCoins()
	runningTotStaking := sdk.ZeroInt()
	for i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {
		period := va.VestingPeriods[i]
		runningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))
		runningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())
		targetCoins := scaleCoins(reward, runningTotRatio)
		// This period gets only the increment over what was already allocated.
		thisReward := targetCoins.Sub(runningTotReward)
		runningTotReward = targetCoins
		period.Amount = period.Amount.Add(thisReward...)
		va.VestingPeriods[i] = period
	}
	// The whole reward becomes part of the original vesting amount.
	va.OriginalVesting = va.OriginalVesting.Add(reward...)
	// Value receiver: persist the mutated copy explicitly so the schedule and
	// OriginalVesting changes are written to the account store.
	ak.SetAccount(ctx, &va)
}
|
[
"func (_BondedECDSAKeep *BondedECDSAKeepTransactor) DistributeETHReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"distributeETHReward\")\n}",
"func (_BondedECDSAKeep *BondedECDSAKeepSession) DistributeETHReward() (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeETHReward(&_BondedECDSAKeep.TransactOpts)\n}",
"func (_BondedECDSAKeep *BondedECDSAKeepTransactor) DistributeERC20Reward(opts *bind.TransactOpts, _tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"distributeERC20Reward\", _tokenAddress, _value)\n}",
"func (_BondedECDSAKeep *BondedECDSAKeepTransactorSession) DistributeERC20Reward(_tokenAddress common.Address, _value *big.Int) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.Contract.DistributeERC20Reward(&_BondedECDSAKeep.TransactOpts, _tokenAddress, _value)\n}",
"func distributeDelegatorRewards(ctx contract.Context, cachedDelegations *CachedDposStorage, formerValidatorTotals map[string]loom.BigUInt, delegatorRewards map[string]*loom.BigUInt, distributedRewards *loom.BigUInt) (map[string]*loom.BigUInt, error) {\n\tnewDelegationTotals := make(map[string]*loom.BigUInt)\n\n\tcandidates, err := LoadCandidateList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Initialize delegation totals with whitelist amounts\n\tfor _, candidate := range candidates {\n\t\tstatistic, _ := GetStatistic(ctx, loom.UnmarshalAddressPB(candidate.Address))\n\n\t\tif statistic != nil && statistic.WhitelistAmount != nil && !common.IsZero(statistic.WhitelistAmount.Value) {\n\t\t\tvalidatorKey := loom.UnmarshalAddressPB(statistic.Address).String()\n\t\t\tamount := calculateWeightedWhitelistAmount(*statistic)\n\t\t\tnewDelegationTotals[validatorKey] = &amount\n\t\t}\n\t}\n\n\tdelegations, err := cachedDelegations.loadDelegationList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar currentDelegations = make(DelegationList, len(delegations))\n\tcopy(currentDelegations, delegations)\n\tfor _, d := range currentDelegations {\n\t\tdelegation, err := GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\tif err == contract.ErrNotFound {\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalidatorKey := loom.UnmarshalAddressPB(delegation.Validator).String()\n\n\t\t// Do not distribute rewards to delegators of the Limbo validator\n\t\t// NOTE: because all delegations are sorted in reverse index order, the\n\t\t// 0-index delegation (for rewards) is handled last. Therefore, all\n\t\t// increases to reward delegations will be reflected in newDelegation\n\t\t// totals that are computed at the end of this for loop. 
(We do this to\n\t\t// avoid looping over all delegations twice)\n\t\tif loom.UnmarshalAddressPB(delegation.Validator).Compare(LimboValidatorAddress(ctx)) != 0 {\n\t\t\t// allocating validator distributions to delegators\n\t\t\t// based on former validator delegation totals\n\t\t\tdelegationTotal := formerValidatorTotals[validatorKey]\n\t\t\trewardsTotal := delegatorRewards[validatorKey]\n\t\t\tif rewardsTotal != nil {\n\t\t\t\tweightedDelegation := calculateWeightedDelegationAmount(*delegation)\n\t\t\t\tdelegatorDistribution := calculateShare(weightedDelegation, delegationTotal, *rewardsTotal)\n\t\t\t\t// increase a delegator's distribution\n\t\t\t\tdistributedRewards.Add(distributedRewards, &delegatorDistribution)\n\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, delegation.Validator, delegation.Delegator, delegatorDistribution)\n\n\t\t\t\t// If the reward delegation is updated by the\n\t\t\t\t// IncreaseRewardDelegation command, we must be sure to use this\n\t\t\t\t// updated version in the rest of the loop. 
No other delegations\n\t\t\t\t// (non-rewards) have the possibility of being updated outside\n\t\t\t\t// of this loop.\n\t\t\t\tif ctx.FeatureEnabled(features.DPOSVersion3_1, false) && d.Index == REWARD_DELEGATION_INDEX {\n\t\t\t\t\tdelegation, err = GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\t\t\t\tif err == contract.ErrNotFound {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tupdatedAmount := common.BigZero()\n\t\tif delegation.State == BONDING {\n\t\t\tupdatedAmount.Add(&delegation.Amount.Value, &delegation.UpdateAmount.Value)\n\t\t\tdelegation.Amount = &types.BigUInt{Value: *updatedAmount}\n\t\t} else if delegation.State == UNBONDING {\n\t\t\tupdatedAmount.Sub(&delegation.Amount.Value, &delegation.UpdateAmount.Value)\n\t\t\tdelegation.Amount = &types.BigUInt{Value: *updatedAmount}\n\t\t\tcoin, err := loadCoin(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\terr = coin.Transfer(loom.UnmarshalAddressPB(delegation.Delegator), &delegation.UpdateAmount.Value)\n\t\t\tif err != nil {\n\t\t\t\ttransferFromErr := fmt.Sprintf(\"Failed coin Transfer - distributeDelegatorRewards, %v, %s\", delegation.Delegator.String(), delegation.UpdateAmount.Value.String())\n\t\t\t\treturn nil, logDposError(ctx, err, transferFromErr)\n\t\t\t}\n\t\t} else if delegation.State == REDELEGATING {\n\t\t\tif err = cachedDelegations.DeleteDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdelegation.Validator = delegation.UpdateValidator\n\t\t\tdelegation.Amount = delegation.UpdateAmount\n\t\t\tdelegation.LocktimeTier = delegation.UpdateLocktimeTier\n\n\t\t\tindex, err := GetNextDelegationIndex(ctx, *delegation.Validator, *delegation.Delegator)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdelegation.Index = index\n\n\t\t\tvalidatorKey = loom.UnmarshalAddressPB(delegation.Validator).String()\n\t\t}\n\n\t\t// Delete any 
delegation whose full amount has been unbonded. In all\n\t\t// other cases, update the delegation state to BONDED and reset its\n\t\t// UpdateAmount\n\t\tif common.IsZero(delegation.Amount.Value) && delegation.State == UNBONDING {\n\t\t\tif err := cachedDelegations.DeleteDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t} else {\n\t\t\t// After a delegation update, zero out UpdateAmount\n\t\t\tdelegation.UpdateAmount = loom.BigZeroPB()\n\t\t\tdelegation.State = BONDED\n\n\t\t\tresetDelegationIfExpired(ctx, delegation)\n\t\t\tif err := cachedDelegations.SetDelegation(ctx, delegation); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// Calculate delegation totals for all validators except the Limbo\n\t\t// validator\n\t\tif loom.UnmarshalAddressPB(delegation.Validator).Compare(LimboValidatorAddress(ctx)) != 0 {\n\t\t\tnewTotal := common.BigZero()\n\t\t\tweightedDelegation := calculateWeightedDelegationAmount(*delegation)\n\t\t\tnewTotal.Add(newTotal, &weightedDelegation)\n\t\t\tif newDelegationTotals[validatorKey] != nil {\n\t\t\t\tnewTotal.Add(newTotal, newDelegationTotals[validatorKey])\n\t\t\t}\n\t\t\tnewDelegationTotals[validatorKey] = newTotal\n\t\t}\n\t}\n\n\treturn newDelegationTotals, nil\n}",
"func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}",
"func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, 
\"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}",
"func (s *MutableState) AddRewards(time epochtime.EpochTime, factor *quantity.Quantity, accounts []signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tfor _, id := range accounts {\n\t\tent := s.Account(id)\n\n\t\tq := ent.Escrow.Active.Balance.Clone()\n\t\t// Multiply first.\n\t\tif err := q.Mul(factor); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t\t}\n\t\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t\t}\n\t\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t\t}\n\n\t\tif q.IsZero() {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar com *quantity.Quantity\n\t\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\t\tif rate != nil {\n\t\t\tcom = q.Clone()\n\t\t\t// Multiply first.\n\t\t\tif err := com.Mul(rate); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"multiplying by commission rate\")\n\t\t\t}\n\t\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t\t}\n\n\t\t\tif err := q.Sub(com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t\t}\n\t\t}\n\n\t\tif !q.IsZero() {\n\t\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t\t}\n\t\t}\n\n\t\tif com != nil && !com.IsZero() {\n\t\t\tdelegation := 
s.Delegation(id, id)\n\n\t\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t\t}\n\n\t\t\ts.SetDelegation(id, id, delegation)\n\t\t}\n\n\t\ts.SetAccount(id, ent)\n\t}\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}",
"func (s *MutableState) AddRewardSingleAttenuated(time epochtime.EpochTime, factor *quantity.Quantity, attenuationNumerator, attenuationDenominator int, account signature.PublicKey) error {\n\tsteps, err := s.RewardSchedule()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar activeStep *staking.RewardStep\n\tfor _, step := range steps {\n\t\tif time < step.Until {\n\t\t\tactiveStep = &step\n\t\t\tbreak\n\t\t}\n\t}\n\tif activeStep == nil {\n\t\t// We're past the end of the schedule.\n\t\treturn nil\n\t}\n\n\tvar numQ, denQ quantity.Quantity\n\tif err = numQ.FromInt64(int64(attenuationNumerator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation numerator %d\", attenuationNumerator)\n\t}\n\tif err = denQ.FromInt64(int64(attenuationDenominator)); err != nil {\n\t\treturn errors.Wrapf(err, \"importing attenuation denominator %d\", attenuationDenominator)\n\t}\n\n\tcommonPool, err := s.CommonPool()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"loading common pool\")\n\t}\n\n\tent := s.Account(account)\n\n\tq := ent.Escrow.Active.Balance.Clone()\n\t// Multiply first.\n\tif err := q.Mul(factor); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward factor\")\n\t}\n\tif err := q.Mul(&activeStep.Scale); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by reward step scale\")\n\t}\n\tif err := q.Mul(&numQ); err != nil {\n\t\treturn errors.Wrap(err, \"multiplying by attenuation numerator\")\n\t}\n\tif err := q.Quo(staking.RewardAmountDenominator); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by reward amount denominator\")\n\t}\n\tif err := q.Quo(&denQ); err != nil {\n\t\treturn errors.Wrap(err, \"dividing by attenuation denominator\")\n\t}\n\n\tif q.IsZero() {\n\t\treturn nil\n\t}\n\n\tvar com *quantity.Quantity\n\trate := ent.Escrow.CommissionSchedule.CurrentRate(time)\n\tif rate != nil {\n\t\tcom = q.Clone()\n\t\t// Multiply first.\n\t\tif err := com.Mul(rate); err != nil {\n\t\t\treturn errors.Wrap(err, \"multiplying by 
commission rate\")\n\t\t}\n\t\tif err := com.Quo(staking.CommissionRateDenominator); err != nil {\n\t\t\treturn errors.Wrap(err, \"dividing by commission rate denominator\")\n\t\t}\n\n\t\tif err := q.Sub(com); err != nil {\n\t\t\treturn errors.Wrap(err, \"subtracting commission\")\n\t\t}\n\t}\n\n\tif !q.IsZero() {\n\t\tif err := quantity.Move(&ent.Escrow.Active.Balance, commonPool, q); err != nil {\n\t\t\treturn errors.Wrap(err, \"transferring to active escrow balance from common pool\")\n\t\t}\n\t}\n\n\tif com != nil && !com.IsZero() {\n\t\tdelegation := s.Delegation(account, account)\n\n\t\tif err := ent.Escrow.Active.Deposit(&delegation.Shares, commonPool, com); err != nil {\n\t\t\treturn errors.Wrap(err, \"depositing commission\")\n\t\t}\n\n\t\ts.SetDelegation(account, account, delegation)\n\t}\n\n\ts.SetAccount(account, ent)\n\n\ts.SetCommonPool(commonPool)\n\n\treturn nil\n}",
"func weighted_reward(w map[int]float64, allocation vrp.Allocation) float64 {\n\tvar reward float64\n\tfor id, _ := range allocation {\n\t\treward += w[id] * allocation[id]\n\t}\n\treturn reward\n}",
"func (_BondingManager *BondingManagerTransactor) Reward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondingManager.contract.Transact(opts, \"reward\")\n}",
"func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}",
"func (_XStaking *XStakingCaller) RewardsDistribution(opts *bind.CallOpts) (common.Address, error) {\n\tvar out []interface{}\n\terr := _XStaking.contract.Call(opts, &out, \"rewardsDistribution\")\n\n\tif err != nil {\n\t\treturn *new(common.Address), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address)\n\n\treturn out0, err\n\n}",
"func distributeLockedAmount(ctx coretypes.Sandbox, bets []*BetInfo, totalLockedAmount int64) bool {\n\tsumsByPlayers := make(map[coretypes.AgentID]int64)\n\ttotalWinningAmount := int64(0)\n\tfor _, bet := range bets {\n\t\tif _, ok := sumsByPlayers[bet.Player]; !ok {\n\t\t\tsumsByPlayers[bet.Player] = 0\n\t\t}\n\t\tsumsByPlayers[bet.Player] += bet.Sum\n\t\ttotalWinningAmount += bet.Sum\n\t}\n\n\t// NOTE 1: float64 was avoided for determinism reasons\n\t// NOTE 2: beware overflows\n\n\tfor player, sum := range sumsByPlayers {\n\t\tsumsByPlayers[player] = (totalLockedAmount * sum) / totalWinningAmount\n\t}\n\n\t// make deterministic sequence by sorting. Eliminate possible rounding effects\n\tseqPlayers := make([]coretypes.AgentID, 0, len(sumsByPlayers))\n\tresultSum := int64(0)\n\tfor player, sum := range sumsByPlayers {\n\t\tseqPlayers = append(seqPlayers, player)\n\t\tresultSum += sum\n\t}\n\tsort.Slice(seqPlayers, func(i, j int) bool {\n\t\treturn bytes.Compare(seqPlayers[i][:], seqPlayers[j][:]) < 0\n\t})\n\n\t// ensure we distribute not more than totalLockedAmount iotas\n\tif resultSum > totalLockedAmount {\n\t\tsumsByPlayers[seqPlayers[0]] -= resultSum - totalLockedAmount\n\t}\n\n\t// filter out those who proportionally got 0\n\tfinalWinners := seqPlayers[:0]\n\tfor _, player := range seqPlayers {\n\t\tif sumsByPlayers[player] <= 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfinalWinners = append(finalWinners, player)\n\t}\n\t// distribute iotas\n\tfor i := range finalWinners {\n\n\t\tavailable := ctx.Balance(balance.ColorIOTA)\n\t\tctx.Event(fmt.Sprintf(\"sending reward iotas %d to the winner %s. Available iotas: %d\",\n\t\t\tsumsByPlayers[finalWinners[i]], finalWinners[i].String(), available))\n\n\t\t//if !ctx.MoveTokens(finalWinners[i], balance.ColorIOTA, sumsByPlayers[finalWinners[i]]) {\n\t\t//\treturn false\n\t\t//}\n\t}\n\treturn true\n}",
"func (vi *votedInfo) CalculateReward(multiplier, divider *big.Int, period int) {\n\tif multiplier.Sign() == 0 || period == 0 {\n\t\treturn\n\t}\n\tif divider.Sign() == 0 || vi.totalBondedDelegation.Sign() == 0 {\n\t\treturn\n\t}\n\t// reward = multiplier * period * bondedDelegation / (divider * totalBondedDelegation)\n\tbase := new(big.Int).Mul(multiplier, big.NewInt(int64(period)))\n\treward := new(big.Int)\n\tfor i, addrKey := range vi.rank {\n\t\tif i == vi.maxRankForReward {\n\t\t\tbreak\n\t\t}\n\t\tprep := vi.preps[addrKey]\n\t\tif prep.Enable() == false {\n\t\t\tcontinue\n\t\t}\n\n\t\treward.Mul(base, prep.GetBondedDelegation())\n\t\treward.Div(reward, divider)\n\t\treward.Div(reward, vi.totalBondedDelegation)\n\n\t\tlog.Tracef(\"VOTED REWARD %d = %d * %d * %d / (%d * %d)\",\n\t\t\treward, multiplier, period, prep.GetBondedDelegation(), divider, vi.totalBondedDelegation)\n\n\t\tprep.SetIScore(new(big.Int).Add(prep.IScore(), reward))\n\t}\n}",
"func EstimateReward(reward, pr, gamma float64) float64 {\n\tret := reward / (pr + gamma)\n\tlog.Logf(MABLogLevel, \"MAB Estimate Reward: %v / (%v + %v) = %v\\n\",\n\t\treward, pr, gamma, ret)\n\treturn ret\n}",
"func rewardAndSlash(ctx contract.Context, cachedDelegations *CachedDposStorage, state *State) ([]*DelegationResult, error) {\n\tformerValidatorTotals := make(map[string]loom.BigUInt)\n\tdelegatorRewards := make(map[string]*loom.BigUInt)\n\tdistributedRewards := common.BigZero()\n\n\tdelegations, err := cachedDelegations.loadDelegationList(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, validator := range state.Validators {\n\t\tcandidate := GetCandidateByPubKey(ctx, validator.PubKey)\n\n\t\tif candidate == nil {\n\t\t\tctx.Logger().Info(\"Attempted to reward validator no longer on candidates list.\", \"validator\", validator)\n\t\t\tcontinue\n\t\t}\n\n\t\tcandidateAddress := loom.UnmarshalAddressPB(candidate.Address)\n\t\tvalidatorKey := candidateAddress.String()\n\t\tstatistic, _ := GetStatistic(ctx, candidateAddress)\n\n\t\tif statistic == nil {\n\t\t\tdelegatorRewards[validatorKey] = common.BigZero()\n\t\t\tformerValidatorTotals[validatorKey] = *common.BigZero()\n\t\t} else {\n\t\t\t// If a validator is jailed, don't calculate and distribute rewards\n\t\t\tif ctx.FeatureEnabled(features.DPOSVersion3_3, false) {\n\t\t\t\tif statistic.Jailed {\n\t\t\t\t\tdelegatorRewards[validatorKey] = common.BigZero()\n\t\t\t\t\tformerValidatorTotals[validatorKey] = *common.BigZero()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If a validator's SlashPercentage is 0, the validator is\n\t\t\t// rewarded for avoiding faults during the last slashing period\n\t\t\tif common.IsZero(statistic.SlashPercentage.Value) {\n\t\t\t\tdistributionTotal := calculateRewards(statistic.DelegationTotal.Value, state.Params, state.TotalValidatorDelegations.Value)\n\n\t\t\t\t// The validator share, equal to validator_fee * total_validotor_reward\n\t\t\t\t// is to be split between the referrers and the validator\n\t\t\t\tvalidatorShare := CalculateFraction(loom.BigUInt{big.NewInt(int64(candidate.Fee))}, distributionTotal)\n\n\t\t\t\t// delegatorsShare is what fraction of the total 
rewards will be\n\t\t\t\t// distributed to delegators\n\t\t\t\tdelegatorsShare := common.BigZero()\n\t\t\t\tdelegatorsShare.Sub(&distributionTotal, &validatorShare)\n\t\t\t\tdelegatorRewards[validatorKey] = delegatorsShare\n\n\t\t\t\t// Distribute rewards to referrers\n\t\t\t\tfor _, d := range delegations {\n\t\t\t\t\tif loom.UnmarshalAddressPB(d.Validator).Compare(loom.UnmarshalAddressPB(candidate.Address)) == 0 {\n\t\t\t\t\t\tdelegation, err := GetDelegation(ctx, d.Index, *d.Validator, *d.Delegator)\n\t\t\t\t\t\t// if the delegation is not found OR if the delegation\n\t\t\t\t\t\t// has no referrer, we do not need to attempt to\n\t\t\t\t\t\t// distribute the referrer rewards\n\t\t\t\t\t\tif err == contract.ErrNotFound || len(delegation.Referrer) == 0 {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// if referrer is not found, do not distribute the reward\n\t\t\t\t\t\treferrerAddress := getReferrer(ctx, delegation.Referrer)\n\t\t\t\t\t\tif referrerAddress == nil {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// calculate referrerReward\n\t\t\t\t\t\treferrerReward := calculateRewards(delegation.Amount.Value, state.Params, state.TotalValidatorDelegations.Value)\n\t\t\t\t\t\treferrerReward = CalculateFraction(loom.BigUInt{big.NewInt(int64(candidate.Fee))}, referrerReward)\n\t\t\t\t\t\treferrerReward = CalculateFraction(defaultReferrerFee, referrerReward)\n\n\t\t\t\t\t\t// referrer fees are delegater to limbo validator\n\t\t\t\t\t\tdistributedRewards.Add(distributedRewards, &referrerReward)\n\t\t\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, LimboValidatorAddress(ctx).MarshalPB(), referrerAddress, referrerReward)\n\n\t\t\t\t\t\t// any referrer bonus amount is subtracted from the validatorShare\n\t\t\t\t\t\tvalidatorShare.Sub(&validatorShare, &referrerReward)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tdistributedRewards.Add(distributedRewards, 
&validatorShare)\n\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, candidate.Address, candidate.Address, validatorShare)\n\n\t\t\t\t// If a validator has some non-zero WhitelistAmount,\n\t\t\t\t// calculate the validator's reward based on whitelist amount\n\t\t\t\tif !common.IsZero(statistic.WhitelistAmount.Value) {\n\t\t\t\t\tamount := calculateWeightedWhitelistAmount(*statistic)\n\t\t\t\t\twhitelistDistribution := calculateShare(amount, statistic.DelegationTotal.Value, *delegatorsShare)\n\t\t\t\t\t// increase a delegator's distribution\n\t\t\t\t\tdistributedRewards.Add(distributedRewards, &whitelistDistribution)\n\t\t\t\t\tcachedDelegations.IncreaseRewardDelegation(ctx, candidate.Address, candidate.Address, whitelistDistribution)\n\t\t\t\t}\n\n\t\t\t\t// Keeping track of cumulative distributed rewards by adding\n\t\t\t\t// every validator's total rewards to\n\t\t\t\t// `state.TotalRewardDistribution`\n\t\t\t\t// NOTE: because we round down in every `calculateRewards` call,\n\t\t\t\t// we expect `state.TotalRewardDistribution` to be a slight\n\t\t\t\t// overestimate of what was actually distributed. 
We could be\n\t\t\t\t// exact with our record keeping by incrementing\n\t\t\t\t// `state.TotalRewardDistribution` each time\n\t\t\t\t// `IncreaseRewardDelegation` is called, but because we will not\n\t\t\t\t// use `state.TotalRewardDistributions` as part of any invariants,\n\t\t\t\t// we will live with this situation.\n\t\t\t\tif !ctx.FeatureEnabled(features.DPOSVersion3_1, false) {\n\t\t\t\t\tstate.TotalRewardDistribution.Value.Add(&state.TotalRewardDistribution.Value, &distributionTotal)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := slashValidatorDelegations(ctx, cachedDelegations, statistic, candidateAddress); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif err := SetStatistic(ctx, statistic); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tformerValidatorTotals[validatorKey] = statistic.DelegationTotal.Value\n\t\t}\n\t}\n\n\tnewDelegationTotals, err := distributeDelegatorRewards(ctx, cachedDelegations, formerValidatorTotals, delegatorRewards, distributedRewards)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif ctx.FeatureEnabled(features.DPOSVersion3_1, false) {\n\t\tstate.TotalRewardDistribution.Value.Add(&state.TotalRewardDistribution.Value, distributedRewards)\n\t}\n\n\tdelegationResults := make([]*DelegationResult, 0, len(newDelegationTotals))\n\tfor validator := range newDelegationTotals {\n\t\tdelegationResults = append(delegationResults, &DelegationResult{\n\t\t\tValidatorAddress: loom.MustParseAddress(validator),\n\t\t\tDelegationTotal: *newDelegationTotals[validator],\n\t\t})\n\t}\n\tsort.Sort(byDelegationTotal(delegationResults))\n\n\treturn delegationResults, nil\n}",
"func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}",
"func (k Keeper) ClaimEarnReward(ctx sdk.Context, owner, receiver sdk.AccAddress, denom string, multiplierName string) error {\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, denom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, \"denom '%s' has no multiplier '%s'\", denom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tsyncedClaim, found := k.GetSynchronizedEarnClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tamt := syncedClaim.Reward.AmountOf(denom)\n\n\tclaimingCoins := sdk.NewCoins(sdk.NewCoin(denom, amt))\n\trewardCoins := sdk.NewCoins(sdk.NewCoin(denom, sdk.NewDecFromInt(amt).Mul(multiplier.Factor).RoundInt()))\n\tif rewardCoins.IsZero() {\n\t\treturn types.ErrZeroClaim\n\t}\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr := k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, rewardCoins, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove claimed coins (NOT reward coins)\n\tsyncedClaim.Reward = syncedClaim.Reward.Sub(claimingCoins...)\n\tk.SetEarnClaim(ctx, syncedClaim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claimingCoins.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, syncedClaim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
scaleCoins scales the given coins, rounding down.
|
// scaleCoins multiplies every coin amount in coins by scale, truncating
// the scaled amount down to an integer (round toward zero).
func scaleCoins(coins sdk.Coins, scale sdk.Dec) sdk.Coins {
	result := sdk.NewCoins()
	for i := range coins {
		// TruncateInt rounds down, so the scaled total never exceeds scale * coins.
		scaledAmt := coins[i].Amount.ToDec().Mul(scale).TruncateInt()
		result = result.Add(sdk.NewCoin(coins[i].Denom, scaledAmt))
	}
	return result
}
|
[
"func Scale(cr *Cairo, sx float64, sy float64) {\n\tccr, _ := (*C.cairo_t)(unsafe.Pointer(cr)), cgoAllocsUnknown\n\tcsx, _ := (C.double)(sx), cgoAllocsUnknown\n\tcsy, _ := (C.double)(sy), cgoAllocsUnknown\n\tC.cairo_scale(ccr, csx, csy)\n}",
"func scaleToRange(in, inMax, outMin, outMax float64) float64 {\n\treturn outMin + (outMax-outMin)*(in/inMax)\n}",
"func (blk *Block) Scale(sx, sy float64) {\n\tops := contentstream.NewContentCreator().\n\t\tScale(sx, sy).\n\t\tOperations()\n\n\t*blk.contents = append(*ops, *blk.contents...)\n\tblk.contents.WrapIfNeeded()\n\n\tblk.width *= sx\n\tblk.height *= sy\n}",
"func (c Coord) Scale(s float64) Coord {\n\tc.X *= s\n\tc.Y *= s\n\treturn c\n}",
"func (k Keeper) MintCoins(ctx sdk.Context, newCoins sdk.Coins) error {\n\tif newCoins.Empty() {\n\t\t// skip as no coins need to be minted\n\t\treturn nil\n\t}\n\treturn k.supplyKeeper.MintCoins(ctx, types.ModuleName, newCoins)\n}",
"func (c *LinehaulCostComputation) Scale(factor float64) {\n\tc.BaseLinehaul = c.BaseLinehaul.MultiplyFloat64(factor)\n\tc.OriginLinehaulFactor = c.OriginLinehaulFactor.MultiplyFloat64(factor)\n\tc.DestinationLinehaulFactor = c.DestinationLinehaulFactor.MultiplyFloat64(factor)\n\tc.ShorthaulCharge = c.ShorthaulCharge.MultiplyFloat64(factor)\n\tc.LinehaulChargeTotal = c.LinehaulChargeTotal.MultiplyFloat64(factor)\n}",
"func scale(n float64) string { return fmt.Sprintf(`scale(%g)`, n) }",
"func (a Polynomial) Scale(s byte) Polynomial {\n\tif s == 0 {\n\t\treturn Polynomial{a.field, nil}\n\t}\n\tif s == 1 {\n\t\treturn a\n\t}\n\tcoefficients := make([]byte, len(a.coefficients))\n\tfor i, coeff_i := range a.coefficients {\n\t\tcoefficients[i] = a.field.Mul(coeff_i, s)\n\t}\n\treturn NewPolynomial(a.field, coefficients...)\n}",
"func (v *Context) Scale(sx, sy float64) {\n\tC.cairo_scale(v.native(), C.double(sx), C.double(sy))\n}",
"func scale(dst *block, src *[4]block) {\n\tfor i := 0; i < 4; i++ {\n\t\tdstOff := (i&2)<<4 | (i&1)<<2\n\t\tfor y := 0; y < 4; y++ {\n\t\t\tfor x := 0; x < 4; x++ {\n\t\t\t\tj := 16*y + 2*x\n\t\t\t\tsum := src[i][j] + src[i][j+1] + src[i][j+8] + src[i][j+9]\n\t\t\t\tdst[8*y+x+dstOff] = (sum + 2) >> 2\n\t\t\t}\n\t\t}\n\t}\n}",
"func Scale(from1 float64, from2 float64, to1 float64, to2 float64) func(x float64) float64 {\n\n\tlength1 := from2 - from1\n\tlength2 := to2 - to1\n\n\treturn func(x float64) float64 {\n\t\tratio := (x - from1) / length1\n\n\t\treturn to1 + ratio*length2\n\t}\n}",
"func (t Tree) RangeScale(lo, hi int, multiplier int32) {\n\tfor i := lo; i < hi && 0 <= i && i < len(t); i++ {\n\t\tt.Mul(i, multiplier)\n\t}\n}",
"func (canvas *Canvas) Scale(x, y float32) {\n\twriteCommand(canvas.contents, \"cm\", x, 0, 0, y, 0, 0)\n}",
"func ScaleRecipe(amounts []float64, portions int) []float64 {\n\tscale := float64(portions) / 2.0\n\tscaledAmounts := make([]float64, len(amounts))\n\tfor ingredient, amount := range amounts {\n\t\tscaledAmounts[ingredient] = amount * scale\n\t}\n\treturn scaledAmounts\n}",
"func Scale(zoom float64) float64 {\n\treturn 256 * math.Pow(2, zoom)\n}",
"func (v Vector) Scale(c float64) Vector {\n\tfor i, x := range v {\n\t\tv[i] = x * c\n\t}\n\treturn v\n}",
"func (s SamplesC64) Scale(r float32) {\n\tsimd.ScaleComplex(r, s)\n}",
"func scale(bytes int64) (scaled int64, scale string) {\n\tif bytes < 0 {\n\t\tscaled, scale = uscale(uint64(bytes * -1))\n\t\tscaled *= -1\n\t} else {\n\t\tscaled, scale = uscale(uint64(bytes))\n\t}\n\treturn\n}",
"func (t Tree) Scale(value int32) {\n\tfor i := range t {\n\t\tt[i] *= value\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewClawbackRewardAction returns an exported.RewardAction for a ClawbackVestingAccount.
|
// NewClawbackRewardAction returns an exported.RewardAction for a
// ClawbackVestingAccount, backed by the given account, bank, and
// staking keepers.
func NewClawbackRewardAction(ak AccountKeeper, bk BankKeeper, sk StakingKeeper) exported.RewardAction {
	action := clawbackRewardAction{ak: ak, bk: bk, sk: sk}
	return action
}
|
[
"func NewClawbackAction(requestor, dest sdk.AccAddress, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) exported.ClawbackAction {\n\treturn clawbackAction{\n\t\trequestor: requestor,\n\t\tdest: dest,\n\t\tak: ak,\n\t\tbk: bk,\n\t\tsk: sk,\n\t}\n}",
"func NewClawbackGrantAction(\n\tfunderAddress string,\n\tsk StakingKeeper,\n\tgrantStartTime int64,\n\tgrantLockupPeriods, grantVestingPeriods []Period,\n\tgrantCoins sdk.Coins,\n) exported.AddGrantAction {\n\treturn clawbackGrantAction{\n\t\tfunderAddress: funderAddress,\n\t\tsk: sk,\n\t\tgrantStartTime: grantStartTime,\n\t\tgrantLockupPeriods: grantLockupPeriods,\n\t\tgrantVestingPeriods: grantVestingPeriods,\n\t\tgrantCoins: grantCoins,\n\t}\n}",
"func NewClaimRewards(owner uos.AccountName) *uos.Action {\n\ta := &uos.Action{\n\t\tAccount: AN(\"uosio\"),\n\t\tName: ActN(\"claimrewards\"),\n\t\tAuthorization: []uos.PermissionLevel{\n\t\t\t{Actor: owner, Permission: uos.PermissionName(\"active\")},\n\t\t},\n\t\tActionData: uos.NewActionData(ClaimRewards{\n\t\t\tOwner: owner,\n\t\t}),\n\t}\n\treturn a\n}",
"func NewBcBotAction(j *bot.Jobs) *BcBotAction {\n\t// client := resty.New()\n\t// client.\n\t// \tSetRetryCount(3).\n\t// \tSetRetryWaitTime(10 * time.Second)\n\treturn &BcBotAction{jobs: j, client: nil, mutex: new(sync.RWMutex)}\n}",
"func (cra clawbackRewardAction) ProcessReward(ctx sdk.Context, reward sdk.Coins, rawAccount exported.VestingAccount) error {\n\tcva, ok := rawAccount.(*ClawbackVestingAccount)\n\tif !ok {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrNotSupported, \"expected *ClawbackVestingAccount, got %T\", rawAccount)\n\t}\n\tcva.postReward(ctx, reward, cra.ak, cra.bk, cra.sk)\n\treturn nil\n}",
"func NewRollbackAction(kit kit.Kit, viper *viper.Viper,\n\tauthSvrCli pbauthserver.AuthClient, dataMgrCli pbdatamanager.DataManagerClient,\n\tgseControllerCli pbgsecontroller.GSEControllerClient,\n\treq *pb.RollbackReleaseReq, resp *pb.RollbackReleaseResp) *RollbackAction {\n\n\taction := &RollbackAction{\n\t\tkit: kit,\n\t\tviper: viper,\n\t\tauthSvrCli: authSvrCli,\n\t\tdataMgrCli: dataMgrCli,\n\t\tgseControllerCli: gseControllerCli,\n\t\treq: req,\n\t\tresp: resp,\n\t}\n\n\taction.resp.Result = true\n\taction.resp.Code = pbcommon.ErrCode_E_OK\n\taction.resp.Message = \"OK\"\n\n\treturn action\n}",
"func NewCollateralizeAction(c *Collateralize, tx *types.Transaction, index int) *Action {\n\thash := tx.Hash()\n\tfromaddr := tx.From()\n\tcfg := c.GetAPI().GetConfig()\n\ttokenDb, err := account.NewAccountDB(cfg, tokenE.GetName(), pty.CCNYTokenName, c.GetStateDB())\n\tif err != nil {\n\t\tclog.Error(\"NewCollateralizeAction\", \"Get Account DB error\", \"error\", err)\n\t\treturn nil\n\t}\n\n\treturn &Action{\n\t\tcoinsAccount: c.GetCoinsAccount(), tokenAccount: tokenDb, db: c.GetStateDB(), localDB: c.GetLocalDB(),\n\t\ttxhash: hash, fromaddr: fromaddr, blocktime: c.GetBlockTime(), height: c.GetHeight(),\n\t\texecaddr: dapp.ExecAddress(string(tx.Execer)), difficulty: c.GetDifficulty(), index: index, Collateralize: c}\n}",
"func (va *ClawbackVestingAccount) PostReward(ctx sdk.Context, reward sdk.Coins, action exported.RewardAction) error {\n\treturn action.ProcessReward(ctx, reward, va)\n}",
"func (_Minter *MinterSession) CreateReward(_fracNum *big.Int, _fracDenom *big.Int) (*types.Transaction, error) {\n\treturn _Minter.Contract.CreateReward(&_Minter.TransactOpts, _fracNum, _fracDenom)\n}",
"func (_Minter *MinterTransactorSession) CreateReward(_fracNum *big.Int, _fracDenom *big.Int) (*types.Transaction, error) {\n\treturn _Minter.Contract.CreateReward(&_Minter.TransactOpts, _fracNum, _fracDenom)\n}",
"func NewCheckmate(winner Colour) Outcome { return Outcome{Winner: winner, Reason: checkmate} }",
"func NewRecoverableAction(supervisor *Supervisor) *RecoverableAction {\n\tra := &RecoverableAction{\n\t\tactionChan: make(chan Action),\n\t\treplyChan: make(chan string, 5),\n\t\tsupervisor: supervisor,\n\t}\n\n\tra.heartbeat = NewHeartbeat(ra, 1e8)\n\n\tgo ra.backend()\n\n\treturn ra\n}",
"func newAction() (ab *ActionBuilder) {\n\tab = new(ActionBuilder)\n\tab.action = slackscot.ActionDefinition{Hidden: false}\n\n\tab.action.Match = defaultMatcher\n\tab.action.Answer = defaultAnswerer\n\n\treturn ab\n}",
"func GetReward(a Action, feedback Action) float64 {\n\tif a == feedback {\n\t\treturn 1\n\t}\n\treturn -1\n}",
"func (_BondingManager *BondingManagerTransactor) Reward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondingManager.contract.Transact(opts, \"reward\")\n}",
"func NewChallengeAction(msg *Message) (*ChallengeAction, error) {\n\taction := &ChallengeAction{*msg}\n\n\treturn action, nil\n}",
"func (c *Client) CreateCustomReward(params *ChannelCustomRewardsParams) (*ChannelCustomRewardResponse, error) {\n\tresp, err := c.postAsJSON(\"/channel_points/custom_rewards\", &ManyChannelCustomRewards{}, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treward := &ChannelCustomRewardResponse{}\n\tresp.HydrateResponseCommon(&reward.ResponseCommon)\n\treward.Data.ChannelCustomRewards = resp.Data.(*ManyChannelCustomRewards).ChannelCustomRewards\n\n\treturn reward, nil\n}",
"func NewAction(name string, arg interface{}) {\n\tDefaultActionRegistry.Post(name, arg)\n}",
"func New() Action {\n\treturn &action{}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ProcessReward implements the exported.RewardAction interface.
|
// ProcessReward implements the exported.RewardAction interface.
// It forwards the reward to the account's postReward handler; any
// account type other than *ClawbackVestingAccount is rejected.
func (cra clawbackRewardAction) ProcessReward(ctx sdk.Context, reward sdk.Coins, rawAccount exported.VestingAccount) error {
	switch acct := rawAccount.(type) {
	case *ClawbackVestingAccount:
		acct.postReward(ctx, reward, cra.ak, cra.bk, cra.sk)
		return nil
	default:
		return sdkerrors.Wrapf(sdkerrors.ErrNotSupported, "expected *ClawbackVestingAccount, got %T", rawAccount)
	}
}
|
[
"func (_BondingManager *BondingManagerTransactor) Reward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondingManager.contract.Transact(opts, \"reward\")\n}",
"func (vi *votedInfo) CalculateReward(multiplier, divider *big.Int, period int) {\n\tif multiplier.Sign() == 0 || period == 0 {\n\t\treturn\n\t}\n\tif divider.Sign() == 0 || vi.totalBondedDelegation.Sign() == 0 {\n\t\treturn\n\t}\n\t// reward = multiplier * period * bondedDelegation / (divider * totalBondedDelegation)\n\tbase := new(big.Int).Mul(multiplier, big.NewInt(int64(period)))\n\treward := new(big.Int)\n\tfor i, addrKey := range vi.rank {\n\t\tif i == vi.maxRankForReward {\n\t\t\tbreak\n\t\t}\n\t\tprep := vi.preps[addrKey]\n\t\tif prep.Enable() == false {\n\t\t\tcontinue\n\t\t}\n\n\t\treward.Mul(base, prep.GetBondedDelegation())\n\t\treward.Div(reward, divider)\n\t\treward.Div(reward, vi.totalBondedDelegation)\n\n\t\tlog.Tracef(\"VOTED REWARD %d = %d * %d * %d / (%d * %d)\",\n\t\t\treward, multiplier, period, prep.GetBondedDelegation(), divider, vi.totalBondedDelegation)\n\n\t\tprep.SetIScore(new(big.Int).Add(prep.IScore(), reward))\n\t}\n}",
"func (c RewardsController) CollectReward(id string) revel.Result {\n\tif !c.GetCurrentUser() {\n\t\treturn c.ForbiddenResponse()\n\t}\n\n\tif !bson.IsObjectIdHex(id) {\n\t\treturn c.ErrorResponse(nil, c.Message(\"error.invalid\", \"\"), core.ModelStatus[core.StatusInvalidID])\n\t}\n\n\tvar selector = []bson.M{\n\t\tbson.M{\"user_id\": c.CurrentUser.GetID().Hex()},\n\t\tbson.M{\"_id\": id},\n\t\tbson.M{\"multi\": false},\n\t}\n\tvar query = bson.M{\"$set\": []bson.M{\n\t\tbson.M{\"status.name\": core.StatusObtained},\n\t\tbson.M{\"status.code\": core.ValidationStatus[core.StatusObtained]},\n\t}}\n\n\t// Get pending Rewards for the user\n\tif Reward, ok := app.Mapper.GetModel(&models.Reward{}); ok {\n\t\tif err := Reward.UpdateQuery(selector, query, false); err != nil {\n\t\t\trevel.ERROR.Print(\"ERROR Find\")\n\t\t\treturn c.ErrorResponse(err, err.Error(), 400)\n\t\t}\n\t\treturn c.SuccessResponse(bson.M{\"data\": \"Reward collected successfully\"}, \"success\", core.ModelsType[core.ModelSimpleResponse], nil)\n\t}\n\n\treturn c.ServerErrorResponse()\n}",
"func (_BondingManager *BondingManagerTransactorSession) Reward() (*types.Transaction, error) {\n\treturn _BondingManager.Contract.Reward(&_BondingManager.TransactOpts)\n}",
"func GetReward(a Action, feedback Action) float64 {\n\tif a == feedback {\n\t\treturn 1\n\t}\n\treturn -1\n}",
"func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}",
"func (va *ClawbackVestingAccount) PostReward(ctx sdk.Context, reward sdk.Coins, action exported.RewardAction) error {\n\treturn action.ProcessReward(ctx, reward, va)\n}",
"func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, 
\"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}",
"func EstimateReward(reward, pr, gamma float64) float64 {\n\tret := reward / (pr + gamma)\n\tlog.Logf(MABLogLevel, \"MAB Estimate Reward: %v / (%v + %v) = %v\\n\",\n\t\treward, pr, gamma, ret)\n\treturn ret\n}",
"func (_Token *TokenCallerSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}",
"func (_Token *TokenCaller) BaseReward(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseReward\", index)\n\treturn *ret0, *ret1, *ret2, err\n}",
"func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}",
"func ViewReward(rw http.ResponseWriter, r *http.Request) {\n\t// get the token\n\treqToken := r.Header.Get(\"Authorization\")\n\t\n\t// get the claims\n\tclaims, isNotValid := GetClaims(reqToken, rw)\n\tif isNotValid {\n\t\treturn\n\t}\n\n\tdt, err := db.GetUserRewards(claims.Roll)\n\tif err != nil {\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write(Rsp(err.Error(), \"Server Error\"))\n\t\treturn\n\t}\n\trw.WriteHeader(http.StatusOK)\n\tres := c.RespData{\n\t\tMessage: \"All data\",\n\t\tData: dt,\n\t}\n\tjson.NewEncoder(rw).Encode(res)\n}",
"func (t *serviceChaincode) rewardService(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tvar service_name string\n\tvar reward_type string\n\tvar err error\n\n\tservice_name = args[0]\n\treward_type = args[1]\n\n\t// Amount\n\treward_amount := big.NewInt(0)\n\t_, good := reward_amount.SetString(args[2], 10)\n\tif !good {\n\t\treturn shim.Error(\"Expecting integer value for amount\")\n\t}\n\n\t// STEP 0: get service's developer\n\tservice_key := ServicePrefix + service_name\n\tserviceAsBytes, err := stub.GetState(service_key)\n\tif err != nil {\n\t\treturn shim.Error(\"Fail to get the service's info.\")\n\t}\n\n\tvar serviceJSON service\n\terr = json.Unmarshal([]byte(serviceAsBytes), &serviceJSON)\n\tif err != nil {\n\t\treturn shim.Error(\"Error unmarshal service bytes.\")\n\t}\n\n\tdev := serviceJSON.Developer\n\n\t// STEP 1: get the address of the dev\n\tuser_key := UserPrefix + dev\n\tuserAsBytes, err := stub.GetState(user_key)\n\tif err != nil {\n\t\treturn shim.Error(\"Fail to get the developer's info.\")\n\t}\n\tvar userJSON user\n\terr = json.Unmarshal([]byte(userAsBytes), &userJSON)\n\tif err != nil {\n\t\treturn shim.Error(\"Error unmarshal user bytes.\")\n\t}\n\n\t// STEP 3: reward the developer\n\ttoAdd := userJSON.Address\n\terr = stub.Transfer(toAdd, reward_type, reward_amount)\n\tif err != nil {\n\t\treturn shim.Error(\"Fail realize the reawrd.\")\n\t}\n\n\treturn shim.Success([]byte(\"Reward the service success.\"))\n}",
"func (accManager AccountManager) ClaimReward(\n\tctx sdk.Context, username types.AccountKey) sdk.Error {\n\treward, err := accManager.storage.GetReward(ctx, username)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := accManager.AddSavingCoin(\n\t\tctx, username, reward.UnclaimReward, \"\", \"\", types.ClaimReward); err != nil {\n\t\treturn err\n\t}\n\treward.UnclaimReward = types.NewCoinFromInt64(0)\n\tif err := accManager.storage.SetReward(ctx, username, reward); err != nil {\n\t\treturn err\n\t}\n\n\t// clear reward history\n\tif err := accManager.ClearRewardHistory(ctx, username); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *DelegationDelegatorReward) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateReward(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValidatorAddress(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (d *Dao) UseReward(id int64, usePlat string) (rst bool, err error) {\n\tif err := d.orm.\n\t\tModel(&model.AnchorReward{}).\n\t\tWhere(\"id=?\", id).\n\t\tUpdate(map[string]interface{}{\"status\": model.RewardUsed, \"use_plat\": usePlat, \"use_time\": xtime.Time(time.Now().Unix())}).Error; err != nil {\n\t\tlog.Error(\"useReward (%v) error(%v)\", id, err)\n\t\treturn rst, err\n\t}\n\trst = true\n\treturn\n}",
"func accumulateRewards(config *params.ChainConfig, state *state.DB, header *types.Header) {\n\t// TODO: implement mining rewards\n}",
"func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
PostReward implements the exported.ClawbackVestingAccountI interface.
|
// PostReward implements the exported.ClawbackVestingAccountI interface.
// It delegates processing of a deposited reward to the supplied RewardAction,
// passing a pointer to the account so the action may mutate its vesting
// schedule, and returns any error the action reports.
func (va *ClawbackVestingAccount) PostReward(ctx sdk.Context, reward sdk.Coins, action exported.RewardAction) error {
	return action.ProcessReward(ctx, reward, va)
}
|
[
"func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {\n\t// Find the scheduled amount of vested and unvested staking tokens\n\tbondDenom := sk.BondDenom(ctx)\n\tvested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)\n\tunvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)\n\n\tif unvested.IsZero() {\n\t\t// no need to adjust the vesting schedule\n\t\treturn\n\t}\n\n\tif vested.IsZero() {\n\t\t// all staked tokens must be unvested\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\n\t// Find current split of account balance on staking axis\n\tbonded := sk.GetDelegatorBonded(ctx, va.GetAddress())\n\tunbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())\n\tdelegated := bonded.Add(unbonding)\n\n\t// discover what has been slashed and remove from delegated amount\n\toldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))\n\tslashed := oldDelegated.Sub(intMin(oldDelegated, delegated))\n\tdelegated = delegated.Sub(intMin(delegated, slashed))\n\n\t// Prefer delegated tokens to be unvested\n\tunvested = intMin(unvested, delegated)\n\tvested = delegated.Sub(unvested)\n\n\t// Compute the unvested amount of reward and add to vesting schedule\n\tif unvested.IsZero() {\n\t\treturn\n\t}\n\tif vested.IsZero() {\n\t\tva.distributeReward(ctx, ak, bondDenom, reward)\n\t\treturn\n\t}\n\tunvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down\n\tunvestedReward := scaleCoins(reward, unvestedRatio)\n\tva.distributeReward(ctx, ak, bondDenom, unvestedReward)\n}",
"func (cra clawbackRewardAction) ProcessReward(ctx sdk.Context, reward sdk.Coins, rawAccount exported.VestingAccount) error {\n\tcva, ok := rawAccount.(*ClawbackVestingAccount)\n\tif !ok {\n\t\treturn sdkerrors.Wrapf(sdkerrors.ErrNotSupported, \"expected *ClawbackVestingAccount, got %T\", rawAccount)\n\t}\n\tcva.postReward(ctx, reward, cra.ak, cra.bk, cra.sk)\n\treturn nil\n}",
"func (_BondingManager *BondingManagerTransactor) Reward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondingManager.contract.Transact(opts, \"reward\")\n}",
"func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}",
"func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}",
"func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, 
\"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}",
"func (vi *votedInfo) CalculateReward(multiplier, divider *big.Int, period int) {\n\tif multiplier.Sign() == 0 || period == 0 {\n\t\treturn\n\t}\n\tif divider.Sign() == 0 || vi.totalBondedDelegation.Sign() == 0 {\n\t\treturn\n\t}\n\t// reward = multiplier * period * bondedDelegation / (divider * totalBondedDelegation)\n\tbase := new(big.Int).Mul(multiplier, big.NewInt(int64(period)))\n\treward := new(big.Int)\n\tfor i, addrKey := range vi.rank {\n\t\tif i == vi.maxRankForReward {\n\t\t\tbreak\n\t\t}\n\t\tprep := vi.preps[addrKey]\n\t\tif prep.Enable() == false {\n\t\t\tcontinue\n\t\t}\n\n\t\treward.Mul(base, prep.GetBondedDelegation())\n\t\treward.Div(reward, divider)\n\t\treward.Div(reward, vi.totalBondedDelegation)\n\n\t\tlog.Tracef(\"VOTED REWARD %d = %d * %d * %d / (%d * %d)\",\n\t\t\treward, multiplier, period, prep.GetBondedDelegation(), divider, vi.totalBondedDelegation)\n\n\t\tprep.SetIScore(new(big.Int).Add(prep.IScore(), reward))\n\t}\n}",
"func (_Token *TokenCaller) BaseReward(opts *bind.CallOpts, index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\tvar (\n\t\tret0 = new(*big.Int)\n\t\tret1 = new(*big.Int)\n\t\tret2 = new(*big.Int)\n\t)\n\tout := &[]interface{}{\n\t\tret0,\n\t\tret1,\n\t\tret2,\n\t}\n\terr := _Token.contract.Call(opts, out, \"baseReward\", index)\n\treturn *ret0, *ret1, *ret2, err\n}",
"func (_Token *TokenCallerSession) BaseReward(index *big.Int) (*big.Int, *big.Int, *big.Int, error) {\n\treturn _Token.Contract.BaseReward(&_Token.CallOpts, index)\n}",
"func (_Minter *MinterSession) CreateReward(_fracNum *big.Int, _fracDenom *big.Int) (*types.Transaction, error) {\n\treturn _Minter.Contract.CreateReward(&_Minter.TransactOpts, _fracNum, _fracDenom)\n}",
"func (_Minter *MinterTransactorSession) CreateReward(_fracNum *big.Int, _fracDenom *big.Int) (*types.Transaction, error) {\n\treturn _Minter.Contract.CreateReward(&_Minter.TransactOpts, _fracNum, _fracDenom)\n}",
"func (d *Dao) AddReward(c context.Context, iRewardID int64, uid int64, iSource int64, iRoomid int64, iLifespan int64) (err error) {\n\t//aReward, _ := getRewardConfByLid(iRewardID)\n\n\tm, _ := time.ParseDuration(fmt.Sprintf(\"+%dh\", iLifespan))\n\n\targ := &AnchorTaskModel.AnchorReward{\n\t\tUid: uid,\n\t\tRewardId: iRewardID,\n\t\tRoomid: iRoomid,\n\t\tSource: iSource,\n\t\tAchieveTime: xtime.Time(time.Now().Unix()),\n\t\tExpireTime: xtime.Time(time.Now().Add(m).Unix()),\n\t\tStatus: model.RewardUnUsed,\n\t}\n\n\t//spew.Dump\n\t// (arg)\n\tif err := d.orm.Create(arg).Error; err != nil {\n\t\tlog.Error(\"addReward(%v) error(%v)\", arg, err)\n\t\treturn err\n\t}\n\n\tif err := d.SetNewReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"addRewardMc(%v) error(%v)\", uid, err)\n\t}\n\n\tif err := d.SetHasReward(c, uid, int64(1)); err != nil {\n\t\tlog.Error(\"SetHasReward(%v) error(%v)\", uid, err)\n\t}\n\n\tlog.Info(\"addReward (%v) succ\", arg)\n\n\treturn\n}",
"func (as AccountStorage) SetReward(ctx sdk.Context, accKey types.AccountKey, reward *Reward) sdk.Error {\n\tstore := ctx.KVStore(as.key)\n\trewardByte, err := as.cdc.MarshalJSON(*reward)\n\tif err != nil {\n\t\treturn ErrFailedToMarshalReward(err)\n\t}\n\tstore.Set(getRewardKey(accKey), rewardByte)\n\treturn nil\n}",
"func (_BondingManager *BondingManagerTransactorSession) Reward() (*types.Transaction, error) {\n\treturn _BondingManager.Contract.Reward(&_BondingManager.TransactOpts)\n}",
"func (c *AccountController) Post(ctx *app.PostAccountContext) error {\n\t// AccountController_Post: start_implement\n\n\t// Put your logic here\n\n\t// AccountController_Post: end_implement\n\treturn nil\n}",
"func (accManager AccountManager) AddIncomeAndReward(\n\tctx sdk.Context, username types.AccountKey,\n\toriginalDonation, friction, actualReward types.Coin,\n\tconsumer, postAuthor types.AccountKey, postID string) sdk.Error {\n\treward, err := accManager.storage.GetReward(ctx, username)\n\tif err != nil {\n\t\treturn err\n\t}\n\treward.TotalIncome = reward.TotalIncome.Plus(actualReward)\n\treward.OriginalIncome = reward.OriginalIncome.Plus(friction)\n\treward.FrictionIncome = reward.FrictionIncome.Plus(friction)\n\treward.InflationIncome = reward.InflationIncome.Plus(actualReward)\n\treward.UnclaimReward = reward.UnclaimReward.Plus(actualReward)\n\tif err := accManager.storage.SetReward(ctx, username, reward); err != nil {\n\t\treturn err\n\t}\n\n\t// add reward detail\n\tbank, err := accManager.storage.GetBankFromAccountKey(ctx, username)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trewardDetail := model.RewardDetail{\n\t\tOriginalDonation: originalDonation,\n\t\tFrictionDonation: friction,\n\t\tActualReward: actualReward,\n\t\tConsumer: consumer,\n\t\tPostAuthor: postAuthor,\n\t\tPostID: postID,\n\t}\n\tif err := accManager.AddRewardHistory(ctx, username, bank.NumOfReward,\n\t\trewardDetail); err != nil {\n\t\treturn err\n\t}\n\n\tbank.NumOfReward++\n\tif err := accManager.storage.SetBankFromAccountKey(ctx, username, bank); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (_Staking *StakingTransactor) DelegatorClaimReward(opts *bind.TransactOpts, _nodeAddr common.Address) (*types.Transaction, error) {\n\treturn _Staking.contract.Transact(opts, \"delegatorClaimReward\", _nodeAddr)\n}",
"func (_m *IWalletController) PostWallet(w http.ResponseWriter, r *http.Request) {\n\t_m.Called(w, r)\n}",
"func (c *Client) CreateCustomReward(params *ChannelCustomRewardsParams) (*ChannelCustomRewardResponse, error) {\n\tresp, err := c.postAsJSON(\"/channel_points/custom_rewards\", &ManyChannelCustomRewards{}, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treward := &ChannelCustomRewardResponse{}\n\tresp.HydrateResponseCommon(&reward.ResponseCommon)\n\treward.Data.ChannelCustomRewards = resp.Data.(*ManyChannelCustomRewards).ChannelCustomRewards\n\n\treturn reward, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
postReward encumbers a previously-deposited reward according to the current vesting apportionment of staking. Note that rewards might be unvested, but are unlocked.
|
// postReward encumbers a previously-deposited reward according to the current
// vesting apportionment of staking: the share of the reward attributable to
// unvested staked tokens is folded back into the vesting schedule via
// distributeReward, while the vested share is left untouched (liquid).
// Note that rewards might be unvested, but are unlocked.
func (va ClawbackVestingAccount) postReward(ctx sdk.Context, reward sdk.Coins, ak AccountKeeper, bk BankKeeper, sk StakingKeeper) {
	// Find the scheduled amount of vested and unvested staking tokens
	// according to the vesting schedule as of the current block time.
	bondDenom := sk.BondDenom(ctx)
	vested := ReadSchedule(va.StartTime, va.EndTime, va.VestingPeriods, va.OriginalVesting, ctx.BlockTime().Unix()).AmountOf(bondDenom)
	unvested := va.OriginalVesting.AmountOf(bondDenom).Sub(vested)
	if unvested.IsZero() {
		// no need to adjust the vesting schedule
		return
	}
	if vested.IsZero() {
		// all staked tokens must be unvested
		va.distributeReward(ctx, ak, bondDenom, reward)
		return
	}
	// Find current split of account balance on staking axis:
	// delegated = currently bonded + tokens in the unbonding queue.
	bonded := sk.GetDelegatorBonded(ctx, va.GetAddress())
	unbonding := sk.GetDelegatorUnbonding(ctx, va.GetAddress())
	delegated := bonded.Add(unbonding)
	// discover what has been slashed and remove from delegated amount;
	// the account's recorded delegations (DelegatedVesting + DelegatedFree)
	// exceed the live amount exactly by whatever was slashed.
	oldDelegated := va.DelegatedVesting.AmountOf(bondDenom).Add(va.DelegatedFree.AmountOf(bondDenom))
	slashed := oldDelegated.Sub(intMin(oldDelegated, delegated))
	delegated = delegated.Sub(intMin(delegated, slashed))
	// Prefer delegated tokens to be unvested: attribute as much of the live
	// delegation as possible to the unvested bucket, remainder to vested.
	unvested = intMin(unvested, delegated)
	vested = delegated.Sub(unvested)
	// Compute the unvested amount of reward and add to vesting schedule.
	// (Re-check both buckets, since they were recomputed against the live
	// delegation above.)
	if unvested.IsZero() {
		return
	}
	if vested.IsZero() {
		va.distributeReward(ctx, ak, bondDenom, reward)
		return
	}
	// NOTE(review): the ratio divides by bonded, not delegated, even though
	// unvested was capped against delegated (bonded + unbonding) — presumably
	// intentional because rewards accrue only on bonded tokens, but confirm.
	// Also, if bonded is zero here (everything unbonding) this divides by
	// zero — TODO confirm an upstream invariant prevents that.
	unvestedRatio := unvested.ToDec().QuoTruncate(bonded.ToDec()) // round down
	unvestedReward := scaleCoins(reward, unvestedRatio)
	va.distributeReward(ctx, ak, bondDenom, unvestedReward)
}
|
[
"func (va *ClawbackVestingAccount) PostReward(ctx sdk.Context, reward sdk.Coins, action exported.RewardAction) error {\n\treturn action.ProcessReward(ctx, reward, va)\n}",
"func (va ClawbackVestingAccount) distributeReward(ctx sdk.Context, ak AccountKeeper, bondDenom string, reward sdk.Coins) {\n\tnow := ctx.BlockTime().Unix()\n\tt := va.StartTime\n\tfirstUnvestedPeriod := 0\n\tunvestedTokens := sdk.ZeroInt()\n\tfor i, period := range va.VestingPeriods {\n\t\tt += period.Length\n\t\tif t <= now {\n\t\t\tfirstUnvestedPeriod = i + 1\n\t\t\tcontinue\n\t\t}\n\t\tunvestedTokens = unvestedTokens.Add(period.Amount.AmountOf(bondDenom))\n\t}\n\n\trunningTotReward := sdk.NewCoins()\n\trunningTotStaking := sdk.ZeroInt()\n\tfor i := firstUnvestedPeriod; i < len(va.VestingPeriods); i++ {\n\t\tperiod := va.VestingPeriods[i]\n\t\trunningTotStaking = runningTotStaking.Add(period.Amount.AmountOf(bondDenom))\n\t\trunningTotRatio := runningTotStaking.ToDec().Quo(unvestedTokens.ToDec())\n\t\ttargetCoins := scaleCoins(reward, runningTotRatio)\n\t\tthisReward := targetCoins.Sub(runningTotReward)\n\t\trunningTotReward = targetCoins\n\t\tperiod.Amount = period.Amount.Add(thisReward...)\n\t\tva.VestingPeriods[i] = period\n\t}\n\n\tva.OriginalVesting = va.OriginalVesting.Add(reward...)\n\tak.SetAccount(ctx, &va)\n}",
"func (a Actor) AwardBlockReward(rt vmr.Runtime, params *AwardBlockRewardParams) *adt.EmptyValue {\n\trt.ValidateImmediateCallerIs(builtin.SystemActorAddr)\n\tAssertMsg(rt.CurrentBalance().GreaterThanEqual(params.GasReward),\n\t\t\"actor current balance %v insufficient to pay gas reward %v\", rt.CurrentBalance(), params.GasReward)\n\n\tAssertMsg(params.TicketCount > 0, \"cannot give block reward for zero tickets\")\n\n\tminer, ok := rt.ResolveAddress(params.Miner)\n\tif !ok {\n\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to resolve given owner address\")\n\t}\n\n\tpriorBalance := rt.CurrentBalance()\n\n\tvar penalty abi.TokenAmount\n\tvar st State\n\trt.State().Transaction(&st, func() interface{} {\n\t\tblockReward := a.computeBlockReward(&st, big.Sub(priorBalance, params.GasReward), params.TicketCount)\n\t\ttotalReward := big.Add(blockReward, params.GasReward)\n\n\t\t// Cap the penalty at the total reward value.\n\t\tpenalty = big.Min(params.Penalty, totalReward)\n\n\t\t// Reduce the payable reward by the penalty.\n\t\trewardPayable := big.Sub(totalReward, penalty)\n\n\t\tAssertMsg(big.Add(rewardPayable, penalty).LessThanEqual(priorBalance),\n\t\t\t\"reward payable %v + penalty %v exceeds balance %v\", rewardPayable, penalty, priorBalance)\n\n\t\t// Record new reward into reward map.\n\t\tif rewardPayable.GreaterThan(abi.NewTokenAmount(0)) {\n\t\t\tnewReward := Reward{\n\t\t\t\tStartEpoch: rt.CurrEpoch(),\n\t\t\t\tEndEpoch: rt.CurrEpoch() + rewardVestingPeriod,\n\t\t\t\tValue: rewardPayable,\n\t\t\t\tAmountWithdrawn: abi.NewTokenAmount(0),\n\t\t\t\tVestingFunction: rewardVestingFunction,\n\t\t\t}\n\t\t\tif err := st.addReward(adt.AsStore(rt), miner, &newReward); err != nil {\n\t\t\t\trt.Abortf(exitcode.ErrIllegalState, \"failed to add reward to rewards map: %w\", err)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Burn the penalty amount.\n\t_, code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, penalty)\n\tbuiltin.RequireSuccess(rt, code, 
\"failed to send penalty to BurntFundsActor\")\n\n\treturn nil\n}",
"func (s *Spammer) PostTransaction(tx *devnetvm.Transaction, clt evilwallet.Client) {\n\tif tx == nil {\n\t\ts.log.Debug(ErrTransactionIsNil)\n\t\ts.ErrCounter.CountError(ErrTransactionIsNil)\n\t}\n\tallSolid := s.handleSolidityForReuseOutputs(clt, tx)\n\tif !allSolid {\n\t\ts.log.Debug(ErrInputsNotSolid)\n\t\ts.ErrCounter.CountError(errors.WithMessagef(ErrInputsNotSolid, \"txID: %s\", tx.ID().Base58()))\n\t\treturn\n\t}\n\n\tif err := evilwallet.RateSetterSleep(clt, s.UseRateSetter); err != nil {\n\t\treturn\n\t}\n\ttxID, blockID, err := clt.PostTransaction(tx)\n\tif err != nil {\n\t\ts.log.Debug(ErrFailPostTransaction)\n\t\ts.ErrCounter.CountError(errors.WithMessage(ErrFailPostTransaction, err.Error()))\n\t\treturn\n\t}\n\tif s.EvilScenario.OutputWallet.Type() == evilwallet.Reuse {\n\t\ts.EvilWallet.SetTxOutputsSolid(tx.Essence().Outputs(), clt.URL())\n\t}\n\n\tcount := s.State.txSent.Add(1)\n\ts.log.Debugf(\"%s: Last transaction sent, ID: %s, txCount: %d\", blockID.Base58(), txID.Base58(), count)\n}",
"func (c *Client) RenterPostAllowance(allowance modules.Allowance) (err error) {\n\tvalues := url.Values{}\n\tvalues.Set(\"funds\", allowance.Funds.String())\n\tvalues.Set(\"hosts\", strconv.FormatUint(allowance.Hosts, 10))\n\tvalues.Set(\"period\", strconv.FormatUint(uint64(allowance.Period), 10))\n\tvalues.Set(\"renewwindow\", strconv.FormatUint(uint64(allowance.RenewWindow), 10))\n\terr = c.post(\"/renter\", values.Encode(), nil)\n\treturn\n}",
"func (vmctx *VMContext) PostRequest(par coretypes.PostRequestParams) bool {\n\tvmctx.log.Debugw(\"-- PostRequest\",\n\t\t\"target\", par.TargetContractID.String(),\n\t\t\"ep\", par.EntryPoint.String(),\n\t\t\"transfer\", cbalances.Str(par.Transfer),\n\t)\n\tmyAgentID := vmctx.MyAgentID()\n\tif !vmctx.debitFromAccount(myAgentID, cbalances.NewFromMap(map[balance.Color]int64{\n\t\tbalance.ColorIOTA: 1,\n\t})) {\n\t\tvmctx.log.Debugf(\"-- PostRequest: not enough funds for request token\")\n\t\treturn false\n\t}\n\tif !vmctx.debitFromAccount(myAgentID, par.Transfer) {\n\t\tvmctx.log.Debugf(\"-- PostRequest: not enough funds\")\n\t\treturn false\n\t}\n\treqParams := requestargs.New(nil)\n\treqParams.AddEncodeSimpleMany(par.Params)\n\treqSection := sctransaction.NewRequestSection(vmctx.CurrentContractHname(), par.TargetContractID, par.EntryPoint).\n\t\tWithTimelock(par.TimeLock).\n\t\tWithTransfer(par.Transfer).\n\t\tWithArgs(reqParams)\n\treturn vmctx.txBuilder.AddRequestSection(reqSection) == nil\n}",
"func (m *Mining) setMiningRewardAmount(v uint64) {\n\tvalue, err := json.Marshal(&v)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif err = m.State.Set(keyOfRewardAmount(), value); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (a *Recipient) Payback(uOrder Order, assetName string, issuerPubkey string, amount string) error {\n\t// once we have the stablecoin here, we can remove the assetName\n\tStableBalance, err := xlm.GetAssetBalance(a.U.PublicKey, \"STABLEUSD\")\n\t// checks for the stablecoin asset\n\tif err != nil {\n\t\tlog.Println(\"YOU HAVE NO STABLECOIN BALANCE, PLEASE REFILL ACCOUNT\")\n\t\treturn fmt.Errorf(\"YOU HAVE NO STABLECOIN BALANCE, PLEASE REFILL ACCOUNT\")\n\t}\n\n\tDEBAssetBalance, err := xlm.GetAssetBalance(a.U.PublicKey, assetName)\n\tif err != nil {\n\t\tlog.Println(\"Don't have the debt asset in possession\")\n\t\tlog.Fatal(err)\n\t}\n\n\tif utils.StoF(amount) > utils.StoF(StableBalance) {\n\t\t// check whether the recipient has enough StableUSD tokens in order to make\n\t\t// this happen\n\t\tlog.Println(\"YOU CAN'T SEND AN AMOUNT MORE THAN WHAT YOU HAVE\")\n\t\treturn fmt.Errorf(\"YOU CAN'T SEND AN AMOUNT MORE THAN WHAT YOU HAVE\")\n\t}\n\t// check balance in DEBAssetCode anmd\n\tmonthlyBill, err := oracle.MonthlyBill()\n\tif err != nil {\n\t\tlog.Println(\"Unable to fetch oracle price, exiting\")\n\t\treturn err\n\t}\n\n\tlog.Println(\"Retrieved average price from oracle: \", monthlyBill)\n\t// the oracke needs to know the assetName so that it can find the other details\n\t// about this asset from the db. 
This should run on the server side and must\n\t// be split when we do run client side stuff.\n\t// hardcode for now, need to add the oracle here so that we\n\t// can do this dynamically\n\t// send amount worth DEBTokens back to issuer\n\tconfHeight, txHash, err := a.SendAssetToIssuer(assetName, issuerPubkey, amount)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\tlog.Println(\"Paid debt amount: \", amount, \" back to issuer, tx hash: \", txHash, \" \", confHeight)\n\tlog.Println(\"Checking balance to see if our account was debited\")\n\tnewBalance, err := xlm.GetAssetBalance(a.U.PublicKey, assetName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tnewBalanceFloat := utils.StoF(newBalance)\n\tDEBAssetBalanceFloat := utils.StoF(DEBAssetBalance)\n\tmBillFloat := utils.StoF(monthlyBill)\n\n\tpaidAmount := DEBAssetBalanceFloat - newBalanceFloat\n\tlog.Println(\"Old Balance: \", DEBAssetBalanceFloat, \"New Balance: \", newBalanceFloat, \"Paid: \", paidAmount, \"Bill Amount: \", mBillFloat)\n\n\t// would be nice to take some additional action like sending a notification or\n\t// something to investors or to the email address given so that everyone is made\n\t// aware of this and there's data transparency\n\n\tif paidAmount < mBillFloat {\n\t\tlog.Println(\"Amount paid is less than amount required, balance not updating, please make sure to cover this next time\")\n\t} else if paidAmount > mBillFloat {\n\t\tlog.Println(\"You've chosen to pay more than what is required for this month. Adjusting payback period accordingly\")\n\t} else {\n\t\tlog.Println(\"You've paid exactly what is required for this month. 
Payback period remains as usual\")\n\t}\n\t// we need to update the database here\n\t// no need to retrieve this order again because we have it already\n\tuOrder.BalLeft -= paidAmount\n\tuOrder.DateLastPaid = utils.Timestamp()\n\tif uOrder.BalLeft == 0 {\n\t\tlog.Println(\"YOU HAVE PAID OFF THIS ASSET, TRANSFERRING OWNERSHIP OF ASSET TO YOU\")\n\t\t// don't delete the asset from the received assets list, we still need it so\n\t\t// that we c an look back and find out hwo many assets this particular\n\t\t// enttiy has been invested in, have a leaderboard kind of thing, etc.\n\t\tuOrder.PaidOff = true\n\t\t// we should call neighbourly or some ohter partner here to transfer assets\n\t\t// using the bond they provide us with\n\t\t// the nice part here is that the recipient can not pay off more than what is\n\t\t// invested because the trustline will not allow such an incident to happen\n\t}\n\t// balLeft must be updated on the server side and can be challenged easily\n\t// if there's some discrepancy since the tx's are on the blockchain\n\terr = InsertOrder(uOrder)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = a.UpdateOrderSlice(uOrder)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(\"UPDATED ORDER: \", uOrder)\n\treturn err\n}",
"func (app *stakingApplication) disburseFeesVQ(\n\tctx *abciAPI.Context,\n\tstakeState *stakingState.MutableState,\n\tproposerEntity *signature.PublicKey,\n\tnumEligibleValidators int,\n\tvotingEntities []signature.PublicKey,\n) error {\n\tlastBlockFees, err := stakeState.LastBlockFees(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"staking: failed to query last block fees: %w\", err)\n\t}\n\n\tctx.Logger().Debug(\"disbursing signer and next proposer fees\",\n\t\t\"total_amount\", lastBlockFees,\n\t\t\"num_eligible_validators\", numEligibleValidators,\n\t\t\"num_voting_entities\", len(votingEntities),\n\t)\n\tif lastBlockFees.IsZero() {\n\t\t// Nothing to disburse.\n\t\treturn nil\n\t}\n\n\tconsensusParameters, err := stakeState.ConsensusParameters(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ConsensusParameters: %w\", err)\n\t}\n\n\t// Compute the portion associated with each eligible validator's share of the fees, and within that, how much goes\n\t// to the voter and how much goes to the next proposer.\n\tperValidator := lastBlockFees.Clone()\n\tvar nEVQ quantity.Quantity\n\tif err = nEVQ.FromInt64(int64(numEligibleValidators)); err != nil {\n\t\treturn fmt.Errorf(\"import numEligibleValidators %d: %w\", numEligibleValidators, err)\n\t}\n\tif err = perValidator.Quo(&nEVQ); err != nil {\n\t\treturn fmt.Errorf(\"divide perValidator: %w\", err)\n\t}\n\tdenom := consensusParameters.FeeSplitWeightVote.Clone()\n\tif err = denom.Add(&consensusParameters.FeeSplitWeightNextPropose); err != nil {\n\t\treturn fmt.Errorf(\"add FeeSplitWeightNextPropose: %w\", err)\n\t}\n\tshareNextProposer := perValidator.Clone()\n\tif err = shareNextProposer.Mul(&consensusParameters.FeeSplitWeightNextPropose); err != nil {\n\t\treturn fmt.Errorf(\"multiply shareNextProposer: %w\", err)\n\t}\n\tif err = shareNextProposer.Quo(denom); err != nil {\n\t\treturn fmt.Errorf(\"divide shareNextProposer: %w\", err)\n\t}\n\tshareVote := perValidator.Clone()\n\tif err = 
shareVote.Sub(shareNextProposer); err != nil {\n\t\treturn fmt.Errorf(\"subtract shareVote: %w\", err)\n\t}\n\n\t// Multiply to get the next proposer's total payment.\n\tnumVotingEntities := len(votingEntities)\n\tvar nVEQ quantity.Quantity\n\tif err = nVEQ.FromInt64(int64(numVotingEntities)); err != nil {\n\t\treturn fmt.Errorf(\"import numVotingEntities %d: %w\", numVotingEntities, err)\n\t}\n\tnextProposerTotal := shareNextProposer.Clone()\n\tif err = nextProposerTotal.Mul(&nVEQ); err != nil {\n\t\treturn fmt.Errorf(\"multiply nextProposerTotal: %w\", err)\n\t}\n\n\t// Pay the next proposer.\n\tif !nextProposerTotal.IsZero() && proposerEntity != nil {\n\t\tproposerAddr := staking.NewAddress(*proposerEntity)\n\t\tproposerAcct, err := stakeState.Account(ctx, proposerAddr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to fetch next proposer account: %w\", err)\n\t\t}\n\t\tif err = quantity.Move(&proposerAcct.General.Balance, lastBlockFees, nextProposerTotal); err != nil {\n\t\t\treturn fmt.Errorf(\"move nextProposerTotal: %w\", err)\n\t\t}\n\t\tif err = stakeState.SetAccount(ctx, proposerAddr, proposerAcct); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to set next proposer account: %w\", err)\n\t\t}\n\n\t\t// Emit transfer event.\n\t\tctx.EmitEvent(abciAPI.NewEventBuilder(app.Name()).TypedAttribute(&staking.TransferEvent{\n\t\t\tFrom: staking.FeeAccumulatorAddress,\n\t\t\tTo: proposerAddr,\n\t\t\tAmount: *nextProposerTotal,\n\t\t}))\n\t}\n\n\t// Pay the voters.\n\tif !shareVote.IsZero() {\n\t\tfor _, voterEntity := range votingEntities {\n\t\t\tvoterAddr := staking.NewAddress(voterEntity)\n\t\t\tvoterAcct, err := stakeState.Account(ctx, voterAddr)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to fetch voter account %s: %w\", voterAddr, err)\n\t\t\t}\n\t\t\tif err = quantity.Move(&voterAcct.General.Balance, lastBlockFees, shareVote); err != nil {\n\t\t\t\treturn fmt.Errorf(\"move shareVote: %w\", err)\n\t\t\t}\n\t\t\tif err = 
stakeState.SetAccount(ctx, voterAddr, voterAcct); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to set voter account %s: %w\", voterAddr, err)\n\t\t\t}\n\n\t\t\t// Emit transfer event.\n\t\t\tctx.EmitEvent(abciAPI.NewEventBuilder(app.Name()).TypedAttribute(&staking.TransferEvent{\n\t\t\t\tFrom: staking.FeeAccumulatorAddress,\n\t\t\t\tTo: voterAddr,\n\t\t\t\tAmount: *shareVote,\n\t\t\t}))\n\t\t}\n\t}\n\n\t// Put the rest into the common pool.\n\tif !lastBlockFees.IsZero() {\n\t\tremaining := lastBlockFees.Clone()\n\t\tcommonPool, err := stakeState.CommonPool(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to query common pool: %w\", err)\n\t\t}\n\t\tif err = quantity.Move(commonPool, lastBlockFees, remaining); err != nil {\n\t\t\treturn fmt.Errorf(\"move remaining: %w\", err)\n\t\t}\n\t\tif err = stakeState.SetCommonPool(ctx, commonPool); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to set common pool: %w\", err)\n\t\t}\n\n\t\t// Emit transfer event.\n\t\tctx.EmitEvent(abciAPI.NewEventBuilder(app.Name()).TypedAttribute(&staking.TransferEvent{\n\t\t\tFrom: staking.FeeAccumulatorAddress,\n\t\t\tTo: staking.CommonPoolAddress,\n\t\t\tAmount: *remaining,\n\t\t}))\n\t}\n\n\treturn nil\n}",
"func (_BondedECDSAKeep *BondedECDSAKeepTransactor) DistributeETHReward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondedECDSAKeep.contract.Transact(opts, \"distributeETHReward\")\n}",
"func (vi *votedInfo) CalculateReward(multiplier, divider *big.Int, period int) {\n\tif multiplier.Sign() == 0 || period == 0 {\n\t\treturn\n\t}\n\tif divider.Sign() == 0 || vi.totalBondedDelegation.Sign() == 0 {\n\t\treturn\n\t}\n\t// reward = multiplier * period * bondedDelegation / (divider * totalBondedDelegation)\n\tbase := new(big.Int).Mul(multiplier, big.NewInt(int64(period)))\n\treward := new(big.Int)\n\tfor i, addrKey := range vi.rank {\n\t\tif i == vi.maxRankForReward {\n\t\t\tbreak\n\t\t}\n\t\tprep := vi.preps[addrKey]\n\t\tif prep.Enable() == false {\n\t\t\tcontinue\n\t\t}\n\n\t\treward.Mul(base, prep.GetBondedDelegation())\n\t\treward.Div(reward, divider)\n\t\treward.Div(reward, vi.totalBondedDelegation)\n\n\t\tlog.Tracef(\"VOTED REWARD %d = %d * %d * %d / (%d * %d)\",\n\t\t\treward, multiplier, period, prep.GetBondedDelegation(), divider, vi.totalBondedDelegation)\n\n\t\tprep.SetIScore(new(big.Int).Add(prep.IScore(), reward))\n\t}\n}",
"func (_m *IWalletController) PostWallet(w http.ResponseWriter, r *http.Request) {\n\t_m.Called(w, r)\n}",
"func (_BondingManager *BondingManagerTransactor) Reward(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _BondingManager.contract.Transact(opts, \"reward\")\n}",
"func (c *Controller) PostBet() gin.HandlerFunc {\n\treturn func(ctx *gin.Context) {\n\t\tvar betRequest requestmodels.BetRequestDto\n\t\terr := ctx.ShouldBindJSON(&betRequest)\n\t\tif err != nil {\n\t\t\tctx.String(http.StatusBadRequest, \"update request is not valid.\")\n\t\t\treturn\n\t\t}\n\n\t\tif !c.betRequestValidator.BetRequestIsValid(betRequest) {\n\t\t\tctx.String(http.StatusBadRequest, \"update request is not valid.\")\n\t\t\treturn\n\t\t}\n\n\t\terr = c.betService.SendBet(betRequest)\n\t\tif err != nil {\n\t\t\tctx.String(http.StatusInternalServerError, \"request could not be processed.\")\n\t\t\treturn\n\t\t}\n\n\t\tctx.Status(http.StatusOK)\n\t}\n}",
"func ApplyRewardTx(tx *types.Transaction, statedb *state.Statedb) (*types.Receipt, error) {\n\tstatedb.CreateAccount(tx.Data.To)\n\tstatedb.AddBalance(tx.Data.To, tx.Data.Amount)\n\n\thash, err := statedb.Hash()\n\tif err != nil {\n\t\treturn nil, errors.NewStackedError(err, \"failed to get statedb root hash\")\n\t}\n\n\treceipt := &types.Receipt{\n\t\tTxHash: tx.Hash,\n\t\tPostState: hash,\n\t}\n\n\treturn receipt, nil\n}",
"func (k Keeper) ClaimSwapReward(ctx sdk.Context, owner, receiver sdk.AccAddress, denom string, multiplierName string) error {\n\tmultiplier, found := k.GetMultiplierByDenom(ctx, denom, multiplierName)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrInvalidMultiplier, \"denom '%s' has no multiplier '%s'\", denom, multiplierName)\n\t}\n\n\tclaimEnd := k.GetClaimEnd(ctx)\n\n\tif ctx.BlockTime().After(claimEnd) {\n\t\treturn errorsmod.Wrapf(types.ErrClaimExpired, \"block time %s > claim end time %s\", ctx.BlockTime(), claimEnd)\n\t}\n\n\tsyncedClaim, found := k.GetSynchronizedSwapClaim(ctx, owner)\n\tif !found {\n\t\treturn errorsmod.Wrapf(types.ErrClaimNotFound, \"address: %s\", owner)\n\t}\n\n\tamt := syncedClaim.Reward.AmountOf(denom)\n\n\tclaimingCoins := sdk.NewCoins(sdk.NewCoin(denom, amt))\n\trewardCoins := sdk.NewCoins(sdk.NewCoin(denom, sdk.NewDecFromInt(amt).Mul(multiplier.Factor).RoundInt()))\n\tif rewardCoins.IsZero() {\n\t\treturn types.ErrZeroClaim\n\t}\n\tlength := k.GetPeriodLength(ctx.BlockTime(), multiplier.MonthsLockup)\n\n\terr := k.SendTimeLockedCoinsToAccount(ctx, types.IncentiveMacc, receiver, rewardCoins, length)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// remove claimed coins (NOT reward coins)\n\tsyncedClaim.Reward = syncedClaim.Reward.Sub(claimingCoins...)\n\tk.SetSwapClaim(ctx, syncedClaim)\n\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventTypeClaim,\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimedBy, owner.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimAmount, claimingCoins.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyClaimType, syncedClaim.GetType()),\n\t\t),\n\t)\n\treturn nil\n}",
"func (requestManager *RequestManager) PostWithTransfer(requesterKeyPair *ed25519.KeyPair, color colored.Color, amount uint64,\n\tchain *solo.Chain, contractName string, functionName string, params ...interface{}) (dict.Dict, error) {\n\tresponse, err := post(true, color, amount, requesterKeyPair, chain, contractName, functionName, params...)\n\treturn response, err\n}",
"func (transaction *AccountCreateTransaction) SetDeclineStakingReward(decline bool) *AccountCreateTransaction {\n\ttransaction._RequireNotFrozen()\n\ttransaction.declineReward = decline\n\treturn transaction\n}",
"func (c *Coinbase) AddReward(output *Output) {\n\toutput.EncryptedMask = make([]byte, 1)\n\tc.Rewards = append(c.Rewards, output)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewDiamond builds a new diamond instance. The default diamond is populated with a random KSUID as its diamondID and has conflicts handling enabled.
|
// NewDiamond builds a new diamond instance for a repo and its stores,
// applying any functional options on top of the defaults.
//
// When no deconflicter has been injected via options, one is selected
// according to the conflicts-handling mode found in the descriptor.
func NewDiamond(repo string, stores context2.Stores, opts ...DiamondOption) *Diamond {
	d := defaultDiamond(repo, stores)
	for _, opt := range opts {
		opt(d)
	}

	if d.deconflicter == nil {
		// Pick the metadata path rendering function from model that matches
		// the conflicts handling mode selected on the descriptor.
		switch d.DiamondDescriptor.Mode {
		case model.EnableCheckpoints:
			d.deconflicter = model.GenerateCheckpointPath
		case model.ForbidConflicts:
			// Conflicts are rejected outright in this mode: reaching the
			// deconflicter indicates a programming error.
			d.deconflicter = func(a, b string) string {
				d.l.Error("dev error: deconflicter called in inadequate context", zap.String("arg", a), zap.String("arg", b))
				panic("dev error: must not call deconflicter")
			}
		default: // model.EnableConflicts and any unrecognized mode
			d.deconflicter = model.GenerateConflictPath
		}
	}

	if tag := d.DiamondDescriptor.Tag; tag != "" {
		// Propagate the descriptor tag onto both loggers.
		d.l = d.l.With(zap.String("tag", tag))
		d.Bundle.l = d.l
	}

	if d.MetricsEnabled() {
		d.m = d.EnsureMetrics("core", &M{}).(*M)
	}
	return d
}
|
[
"func CreateDiamond(repo string, stores context2.Stores, opts ...DiamondOption) (model.DiamondDescriptor, error) {\n\tvar err error\n\td := NewDiamond(repo, stores, opts...)\n\n\tdefer func(t0 time.Time) {\n\t\tif d.MetricsEnabled() {\n\t\t\td.m.Usage.UsedAll(t0, \"CreateDiamond\")(err)\n\t\t}\n\t}(time.Now())\n\n\tif d.DiamondDescriptor.DiamondID == \"\" {\n\t\treturn model.DiamondDescriptor{}, errors.New(\"a diamond must have a diamondID\")\n\t}\n\n\tif err = RepoExists(repo, stores); err != nil {\n\t\treturn model.DiamondDescriptor{}, err\n\t}\n\n\terr = d.uploadDescriptor()\n\tif err != nil {\n\t\treturn model.DiamondDescriptor{},\n\t\t\terrors.New(\"cannot update diamond descriptor\").Wrap(err)\n\t}\n\treturn d.DiamondDescriptor, nil\n}",
"func (in *Diamond) DeepCopy() *Diamond {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Diamond)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (drawex *DrawexImpl) DrawDiamond(diamon *component.Diamond) string {\n\tlog.Debug(\"TikzDrawexImpl request to draw diamond: \", stringutil.JSON(diamon))\n\tif diamon == nil {\n\t\tlog.Error(\"TikzDrawexImpl request to draw null diamon\")\n\t\treturn \"\"\n\t}\n\treturn drawex.DrawNode(DiamondShape, diamon.Common)\n}",
"func NewDawg() *Dawg {\n\treturn &Dawg{\n\t\tRoot: DawgNode{\n\t\t\tID: 0,\n\t\t\tEdges: Edges{},\n\t\t},\n\t\tCounter: 1,\n\t}\n}",
"func DiamondID(id string) DiamondDescriptorOption {\n\treturn func(d *DiamondDescriptor) {\n\t\tif id != \"\" {\n\t\t\td.DiamondID = id\n\t\t}\n\t}\n}",
"func NewGarden(diagram string, children []string) (*Garden, error) {\n\trows, err := rowsFromDiagram(diagram, len(children))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tg := Garden{}\n\n\tfor i, child := range sortedCopy(children) {\n\t\tif _, ok := g[child]; ok {\n\t\t\treturn nil, fmt.Errorf(\"Cannot have a child name more than once, got: %s\", child)\n\t\t}\n\n\t\tplants, err := plantsFromRows(rows, i)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tg[child] = plants\n\t}\n\n\treturn &g, nil\n}",
"func GetDiamond(repo, diamondID string, stores context2.Stores, opts ...DiamondOption) (model.DiamondDescriptor, error) {\n\tvar err error\n\n\tgetOpts := []DiamondOption{\n\t\tDiamondDescriptor(\n\t\t\tmodel.NewDiamondDescriptor(model.DiamondID(diamondID)),\n\t\t),\n\t}\n\tgetOpts = append(getOpts, opts...)\n\n\td := NewDiamond(repo, stores, getOpts...)\n\n\tdefer func(t0 time.Time) {\n\t\tif d.MetricsEnabled() {\n\t\t\td.m.Usage.UsedAll(t0, \"GetDiamond\")(err)\n\t\t}\n\t}(time.Now())\n\n\tif err = RepoExists(repo, stores); err != nil {\n\t\treturn model.DiamondDescriptor{}, err\n\t}\n\n\tif err = d.downloadDescriptor(); err != nil {\n\t\treturn model.DiamondDescriptor{}, err\n\t}\n\treturn d.DiamondDescriptor, nil\n}",
"func NewGarden(diagram string, children []string) (*Garden, error) {\n\t// Normalize the diagram.\n\tdiagramTrimmed := strings.Trim(diagram, \"\\n\")\n\tif diagramTrimmed == diagram {\n\t\treturn nil, errors.New(\"diagram didn't start with a newline\")\n\t}\n\trows := strings.Split(diagramTrimmed, \"\\n\")\n\tif len(rows) != 2 {\n\t\treturn nil, errors.New(\"invalid number of rows in diagram\")\n\t}\n\tif len(rows[0]) != len(rows[1]) {\n\t\treturn nil, errors.New(\"diagram row lengths do not match\")\n\t}\n\tif len(rows[0]) != 2*len(children) {\n\t\treturn nil, errors.New(\"diagram rows are not the correct length\")\n\t}\n\tfor i := 0; i < len(rows[0]); i++ {\n\t\tc1 := rows[0][i]\n\t\tc2 := rows[1][i]\n\t\tif c1 != 'G' && c1 != 'C' && c1 != 'R' && c1 != 'V' {\n\t\t\treturn nil, errors.New(\"invalid symbol in diagram\")\n\t\t}\n\t\tif c2 != 'G' && c2 != 'C' && c2 != 'R' && c2 != 'V' {\n\t\t\treturn nil, errors.New(\"invalid symbol in diagram\")\n\t\t}\n\t}\n\t// Make a copy of the input array and sort it.\n\tchildrenCopy := make([]string, len(children))\n\tcopy(childrenCopy, children)\n\tsort.Strings(childrenCopy)\n\t// Make sure all children are unique.\n\tfor i := 1; i < len(childrenCopy); i++ {\n\t\tif childrenCopy[i-1] == childrenCopy[i] {\n\t\t\treturn nil, errors.New(\"same child name seen more than once\")\n\t\t}\n\t}\n\t// Add each child to the Garden, and their plants.\n\tg := make(Garden)\n\tfor i, r := range rows[0] {\n\t\tchildName := childrenCopy[i/2]\n\t\tg[childName] = append(g[childName], lookupPlant(r))\n\t}\n\tfor i, r := range rows[1] {\n\t\tchildName := childrenCopy[i/2]\n\t\tg[childName] = append(g[childName], lookupPlant(r))\n\t}\n\treturn &g, nil\n}",
"func NewGarden(diagram string, children []string) (*Garden, error) {\n\tg := &Garden{}\n\n\tg.children = make([]string, len(children))\n\tcopy(g.children, children)\n\tsort.Strings(g.children)\n\n\tg.diagram = strings.Split(diagram, \"\\n\")\n\tif len(g.diagram) != 3 {\n\t\treturn nil, errors.New(\"bad diagram\")\n\t}\n\n\tif len(g.diagram[1])%2 != 0 || len(g.diagram[2])%2 != 0 {\n\t\treturn nil, errors.New(\"bad len diagram\")\n\t}\n\n\tif strings.ToLower(g.diagram[1]) == g.diagram[1] || strings.ToLower(g.diagram[2]) == g.diagram[2] {\n\t\treturn nil, errors.New(\"bad caps diagram\")\n\t}\n\n\tg.indexes = make(map[string]int, len(g.children))\n\tfor i, c := range g.children {\n\t\tif c == \"\" {\n\t\t\treturn nil, errors.New(\"no children\")\n\t\t}\n\t\tif _, ok := g.indexes[c]; ok {\n\t\t\treturn nil, errors.New(\"children exists\")\n\t\t}\n\t\tg.indexes[c] = i + 1\n\t}\n\n\treturn g, nil\n}",
"func NewDrand(s key.Store, g *key.Group, c *Config) (*Drand, error) {\n\td, err := initDrand(s, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdkgConf := &dkg.Config{\n\t\tSuite: key.G2.(dkg.Suite),\n\t\tGroup: g,\n\t\tTimeout: d.opts.dkgTimeout,\n\t}\n\td.dkg, err = dkg.NewHandler(d.priv, dkgConf, d.dkgNetwork())\n\td.group = g\n\treturn d, err\n}",
"func newDam() *dam {\n\treturn &dam{\n\t\tlock: sync.RWMutex{},\n\t\tbarrier: make(chan error),\n\t}\n}",
"func New() *Dag {\n\treturn &Dag{\n\t\tvertices: make(map[string]*Vertex),\n\t}\n}",
"func NewGarden(diagram string, children []string) (*Garden, error) {\n\trows := strings.Split(diagram, \"\\n\")\n\tif len(rows) != 3 {\n\t\treturn nil, fmt.Errorf(\"want a diagram with 2 rows of plants and each row on it's own line (got %s)\", diagram)\n\t}\n\n\tif len(rows[1]) == 0 || len(rows[2]) == 0 {\n\t\treturn nil, fmt.Errorf(\"want each row to be non-empty (got %s)\", diagram)\n\t}\n\n\tif len(rows[1]) != len(rows[2]) {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"want each diagram row to be equal length, got %d != %d\",\n\t\t\t\tlen(rows[1]), len(rows[2]))\n\n\t}\n\n\tif len(rows[1])%2 != 0 || len(rows[2])%2 != 0 {\n\t\treturn nil, fmt.Errorf(\"want each row to have an even number of plants (got %s)\", diagram)\n\t}\n\n\tfor _, r := range rows[1:] {\n\t\tfor _, c := range r {\n\t\t\tif _, ok := plants[c]; !ok {\n\t\t\t\treturn nil,\n\t\t\t\t\tfmt.Errorf(\"invalid diagram, want plant code C, G, R, V, got %s\",\n\t\t\t\t\t\tstring(c))\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(children) == 0 {\n\t\treturn nil, fmt.Errorf(\"want a non-empty list of children\")\n\t}\n\n\tstudentRowPosition := map[string]int{}\n\tfor _, c := range children {\n\t\tstudentRowPosition[c] = 0\n\t}\n\tif len(studentRowPosition) != len(children) {\n\t\treturn nil, fmt.Errorf(\"want non-duplicated student names (got %q)\", children)\n\t}\n\n\tif len(rows[1])+len(rows[2]) != 4*len(studentRowPosition) {\n\t\treturn nil,\n\t\t\tfmt.Errorf(\"invalid diagram, want %d plants, got %d\",\n\t\t\t\t4*len(studentRowPosition), len(rows[1])+len(rows[2]))\n\t}\n\n\tsortedNames := make([]string, len(children))\n\tcopy(sortedNames, children)\n\tsort.Strings(sortedNames)\n\tfor i, name := range sortedNames {\n\t\tstudentRowPosition[name] = 2*i + 1\n\t}\n\tplantRows := [][]rune{[]rune(rows[1]), []rune(rows[2])}\n\n\treturn &Garden{studentRowPosition: studentRowPosition, plantRows: plantRows}, nil\n}",
"func NewDiamClient(opt *DiamOpt) *DiamClient {\n\tcfg := &sm.Settings{\n\t\tOriginHost: datatype.DiameterIdentity(opt.originHost),\n\t\tOriginRealm: datatype.DiameterIdentity(opt.originRealm),\n\t\tVendorID: datatype.Unsigned32(opt.vendorID),\n\t\tProductName: datatype.UTF8String(opt.productName),\n\t\tOriginStateID: datatype.Unsigned32(time.Now().Unix()),\n\t\tFirmwareRevision: datatype.Unsigned32(opt.firmwareRevision),\n\t\tHostIPAddresses: []datatype.Address{\n\t\t\tdatatype.Address(net.ParseIP(opt.hostAddress)),\n\t\t},\n\t}\n\n\tmux := sm.New(cfg)\n\n\tcli := &sm.Client{\n\t\tDict: dict.Default,\n\t\tHandler: mux,\n\t\tMaxRetransmits: 0,\n\t\tRetransmitInterval: time.Second,\n\t\tEnableWatchdog: true,\n\t\tWatchdogInterval: time.Duration(opt.watchdogInterval) * time.Second,\n\t\tSupportedVendorID: []*diam.AVP{\n\t\t\tdiam.NewAVP(avp.SupportedVendorID, avp.Mbit, 0, datatype.Unsigned32(opt.vendorID)),\n\t\t},\n\t\tVendorSpecificApplicationID: []*diam.AVP{\n\t\t\tdiam.NewAVP(avp.VendorSpecificApplicationID, avp.Mbit, 0, &diam.GroupedAVP{\n\t\t\t\tAVP: []*diam.AVP{\n\t\t\t\t\tdiam.NewAVP(avp.AuthApplicationID, avp.Mbit, 0, datatype.Unsigned32(opt.AppID())),\n\t\t\t\t\tdiam.NewAVP(avp.VendorID, avp.Mbit, 0, datatype.Unsigned32(opt.vendorID)),\n\t\t\t\t},\n\t\t\t}),\n\t\t},\n\t}\n\n\tdone := make(chan struct{}, 1000)\n\tmux.HandleIdx(\n\t\tdiam.CommandIndex{AppID: diam.TGPP_S6A_APP_ID, Code: diam.AuthenticationInformation, Request: false},\n\t\thandleAuthenticationInformationAnswer(done))\n\tmux.HandleIdx(\n\t\tdiam.CommandIndex{AppID: diam.TGPP_S6A_APP_ID, Code: diam.UpdateLocation, Request: false},\n\t\thandleUpdateLocationAnswer(done))\n\tmux.HandleIdx(diam.ALL_CMD_INDEX, handleAll())\n\n\treturn &DiamClient{\n\t\tcli: cli,\n\t\topt: opt,\n\t\tcfg: cfg,\n\t\tdone: done,\n\t}\n}",
"func (d *DiamondMiner) doAutoBidForMyDiamond() {\n\t//fmt.Println(\"- doAutoBidForMyDiamond\")\n\n\tfirstFeeTxs := d.txpool.GetDiamondCreateTxs(1) // 取出第一枚钻石挖掘交易\n\tif firstFeeTxs == nil || len(firstFeeTxs) == 0 {\n\t\treturn // No diamonds\n\t}\n\tfirstFeeTx := firstFeeTxs[0]\n\t// Address to give up competition\n\tfor _, iaddr := range d.Config.AutoBidIgnoreAddresses {\n\t\tif bytes.Compare(firstFeeTx.GetAddress(), *iaddr) == 0 {\n\t\t\tif !d.Config.Continued {\n\t\t\t\t// In case of discontinuous mining, stop the mining of this machine\n\t\t\t\t//fmt.Println(\"diamond miner stop all, because fee addr:\", iaddr.ToReadable())\n\t\t\t\td.StopAll()\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\t// I came first\n\tif bytes.Compare(firstFeeTx.GetAddress(), d.Config.FeeAccount.Address) == 0 {\n\t\tif !d.Config.Continued {\n\t\t\t// In case of discontinuous mining, stop the mining of this machine\n\t\t\t//fmt.Println(\"diamond miner stop all, because fee addr:\", firstFeeTx.GetAddress().ToReadable())\n\t\t\td.StopAll()\n\t\t}\n\t\treturn\n\t}\n\tif d.currentSuccessMiningDiamondTx == nil {\n\t\treturn\n\t}\n\t// Compare diamond serial numbers\n\tcuract := transactions.CheckoutAction_4_DiamondCreateFromTx(d.currentSuccessMiningDiamondTx)\n\tfirstact := transactions.CheckoutAction_4_DiamondCreateFromTx(firstFeeTx.(interfacev2.Transaction))\n\tif curact == nil || firstact == nil {\n\t\treturn\n\t}\n\tif curact.Number != firstact.Number {\n\t\td.currentSuccessMiningDiamondTx = nil // Invalid mining\n\t\treturn\n\t}\n\n\t// Start bidding\n\ttopfee := firstFeeTx.GetFee()\n\tmyfee, e1 := topfee.Add(d.Config.AutoBidMarginFee)\n\tif e1 != nil {\n\t\tfmt.Println(\"doAutoBidForMyDiamond Error:\", e1)\n\t\treturn\n\t}\n\tif newmyfee, _, e2 := myfee.CompressForMainNumLen(4, true); e2 == nil && newmyfee != nil {\n\t\tmyfee = newmyfee // Up compression length\n\t}\n\t// Is it higher than the maximum price I set\n\tif d.Config.AutoBidMaxFee.LessThan(topfee) {\n\t\treturn\n\t}\n\tif 
d.Config.AutoBidMaxFee.LessThan(myfee) {\n\t\tmyfee = d.Config.AutoBidMaxFee // The highest price has been reached\n\t}\n\n\t// Update transaction fee\n\tnewtx := d.currentSuccessMiningDiamondTx\n\tnewtx.SetFee(myfee)\n\tnewtx.ClearHash() // Reset hash cache\n\t// Private key\n\tallPrivateKeyBytes := make(map[string][]byte, 1)\n\tallPrivateKeyBytes[string(d.Config.FeeAccount.Address)] = d.Config.FeeAccount.PrivateKey\n\t// do sign\n\tnewtx.FillNeedSigns(allPrivateKeyBytes, nil)\n\t// add to pool\n\terr4 := d.txpool.AddTx(newtx.(interfaces.Transaction))\n\tif err4 != nil {\n\t\tfmt.Println(\"doAutoBidForMyDiamond Add to Tx Pool, Error: \", err4.Error())\n\t\treturn\n\t}\n\n\t// success\n\tfmt.Printf(\"diamond auto bid name: <%s>, tx: <%s>, fee: %s => %s \\n\",\n\t\tstring(curact.Diamond), newtx.Hash().ToHex(),\n\t\ttopfee.ToFinString(), myfee.ToFinString(),\n\t)\n}",
"func DiamondClone(m DiamondDescriptor) DiamondDescriptorOption {\n\treturn func(d *DiamondDescriptor) {\n\t\t*d = m\n\t}\n}",
"func NewDiscriminator(disc *low.Discriminator) *Discriminator {\n\td := new(Discriminator)\n\td.low = disc\n\td.PropertyName = disc.PropertyName.Value\n\tmapping := make(map[string]string)\n\tfor k, v := range disc.Mapping.Value {\n\t\tmapping[k.Value] = v.Value\n\t}\n\td.Mapping = mapping\n\treturn d\n}",
"func NewDI() DIer {\n\td := new(DI)\n\td.store = make(map[string]interface{})\n\treturn d\n}",
"func NewD(opts ...Option) *D {\n\td := &D{\n\t\tconfig: DefaultConfig(),\n\t\tdata: newData(),\n\t\tstopCh: make(chan struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(d)\n\t}\n\n\treturn d\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
basenameKeyFilter applies a filter on results from some iterator (e.g. the KeysPrefix store function). This is useful to filter out items located deeper in the metadata tree, but for which the simple separator rule cannot be applied.
|
// basenameKeyFilter applies a filter on results from some key iterator
// (e.g. the KeysPrefix store function): only keys whose basename starts
// with the given filter string are kept.
//
// This is useful to weed out items located deeper in the metadata tree,
// for which the simple separator rule cannot be applied.
func basenameKeyFilter(filter string) func([]string, string, error) ([]string, string, error) {
	return func(keys []string, next string, err error) ([]string, string, error) {
		if err != nil {
			// Propagate iterator errors untouched.
			return keys, next, err
		}
		kept := make([]string, 0, len(keys))
		for _, k := range keys {
			if strings.HasPrefix(path.Base(k), filter) {
				kept = append(kept, k)
			}
		}
		return kept, next, nil
	}
}
|
[
"func KeyHasSuffix(v string) predicate.Blob {\n\treturn predicate.Blob(\n\t\tfunc(s *sql.Selector) {\n\t\t\ts.Where(sql.HasSuffix(s.C(FieldKey), v))\n\t\t},\n\t)\n}",
"func KeyHasPrefix(v string) predicate.Blob {\n\treturn predicate.Blob(\n\t\tfunc(s *sql.Selector) {\n\t\t\ts.Where(sql.HasPrefix(s.C(FieldKey), v))\n\t\t},\n\t)\n}",
"func SubHasPrefix(v string) predicate.Account {\n\treturn predicate.Account(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldSub), v))\n\t})\n}",
"func (p *Properties) FilterHasPrefix(prefix string) *Properties {\n\treturn p.Filter(func(v string) bool {\n\t\treturn strings.HasPrefix(v, prefix)\n\t})\n}",
"func IterateKey(prefix string) []string {\n\tvar results []string\n\n\tdb.View(func(tx *bolt.Tx) error {\n\t\tc := tx.Bucket([]byte(DefaultBucket)).Cursor()\n\n\t\tpre := []byte(prefix)\n\t\tfor k, _ := c.Seek(pre); k != nil && bytes.HasPrefix(k, pre); k, _ = c.Next() {\n\t\t\tresults = append(results, string(k))\n\t\t}\n\t\treturn nil\n\t})\n\treturn results\n}",
"func (v *Verb) Filter(prefix string) (result []*Verb) {\n\tfor _, child := range v.verbs {\n\t\tif strings.HasPrefix(child.Name, prefix) {\n\t\t\tresult = append(result, child)\n\t\t}\n\t}\n\treturn result\n}",
"func BunameHasPrefix(v string) predicate.Building {\n\treturn predicate.Building(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldBuname), v))\n\t})\n}",
"func PrefixFilter(prefix string) FilterFunc {\n\treturn func(m *MountInfo) (bool, bool) {\n\t\tskip := !strings.HasPrefix(m.MountPoint, prefix)\n\t\treturn skip, false\n\t}\n}",
"func LastnameHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldLastname), v))\n\t})\n}",
"func LastNameHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldLastName), v))\n\t})\n}",
"func BusinessNameHasPrefix(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.HasPrefix(s.C(FieldBusinessName), v))\n\t})\n}",
"func FilterSuffix(a []string, suffix string) []string {\n\treturn FilterFunc(a, ValueHasSuffix(suffix))\n}",
"func RowKeyPrefixFilter(rowKeyPrefix []byte) kv.FnKeyCmp {\n\treturn func(currentKey kv.Key) bool {\n\t\t// Next until key without prefix of this record.\n\t\traw, err := codec.StripEnd(rowKeyPrefix)\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\t\treturn !bytes.HasPrefix(currentKey, raw)\n\t}\n}",
"func ExtractKeySuffix(key string) (string, error) {\n\tsubs := strings.Split(key, \"/\")\n\tif len(subs) < 2 {\n\t\treturn \"\", errors.Errorf(\"invalid key: %s\", key)\n\t}\n\treturn subs[len(subs)-1], nil\n}",
"func BioHasPrefix(v string) predicate.User {\n\treturn predicate.User(sql.FieldHasPrefix(FieldBio, v))\n}",
"func ExtractKeySuffix(key string) (string, error) {\n\tsubs := strings.Split(key, \"/\")\n\tif len(subs) < 2 {\n\t\treturn \"\", cerror.ErrInvalidEtcdKey.GenWithStackByArgs(key)\n\t}\n\treturn subs[len(subs)-1], nil\n}",
"func FilterPrefix(stringSet sets.String, prefix string, ignoreCase bool) sets.String {\n\tif prefix == \"\" {\n\t\treturn stringSet\n\t}\n\treturn filterSet(stringSet, prefix, ignoreCase, strings.HasPrefix)\n}",
"func BaseHasSuffix(v string) predicate.MetaSchema {\n\treturn predicate.MetaSchema(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldBase), v))\n\t},\n\t)\n}",
"func KinNameHasSuffix(v string) predicate.Rent {\n\treturn predicate.Rent(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldKinName), v))\n\t})\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
write a plain text message
|
// writeString renders body as a plain-text HTTP response with the given
// status code, delegating the actual write to writeBody.
func writeString(w http.ResponseWriter, body string, status int) {
	const contentType = "text/plain"
	writeBody(w, []byte(body), status, contentType)
}
|
[
"func writePlainText(statusCode int, text string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(statusCode)\n\tfmt.Fprintln(w, text)\n}",
"func (s *Socket) WriteText(body []byte, timeout time.Duration) error {\n\treturn s.write(body, gorilla.TextMessage, timeout)\n}",
"func (s *Socket) WriteText(body []byte, timeout time.Duration) error {\n\treturn s.write(body, gobwas.OpText, timeout)\n}",
"func Text(secret, plainText string) (*Msg, error) {\n\tsalt, err := Salt()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, h := Key(secret, salt)\n\tcipherText, err := text.Encrypt([]byte(plainText), key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &Msg{v: cipherText, s: salt, h: h}\n\tm.encode(true)\n\treturn m, nil\n}",
"func (c *Conn) WriteTextMessage(data []byte) error {\n\treturn c.postEnvelope(&envelope{websocket.TextMessage, data})\n}",
"func (s *Status) WriteText(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(s.Code)\n\t_, err := io.WriteString(w, s.String())\n\treturn err\n}",
"func sendText(msg string, s *discordgo.Session, m *discordgo.MessageCreate) {\n\t_, _ = s.ChannelMessageSend(m.ChannelID, msg)\n\tfmt.Printf(\"Sending %q\\n\", msg)\n}",
"func Text(ctx context.Context, code int, str interface{}) error {\n\tw := ResponseWriter(ctx)\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(code)\n\treturn write(w, str)\n}",
"func (c *Client) Write(message *Message) {\n\tfmt.Fprintf(c.Conn, message.ToString())\n}",
"func plainText(h http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t\th(w, r)\n\t}\n}",
"func sendTextResponse(w http.ResponseWriter, msg string) {\n\tw.Write([]byte(msg))\n}",
"func writeText(w io.Writer, text []byte, html bool) {\n\tif html {\n\t\ttemplate.HTMLEscape(w, text);\n\t\treturn;\n\t}\n\tw.Write(text);\n}",
"func Text(resp http.ResponseWriter, content string, code int) error {\n\tresp.Header().Add(\"Content-Type\", \"text/plain\")\n\tresp.WriteHeader(code)\n\t_, err := resp.Write([]byte(content))\n\treturn maskAny(err)\n}",
"func Write(msg string) {\n\terr := conn.WriteMessage(websocket.TextMessage, []byte(msg))\n\tif err != nil {\n\t\t// TODO handle errors or return\n\t\tpanic(err)\n\t}\n}",
"func SendPlainText(w http.ResponseWriter, text string, code int) {\n\tw.WriteHeader(code)\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.Write([]byte(text))\n}",
"func TextServer(w http.ResponseWriter, req *http.Request) {\n\tio.WriteString(w, \"Hello\")\n}",
"func WritePlainEmail(subject, bodyMessage string) string {\n\n\treturn WriteEmail(\"text/plain\", subject, bodyMessage)\n}",
"func (sc *SnowthClient) WriteText(data []TextData, nodes ...*SnowthNode) error {\n\treturn sc.WriteTextContext(context.Background(), data, nodes...)\n}",
"func AppendNormalText(format string, a ...interface{}) ServerMessage {\n\ttext := fmt.Sprintf(format, a...)\n\tsomething := &TextMessage{}\n\tsomething.Type = \"text\"\n\tsomething.Text = html.EscapeString(text)\n\tsomething.Color = NormalOutputColor\n\treturn something\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewRolloutBlockLister returns a new RolloutBlockLister.
|
// NewRolloutBlockLister returns a new RolloutBlockLister backed by the
// given indexer.
func NewRolloutBlockLister(indexer cache.Indexer) RolloutBlockLister {
	lister := &rolloutBlockLister{indexer: indexer}
	return lister
}
|
[
"func (s rolloutBlockNamespaceLister) Get(name string) (*v1alpha1.RolloutBlock, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"rolloutblock\"), name)\n\t}\n\treturn obj.(*v1alpha1.RolloutBlock), nil\n}",
"func (s rolloutBlockNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.RolloutBlock))\n\t})\n\treturn ret, err\n}",
"func (s *rolloutBlockLister) RolloutBlocks(namespace string) RolloutBlockNamespaceLister {\n\treturn rolloutBlockNamespaceLister{indexer: s.indexer, namespace: namespace}\n}",
"func (s *rolloutBlockLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.RolloutBlock))\n\t})\n\treturn ret, err\n}",
"func New() *Blockstream {\n\treturn &Blockstream{}\n}",
"func New() *block {\n\treturn &block{\n\t\tBroadcastChan: make(chan Message, broadcastChanSize),\n\t\tbroadcastSeen: map[string]struct{}{},\n\t}\n}",
"func newBlocker() (*Blocker, error) {\n\tv4, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tv6, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb := &Blocker{ip4tables: v4, ip6tables: v6}\n\treturn b, b.init()\n}",
"func NewBlockListener(net service.Service, sync *Syncer, concurrency int, logger log.Log) *BlockListener {\n\tbl := BlockListener{\n\t\tSyncer: sync,\n\t\tLog: logger,\n\t\tsemaphore: make(chan struct{}, concurrency),\n\t\texit: make(chan struct{}),\n\t\treceivedGossipBlocks: net.RegisterGossipProtocol(config.NewBlockProtocol, priorityq.High),\n\t}\n\treturn &bl\n}",
"func NewBundleLister(indexer cache.Indexer) BundleLister {\n\treturn &bundleLister{indexer: indexer}\n}",
"func New() *block {\n\treturn &block{\n\t\tbroadcastChan: make(chan Message, broadcastChanSize),\n\t\tbroadcastMsgSeen: map[string]struct{}{},\n\t}\n}",
"func New(r io.Reader) (block *Block, err error) {\n\tblock = new(Block)\n\tif err = block.parseHeader(r); err != nil {\n\t\treturn block, err\n\t}\n\tblock.lr = io.LimitReader(r, block.Length)\n\treturn block, nil\n}",
"func New(b backend.Backend) *Blockfinder {\n\tbf := &Blockfinder{\n\t\tbackend: b,\n\t}\n\tbf.blocks = make(map[uint32]time.Time)\n\tbf.blockResponses = b.BlockResponses()\n\treturn bf\n}",
"func (c *Client) NewBlockFilter() (*QuantityResponse, error) {\n\trequest := c.newRequest(EthNewBlockFilter)\n\n\tresponse := &QuantityResponse{}\n\n\treturn response, c.send(request, response)\n}",
"func NewBlock(typeName string, labels []string) *Block {\n\tblock := newBlock()\n\tblock.init(typeName, labels)\n\treturn block\n}",
"func NewBlockFilter(\n\tlogger log.Logger,\n\tlabelSelector labels.Selector,\n\tresolutionLevel compact.ResolutionLevel,\n\tcompactionLevel int,\n) *BlockFilter {\n\treturn &BlockFilter{\n\t\tlabelSelector: labelSelector,\n\t\tlogger: logger,\n\t\tresolutionLevel: resolutionLevel,\n\t\tcompactionLevel: compactionLevel,\n\t}\n}",
"func NewBlock(raw []byte) *Block {\n block := &Block{}\n block.UnmarshalRlp(raw)\n\n return block\n}",
"func NewBlock(minerID string, parent *Block, uncles map[string]*Block, timestamp int) *Block {\n\tnewDepth := -1\n\tnewFees := 0\n\tif parent != nil {\n\t\tnewDepth = parent.depth + 1\n\t\tnewFees = (timestamp - parent.timestamp)*FEES_PER_SECOND\n\t}\n\n\tbuncles := make(map[string]*Block)\n\tfor k, v := range uncles {\n\t\tbuncles[k] = v\n\t}\n\treturn &Block{\n\t\tminerID: minerID,\n\t\tparent: parent,\n\t\tuncles: buncles,\n\t\ttimestamp: timestamp,\n\t\tfees: newFees,\n\t\tdepth: newDepth,\n\t}\n}",
"func NewBlock() (*Block, error) {\n\tn, err := findLast()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th, err := ftoh(n)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(\"Hash: \" + h)\n\n\treturn &Block{Number: n + 1, PreviousHash: h}, nil\n}",
"func NewLister() Lister {\n\treturn _lister{\n\t\tioUtil: iioutil.New(),\n\t\tdotYmlUnmarshaller: dotyml.NewUnmarshaller(),\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List lists all RolloutBlocks in the indexer.
|
// List returns every RolloutBlock in the indexer that matches the given
// label selector, across all namespaces.
func (s *rolloutBlockLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {
	// cache.ListAll invokes collect once per matching object.
	collect := func(obj interface{}) {
		ret = append(ret, obj.(*v1alpha1.RolloutBlock))
	}
	err = cache.ListAll(s.indexer, selector, collect)
	return ret, err
}
|
[
"func (s rolloutBlockNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.RolloutBlock))\n\t})\n\treturn ret, err\n}",
"func (s *rolloutBlockLister) RolloutBlocks(namespace string) RolloutBlockNamespaceLister {\n\treturn rolloutBlockNamespaceLister{indexer: s.indexer, namespace: namespace}\n}",
"func NewRolloutBlockLister(indexer cache.Indexer) RolloutBlockLister {\n\treturn &rolloutBlockLister{indexer: indexer}\n}",
"func (s *S3BlockstoreConfigServiceOp) List(ctx context.Context, options *ListOptions) (*S3Blockstores, *Response, error) {\n\tpath, err := setQueryParams(backupAdministratorS3BlockstoreBasePath, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := s.Client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\troot := new(S3Blockstores)\n\tresp, err := s.Client.Do(ctx, req, root)\n\n\treturn root, resp, err\n}",
"func (b *Block) List(input *BlockCursorInput) (*Blocks, error) {\n\tparams := make(map[string]string)\n\tparams[\"cursor\"] = input.Cursor\n\tresp, err := b.c.Request(http.MethodGet, \"/blocks\", new(bytes.Buffer), params)\n\tif err != nil {\n\t\treturn &Blocks{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar blocks *Blocks\n\terr = json.NewDecoder(resp.Body).Decode(&blocks)\n\tif err != nil {\n\t\treturn &Blocks{}, err\n\t}\n\treturn blocks, nil\n}",
"func (a *DefaultClient) List(l vfs.Location) ([]string, error) {\n\tURL, err := url.Parse(l.(*Location).ContainerURL())\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\n\tcontainerURL := azblob.NewContainerURL(*URL, a.pipeline)\n\tctx := context.Background()\n\tvar list []string\n\tfor marker := (azblob.Marker{}); marker.NotDone(); {\n\t\tlistBlob, err := containerURL.ListBlobsHierarchySegment(ctx, marker, \"/\",\n\t\t\tazblob.ListBlobsSegmentOptions{Prefix: utils.RemoveLeadingSlash(l.Path())})\n\t\tif err != nil {\n\t\t\treturn []string{}, err\n\t\t}\n\n\t\tmarker = listBlob.NextMarker\n\n\t\tfor i := range listBlob.Segment.BlobItems {\n\t\t\tlist = append(list, listBlob.Segment.BlobItems[i].Name)\n\t\t}\n\t}\n\treturn list, nil\n}",
"func (h *Handler) GetAllBlock(c echo.Context) error {\n\tblocks, err := h.Service.GetAllBlock()\n\n\tif err != nil {\n\t\treturn c.JSON(getStatusCode(err), ResponseError{Message: err.Error()})\n\t}\n\n\treturn c.JSON(http.StatusOK, blocks)\n}",
"func (logger *Logger) ReadAllBlocks() ([]pb.BlockDump, error) {\n\tfiles, err := ioutil.ReadDir(logger.dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar res []pb.BlockDump\n\tfor _, file := range files {\n\t\tfname := file.Name()\n\t\tbytes, err := ioutil.ReadFile(logger.dir + \"/\" + fname)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error reading file:\", err)\n\t\t}\n\t\tdumpBlock := pb.BlockDump{}\n\t\tif err := proto.Unmarshal(bytes, &dumpBlock); err != nil {\n\t\t\tlog.Fatalln(\"Failed to parse BlockDump:\", err)\n\t\t}\n\t\tres = append(res, dumpBlock)\n\t}\n\n\treturn res, nil\n}",
"func (c APIClient) ListBlock() ([]*pfs.BlockInfo, error) {\n\tblockInfos, err := c.BlockAPIClient.ListBlock(\n\t\tc.ctx(),\n\t\t&pfs.ListBlockRequest{},\n\t)\n\tif err != nil {\n\t\treturn nil, sanitizeErr(err)\n\t}\n\treturn blockInfos.BlockInfo, nil\n}",
"func (c *Client) ListWorkerBlocks(ctx context.Context, params *ListWorkerBlocksInput, optFns ...func(*Options)) (*ListWorkerBlocksOutput, error) {\n\tif params == nil {\n\t\tparams = &ListWorkerBlocksInput{}\n\t}\n\n\tresult, metadata, err := c.invokeOperation(ctx, \"ListWorkerBlocks\", params, optFns, addOperationListWorkerBlocksMiddlewares)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout := result.(*ListWorkerBlocksOutput)\n\tout.ResultMetadata = metadata\n\treturn out, nil\n}",
"func (s rolloutBlockNamespaceLister) Get(name string) (*v1alpha1.RolloutBlock, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"rolloutblock\"), name)\n\t}\n\treturn obj.(*v1alpha1.RolloutBlock), nil\n}",
"func (c *BlockVolumeClient) List(params *BlockVolumeParams) (*BlockVolumeList, error) {\n\tlist := &BlockVolumeList{}\n\n\terr := c.Backend.CallIntoInterface(\"v1/Storage/Block/Volume/list\", params, list)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn list, nil\n}",
"func Blocks(offset, count uint) ([]BlockItem, error) {\n\tjsonBlocks := []struct {\n\t\tNumber uint `json:\"number\"`\n\t\tHash string `json:\"hash\"`\n\t\tDate Time `json:\"date\"`\n\t\tDifficulty uint64 `json:\"difficulty\"`\n\t\tMiner string `json:\"miner\"`\n\t}{}\n\tif err := fetch(&jsonBlocks, blockEndpoint, offset, count); err != nil {\n\t\treturn nil, err\n\t}\n\tblocks := make([]BlockItem, len(jsonBlocks))\n\tfor i, b := range jsonBlocks {\n\t\tblocks[i] = BlockItem(b)\n\t}\n\treturn blocks, nil\n}",
"func (l *LessonTut) Blocks() []*BlockTut { return l.blocks }",
"func (s *bundleLister) List(selector labels.Selector) (ret []*v1alpha1.Bundle, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Bundle))\n\t})\n\treturn ret, err\n}",
"func (i Index) List() []string {\n\treturn []string{\n\t\t\"/api\",\n\t\t\"/api/armies\",\n\t\t\"/api/armies/{name}\",\n\t}\n}",
"func (s *wafregionalRuleLister) List(selector labels.Selector) (ret []*v1alpha1.WafregionalRule, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.WafregionalRule))\n\t})\n\treturn ret, err\n}",
"func (b *indexBlock) Blocks() ([]blockHandle, error) {\n\tvar lastKey []byte\n\tkb := make([]byte, 0, MaxSstKeySize)\n\tvar blocks []blockHandle\n\n\tfor b.r.Len() > 0 {\n\t\teKey, err := prefixDecodeFrom(b.r, lastKey, kb)\n\t\tlastKey = eKey\n\t\tif _, err := binary.ReadUvarint(b.r); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbh, err := newBlockHandle(b.r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tblocks = append(blocks, bh)\n\t}\n\n\treturn blocks, nil\n}",
"func (c *MockLoadBalancersClient) List(ctx context.Context, resourceGroupName string) ([]network.LoadBalancer, error) {\n\tvar l []network.LoadBalancer\n\tfor _, lb := range c.LBs {\n\t\tl = append(l, lb)\n\t}\n\treturn l, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RolloutBlocks returns an object that can list and get RolloutBlocks.
|
// RolloutBlocks returns a lister scoped to the given namespace, which can
// list and get RolloutBlocks within that namespace only.
func (s *rolloutBlockLister) RolloutBlocks(namespace string) RolloutBlockNamespaceLister {
	nsLister := rolloutBlockNamespaceLister{
		indexer:   s.indexer,
		namespace: namespace,
	}
	return nsLister
}
|
[
"func NewRolloutBlockLister(indexer cache.Indexer) RolloutBlockLister {\n\treturn &rolloutBlockLister{indexer: indexer}\n}",
"func (l *LessonTut) Blocks() []*BlockTut { return l.blocks }",
"func (s rolloutBlockNamespaceLister) Get(name string) (*v1alpha1.RolloutBlock, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"rolloutblock\"), name)\n\t}\n\treturn obj.(*v1alpha1.RolloutBlock), nil\n}",
"func (s *rolloutBlockLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.RolloutBlock))\n\t})\n\treturn ret, err\n}",
"func getBlocks() {\n\tURL := \"https://api.sendgrid.com/v3/suppression/blocks?start_time=\" + strconv.Itoa(config.LastTimestamp)\n\n\treq, err := http.NewRequest(\"GET\", URL, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+config.SendGridToken)\n\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to build Request: %s\", err)\n\t\treturn\n\t}\n\n\tclient := &http.Client{}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tlogger.Errorf(\"HTTP Call failed: %s\", err.Error())\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tdecoder := json.NewDecoder(res.Body)\n\tvar blocks []Block\n\terr = decoder.Decode(&blocks)\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed parsing json: %T\\n%s\\n%#v\\n\", err, err, err)\n\t} else {\n\t\tcheckBlocks(blocks)\n\t}\n}",
"func Blocks(offset, count uint) ([]BlockItem, error) {\n\tjsonBlocks := []struct {\n\t\tNumber uint `json:\"number\"`\n\t\tHash string `json:\"hash\"`\n\t\tDate Time `json:\"date\"`\n\t\tDifficulty uint64 `json:\"difficulty\"`\n\t\tMiner string `json:\"miner\"`\n\t}{}\n\tif err := fetch(&jsonBlocks, blockEndpoint, offset, count); err != nil {\n\t\treturn nil, err\n\t}\n\tblocks := make([]BlockItem, len(jsonBlocks))\n\tfor i, b := range jsonBlocks {\n\t\tblocks[i] = BlockItem(b)\n\t}\n\treturn blocks, nil\n}",
"func (s rolloutBlockNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.RolloutBlock))\n\t})\n\treturn ret, err\n}",
"func (core *coreService) RawBlocks(startHeight uint64, count uint64, withReceipts bool, withTransactionLogs bool) ([]*iotexapi.BlockInfo, error) {\n\tif count == 0 || count > core.cfg.RangeQueryLimit {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"range exceeds the limit\")\n\t}\n\n\ttipHeight := core.bc.TipHeight()\n\tif startHeight > tipHeight {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"start height should not exceed tip height\")\n\t}\n\tendHeight := startHeight + count - 1\n\tif endHeight > tipHeight {\n\t\tendHeight = tipHeight\n\t}\n\tvar res []*iotexapi.BlockInfo\n\tfor height := startHeight; height <= endHeight; height++ {\n\t\tblk, err := core.dao.GetBlockByHeight(height)\n\t\tif err != nil {\n\t\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t\t}\n\t\tvar receiptsPb []*iotextypes.Receipt\n\t\tif withReceipts && height > 0 {\n\t\t\treceipts, err := core.dao.GetReceipts(height)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t\t\t}\n\t\t\tfor _, receipt := range receipts {\n\t\t\t\treceiptsPb = append(receiptsPb, receipt.ConvertToReceiptPb())\n\t\t\t}\n\t\t}\n\t\tvar transactionLogs *iotextypes.TransactionLogs\n\t\tif withTransactionLogs {\n\t\t\tif transactionLogs, err = core.dao.TransactionLogs(height); err != nil {\n\t\t\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t\t\t}\n\t\t}\n\t\tres = append(res, &iotexapi.BlockInfo{\n\t\t\tBlock: blk.ConvertToBlockPb(),\n\t\t\tReceipts: receiptsPb,\n\t\t\tTransactionLogs: transactionLogs,\n\t\t})\n\t}\n\treturn res, nil\n}",
"func (_PlasmaFramework *PlasmaFrameworkSession) Blocks(arg0 *big.Int) (struct {\n\tRoot [32]byte\n\tTimestamp *big.Int\n}, error) {\n\treturn _PlasmaFramework.Contract.Blocks(&_PlasmaFramework.CallOpts, arg0)\n}",
"func (b *Block) List(input *BlockCursorInput) (*Blocks, error) {\n\tparams := make(map[string]string)\n\tparams[\"cursor\"] = input.Cursor\n\tresp, err := b.c.Request(http.MethodGet, \"/blocks\", new(bytes.Buffer), params)\n\tif err != nil {\n\t\treturn &Blocks{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar blocks *Blocks\n\terr = json.NewDecoder(resp.Body).Decode(&blocks)\n\tif err != nil {\n\t\treturn &Blocks{}, err\n\t}\n\treturn blocks, nil\n}",
"func (nc *NSBClient) GetBlocks(rangeL, rangeR int64) (*BlocksInfo, error) {\n\tb, err := nc.handler.Group(\"/blockchain\").GetWithParams(request.Param{\n\t\t\"minHeight\": rangeL,\n\t\t\"maxHeight\": rangeR,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar bb []byte\n\tbb, err = nc.preloadJSONResponse(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar a BlocksInfo\n\terr = json.Unmarshal(bb, &a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &a, nil\n}",
"func GetBlocks(w http.ResponseWriter, r *http.Request) {\n\tresJSON, ok := json.Marshal(blockchain.Blockchain)\n\tif ok == nil {\n\t\tn := len(resJSON)\n\t\tres := string(resJSON[:n])\n\t\tio.WriteString(w, res)\n\t}\n}",
"func (m *BlocksMessage) GetBlocks() ([]*types.Block, error) {\n\tblocks := []*types.Block{}\n\tfor _, data := range m.RawBlocks {\n\t\tblock := &types.Block{}\n\t\tif err := json.Unmarshal(data, block); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tblocks = append(blocks, block)\n\t}\n\treturn blocks, nil\n}",
"func (s *Service) GetExplorerBlocks(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfrom := r.FormValue(\"from\")\n\tto := r.FormValue(\"to\")\n\tpageParam := r.FormValue(\"page\")\n\toffsetParam := r.FormValue(\"offset\")\n\torder := r.FormValue(\"order\")\n\tdata := &Data{\n\t\tBlocks: []*Block{},\n\t}\n\tdefer func() {\n\t\tif err := json.NewEncoder(w).Encode(data.Blocks); err != nil {\n\t\t\tutils.Logger().Warn().Err(err).Msg(\"cannot JSON-encode blocks\")\n\t\t}\n\t}()\n\n\tif from == \"\" {\n\t\tutils.Logger().Warn().Msg(\"Missing from parameter\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tdb := s.Storage.GetDB()\n\tfromInt, err := strconv.Atoi(from)\n\tif err != nil {\n\t\tutils.Logger().Warn().Err(err).Msg(\"invalid from parameter\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar toInt int\n\tif to == \"\" {\n\t\ttoInt, err = func() (int, error) {\n\t\t\tbytes, err := db.Get([]byte(BlockHeightKey))\n\t\t\tif err == nil {\n\t\t\t\treturn strconv.Atoi(string(bytes))\n\t\t\t}\n\t\t\treturn toInt, err\n\t\t}()\n\t} else {\n\t\ttoInt, err = strconv.Atoi(to)\n\t}\n\tif err != nil {\n\t\tutils.Logger().Warn().Err(err).Msg(\"invalid to parameter\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tvar offset int\n\tif offsetParam != \"\" {\n\t\toffset, err = strconv.Atoi(offsetParam)\n\t\tif err != nil || offset < 1 {\n\t\t\tutils.Logger().Warn().Msg(\"invalid offset parameter\")\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\toffset = paginationOffset\n\t}\n\tvar page int\n\tif pageParam != \"\" {\n\t\tpage, err = strconv.Atoi(pageParam)\n\t\tif err != nil {\n\t\t\tutils.Logger().Warn().Err(err).Msg(\"invalid page parameter\")\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tpage = 0\n\t}\n\n\taccountBlocks := s.ReadBlocksFromDB(fromInt, toInt)\n\tfor id, accountBlock := range accountBlocks 
{\n\t\tif id == 0 || id == len(accountBlocks)-1 || accountBlock == nil {\n\t\t\tcontinue\n\t\t}\n\t\tblock := NewBlock(accountBlock, id+fromInt-1)\n\t\t// Populate transactions\n\t\tfor _, tx := range accountBlock.Transactions() {\n\t\t\ttransaction := GetTransaction(tx, accountBlock)\n\t\t\tif transaction != nil {\n\t\t\t\tblock.TXs = append(block.TXs, transaction)\n\t\t\t}\n\t\t}\n\t\tif accountBlocks[id-1] == nil {\n\t\t\tblock.BlockTime = int64(0)\n\t\t\tblock.PrevBlock = RefBlock{\n\t\t\t\tID: \"\",\n\t\t\t\tHeight: \"\",\n\t\t\t}\n\t\t} else {\n\t\t\tblock.BlockTime = accountBlock.Time().Int64() - accountBlocks[id-1].Time().Int64()\n\t\t\tblock.PrevBlock = RefBlock{\n\t\t\t\tID: accountBlocks[id-1].Hash().Hex(),\n\t\t\t\tHeight: strconv.Itoa(id + fromInt - 2),\n\t\t\t}\n\t\t}\n\t\tif accountBlocks[id+1] == nil {\n\t\t\tblock.NextBlock = RefBlock{\n\t\t\t\tID: \"\",\n\t\t\t\tHeight: \"\",\n\t\t\t}\n\t\t} else {\n\t\t\tblock.NextBlock = RefBlock{\n\t\t\t\tID: accountBlocks[id+1].Hash().Hex(),\n\t\t\t\tHeight: strconv.Itoa(id + fromInt),\n\t\t\t}\n\t\t}\n\t\tdata.Blocks = append(data.Blocks, block)\n\t}\n\tif offset*page >= len(data.Blocks) {\n\t\tdata.Blocks = []*Block{}\n\t} else if offset*page+offset > len(data.Blocks) {\n\t\tdata.Blocks = data.Blocks[offset*page:]\n\t} else {\n\t\tdata.Blocks = data.Blocks[offset*page : offset*page+offset]\n\t}\n\tif order == \"DESC\" {\n\t\tsort.Slice(data.Blocks[:], func(i, j int) bool {\n\t\t\treturn data.Blocks[i].Timestamp > data.Blocks[j].Timestamp\n\t\t})\n\t} else {\n\t\tsort.Slice(data.Blocks[:], func(i, j int) bool {\n\t\t\treturn data.Blocks[i].Timestamp < data.Blocks[j].Timestamp\n\t\t})\n\t}\n}",
"func (obj Events) Block() Block {\n\treturn Block(obj)\n}",
"func (h *Handler) GetAllBlock(c echo.Context) error {\n\tblocks, err := h.Service.GetAllBlock()\n\n\tif err != nil {\n\t\treturn c.JSON(getStatusCode(err), ResponseError{Message: err.Error()})\n\t}\n\n\treturn c.JSON(http.StatusOK, blocks)\n}",
"func consensusBlocksGetFromBlock(b types.Block, h types.BlockHeight) ConsensusBlocksGet {\n\ttxns := make([]ConsensusBlocksGetTxn, 0, len(b.Transactions))\n\tfor _, t := range b.Transactions {\n\t\t// Get the transaction's SiacoinOutputs.\n\t\tscos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(t.SiacoinOutputs))\n\t\tfor i, sco := range t.SiacoinOutputs {\n\t\t\tscos = append(scos, ConsensusBlocksGetSiacoinOutput{\n\t\t\t\tID: t.SiacoinOutputID(uint64(i)),\n\t\t\t\tValue: sco.Value,\n\t\t\t\tUnlockHash: sco.UnlockHash,\n\t\t\t})\n\t\t}\n\t\t// Get the transaction's SiafundOutputs.\n\t\tsfos := make([]ConsensusBlocksGetSiafundOutput, 0, len(t.SiafundOutputs))\n\t\tfor i, sfo := range t.SiafundOutputs {\n\t\t\tsfos = append(sfos, ConsensusBlocksGetSiafundOutput{\n\t\t\t\tID: t.SiafundOutputID(uint64(i)),\n\t\t\t\tValue: sfo.Value,\n\t\t\t\tUnlockHash: sfo.UnlockHash,\n\t\t\t})\n\t\t}\n\t\t// Get the transaction's FileContracts.\n\t\tfcos := make([]ConsensusBlocksGetFileContract, 0, len(t.FileContracts))\n\t\tfor i, fc := range t.FileContracts {\n\t\t\t// Get the FileContract's valid proof outputs.\n\t\t\tfcid := t.FileContractID(uint64(i))\n\t\t\tvpos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(fc.ValidProofOutputs))\n\t\t\tfor j, vpo := range fc.ValidProofOutputs {\n\t\t\t\tvpos = append(vpos, ConsensusBlocksGetSiacoinOutput{\n\t\t\t\t\tID: fcid.StorageProofOutputID(types.ProofValid, uint64(j)),\n\t\t\t\t\tValue: vpo.Value,\n\t\t\t\t\tUnlockHash: vpo.UnlockHash,\n\t\t\t\t})\n\t\t\t}\n\t\t\t// Get the FileContract's missed proof outputs.\n\t\t\tmpos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(fc.MissedProofOutputs))\n\t\t\tfor j, mpo := range fc.MissedProofOutputs {\n\t\t\t\tmpos = append(mpos, ConsensusBlocksGetSiacoinOutput{\n\t\t\t\t\tID: fcid.StorageProofOutputID(types.ProofMissed, uint64(j)),\n\t\t\t\t\tValue: mpo.Value,\n\t\t\t\t\tUnlockHash: mpo.UnlockHash,\n\t\t\t\t})\n\t\t\t}\n\t\t\tfcos = append(fcos, 
ConsensusBlocksGetFileContract{\n\t\t\t\tID: fcid,\n\t\t\t\tFileSize: fc.FileSize,\n\t\t\t\tFileMerkleRoot: fc.FileMerkleRoot,\n\t\t\t\tWindowStart: fc.WindowStart,\n\t\t\t\tWindowEnd: fc.WindowEnd,\n\t\t\t\tPayout: fc.Payout,\n\t\t\t\tValidProofOutputs: vpos,\n\t\t\t\tMissedProofOutputs: mpos,\n\t\t\t\tUnlockHash: fc.UnlockHash,\n\t\t\t\tRevisionNumber: fc.RevisionNumber,\n\t\t\t})\n\t\t}\n\t\ttxns = append(txns, ConsensusBlocksGetTxn{\n\t\t\tID: t.ID(),\n\t\t\tSiacoinInputs: t.SiacoinInputs,\n\t\t\tSiacoinOutputs: scos,\n\t\t\tFileContracts: fcos,\n\t\t\tFileContractRevisions: t.FileContractRevisions,\n\t\t\tStorageProofs: t.StorageProofs,\n\t\t\tSiafundInputs: t.SiafundInputs,\n\t\t\tSiafundOutputs: sfos,\n\t\t\tMinerFees: t.MinerFees,\n\t\t\tArbitraryData: t.ArbitraryData,\n\t\t\tTransactionSignatures: t.TransactionSignatures,\n\t\t})\n\t}\n\treturn ConsensusBlocksGet{\n\t\tID: b.ID(),\n\t\tHeight: h,\n\t\tParentID: b.ParentID,\n\t\tNonce: b.Nonce,\n\t\tTimestamp: b.Timestamp,\n\t\tMinerPayouts: b.MinerPayouts,\n\t\tTransactions: txns,\n\t}\n}",
"func (ts *Tipset) Block(miner Miner, winCount int64, msgs ...*ApplicableMessage) {\n\tblock := Block{\n\t\tMinerAddr: miner.MinerActorAddr.ID,\n\t\tWinCount: winCount,\n\t}\n\tfor _, am := range msgs {\n\t\tblock.Messages = append(block.Messages, MustSerialize(am.Message))\n\n\t\t// if we see this message for the first time, add it to the `msgIdx` map and to the `orderMsgs` slice.\n\t\tif _, ok := ts.tss.msgIdx[am.Message.Cid()]; !ok {\n\t\t\tts.tss.msgIdx[am.Message.Cid()] = am\n\t\t\tts.tss.orderedMsgs = append(ts.tss.orderedMsgs, am)\n\t\t}\n\t}\n\n\tts.Blocks = append(ts.Blocks, block)\n}",
"func (core *coreService) BlockByHeightRange(start uint64, count uint64) ([]*apitypes.BlockWithReceipts, error) {\n\tif count == 0 {\n\t\treturn nil, errors.Wrap(errInvalidFormat, \"count must be greater than zero\")\n\t}\n\tif count > core.cfg.RangeQueryLimit {\n\t\treturn nil, errors.Wrap(errInvalidFormat, \"range exceeds the limit\")\n\t}\n\n\tvar (\n\t\ttipHeight = core.bc.TipHeight()\n\t\tres = make([]*apitypes.BlockWithReceipts, 0)\n\t)\n\tif start > tipHeight {\n\t\treturn nil, errors.Wrap(errInvalidFormat, \"start height should not exceed tip height\")\n\t}\n\tfor height := start; height <= tipHeight && count > 0; height++ {\n\t\tblkStore, err := core.getBlockByHeight(height)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres = append(res, blkStore)\n\t\tcount--\n\t}\n\treturn res, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
List lists all RolloutBlocks in the indexer for a given namespace.
|
// List returns every RolloutBlock in the lister's namespace that matches
// the given label selector.
func (s rolloutBlockNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {
	// cache.ListAllByNamespace invokes collect once per matching object
	// within s.namespace.
	collect := func(obj interface{}) {
		ret = append(ret, obj.(*v1alpha1.RolloutBlock))
	}
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, collect)
	return ret, err
}
|
[
"func (s *rolloutBlockLister) List(selector labels.Selector) (ret []*v1alpha1.RolloutBlock, err error) {\n\terr = cache.ListAll(s.indexer, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.RolloutBlock))\n\t})\n\treturn ret, err\n}",
"func (s *rolloutBlockLister) RolloutBlocks(namespace string) RolloutBlockNamespaceLister {\n\treturn rolloutBlockNamespaceLister{indexer: s.indexer, namespace: namespace}\n}",
"func (s dropletSnapshotNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.DropletSnapshot, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.DropletSnapshot))\n\t})\n\treturn ret, err\n}",
"func (s rolloutBlockNamespaceLister) Get(name string) (*v1alpha1.RolloutBlock, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"rolloutblock\"), name)\n\t}\n\treturn obj.(*v1alpha1.RolloutBlock), nil\n}",
"func (s bundleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Bundle, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Bundle))\n\t})\n\treturn ret, err\n}",
"func NewRolloutBlockLister(indexer cache.Indexer) RolloutBlockLister {\n\treturn &rolloutBlockLister{indexer: indexer}\n}",
"func (s sMBNamespaceLister) List(selector labels.Selector) (ret []*v1.SMB, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.SMB))\n\t})\n\treturn ret, err\n}",
"func (s robNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Rob, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Rob))\n\t})\n\treturn ret, err\n}",
"func (s awsEbsSnapshotNamespaceLister) List(selector labels.Selector) (ret []*v1.AwsEbsSnapshot, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1.AwsEbsSnapshot))\n\t})\n\treturn ret, err\n}",
"func (s beeNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Bee, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.Bee))\n\t})\n\treturn ret, err\n}",
"func (s ebsSnapshotNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.EbsSnapshot, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.EbsSnapshot))\n\t})\n\treturn ret, err\n}",
"func (s hookNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Hook, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Hook))\n\t})\n\treturn ret, err\n}",
"func (s *S3BlockstoreConfigServiceOp) List(ctx context.Context, options *ListOptions) (*S3Blockstores, *Response, error) {\n\tpath, err := setQueryParams(backupAdministratorS3BlockstoreBasePath, options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq, err := s.Client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\troot := new(S3Blockstores)\n\tresp, err := s.Client.Do(ctx, req, root)\n\n\treturn root, resp, err\n}",
"func (s lotusNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Lotus, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1beta1.Lotus))\n\t})\n\treturn ret, err\n}",
"func (n *namespaceClient) List() ([]ns.Metadata, error) {\n\turl := fmt.Sprintf(\"%s%s\", n.url, nsh.GetURL)\n\tresp, err := n.client.DoHTTPRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata := &admin.NamespaceGetResponse{}\n\tdefer func() {\n\t\tioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t}()\n\tif err := jsonpb.Unmarshal(resp.Body, data); err != nil {\n\t\treturn nil, err\n\t}\n\tnsMetas := []ns.Metadata{}\n\tfor nsID, nsOpts := range data.GetRegistry().GetNamespaces() {\n\t\tmd, err := ns.ToMetadata(nsID, nsOpts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnsMetas = append(nsMetas, md)\n\t}\n\tn.logger.Info(\"namespace retrieved\")\n\treturn nsMetas, nil\n}",
"func (client NamespacesClient) List() (result EHNamespaceListResult, err error) {\n\treq, err := client.ListPreparer()\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"eventhub.NamespacesClient\", \"List\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"eventhub.NamespacesClient\", \"List\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.ListResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"eventhub.NamespacesClient\", \"List\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func (s auctioneerNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Auctioneer, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.Auctioneer))\n\t})\n\treturn ret, err\n}",
"func (s customBuilderNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CustomBuilder, err error) {\n\terr = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {\n\t\tret = append(ret, m.(*v1alpha1.CustomBuilder))\n\t})\n\treturn ret, err\n}",
"func (a *HistoricalApi) namespaceList(request *restful.Request, response *restful.Response) {\n\tif resp, err := a.historicalSource.GetNamespaces(); err != nil {\n\t\tresponse.WriteError(http.StatusInternalServerError, err)\n\t} else {\n\t\tresponse.WriteEntity(resp)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get retrieves the RolloutBlock from the indexer for a given namespace and name.
|
// Get retrieves the RolloutBlock from the indexer for a given namespace and name.
// It returns a NotFound error when no object is stored under the namespace/name key.
func (s rolloutBlockNamespaceLister) Get(name string) (*v1alpha1.RolloutBlock, error) {
	key := s.namespace + "/" + name
	obj, exists, err := s.indexer.GetByKey(key)
	switch {
	case err != nil:
		return nil, err
	case !exists:
		return nil, errors.NewNotFound(v1alpha1.Resource("rolloutblock"), name)
	}
	return obj.(*v1alpha1.RolloutBlock), nil
}
|
[
"func (s robNamespaceLister) Get(name string) (*v1alpha1.Rob, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"rob\"), name)\n\t}\n\treturn obj.(*v1alpha1.Rob), nil\n}",
"func (s beeNamespaceLister) Get(name string) (*v1beta1.Bee, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1beta1.Resource(\"bee\"), name)\n\t}\n\treturn obj.(*v1beta1.Bee), nil\n}",
"func (s customBuilderNamespaceLister) Get(name string) (*v1alpha1.CustomBuilder, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"custombuilder\"), name)\n\t}\n\treturn obj.(*v1alpha1.CustomBuilder), nil\n}",
"func (s lotusNamespaceLister) Get(name string) (*v1beta1.Lotus, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1beta1.Resource(\"lotus\"), name)\n\t}\n\treturn obj.(*v1beta1.Lotus), nil\n}",
"func (s dropletSnapshotNamespaceLister) Get(name string) (*v1alpha1.DropletSnapshot, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"dropletsnapshot\"), name)\n\t}\n\treturn obj.(*v1alpha1.DropletSnapshot), nil\n}",
"func (s bundleNamespaceLister) Get(name string) (*v1alpha1.Bundle, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"bundle\"), name)\n\t}\n\treturn obj.(*v1alpha1.Bundle), nil\n}",
"func (s sMBNamespaceLister) Get(name string) (*v1.SMB, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"smb\"), name)\n\t}\n\treturn obj.(*v1.SMB), nil\n}",
"func (s auctioneerNamespaceLister) Get(name string) (*v1alpha1.Auctioneer, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"auctioneer\"), name)\n\t}\n\treturn obj.(*v1alpha1.Auctioneer), nil\n}",
"func (s hookNamespaceLister) Get(name string) (*v1alpha1.Hook, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"hook\"), name)\n\t}\n\treturn obj.(*v1alpha1.Hook), nil\n}",
"func (s awsEbsSnapshotNamespaceLister) Get(name string) (*v1.AwsEbsSnapshot, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"awsebssnapshot\"), name)\n\t}\n\treturn obj.(*v1.AwsEbsSnapshot), nil\n}",
"func (s batchNamespaceLister) Get(name string) (*v1.Batch, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"batch\"), name)\n\t}\n\treturn obj.(*v1.Batch), nil\n}",
"func (s ebsSnapshotNamespaceLister) Get(name string) (*v1alpha1.EbsSnapshot, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"ebssnapshot\"), name)\n\t}\n\treturn obj.(*v1alpha1.EbsSnapshot), nil\n}",
"func (s bookNamespaceLister) Get(name string) (*v1.Book, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"book\"), name)\n\t}\n\treturn obj.(*v1.Book), nil\n}",
"func (s logicalNetworkNamespaceLister) Get(name string) (*v1.LogicalNetwork, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(r.Resource(\"logicalnetwork\"), name)\n\t}\n\treturn obj.(*v1.LogicalNetwork), nil\n}",
"func (nsi namespaceIndex) Get(name string) *namespaceData {\n\tdata := nsi[name]\n\tif data != nil {\n\t\treturn data\n\t}\n\tdata = &namespaceData{}\n\tnsi[name] = data\n\treturn data\n}",
"func (api *APIClient) GetBlockByRepoName(repoPieces RepoPieces) (Block, error) {\n\tu, err := url.Parse(fmt.Sprintf(\"%s/api/v1/blocks\", api.baseURL))\n\tif err != nil {\n\t\treturn Block{}, errors.New(\"unable to parse Learn remote\")\n\t}\n\tv := url.Values{}\n\tv.Set(\"repo_name\", repoPieces.RepoName)\n\tv.Set(\"org\", repoPieces.Org)\n\tv.Set(\"origin\", repoPieces.Origin)\n\tu.RawQuery = v.Encode()\n\n\treq, err := http.NewRequest(\"GET\", u.String(), nil)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Source\", \"gLearn_cli\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", api.Credentials.token))\n\n\tres, err := api.client.Do(req)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn Block{}, fmt.Errorf(\"Error: response status: %d\", res.StatusCode)\n\t}\n\n\tvar blockResp blockResponse\n\terr = json.NewDecoder(res.Body).Decode(&blockResp)\n\tif err != nil {\n\t\treturn Block{}, err\n\t}\n\n\tif len(blockResp.Blocks) == 1 {\n\t\treturn blockResp.Blocks[0], nil\n\t}\n\treturn Block{}, nil\n}",
"func (s interactNamespaceLister) Get(name string) (*v1.Interact, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1.Resource(\"interact\"), name)\n\t}\n\treturn obj.(*v1.Interact), nil\n}",
"func (s wafregionalRuleNamespaceLister) Get(name string) (*v1alpha1.WafregionalRule, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1alpha1.Resource(\"wafregionalrule\"), name)\n\t}\n\treturn obj.(*v1alpha1.WafregionalRule), nil\n}",
"func (s workloadDefinitionNamespaceLister) Get(name string) (*v1beta1.WorkloadDefinition, error) {\n\tobj, exists, err := s.indexer.GetByKey(s.namespace + \"/\" + name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, errors.NewNotFound(v1beta1.Resource(\"workloaddefinition\"), name)\n\t}\n\treturn obj.(*v1beta1.WorkloadDefinition), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddCopy creates a new QuarksSecrets controller to watch for user-defined secrets.
|
// AddCopy creates a new QuarksSecrets controller to watch for the user-defined secrets.
// It wires two watches into the copy controller: one on QuarksSecret resources whose
// copied status is not yet true, and one on user-created Secrets whose data, labels,
// or annotations changed.
func AddCopy(ctx context.Context, config *config.Config, mgr manager.Manager) error {
	ctx = ctxlog.NewContextWithRecorder(ctx, "copy-reconciler", mgr.GetEventRecorderFor("copy-recorder"))
	log := ctxlog.ExtractLogger(ctx)
	r := NewCopyReconciler(ctx, config, mgr, credsgen.NewInMemoryGenerator(log), controllerutil.SetControllerReference)

	c, err := controller.New("copy-controller", mgr, controller.Options{
		Reconciler:              r,
		MaxConcurrentReconciles: config.MaxQuarksSecretWorkers,
	})
	if err != nil {
		return errors.Wrap(err, "Adding copy controller to manager failed.")
	}

	// Restrict both watches to the monitored namespaces.
	nsPred := newNSPredicate(ctx, mgr.GetClient(), config.MonitoredID)

	// Watch for changes to the copied status of QuarksSecrets.
	// Only updates are interesting, and only while the copied status is unset or false.
	p := predicate.Funcs{
		CreateFunc:  func(e event.CreateEvent) bool { return false },
		DeleteFunc:  func(e event.DeleteEvent) bool { return false },
		GenericFunc: func(e event.GenericEvent) bool { return false },
		UpdateFunc: func(e event.UpdateEvent) bool {
			n := e.ObjectNew.(*qsv1a1.QuarksSecret)
			if n.Status.Copied != nil {
				ctxlog.Debugf(ctx, "Skipping QuarksSecret '%s', if copy status '%v' is true", n.Name, *n.Status.Copied)
				return !(*n.Status.Copied)
			}
			return true
		},
	}
	err = c.Watch(&source.Kind{Type: &qsv1a1.QuarksSecret{}}, &handler.EnqueueRequestForObject{}, nsPred, p)
	if err != nil {
		return errors.Wrap(err, "Watching quarks secrets failed in copy controller.")
	}

	// Watch for changes to user created secrets.
	// Skip updates where data, labels and annotations are all unchanged.
	p = predicate.Funcs{
		CreateFunc:  func(e event.CreateEvent) bool { return false },
		DeleteFunc:  func(e event.DeleteEvent) bool { return false },
		GenericFunc: func(e event.GenericEvent) bool { return false },
		UpdateFunc: func(e event.UpdateEvent) bool {
			n := e.ObjectNew.(*corev1.Secret)
			o := e.ObjectOld.(*corev1.Secret)
			shouldProcessReconcile := isUserCreatedSecret(n)
			if reflect.DeepEqual(n.Data, o.Data) && reflect.DeepEqual(n.Labels, o.Labels) &&
				reflect.DeepEqual(n.Annotations, o.Annotations) {
				return false
			}
			return shouldProcessReconcile
		},
	}
	err = c.Watch(&source.Kind{Type: &corev1.Secret{}}, handler.EnqueueRequestsFromMapFunc(
		func(a crc.Object) []reconcile.Request {
			secret := a.(*corev1.Secret)
			if skip.Reconciles(ctx, mgr.GetClient(), secret) {
				return []reconcile.Request{}
			}
			reconciles, err := listQuarksSecretsReconciles(ctx, mgr.GetClient(), secret, secret.Namespace)
			if err != nil {
				ctxlog.Errorf(ctx, "Failed to calculate reconciles for secret '%s/%s': %v", secret.Namespace, secret.Name, err)
			}
			// Previously this branched on len(reconciles) > 0, but both branches
			// returned the same slice; a single return is equivalent.
			return reconciles
		}), nsPred, p)
	if err != nil {
		return errors.Wrap(err, "Watching user defined secrets failed in copy controller.")
	}

	return nil
}
|
[
"func copySecret(ctx context.Context, src, dst kubernetes.Interface, namespace, name string) error {\n\tsecret, err := src.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"failed retrieving secret: %s from source cluster: %w\", name, err)\n\t}\n\t_, err = dst.CoreV1().Secrets(namespace).Create(ctx, &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: namespace,\n\t\t\tLabels: secret.Labels,\n\t\t},\n\t\tData: secret.Data,\n\t}, metav1.CreateOptions{})\n\tif !errors.IsAlreadyExists(err) && err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func copySecret(namespace *string) error {\n\tsecretCaCrt := \"bitfusion-client-secret-ca.crt\"\n\tsecretClientYml := \"bitfusion-client-secret-client.yml\"\n\tsecretServersConf := \"bitfusion-client-secret-servers.conf\"\n\tconfig, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, secretCaCrtErr := clientset.CoreV1().Secrets(*namespace).Get(context.TODO(), secretCaCrt, metav1.GetOptions{})\n\t_, secretClientYmlErr := clientset.CoreV1().Secrets(*namespace).Get(context.TODO(), secretClientYml, metav1.GetOptions{})\n\t_, secretServersConfErr := clientset.CoreV1().Secrets(*namespace).Get(context.TODO(), secretServersConf, metav1.GetOptions{})\n\n\tif errors.IsNotFound(secretCaCrtErr) || errors.IsNotFound(secretClientYmlErr) || errors.IsNotFound(secretServersConfErr) {\n\t\terr = nil\n\t\tglog.Infof(\"Secrets %s not found in namespace %s \\n\", secretCaCrt, *namespace)\n\t\tglog.Infof(\"Secrets %s not found in namespace %s \\n\", secretClientYml, *namespace)\n\t\tglog.Infof(\"Secrets %s not found in namespace %s \\n\", secretServersConf, *namespace)\n\n\t\tsecretFiles := []string{secretCaCrt, secretClientYml, secretServersConf}\n\t\tfor _, name := range secretFiles {\n\n\t\t\tsecret, err := clientset.CoreV1().Secrets(\"kube-system\").Get(context.TODO(), name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnewSecret := &corev1.Secret{\n\t\t\t\tData: secret.Data,\n\t\t\t\tType: secret.Type,\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: name,\n\t\t\t\t\tNamespace: *namespace,\n\t\t\t\t},\n\t\t\t}\n\t\t\t// Create the secret\n\t\t\t_, err = clientset.CoreV1().Secrets(*namespace).Create(context.TODO(), newSecret, metav1.CreateOptions{})\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Can't create secret: %v\", err)\n\t\t\t}\n\n\t\t}\n\n\t}\n\treturn err\n}",
"func CopySecret(corev1Input clientcorev1.CoreV1Interface, srcNS string, srcSecretName string, tgtNS string, svcAccount string) (*corev1.Secret, error) {\n\treturn CopySecretWithName(corev1Input,\n\t\tsrcNS,\n\t\tsrcSecretName,\n\t\ttgtNS,\n\t\tsrcSecretName, /* Use same target name as source by default */\n\t\tsvcAccount)\n}",
"func (k *K8sWatcher) addK8sSecretV1(secret *slim_corev1.Secret) error {\n\tresource := envoy.Resources{\n\t\tSecrets: []*envoy_entensions_tls_v3.Secret{k8sToEnvoySecret(secret)},\n\t}\n\treturn k.envoyConfigManager.UpsertEnvoyResources(context.TODO(), resource, k.envoyConfigManager)\n}",
"func CopySecretWithName(corev1Input clientcorev1.CoreV1Interface, srcNS, srcSecretName, tgtNS, tgtSecretName, svcAccount string) (*corev1.Secret, error) {\n\ttgtNamespaceSvcAcct := corev1Input.ServiceAccounts(tgtNS)\n\tsrcSecrets := corev1Input.Secrets(srcNS)\n\ttgtNamespaceSecrets := corev1Input.Secrets(tgtNS)\n\n\t// First try to find the secret we're supposed to copy\n\tsrcSecret, err := srcSecrets.Get(context.Background(), srcSecretName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// check for nil source secret\n\tif srcSecret == nil {\n\t\treturn nil, errors.New(\"error copying secret; there is no error but secret is nil\")\n\t}\n\n\t// Found the secret, so now make a copy in our new namespace\n\tnewSecret, err := tgtNamespaceSecrets.Create(\n\t\tcontext.Background(),\n\t\t&corev1.Secret{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: tgtSecretName,\n\t\t\t},\n\t\t\tData: srcSecret.Data,\n\t\t\tType: srcSecret.Type,\n\t\t},\n\t\tmetav1.CreateOptions{})\n\n\t// If the secret already exists then that's ok - may have already been created\n\tif err != nil && !apierrs.IsAlreadyExists(err) {\n\t\treturn nil, fmt.Errorf(\"error copying the Secret: %s\", err)\n\t}\n\n\ttgtSvcAccount, err := tgtNamespaceSvcAcct.Get(context.Background(), svcAccount, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting service account %s: %w\", svcAccount, err)\n\t}\n\n\tfor _, secret := range tgtSvcAccount.ImagePullSecrets {\n\t\tif secret.Name == tgtSecretName {\n\t\t\treturn newSecret, nil\n\t\t}\n\t}\n\t// Prevent overwriting existing imagePullSecrets\n\tpatch := `[{\"op\":\"add\",\"path\":\"/imagePullSecrets/-\",\"value\":{\"name\":\"` + tgtSecretName + `\"}}]`\n\tif len(tgtSvcAccount.ImagePullSecrets) == 0 {\n\t\tpatch = `[{\"op\":\"add\",\"path\":\"/imagePullSecrets\",\"value\":[{\"name\":\"` + tgtSecretName + `\"}]}]`\n\t}\n\t_, err = tgtNamespaceSvcAcct.Patch(context.Background(), svcAccount, 
types.JSONPatchType,\n\t\t[]byte(patch), metav1.PatchOptions{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"patch failed on NS/SA (%s/%s): %w\",\n\t\t\ttgtNS, svcAccount, err)\n\t}\n\treturn newSecret, nil\n}",
"func add(mgr manager.Manager, r reconcile.Reconciler) error {\n\t// Create a new controller\n\tc, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: r})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Watch for changes to primary resource KeycloakClient\n\terr = c.Watch(&source.Kind{Type: &kc.KeycloakClient{}}, &handler.EnqueueRequestForObject{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Make sure to watch the credential secrets\n\terr = c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{\n\t\tIsController: true,\n\t\tOwnerType: &kc.KeycloakClient{},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func newSecrets(c *APIV1Client) *secrets {\n\treturn &secrets{\n\t\tclient: c.RESTClient(),\n\t}\n}",
"func defaultCopySecretsToNamespace(ec ExecutionContext, cn string, secrets []string) error {\n\tk8scli, err := clients.Kubernetes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, secrectName := range secrets {\n\t\tsecretData, err := k8scli.Client.CoreV1().Secrets(cn).Get(secrectName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\toldMeta := secretData.ObjectMeta\n\t\tsecretData.ObjectMeta = metav1.ObjectMeta{Name: oldMeta.Name, Namespace: ec.Location, Labels: oldMeta.Labels, Annotations: oldMeta.Annotations}\n\t\t_, err = k8scli.Client.CoreV1().Secrets(ec.Location).Create(secretData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (e *Kex2Provisioner) AddSecret(s kex2.Secret) {\n\te.secretCh <- s\n}",
"func (k *Kubernetes) CopyErdaSecrets(originns, dstns string) ([]apiv1.Secret, error) {\n\tsecrets, err := k.secret.List(originns)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := []apiv1.Secret{}\n\tfor _, secret := range secrets.Items {\n\t\t// ignore default token\n\t\tif !strutil.HasPrefixes(secret.Name, \"dice-\") {\n\t\t\tcontinue\n\t\t}\n\t\tdstsecret := &apiv1.Secret{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"v1\",\n\t\t\t\tKind: \"Secret\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: secret.Name,\n\t\t\t\tNamespace: dstns,\n\t\t\t},\n\t\t\tData: secret.Data,\n\t\t\tStringData: secret.StringData,\n\t\t\tType: secret.Type,\n\t\t}\n\t\tif err := k.secret.CreateIfNotExist(dstsecret); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, secret)\n\t}\n\treturn result, nil\n}",
"func (c *HttpController) Secret(writer http.ResponseWriter, request *http.Request) {\n\tdefer func() {\n\t\t_ = request.Body.Close()\n\t}()\n\n\tprovider := bootstrapContainer.SecretProviderFrom(c.dic.Get)\n\tsecretRequest := common.SecretRequest{}\n\terr := json.NewDecoder(request.Body).Decode(&secretRequest)\n\tif err != nil {\n\t\tedgexError := errors.NewCommonEdgeX(errors.KindContractInvalid, \"JSON decode failed\", err)\n\t\tc.sendEdgexError(writer, request, edgexError, sdkCommon.APIV2SecretRoute)\n\t\treturn\n\t}\n\n\tpath, secret := c.prepareSecret(secretRequest)\n\n\tif err := provider.StoreSecrets(path, secret); err != nil {\n\t\tedgexError := errors.NewCommonEdgeX(errors.KindServerError, \"Storing secret failed\", err)\n\t\tc.sendEdgexError(writer, request, edgexError, sdkCommon.APIV2SecretRoute)\n\t\treturn\n\t}\n\n\tresponse := common.NewBaseResponse(secretRequest.RequestId, \"\", http.StatusCreated)\n\tc.sendResponse(writer, request, sdkCommon.APIV2SecretRoute, response, http.StatusCreated)\n}",
"func (c *KubeTestPlatform) AddSecrets(secrets []kube.SecretDescription) error {\n\tif c.KubeClient == nil {\n\t\treturn fmt.Errorf(\"kubernetes cluster needs to be setup\")\n\t}\n\n\tfor _, secret := range secrets {\n\t\tc.Secrets.Add(kube.NewSecret(c.KubeClient, secret.Namespace, secret.Name, secret.Data))\n\t}\n\n\t// setup secret resources\n\tif err := c.Secrets.setup(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (in *SecretsAPI) DeepCopy() *SecretsAPI {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SecretsAPI)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (l CMD) AddSecret(secret string) Logger {\n\tl.secrets = append(l.secrets, secret)\n\treturn l\n}",
"func NewSecretReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) Replicator {\n\trepl := secretReplicator{\n\t\treplicatorProps: replicatorProps{\n\t\t\tallowAll: allowAll,\n\t\t\tclient: client,\n\t\t\tdependencyMap: make(map[string][]string),\n\t\t},\n\t}\n\n\tstore, controller := cache.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\treturn client.CoreV1().Secrets(\"\").List(lo)\n\t\t\t},\n\t\t\tWatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\treturn client.CoreV1().Secrets(\"\").Watch(lo)\n\t\t\t},\n\t\t},\n\t\t&v1.Secret{},\n\t\tresyncPeriod,\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: repl.SecretAdded,\n\t\t\tUpdateFunc: func(old interface{}, new interface{}) { repl.SecretAdded(new) },\n\t\t\tDeleteFunc: repl.SecretDeleted,\n\t\t},\n\t)\n\n\trepl.store = store\n\trepl.controller = controller\n\n\treturn &repl\n}",
"func AddClientSecret(ctx context.Context, objID string) (autorest.Response, error) {\n\tappClient := getApplicationsClient()\n\treturn appClient.UpdatePasswordCredentials(\n\t\tctx,\n\t\tobjID,\n\t\tgraphrbac.PasswordCredentialsUpdateParameters{\n\t\t\tValue: &[]graphrbac.PasswordCredential{\n\t\t\t\t{\n\t\t\t\t\tStartDate: &date.Time{time.Now()},\n\t\t\t\t\tEndDate: &date.Time{time.Date(2018, time.December, 20, 22, 0, 0, 0, time.UTC)},\n\t\t\t\t\tValue: to.StringPtr(\"052265a2-bdc8-49aa-81bd-ecf7e9fe0c42\"), // this will become the client secret! Record this value, there is no way to get it back\n\t\t\t\t\tKeyID: to.StringPtr(\"08023993-9209-4580-9d4a-e060b44a64b8\"),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n}",
"func Create(ctx context.Context, dev *model.Dev, c *kubernetes.Clientset, s *syncthing.Syncthing) error {\n\tsecretName := GetSecretName(dev)\n\n\tsct, err := Get(ctx, secretName, dev.Namespace, c)\n\tif err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\treturn fmt.Errorf(\"error getting kubernetes secret: %s\", err)\n\t}\n\n\tconfig, err := getConfigXML(s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error generating syncthing configuration: %s\", err)\n\t}\n\tdata := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: secretName,\n\t\t\tLabels: map[string]string{\n\t\t\t\tconstants.DevLabel: \"true\",\n\t\t\t},\n\t\t},\n\t\tType: v1.SecretTypeOpaque,\n\t\tData: map[string][]byte{\n\t\t\t\"config.xml\": config,\n\t\t\t\"cert.pem\": []byte(certPEM),\n\t\t\t\"key.pem\": []byte(keyPEM),\n\t\t},\n\t}\n\n\tidx := 0\n\tfor _, s := range dev.Secrets {\n\t\tcontent, err := os.ReadFile(s.LocalPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading secret '%s': %s\", s.LocalPath, err)\n\t\t}\n\t\tif strings.Contains(s.GetKeyName(), \"stignore\") {\n\t\t\tidx++\n\t\t\tdata.Data[fmt.Sprintf(\"%s-%d\", s.GetKeyName(), idx)] = content\n\t\t} else {\n\t\t\tdata.Data[s.GetKeyName()] = content\n\t\t}\n\n\t}\n\n\tif sct.Name == \"\" {\n\t\t_, err := c.CoreV1().Secrets(dev.Namespace).Create(ctx, data, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error creating kubernetes sync secret: %s\", err)\n\t\t}\n\n\t\toktetoLog.Infof(\"created okteto secret '%s'\", secretName)\n\t} else {\n\t\t_, err := c.CoreV1().Secrets(dev.Namespace).Update(ctx, data, metav1.UpdateOptions{})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updating kubernetes okteto secret: %s\", err)\n\t\t}\n\t\toktetoLog.Infof(\"updated okteto secret '%s'\", secretName)\n\t}\n\treturn nil\n}",
"func NewSecretReplicator(client kubernetes.Interface, options ReplicatorOptions, resyncPeriod time.Duration) Replicator {\n\trepl := ObjectReplicator{\n\t\tReplicatorProps: NewReplicatorProps(client, \"secret\", options),\n\t\tReplicatorActions: _secretActions,\n\t}\n\tsecrets := client.CoreV1().Secrets(\"\")\n\tlistWatch := cache.ListWatch{\n\t\tListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn secrets.List(lo)\n\t\t},\n\t\tWatchFunc: secrets.Watch,\n\t}\n\trepl.InitStores(&listWatch, &v1.Secret{}, resyncPeriod)\n\treturn &repl\n}",
"func NewSecretController(informerFactory informers.SharedInformerFactory, syncrule helpers.SyncRule, local bool) *SecretController {\n\tsecretInformer := informerFactory.Core().V1().Secrets()\n\n\tc := &SecretController{\n\t\tinformerFactory: informerFactory,\n\t\tsecretInformer: secretInformer,\n\t\tsyncrule: syncrule,\n\t}\n\tif local {\n\t\tsecretInformer.Informer().AddEventHandler(\n\t\t\t// Your custom resource event handlers.\n\t\t\tcache.ResourceEventHandlerFuncs{\n\t\t\t\t// Called on creation\n\t\t\t\tAddFunc: c.localSecretAdd,\n\t\t\t\t// Called on resource update and every resyncPeriod on existing resources.\n\t\t\t\tUpdateFunc: c.localSecretUpdate,\n\t\t\t\t// Called on resource deletion.\n\t\t\t\tDeleteFunc: c.localSecretDelete,\n\t\t\t},\n\t\t)\n\t\treturn c\n\t}\n\tsecretInformer.Informer().AddEventHandler(\n\t\t// Your custom resource event handlers.\n\t\tcache.ResourceEventHandlerFuncs{\n\t\t\t// Called on creation\n\t\t\tAddFunc: c.secretAdd,\n\t\t\t// Called on resource update and every resyncPeriod on existing resources.\n\t\t\tUpdateFunc: c.secretUpdate,\n\t\t\t// Called on resource deletion.\n\t\t\tDeleteFunc: c.secretDelete,\n\t\t},\n\t)\n\treturn c\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
listQuarksSecretsReconciles lists all Quarks Secrets associated with a particular secret.
|
// listQuarksSecretsReconciles builds a reconcile.Request for every QuarksSecret
// in the given namespace whose Spec.SecretName matches the supplied secret.
// It returns the (possibly empty) slice of requests, or an error if the
// QuarksSecret list call fails. A mapping event is logged for each match.
func listQuarksSecretsReconciles(ctx context.Context, client crc.Client, secret *corev1.Secret, namespace string) ([]reconcile.Request, error) {
	list := &qsv1a1.QuarksSecretList{}
	if err := client.List(ctx, list, crc.InNamespace(namespace)); err != nil {
		return nil, errors.Wrap(err, "failed to list QuarksSecrets")
	}

	requests := []reconcile.Request{}
	for _, qs := range list.Items {
		// Only QuarksSecrets that reference this Kubernetes secret by name are relevant.
		if qs.Spec.SecretName != secret.Name {
			continue
		}
		req := reconcile.Request{
			NamespacedName: types.NamespacedName{
				Name:      qs.Name,
				Namespace: qs.Namespace,
			}}
		requests = append(requests, req)
		ctxlog.NewMappingEvent(secret).Debug(ctx, req, "QuarksSecret", secret.Name, qsv1a1.KubeSecretReference)
	}
	return requests, nil
}
|
[
"func (s Secrets) List(ctx context.Context, maxResults int32) ([]string, error) {\n\tvers, err := s.Ops.Secrets().List(ctx, maxResults)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := make([]string, 0, len(vers))\n\tfor _, ver := range vers {\n\t\tout = append(out, ver.ID)\n\t}\n\treturn out, nil\n}",
"func NewQuarksSecretReconciler(ctx context.Context, config *config.Config, mgr manager.Manager, generator credsgen.Generator, srf setReferenceFunc) reconcile.Reconciler {\n\treturn &ReconcileQuarksSecret{\n\t\tctx: ctx,\n\t\tconfig: config,\n\t\tclient: mgr.GetClient(),\n\t\tscheme: mgr.GetScheme(),\n\t\tgenerator: generator,\n\t\tsetReference: srf,\n\t}\n}",
"func (r *ReconcileQuarksSecret) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {\n\tqsec := &qsv1a1.QuarksSecret{}\n\n\t// Set the ctx to be Background, as the top-level context for incoming requests.\n\tctx, cancel := context.WithTimeout(r.ctx, r.config.CtxTimeOut)\n\tdefer cancel()\n\n\tctxlog.Infof(ctx, \"Reconciling QuarksSecret %s\", request.NamespacedName)\n\terr := r.client.Get(ctx, request.NamespacedName, qsec)\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\tctxlog.Info(ctx, \"Skip reconcile: quarks secret not found\")\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\tctxlog.Info(ctx, \"Error reading the object\")\n\t\treturn reconcile.Result{}, errors.Wrap(err, \"Error reading quarksSecret\")\n\t}\n\tif meltdown.NewWindow(r.config.MeltdownDuration, qsec.Status.LastReconcile).Contains(time.Now()) {\n\t\tctxlog.WithEvent(qsec, \"Meltdown\").Debugf(ctx, \"Resource '%s' is in meltdown, requeue reconcile after %s\", qsec.GetNamespacedName(), r.config.MeltdownRequeueAfter)\n\t\treturn reconcile.Result{RequeueAfter: r.config.MeltdownRequeueAfter}, nil\n\t}\n\n\t// Create secret\n\tswitch qsec.Spec.Type {\n\tcase qsv1a1.Password:\n\t\tctxlog.Info(ctx, \"Generating password\")\n\t\terr = r.createPasswordSecret(ctx, qsec)\n\t\tif err != nil {\n\t\t\tctxlog.Infof(ctx, \"Error generating password secret: %s\", err.Error())\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"generating password secret failed.\")\n\t\t}\n\tcase qsv1a1.RSAKey:\n\t\tctxlog.Info(ctx, \"Generating RSA Key\")\n\t\terr = r.createRSASecret(ctx, qsec)\n\t\tif err != nil {\n\t\t\tctxlog.Infof(ctx, \"Error generating RSA key secret: %s\", err.Error())\n\t\t\treturn 
reconcile.Result{}, errors.Wrap(err, \"generating RSA key secret failed.\")\n\t\t}\n\tcase qsv1a1.SSHKey:\n\t\tctxlog.Info(ctx, \"Generating SSH Key\")\n\t\terr = r.createSSHSecret(ctx, qsec)\n\t\tif err != nil {\n\t\t\tctxlog.Infof(ctx, \"Error generating SSH key secret: %s\", err.Error())\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"generating SSH key secret failed.\")\n\t\t}\n\tcase qsv1a1.Certificate, qsv1a1.TLS:\n\t\tctxlog.Info(ctx, \"Generating certificate\")\n\t\terr = r.createCertificateSecret(ctx, qsec)\n\t\tif err != nil {\n\t\t\tif isCaNotReady(err) {\n\t\t\t\tctxlog.Info(ctx, fmt.Sprintf(\"CA for secret '%s' is not ready yet: %s\", request.NamespacedName, err))\n\t\t\t\treturn reconcile.Result{RequeueAfter: time.Second * 5}, nil\n\t\t\t}\n\t\t\tctxlog.Info(ctx, \"Error generating certificate secret: \"+err.Error())\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"generating certificate secret.\")\n\t\t}\n\tcase qsv1a1.BasicAuth:\n\t\terr = r.createBasicAuthSecret(ctx, qsec)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"generating basic-auth secret\")\n\t\t}\n\tcase qsv1a1.TemplatedConfig:\n\t\tif err := r.createTemplatedConfigSecret(ctx, qsec); err != nil {\n\t\t\tif isSecNotReady(err) {\n\t\t\t\tctxlog.Info(ctx, fmt.Sprintf(\"Secrets '%s' is not ready yet: %s\", request.NamespacedName, err))\n\t\t\t\treturn reconcile.Result{RequeueAfter: time.Second * 5}, nil\n\t\t\t}\n\t\t\tctxlog.Info(ctx, \"Error generating templatedConfig secret: \"+err.Error())\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"generating templatedConfig secret.\")\n\t\t}\n\tcase qsv1a1.SecretCopy:\n\t\t// noop\n\t\treturn reconcile.Result{}, nil\n\tcase qsv1a1.DockerConfigJSON:\n\t\tctxlog.Info(ctx, \"Generating dockerConfigJson\")\n\t\terr = r.createDockerConfigJSON(ctx, qsec)\n\t\tif err != nil {\n\t\t\tif isSecNotReady(err) {\n\t\t\t\tctxlog.Info(ctx, fmt.Sprintf(\"Secrets '%s' is not ready yet: %s\", request.NamespacedName, 
err))\n\t\t\t\treturn reconcile.Result{RequeueAfter: time.Second * 5}, nil\n\t\t\t}\n\t\t\tctxlog.Info(ctx, \"Error generating dockerConfigJson secret: \"+err.Error())\n\t\t\treturn reconcile.Result{}, errors.Wrap(err, \"generating dockerConfigJson secret.\")\n\t\t}\n\tdefault:\n\t\terr = ctxlog.WithEvent(qsec, \"InvalidTypeError\").Errorf(ctx, \"Invalid type: %s\", qsec.Spec.Type)\n\t\treturn reconcile.Result{}, err\n\t}\n\tr.updateStatus(ctx, qsec)\n\treturn reconcile.Result{}, nil\n}",
"func ListSecrets(tableName *string, allVersions bool) ([]*Credential, error) {\n\tlog.Debug(\"Listing secrets\")\n\n\tvar items []map[string]*dynamodb.AttributeValue\n\tvar lastEvaluatedKey map[string]*dynamodb.AttributeValue\n\n\tfor {\n\t\tres, err := dynamoSvc.Scan(&dynamodb.ScanInput{\n\t\t\tTableName: tableName,\n\t\t\tExpressionAttributeNames: map[string]*string{\n\t\t\t\t\"#N\": aws.String(\"name\"),\n\t\t\t},\n\t\t\tProjectionExpression: aws.String(\"#N, version, created_at\"),\n\t\t\tConsistentRead: aws.Bool(true),\n\t\t\tExclusiveStartKey: lastEvaluatedKey,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, res.Items...)\n\t\tlastEvaluatedKey = res.LastEvaluatedKey\n\t\tif lastEvaluatedKey == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tcreds, err := decodeCredential(items)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !allVersions {\n\t\tcreds, err = filterLatest(creds)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsort.Sort(ByName(creds))\n\treturn creds, nil\n\n}",
"func (r Repository) ListSecrets(ctx context.Context, ns string, options metav1.ListOptions) (*corev1.SecretList, error) {\n\treturn r.kcli.CoreV1().Secrets(ns).List(ctx, options)\n}",
"func SecretList(opt *options.SearchOptions) *corev1.SecretList {\n\tclientset := client.InitClient()\n\tns, o := setOptions(opt)\n\tlist, err := clientset.CoreV1().Secrets(ns).List(context.TODO(), *o)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"err\": err.Error(),\n\t\t}).Debug(\"Unable to get Secret List\")\n\t}\n\treturn list\n}",
"func (store *managerStore) ListAllSecrets() ([]*commtypes.BcsSecret, error) {\n\tif cacheMgr.isOK {\n\t\treturn listCacheSecrets()\n\t}\n\n\tclient := store.BkbcsClient.BcsSecrets(\"\")\n\tv2Secs, err := client.List(context.Background(), metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecrets := make([]*commtypes.BcsSecret, 0, len(v2Secs.Items))\n\tfor _, sec := range v2Secs.Items {\n\t\tobj := sec.Spec.BcsSecret\n\t\tobj.ResourceVersion = sec.ResourceVersion\n\t\tsecrets = append(secrets, &obj)\n\t}\n\n\treturn secrets, nil\n}",
"func GetReconciles(ctx context.Context, client crc.Client, object apis.Object) ([]reconcile.Request, error) {\n\tobjReferencedBy := func(parent qjv1a1.QuarksJob) (bool, error) {\n\t\tvar (\n\t\t\tobjectReferences map[string]bool\n\t\t\terr error\n\t\t\tname string\n\t\t\tversionedSecret bool\n\t\t)\n\n\t\tswitch object := object.(type) {\n\t\tcase *corev1.ConfigMap:\n\t\t\tobjectReferences = podref.GetConfMapRefFromPod(parent.Spec.Template.Spec.Template.Spec)\n\t\t\tname = object.Name\n\t\tcase *corev1.Secret:\n\t\t\tobjectReferences = podref.GetSecretRefFromPodSpec(parent.Spec.Template.Spec.Template.Spec)\n\t\t\tname = object.Name\n\t\t\tversionedSecret = vss.IsVersionedSecret(*object)\n\t\tdefault:\n\t\t\treturn false, errors.New(\"can't get reconciles for unknown object type; supported types are ConfigMap and Secret\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn false, errors.Wrap(err, \"error listing references\")\n\t\t}\n\n\t\tif versionedSecret {\n\t\t\tkeys := make([]string, len(objectReferences))\n\t\t\ti := 0\n\t\t\tfor k := range objectReferences {\n\t\t\t\tkeys[i] = k\n\t\t\t\ti++\n\t\t\t}\n\t\t\tok := vss.ContainsSecretName(keys, name)\n\t\t\treturn ok, nil\n\t\t}\n\n\t\t_, ok := objectReferences[name]\n\t\treturn ok, nil\n\t}\n\n\tnamespace := object.GetNamespace()\n\tresult := []reconcile.Request{}\n\n\tlog.Debugf(ctx, \"Searching QuarksJobs for references to '%s/%s' \", namespace, object.GetName())\n\tquarksJobs := &qjv1a1.QuarksJobList{}\n\terr := client.List(ctx, quarksJobs, crc.InNamespace(namespace))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to list QuarksJobs for reconciles\")\n\t}\n\n\tfor _, qJob := range quarksJobs.Items {\n\t\tif !(qJob.Spec.UpdateOnConfigChange && qJob.IsAutoErrand()) {\n\t\t\tcontinue\n\t\t}\n\t\tisRef, err := objReferencedBy(qJob)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif isRef {\n\t\t\tresult = append(result, reconcile.Request{\n\t\t\t\tNamespacedName: 
types.NamespacedName{\n\t\t\t\t\tName: qJob.Name,\n\t\t\t\t\tNamespace: qJob.Namespace,\n\t\t\t\t}})\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func (c *Cache) SecretList() []Secret {\n\tfailureDeadline := time.After(c.timeouts.MaxWait)\n\t// Optimistically wait for a backend response before using a cached response.\n\tbackendDeadline := time.After(c.timeouts.BackendDeadline)\n\n\tcacheDone := c.cacheSecretList()\n\tbackendDone := c.backendSecretList()\n\n\tvar cachedSecrets []Secret\n\tfor {\n\t\tselect {\n\t\tcase secrets := <-backendDone:\n\t\t\treturn secrets\n\t\tcase cachedSecrets = <-cacheDone:\n\t\t\tcacheDone = nil\n\t\tcase <-backendDeadline:\n\t\t\tif cachedSecrets != nil {\n\t\t\t\treturn cachedSecrets\n\t\t\t}\n\t\tcase <-failureDeadline:\n\t\t\tc.Errorf(\"Cache and backend timeout: secretList()\")\n\t\t\treturn make([]Secret, 0)\n\t\t}\n\t}\n}",
"func (api *API) ListWorkersSecrets(ctx context.Context, rc *ResourceContainer, params ListWorkersSecretsParams) (WorkersListSecretsResponse, error) {\n\tif rc.Level != AccountRouteLevel {\n\t\treturn WorkersListSecretsResponse{}, ErrRequiredAccountLevelResourceContainer\n\t}\n\n\tif rc.Identifier == \"\" {\n\t\treturn WorkersListSecretsResponse{}, ErrMissingAccountID\n\t}\n\n\turi := fmt.Sprintf(\"/accounts/%s/workers/scripts/%s/secrets\", rc.Identifier, params.ScriptName)\n\tres, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)\n\tif err != nil {\n\t\treturn WorkersListSecretsResponse{}, err\n\t}\n\n\tresult := WorkersListSecretsResponse{}\n\tif err := json.Unmarshal(res, &result); err != nil {\n\t\treturn result, fmt.Errorf(\"%s: %w\", errUnmarshalError, err)\n\t}\n\n\treturn result, err\n}",
"func (dir EnvDir) Secrets() []string {\n\treturn []string{}\n}",
"func (v *Vault) ListSecrets() {\n\tlog.Debugf(\"Listing secrets in Vault KV Path: %v\", s.VaultKVPath())\n\td, err := v.Client.Logical().List(s.VaultKVPath())\n\tif err != nil {\n\t\tlog.Fatalf(\"Vault error: %v\", err)\n\t}\n\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader([]string{\"Key\"})\n\tfor _, l := range d.Data[\"keys\"].([]interface{}) {\n\t\ttable.Append([]string{l.(string)})\n\t}\n\ttable.Render()\n}",
"func GetSecretList(c *gin.Context) {\n\trepo := session.Repo(c)\n\tlist, err := Config.Services.Secrets.SecretList(repo)\n\tif err != nil {\n\t\tc.String(500, \"Error getting secret list. %s\", err)\n\t\treturn\n\t}\n\t// copy the secret detail to remove the sensitive\n\t// password and token fields.\n\tfor i, secret := range list {\n\t\tlist[i] = secret.Copy()\n\t}\n\tc.JSON(200, list)\n}",
"func (ef EnvFlags) Secrets() []string {\n\tresult := make([]string, len(ef))\n\ti := 0\n\tfor _, v := range ef {\n\t\tresult[i] = v\n\t\ti++\n\t}\n\treturn result\n}",
"func getSecrets(p *Plugin) ([]Secret, error) {\n\ttype (\n\t\t// PasswordState JSON response for the passwords\n\t\tPasswordList struct {\n\t\t\tPasswordID int `json:\"PasswordID\"`\n\t\t\tTitle string `json:\"Title\"`\n\t\t\tUserName string `json:\"UserName\"`\n\t\t\tDescription string `json:\"Description\"`\n\t\t\tGenericField1 string `json:\"GenericField1\"`\n\t\t\tGenericField2 string `json:\"GenericField2\"`\n\t\t\tGenericField3 string `json:\"GenericField3\"`\n\t\t\tGenericField4 string `json:\"GenericField4\"`\n\t\t\tGenericField5 string `json:\"GenericField5\"`\n\t\t\tGenericField6 string `json:\"GenericField6\"`\n\t\t\tGenericField7 string `json:\"GenericField7\"`\n\t\t\tGenericField8 string `json:\"GenericField8\"`\n\t\t\tGenericField9 string `json:\"GenericField9\"`\n\t\t\tGenericField10 string `json:\"GenericField10\"`\n\t\t\tAccountTypeID int `json:\"AccountTypeID\"`\n\t\t\tNotes string `json:\"Notes\"`\n\t\t\tURL string `json:\"URL\"`\n\t\t\tPassword string `json:\"Password\"`\n\t\t\tExpiryDate string `json:\"ExpiryDate\"`\n\t\t\tAllowExport bool `json:\"AllowExport\"`\n\t\t\tAccountType string `json:\"AccountType\"`\n\t\t}\n\t)\n\n\tvar (\n\t\turl strings.Builder\n\t\tsecrets []Secret\n\t)\n\n\turl.WriteString(strings.TrimRight(p.Config.ApiEndpoint, \"/\"))\n\turl.WriteString(\"/passwords/{PasswordListID}\")\n\n\t// Configure the API client:\n\tclient := resty.New()\n\tclient.\n\t\tSetRetryCount(p.Config.ConnectionRetries).\n\t\tSetTimeout(time.Duration(p.Config.ConnectionTimeout) * time.Second)\n\tif p.Config.Debug {\n\t\tclient.SetDebug(true)\n\t}\n\tif p.Config.SkipTlsVerify {\n\t\tclient.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: p.Config.SkipTlsVerify})\n\t}\n\tclient.\n\t\tSetQueryParams(map[string]string{\n\t\t\t\"QueryAll\": \"true\",\n\t\t\t\"PreventAuditing\": \"false\",\n\t\t}).\n\t\tSetPathParams(map[string]string{\n\t\t\t\"PasswordListID\": 
strconv.Itoa(p.Config.PasswordListId),\n\t\t}).\n\t\tSetHeaders(map[string]string{\n\t\t\t\"APIKey\": p.Config.ApiKey,\n\t\t\t\"Content-Type\": \"application/json\",\n\t\t})\n\n\t// Send the request:\n\tlogrus.WithField(\"endpoint\", p.Config.ApiEndpoint).WithField(\"list_id\", p.Config.PasswordListId).Infoln(\"Querying PasswordState API.\")\n\tresponse, err := client.R().\n\t\tSetResult([]PasswordList{}).\n\t\tGet(url.String())\n\n\tif err != nil {\n\t\tlogrus.WithError(err).Errorln(\"Failed to retrieved data from PasswordState.\")\n\t\treturn nil, err\n\t}\n\n\tpasswords := *response.Result().(*[]PasswordList)\n\tlogrus.WithField(\"count\", len(passwords)).Infoln(\"Passwords retrieved from PasswordState.\")\n\tlogrus.WithField(\"key_field\", p.Config.KeyField).WithField(\"value_field\", p.Config.ValueField).Infoln(\"Converting retrieved passwords to secrets.\")\n\tfor _, password := range passwords {\n\t\tkey := reflect.Indirect(reflect.ValueOf(password)).FieldByName(p.Config.KeyField).String()\n\t\tif key == \"\" || key == \"<invalid Value>\" {\n\t\t\tlogrus.WithField(\"password_id\", password.PasswordID).WithField(\"field\", p.Config.KeyField).Warnln(\"Key is empty. Skipping the secret.\")\n\t\t\tcontinue\n\t\t}\n\t\tvalue := reflect.Indirect(reflect.ValueOf(password)).FieldByName(p.Config.ValueField).String()\n\t\tif value == \"\" || value == \"<invalid Value>\" {\n\t\t\tlogrus.WithField(\"password_id\", password.PasswordID).WithField(\"field\", p.Config.ValueField).Warnln(\"Value is empty. Skipping the secret.\")\n\t\t\tcontinue\n\t\t}\n\t\tsecret := Secret{\n\t\t\tKey: key,\n\t\t\tValue: value,\n\t\t}\n\t\tsecrets = append(secrets, secret)\n\t}\n\n\tlogrus.WithField(\"count\", len(secrets)).Infoln(\"Finished processing the secrets.\")\n\treturn secrets, nil\n}",
"func (s *ActionsService) ListEnvSecrets(ctx context.Context, repoID int, env string, opts *ListOptions) (*Secrets, *Response, error) {\n\turl := fmt.Sprintf(\"repositories/%v/environments/%v/secrets\", repoID, env)\n\treturn s.listSecrets(ctx, url, opts)\n}",
"func (c *Cache) backendSecretList() chan []Secret {\n\tsecretsc := make(chan []Secret, 1)\n\tgo func() {\n\t\tsecrets, ok := c.backend.SecretList()\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tsecretsc <- secrets\n\t\tclose(secretsc)\n\n\t\tnewMap := NewSecretMap()\n\n\t\tfor _, backendSecret := range secrets {\n\t\t\t// If the cache contains a secret with content, keep it over backendSecret.\n\t\t\tif s, ok := c.secretMap.Get(backendSecret.Name); ok && len(s.Secret.Content) > 0 {\n\t\t\t\tnewMap.Put(backendSecret.Name, s.Secret)\n\t\t\t} else { // Otherwise, cache the latest information.\n\t\t\t\tnewMap.Put(backendSecret.Name, backendSecret)\n\t\t\t}\n\t\t}\n\t\tc.secretMap.Overwrite(newMap)\n\t}()\n\treturn secretsc\n}",
"func (l *Libvirt) ListAllSecrets() ([]*Secret, error) {\n\treq := libvirt.RemoteConnectListAllSecretsReq{\n\t\tNeedResults: 1,\n\t\tFlags: 0}\n\tres := libvirt.RemoteConnectListAllSecretsRes{}\n\n\tbuf, err := encode(&req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := l.send(libvirt.RemoteProcConnectListAllSecrets, 0, libvirt.MessageTypeCall, libvirt.RemoteProgram, libvirt.MessageStatusOK, &buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := <-resp\n\tif r.Header.Status != libvirt.MessageStatusOK {\n\t\treturn nil, decodeError(r.Payload)\n\t}\n\n\tdec := xdr.NewDecoder(bytes.NewReader(r.Payload))\n\t_, err = dec.Decode(&res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar secrets []*Secret\n\tfor _, secret := range res.Secrets {\n\t\tsecrets = append(secrets, &Secret{l: l, RemoteSecret: secret})\n\t}\n\treturn secrets, nil\n}",
"func (l *Libvirt) Secrets() ([]Secret, error) {\n\tsecrets, _, err := l.ConnectListAllSecrets(1, 0)\n\treturn secrets, err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RecoverPanics wraps http.Handler to recover and log panics.
|
// RecoverPanics wraps an http.Handler so that a panic raised while serving a
// request is recovered instead of crashing the server. On recovery it replies
// with a 500 Internal Server Error and logs the panic value together with a
// stack trace. It also attaches request logging that emits a stacktrace for
// unexpected response status codes.
func RecoverPanics(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		defer func() {
			if x := recover(); x != nil {
				w.WriteHeader(http.StatusInternalServerError)
				fmt.Fprint(w, "apis panic. Look in log for details.")
				// A recovered panic is a server fault: log it at Error
				// severity (was Infof, which buries panics in info logs).
				glog.Errorf("APIServer panic'd on %v %v: %#v\n%s\n", req.Method, req.RequestURI, x, debug.Stack())
			}
		}()
		// Log the request; include a stacktrace when the response status is
		// not one of the expected/benign codes listed below.
		defer httplog.NewLogged(req, &w).StacktraceWhen(
			httplog.StatusIsNot(
				http.StatusOK,
				http.StatusAccepted,
				http.StatusMovedPermanently,
				http.StatusTemporaryRedirect,
				http.StatusConflict,
				http.StatusNotFound,
				StatusUnprocessableEntity,
			),
		).Log()
		// Dispatch to the internal handler
		handler.ServeHTTP(w, req)
	})
}
|
[
"func RecoverPanics(handler http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tdefer func() {\n\t\t\tif x := recover(); x != nil {\n\t\t\t\thttp.Error(w, \"apis panic. Look in log for details.\", http.StatusInternalServerError)\n\t\t\t\tglog.Errorf(\"APIServer panic'd on %v %v: %v\\n%s\\n\", req.Method, req.RequestURI, x, debug.Stack())\n\t\t\t}\n\t\t}()\n\t\tdefer httplog.NewLogged(req, &w).StacktraceWhen(\n\t\t\thttplog.StatusIsNot(\n\t\t\t\thttp.StatusOK,\n\t\t\t\thttp.StatusCreated,\n\t\t\t\thttp.StatusAccepted,\n\t\t\t\thttp.StatusBadRequest,\n\t\t\t\thttp.StatusMovedPermanently,\n\t\t\t\thttp.StatusTemporaryRedirect,\n\t\t\t\thttp.StatusConflict,\n\t\t\t\thttp.StatusNotFound,\n\t\t\t\thttp.StatusUnauthorized,\n\t\t\t\thttp.StatusForbidden,\n\t\t\t\terrors.StatusUnprocessableEntity,\n\t\t\t\thttp.StatusSwitchingProtocols,\n\t\t\t),\n\t\t).Log()\n\n\t\t// Dispatch to the internal handler\n\t\thandler.ServeHTTP(w, req)\n\t})\n}",
"func panicRecover(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer gulu.Panic.Recover(nil)\n\n\t\thandler(w, r)\n\t}\n}",
"func PanicRecover(h juggler.Handler, vars *expvar.Map) juggler.Handler {\n\treturn juggler.HandlerFunc(func(ctx context.Context, c *juggler.Conn, m message.Msg) {\n\t\tdefer func() {\n\t\t\tif e := recover(); e != nil {\n\t\t\t\tif vars != nil {\n\t\t\t\t\tvars.Add(\"RecoveredPanics\", 1)\n\t\t\t\t}\n\n\t\t\t\tvar err error\n\t\t\t\tswitch e := e.(type) {\n\t\t\t\tcase error:\n\t\t\t\t\terr = e\n\t\t\t\tdefault:\n\t\t\t\t\terr = fmt.Errorf(\"%v\", e)\n\t\t\t\t}\n\t\t\t\tc.Close(err)\n\t\t\t}\n\t\t}()\n\t\th.Handle(ctx, c, m)\n\t})\n}",
"func Recoverer(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\tdefer func() {\n\t\t\tif rec := recover(); rec != nil {\n\t\t\t\tvar err error\n\n\t\t\t\tswitch t := rec.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\terr = Error(t)\n\t\t\t\tcase error:\n\t\t\t\t\terr = t\n\t\t\t\tdefault:\n\t\t\t\t\terr = ErrUnknownError\n\t\t\t\t}\n\t\t\t\tif errors.Is(err, http.ErrAbortHandler) {\n\t\t\t\t\t// ErrAbortHandler is called when the client closes the connection or the connection is closed\n\t\t\t\t\t// so we don't need to lose our poop, just clean it up and move on\n\t\t\t\t\tErrorOut.Printf(\"%s\\n\", ErrRequestError{r, ErrAborted.Error()})\n\t\t\t\t\tDebugOut.Printf(\"ErrAbortHandler: %s\\n\", ErrRequestError{r, fmt.Sprintf(\"Panic occurred: %s\", gerrors.Wrap(err, 2).ErrorStack())})\n\t\t\t\t\thttp.Error(w, ErrRequestError{r, StatusClientClosedRequestText}.Error(), StatusClientClosedRequest) // Machine-readable\n\t\t\t\t\treturn\n\t\t\t\t} else if Conf.GetBool(ConfigRecovererLogStackTraces) {\n\t\t\t\t\tErrorOut.Printf(\"%s\\n\", ErrRequestError{r, fmt.Sprintf(\"Panic occurred: %s\", gerrors.Wrap(err, 2).ErrorStack())})\n\t\t\t\t} else {\n\t\t\t\t\tErrorOut.Printf(\"%s\\n\", ErrRequestError{r, fmt.Sprintf(\"Panic occurred: %s\", err)})\n\t\t\t\t}\n\t\t\t\t//http.Error(w, ErrRequestError{r, \"an internal error occurred\"}.Error(), http.StatusInternalServerError)\n\t\t\t\tRequestErrorResponse(r, w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t\tnext.ServeHTTP(w, r)\n\t})\n}",
"func (a *App) recoverHandler(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\ta.logr.Log(\"Panic: %+v\", err)\n\t\t\t\t// TODO: use a nicer 500 page\n\t\t\t\thttp.Error(w, http.StatusText(500), 500)\n\t\t\t}\n\t\t}()\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}",
"func recoverer(logger Logger) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\t// Capture our own copy of the logger so change in this closure\n\t\t\t// won't affect the object passed-in.\n\n\t\t\tlogger := logger\n\n\t\t\t// Defer a function to catch any panic and log a stack trace.\n\n\t\t\tdefer func() {\n\t\t\t\tif rcv := recover(); rcv != nil {\n\t\t\t\t\tif reqID := middleware.GetReqID(r.Context()); reqID != \"\" {\n\t\t\t\t\t\tlogger = logger.With(\"HTTP Request ID\", reqID)\n\t\t\t\t\t}\n\n\t\t\t\t\tscheme := \"http\"\n\t\t\t\t\tif r.TLS != nil {\n\t\t\t\t\t\tscheme = \"https\"\n\t\t\t\t\t}\n\n\t\t\t\t\tlogger.Errorw(\n\t\t\t\t\t\tlogMsgPanicRecovery,\n\t\t\t\t\t\t\"Method\", r.Method,\n\t\t\t\t\t\t\"URI\", fmt.Sprintf(\"%s://%s%s\", scheme, r.Host, r.RequestURI),\n\t\t\t\t\t\t\"Protocol\", r.Proto,\n\t\t\t\t\t\t\"Remote Address\", r.RemoteAddr,\n\t\t\t\t\t\t\"Panic Value\", rcv,\n\t\t\t\t\t\t\"Stack Trace\", string(debug.Stack()),\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tnext.ServeHTTP(w, r)\n\t\t})\n\t}\n}",
"func Recovery() func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\tcombinedErr := fmt.Sprintf(\"PANIC: %v\\n%s\", err, string(log.Stack(2)))\n\t\t\t\t\thttp.Error(w, combinedErr, 500)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tnext.ServeHTTP(w, req)\n\t\t})\n\t}\n}",
"func (app *application) recoverPanic(next http.Handler) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\tdefer func() {\r\n\t\t\tif err := recover(); err != nil {\r\n\t\t\t\tw.Header().Set(\"Connection\", \"close\")\r\n\t\t\t\tapp.serverError(w, fmt.Errorf(\"%s\", err))\r\n\t\t\t}\r\n\t\t}()\r\n\t\tnext.ServeHTTP(w, r)\r\n\t})\r\n}",
"func recoveryHandle(app http.Handler) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\tstack := debug.Stack()\n\t\t\t\tlog.Println(string(stack))\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tfmt.Fprintf(w, \"<h1>panic: %v</h1><pre>%s</pre>\", err, linkForm(string(stack)))\n\t\t\t}\n\t\t}()\n\t\tapp.ServeHTTP(w, r)\n\t}\n}",
"func (l *Logger) Recover(h PanicHandler) *Logger {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tif h == nil {\n\t\th = panicHandler\n\t}\n\n\tl.ph = h\n\treturn l\n}",
"func recoverHandler(next http.Handler) http.Handler {\n\t// Define a function that defers a function to recover from a panic\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tlog.Printf(\"panic: %+v\", err)\n\t\t\t\thttp.Error(w, http.StatusText(500), 500)\n\t\t\t}\n\t\t}()\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\n\treturn http.HandlerFunc(fn)\n}",
"func NewRecoverHandler() Middleware {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\thandler.WriteError(w, r, fmt.Errorf(\"PANIC: %+v\", err), http.StatusInternalServerError)\n\t\t\t\t}\n\t\t\t}()\n\t\t\th.ServeHTTP(w, r)\n\t\t})\n\t}\n}",
"func RecoveryHandler(w http.ResponseWriter, r *http.Request, err interface{}) {\n\tHandler(w, r, err)\n}",
"func Recover() echo.MiddlewareFunc {\n\treturn func(next echo.HandlerFunc) echo.HandlerFunc {\n\t\treturn func(c echo.Context) (err error) {\n\t\t\tdefer func() {\n\t\t\t\tif p := recover(); p != nil {\n\t\t\t\t\tfmt.Fprintln(os.Stderr, p)\n\t\t\t\t\tos.Stderr.Write(debug.Stack())\n\t\t\t\t\tif pErr, ok := p.(error); ok {\n\t\t\t\t\t\terr = ErrHTTPRecovered.WithCause(pErr)\n\t\t\t\t\t} else {\n\t\t\t\t\t\terr = ErrHTTPRecovered.WithAttributes(\"panic\", p)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\treturn next(c)\n\t\t}\n\t}\n}",
"func (r *Recovery) Handler(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, req *http.Request) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\n\t\t\t\t// Check for a broken connection, as it is not really a\n\t\t\t\t// condition that warrants a panic stack trace.\n\t\t\t\tvar brokenPipe bool\n\t\t\t\tif ne, ok := err.(*net.OpError); ok {\n\t\t\t\t\tif se, ok := ne.Err.(*os.SyscallError); ok {\n\t\t\t\t\t\tif se.Err == syscall.EPIPE || se.Err == syscall.ECONNRESET {\n\t\t\t\t\t\t\tbrokenPipe = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// don't try to respond if the conn broke. duh.\n\t\t\t\tif !brokenPipe {\n\t\t\t\t\tr.panicHandler.ServeHTTP(w, req)\n\t\t\t\t\tstack := make([]byte, r.opt.StackSize)\n\t\t\t\t\tstack = stack[:runtime.Stack(stack, r.opt.IncludeFullStack)]\n\n\t\t\t\t\tr.Printf(\"Recovering from Panic: %s\\n%s\", err, stack)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\n\t\tnext.ServeHTTP(w, req)\n\t}\n\n\treturn http.HandlerFunc(fn)\n}",
"func recoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Capture the HTTP status written by the handler.\n\t\tvar httpStatus int\n\t\trww := newStatusWriter(w, &httpStatus)\n\n\t\t// Recover panics from inside handler and try to send the client\n\t\t// 500 Internal server error. If the handler panicked after already\n\t\t// sending a (partial) response, this is a no-op.\n\t\tdefer func() {\n\t\t\tif v := recover(); v != nil {\n\t\t\t\tvar err error\n\t\t\t\tswitch e := v.(type) {\n\t\t\t\tcase error:\n\t\t\t\t\terr = e\n\t\t\t\tcase string:\n\t\t\t\t\terr = errors.New(e)\n\t\t\t\tcase fmt.Stringer:\n\t\t\t\t\terr = errors.New(e.String())\n\t\t\t\tdefault:\n\t\t\t\t\terr = fmt.Errorf(\"panic with value %v\", v)\n\t\t\t\t}\n\n\t\t\t\tlogger.Error(\"Panic in RPC HTTP handler\",\n\t\t\t\t\t\"err\", err, \"stack\", string(debug.Stack()))\n\t\t\t\twriteInternalError(rww, err)\n\t\t\t}\n\t\t}()\n\n\t\t// Log timing and response information from the handler.\n\t\tbegin := time.Now()\n\t\tdefer func() {\n\t\t\telapsed := time.Since(begin)\n\t\t\tlogger.Debug(\"served RPC HTTP response\",\n\t\t\t\t\"method\", r.Method,\n\t\t\t\t\"url\", r.URL,\n\t\t\t\t\"status\", httpStatus,\n\t\t\t\t\"duration-sec\", elapsed.Seconds(),\n\t\t\t\t\"remoteAddr\", r.RemoteAddr,\n\t\t\t)\n\t\t}()\n\n\t\trww.Header().Set(\"X-Server-Time\", fmt.Sprintf(\"%v\", begin.Unix()))\n\t\thandler.ServeHTTP(rww, r)\n\t})\n}",
"func RecoveryHandler(handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {\n\treturn Recoverer(http.HandlerFunc(handler)).ServeHTTP\n}",
"func GinRecovery() gin.HandlerFunc {\n\tconst reset = \"\\033[0m\"\n\tvar logger *log.Logger = log.New(os.Stderr, \"\\n\\n\\x1b[31m\", log.LstdFlags)\n\n\treturn func(c *gin.Context) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\t// Check for a broken connection, as it is not really a\n\t\t\t\t// condition that warrants a panic stack trace.\n\t\t\t\tvar brokenPipe bool\n\t\t\t\tif ne, ok := err.(*net.OpError); ok {\n\t\t\t\t\tif se, ok := ne.Err.(*os.SyscallError); ok {\n\t\t\t\t\t\tif strings.Contains(strings.ToLower(se.Error()), \"broken pipe\") || strings.Contains(strings.ToLower(se.Error()), \"connection reset by peer\") {\n\t\t\t\t\t\t\tbrokenPipe = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif logger != nil {\n\t\t\t\t\tstack := debug.Stack()\n\n\t\t\t\t\thttpRequest, _ := httputil.DumpRequest(c.Request, false)\n\t\t\t\t\theaders := strings.Split(string(httpRequest), \"\\r\\n\")\n\t\t\t\t\tfor idx, header := range headers {\n\t\t\t\t\t\tcurrent := strings.Split(header, \":\")\n\t\t\t\t\t\tif current[0] == \"Authorization\" {\n\t\t\t\t\t\t\theaders[idx] = current[0] + \": *\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif brokenPipe {\n\t\t\t\t\t\tlogger.Printf(\"%s\\n%s%s\", err, string(httpRequest), reset)\n\t\t\t\t\t} else if gin.IsDebugging() {\n\t\t\t\t\t\tlogger.Printf(\"[Recovery] %s panic recovered:\\n%s\\n%s\\n%s%s\",\n\t\t\t\t\t\t\ttime.Now().Format(time.RFC3339), strings.Join(headers, \"\\r\\n\"), err, stack, reset)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogger.Printf(\"[Recovery] %s panic recovered:\\n%s\\n%s%s\",\n\t\t\t\t\t\t\ttime.Now().Format(time.RFC3339), err, stack, reset)\n\t\t\t\t\t}\n\n\t\t\t\t\t// // Также сигналит в телегу\n\t\t\t\t\t// Telegram(\"[Recovery] %s panic recovered:\\n%s\\n%s%s\", time.Now().Format(time.RFC3339), err, stack, reset)\n\t\t\t\t}\n\n\t\t\t\t// If the connection is dead, we can't write a status to it.\n\t\t\t\tif brokenPipe {\n\t\t\t\t\tc.Error(err.(error)) // nolint: 
errcheck\n\t\t\t\t\tc.Abort()\n\t\t\t\t} else {\n\t\t\t\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tc.Next()\n\t}\n}",
"func Recovery(handlers ...func(c *Context, err interface{})) Middleware {\n\treturn func(c *Context) {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tif handlers != nil {\n\t\t\t\t\tfor i := range handlers {\n\t\t\t\t\t\thandlers[i](c, err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tc.Render(http.StatusInternalServerError, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tc.Next()\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Opts returns a redisconn.Opts object with the configured options applied.
|
func (cfg ClientConfig) Opts() redisconn.Opts {
opts := redisconn.Opts{}
cfg.Options.ApplyOpts(&opts)
return opts
}
|
[
"func GetRedisOpts(driver *dipper.Driver) *Options {\n\tif conn, ok := dipper.GetMapData(driver.Options, \"data.connection\"); ok {\n\t\tdefer delete(conn.(map[string]interface{}), \"Password\")\n\t}\n\tif tls, ok := dipper.GetMapData(driver.Options, \"data.connection.TLS\"); ok {\n\t\tdefer delete(tls.(map[string]interface{}), \"CACerts\")\n\t}\n\n\tif localRedis, ok := os.LookupEnv(\"LOCALREDIS\"); ok && localRedis != \"\" {\n\t\tif opts, e := redis.ParseURL(localRedis); e == nil {\n\t\t\treturn &Options{\n\t\t\t\tOptions: opts,\n\t\t\t}\n\t\t}\n\n\t\treturn &Options{\n\t\t\tOptions: &redis.Options{\n\t\t\t\tAddr: \"127.0.0.1:6379\",\n\t\t\t\tDB: 0,\n\t\t\t},\n\t\t}\n\t}\n\n\topts := &redis.Options{}\n\tif value, ok := driver.GetOptionStr(\"data.connection.Addr\"); ok {\n\t\topts.Addr = value\n\t}\n\tif value, ok := driver.GetOptionStr(\"data.connection.Username\"); ok {\n\t\topts.Username = value\n\t}\n\tif value, ok := driver.GetOptionStr(\"data.connection.Password\"); ok {\n\t\topts.Password = value\n\t}\n\tif DB, ok := driver.GetOptionStr(\"data.connection.DB\"); ok {\n\t\topts.DB = dipper.Must(strconv.Atoi(DB)).(int)\n\t}\n\tif driver.CheckOption(\"data.connection.TLS.Enabled\") {\n\t\topts.TLSConfig = setupTLSConfig(driver)\n\t}\n\n\treturn &Options{\n\t\tOptions: opts,\n\t}\n}",
"func (cfg ClusterConfig) Opts() rediscluster.Opts {\n\topts := rediscluster.Opts{}\n\tcfg.Options.ApplyOpts(&opts.HostOpts)\n\tcfg.Cluster.ApplyOpts(&opts)\n\treturn opts\n}",
"func (r *Redis) Opts() (opts []redis.Option, err error) {\n\tnt := net.NetworkTypeFromString(r.Network)\n\tif nt == 0 || nt == net.Unknown || strings.EqualFold(nt.String(), net.Unknown.String()) {\n\t\tnt = net.TCP\n\t}\n\tr.Network = nt.String()\n\topts = []redis.Option{\n\t\tredis.WithAddrs(r.Addrs...),\n\t\tredis.WithDialTimeout(r.DialTimeout),\n\t\tredis.WithIdleCheckFrequency(r.IdleCheckFrequency),\n\t\tredis.WithIdleTimeout(r.IdleTimeout),\n\t\tredis.WithKeyPrefix(r.KeyPref),\n\t\tredis.WithMaximumConnectionAge(r.MaxConnAge),\n\t\tredis.WithRetryLimit(r.MaxRetries),\n\t\tredis.WithMaximumRetryBackoff(r.MaxRetryBackoff),\n\t\tredis.WithMinimumIdleConnection(r.MinIdleConns),\n\t\tredis.WithMinimumRetryBackoff(r.MinRetryBackoff),\n\t\tredis.WithOnConnectFunction(func(ctx context.Context, conn *redis.Conn) error {\n\t\t\tlog.Infof(\"redis connection succeed to %s\", conn.ClientGetName(ctx).String())\n\t\t\treturn nil\n\t\t}),\n\t\tredis.WithUsername(r.Username),\n\t\tredis.WithPassword(r.Password),\n\t\tredis.WithPoolSize(r.PoolSize),\n\t\tredis.WithPoolTimeout(r.PoolTimeout),\n\t\t// In the current implementation, we do not need to use the read only flag for redis usages.\n\t\t// This implementation is to only align to the redis interface.\n\t\t// We will remove this comment out if we need to use this.\n\t\t// redis.WithReadOnlyFlag(readOnly bool) ,\n\t\tredis.WithNetwork(r.Network),\n\t\tredis.WithReadTimeout(r.ReadTimeout),\n\t\tredis.WithRouteByLatencyFlag(r.RouteByLatency),\n\t\tredis.WithRouteRandomlyFlag(r.RouteRandomly),\n\t\tredis.WithWriteTimeout(r.WriteTimeout),\n\t\tredis.WithInitialPingDuration(r.InitialPingDuration),\n\t\tredis.WithInitialPingTimeLimit(r.InitialPingTimeLimit),\n\t\tredis.WithSentinelPassword(r.SentinelPassword),\n\t\tredis.WithSentinelMasterName(r.SentinelMasterName),\n\t}\n\n\tif r.TLS != nil && r.TLS.Enabled {\n\t\ttls, err := tls.New(r.TLS.Opts()...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts = append(opts, 
redis.WithTLSConfig(tls))\n\t}\n\n\tif r.Net != nil {\n\t\tnetOpts, err := r.Net.Opts()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdialer, err := net.NewDialer(netOpts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts = append(opts, redis.WithDialer(dialer))\n\t}\n\n\tif len(r.Addrs) > 1 {\n\t\topts = append(opts,\n\t\t\tredis.WithRedirectLimit(r.MaxRedirects),\n\t\t)\n\t} else {\n\t\topts = append(opts,\n\t\t\tredis.WithDB(r.DB),\n\t\t)\n\t}\n\n\treturn opts, nil\n}",
"func NewOpts() Opts {\n\treturn Opts{\n\t\tBroker: getenv(\"MQTT_TEST_CLIENT_HOST\", \"ssl://mqtt:8883\"),\n\t\tID: getenv(\"MQTT_TEST_CLIENT_ID\", \"test-client\"),\n\t\tSerial: \"1001\",\n\t\tMode: ModeAuto,\n\t\tQoS: 2,\n\t\tRetained: true,\n\t\tSetWill: false,\n\t\tTLS: false,\n\t}\n}",
"func (c *conn) Options() *ConnOptions {\n\treturn c.options\n}",
"func (c *Client) Options() *Options {\n\treturn c.opt\n}",
"func customizedOption(viper *viper.Viper, rwType RWType) *Options {\n\n\tvar opt = Options{}\n\tletOldEnvSupportViper(viper, rwType)\n\thosts := addrStructure(viper.GetStringSlice(rwType.FmtSuffix(\"REDIS_PORT\")),\n\t\tviper.GetStringSlice(rwType.FmtSuffix(\"REDIS_HOST\")))\n\topt.Type = ClientType(viper.GetString(rwType.FmtSuffix(\"REDIS_TYPE\")))\n\topt.Hosts = hosts\n\topt.ReadOnly = rwType.IsReadOnly()\n\topt.Database = viper.GetInt(rwType.FmtSuffix(\"REDIS_DB_NAME\"))\n\topt.Password = viper.GetString(rwType.FmtSuffix(\"REDIS_DB_PASSWORD\"))\n\topt.KeyPrefix = viper.GetString(rwType.FmtSuffix(\"REDIS_KEY_PREFIX\"))\n\t// various timeout setting\n\topt.DialTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.ReadTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.WriteTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\t// REDIS_MAX_CONNECTIONS\n\topt.PoolSize = viper.GetInt(rwType.FmtSuffix(\"REDIS_MAX_CONNECTIONS\"))\n\topt.PoolTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.IdleTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.IdleCheckFrequency = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.TLSConfig = nil\n\treturn &opt\n}",
"func (p *P) Options() []gnomock.Option {\n\tp.setDefaults()\n\n\topts := []gnomock.Option{\n\t\tgnomock.WithHealthCheck(p.healthcheck),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_MODE=setup\"),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_USERNAME=\" + p.Username),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_PASSWORD=\" + p.Password),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_ORG=\" + p.Org),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_BUCKET=\" + p.Bucket),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=\" + p.AuthToken),\n\t}\n\n\treturn opts\n}",
"func RedisOptions() *redis.Options {\n\treturn &redis.Options{\n\t\tAddr: \"localhost:6379\",\n\t\tPassword: \"\", // no password set\n\t\tDB: 0, // use default DB\n\t}\n}",
"func (r Redis) RedisOptions() *redis.Options {\n\treturn &redis.Options{\n\t\tNetwork: \"tcp\",\n\t\tAddr: r.Address,\n\t\tDB: r.DB,\n\t\tUsername: r.Username,\n\t\tPassword: r.Password,\n\t}\n}",
"func (t Timeouts) ApplyOpts(opts *redisconn.Opts) {\n\tif t.Dial > 0 {\n\t\topts.DialTimeout = t.Dial\n\t}\n\tif t.IO > 0 {\n\t\topts.IOTimeout = t.IO\n\t}\n}",
"func WithRedisOptions(opts *redis.Options) ClientOption {\n\treturn func(cfg *clientConfig) {\n\t\thost, port, err := net.SplitHostPort(opts.Addr)\n\t\tif err != nil {\n\t\t\thost = defaultHost\n\t\t\tport = defaultPort\n\t\t}\n\t\tcfg.host = host\n\t\tcfg.port = port\n\t\tcfg.db = strconv.Itoa(opts.DB)\n\t}\n}",
"func NewOpts() TransactionOptions {\n\treturn TransactionOptions{\n\t\tDescription: \"\",\n\t\tCurrency: \"EUR\",\n\t}\n}",
"func ToOptions(cfg Config, sess *session.Session, settings *awsutil.AWSSessionSettings) []Option {\n\tif !cfg.IncludeMetadata {\n\t\treturn nil\n\t}\n\tmetadataClient := ec2metadata.New(sess)\n\treturn []Option{\n\t\tWithHostname(getMetadata(\n\t\t\tsimpleMetadataProvider{metadata: cfg.Hostname},\n\t\t\tenvMetadataProvider{envKey: envAWSHostname},\n\t\t\tec2MetadataProvider{client: metadataClient, metadataKey: metadataHostname},\n\t\t)),\n\t\tWithInstanceID(getMetadata(\n\t\t\tsimpleMetadataProvider{metadata: cfg.InstanceID},\n\t\t\tenvMetadataProvider{envKey: envAWSInstanceID},\n\t\t\tec2MetadataProvider{client: metadataClient, metadataKey: metadataInstanceID},\n\t\t)),\n\t\tWithResourceARN(getMetadata(\n\t\t\tsimpleMetadataProvider{metadata: cfg.ResourceARN},\n\t\t\tsimpleMetadataProvider{metadata: settings.ResourceARN},\n\t\t)),\n\t}\n}",
"func (n *noopBroker) Options() Options {\n\treturn n.opts\n}",
"func ClientOpts(clt *resty.Client) BuildOptions {\n\treturn func(client *PluginClient) {\n\t\tclient.client = clt\n\t}\n}",
"func (ep *Endpoint) ClientOpts() ([]client.Opt, error) {\n\tvar result []client.Opt\n\tif ep.Host != \"\" {\n\t\thelper, err := connhelper.GetConnectionHelper(ep.Host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif helper == nil {\n\t\t\ttlsConfig, err := ep.tlsConfig()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresult = append(result,\n\t\t\t\twithHTTPClient(tlsConfig),\n\t\t\t\tclient.WithHost(ep.Host),\n\t\t\t)\n\n\t\t} else {\n\t\t\tresult = append(result,\n\t\t\t\tclient.WithHTTPClient(&http.Client{\n\t\t\t\t\t// No TLS, and no proxy.\n\t\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\t\tDialContext: helper.Dialer,\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t\tclient.WithHost(helper.Host),\n\t\t\t\tclient.WithDialContext(helper.Dialer),\n\t\t\t)\n\t\t}\n\t}\n\n\tresult = append(result, client.WithVersionFromEnv(), client.WithAPIVersionNegotiation())\n\treturn result, nil\n}",
"func NewWithOpts(options ...Option) *Config {\n\topts := &Options{}\n\n\tfor _, opt := range options {\n\t\topt(opts)\n\t}\n\n\tv := viper.New()\n\tv.AutomaticEnv()\n\tv.SetEnvKeyReplacer(strings.NewReplacer(\"-\", \"_\", \".\", \"_\"))\n\n\tflagSet := new(pflag.FlagSet)\n\n\tc := &Config{\n\t\tKstream: KstreamConfig{},\n\t\tFilament: FilamentConfig{},\n\t\tAPI: APIConfig{},\n\t\tPE: pe.Config{},\n\t\tLog: log.Config{},\n\t\tAggregator: aggregator.Config{},\n\t\tFilters: &Filters{},\n\t\tviper: v,\n\t\tflags: flagSet,\n\t\topts: opts,\n\t}\n\n\tif opts.run || opts.replay {\n\t\taggregator.AddFlags(flagSet)\n\t\tconsole.AddFlags(flagSet)\n\t\tamqp.AddFlags(flagSet)\n\t\telasticsearch.AddFlags(flagSet)\n\t\thttp.AddFlags(flagSet)\n\t\teventlog.AddFlags(flagSet)\n\t\tremovet.AddFlags(flagSet)\n\t\treplacet.AddFlags(flagSet)\n\t\trenamet.AddFlags(flagSet)\n\t\ttrimt.AddFlags(flagSet)\n\t\ttagst.AddFlags(flagSet)\n\t\tmailsender.AddFlags(flagSet)\n\t\tslacksender.AddFlags(flagSet)\n\t\tyara.AddFlags(flagSet)\n\t}\n\n\tif opts.run || opts.capture {\n\t\tpe.AddFlags(flagSet)\n\t}\n\n\tc.addFlags()\n\n\treturn c\n}",
"func (w *Websocket) Options() *transport.Options {\n\treturn w.topts\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Opts returns a rediscluster.Opts object with the configured options applied.
|
func (cfg ClusterConfig) Opts() rediscluster.Opts {
opts := rediscluster.Opts{}
cfg.Options.ApplyOpts(&opts.HostOpts)
cfg.Cluster.ApplyOpts(&opts)
return opts
}
|
[
"func (cfg ClientConfig) Opts() redisconn.Opts {\n\topts := redisconn.Opts{}\n\tcfg.Options.ApplyOpts(&opts)\n\treturn opts\n}",
"func (r *Redis) Opts() (opts []redis.Option, err error) {\n\tnt := net.NetworkTypeFromString(r.Network)\n\tif nt == 0 || nt == net.Unknown || strings.EqualFold(nt.String(), net.Unknown.String()) {\n\t\tnt = net.TCP\n\t}\n\tr.Network = nt.String()\n\topts = []redis.Option{\n\t\tredis.WithAddrs(r.Addrs...),\n\t\tredis.WithDialTimeout(r.DialTimeout),\n\t\tredis.WithIdleCheckFrequency(r.IdleCheckFrequency),\n\t\tredis.WithIdleTimeout(r.IdleTimeout),\n\t\tredis.WithKeyPrefix(r.KeyPref),\n\t\tredis.WithMaximumConnectionAge(r.MaxConnAge),\n\t\tredis.WithRetryLimit(r.MaxRetries),\n\t\tredis.WithMaximumRetryBackoff(r.MaxRetryBackoff),\n\t\tredis.WithMinimumIdleConnection(r.MinIdleConns),\n\t\tredis.WithMinimumRetryBackoff(r.MinRetryBackoff),\n\t\tredis.WithOnConnectFunction(func(ctx context.Context, conn *redis.Conn) error {\n\t\t\tlog.Infof(\"redis connection succeed to %s\", conn.ClientGetName(ctx).String())\n\t\t\treturn nil\n\t\t}),\n\t\tredis.WithUsername(r.Username),\n\t\tredis.WithPassword(r.Password),\n\t\tredis.WithPoolSize(r.PoolSize),\n\t\tredis.WithPoolTimeout(r.PoolTimeout),\n\t\t// In the current implementation, we do not need to use the read only flag for redis usages.\n\t\t// This implementation is to only align to the redis interface.\n\t\t// We will remove this comment out if we need to use this.\n\t\t// redis.WithReadOnlyFlag(readOnly bool) ,\n\t\tredis.WithNetwork(r.Network),\n\t\tredis.WithReadTimeout(r.ReadTimeout),\n\t\tredis.WithRouteByLatencyFlag(r.RouteByLatency),\n\t\tredis.WithRouteRandomlyFlag(r.RouteRandomly),\n\t\tredis.WithWriteTimeout(r.WriteTimeout),\n\t\tredis.WithInitialPingDuration(r.InitialPingDuration),\n\t\tredis.WithInitialPingTimeLimit(r.InitialPingTimeLimit),\n\t\tredis.WithSentinelPassword(r.SentinelPassword),\n\t\tredis.WithSentinelMasterName(r.SentinelMasterName),\n\t}\n\n\tif r.TLS != nil && r.TLS.Enabled {\n\t\ttls, err := tls.New(r.TLS.Opts()...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts = append(opts, 
redis.WithTLSConfig(tls))\n\t}\n\n\tif r.Net != nil {\n\t\tnetOpts, err := r.Net.Opts()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdialer, err := net.NewDialer(netOpts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\topts = append(opts, redis.WithDialer(dialer))\n\t}\n\n\tif len(r.Addrs) > 1 {\n\t\topts = append(opts,\n\t\t\tredis.WithRedirectLimit(r.MaxRedirects),\n\t\t)\n\t} else {\n\t\topts = append(opts,\n\t\t\tredis.WithDB(r.DB),\n\t\t)\n\t}\n\n\treturn opts, nil\n}",
"func GetRedisOpts(driver *dipper.Driver) *Options {\n\tif conn, ok := dipper.GetMapData(driver.Options, \"data.connection\"); ok {\n\t\tdefer delete(conn.(map[string]interface{}), \"Password\")\n\t}\n\tif tls, ok := dipper.GetMapData(driver.Options, \"data.connection.TLS\"); ok {\n\t\tdefer delete(tls.(map[string]interface{}), \"CACerts\")\n\t}\n\n\tif localRedis, ok := os.LookupEnv(\"LOCALREDIS\"); ok && localRedis != \"\" {\n\t\tif opts, e := redis.ParseURL(localRedis); e == nil {\n\t\t\treturn &Options{\n\t\t\t\tOptions: opts,\n\t\t\t}\n\t\t}\n\n\t\treturn &Options{\n\t\t\tOptions: &redis.Options{\n\t\t\t\tAddr: \"127.0.0.1:6379\",\n\t\t\t\tDB: 0,\n\t\t\t},\n\t\t}\n\t}\n\n\topts := &redis.Options{}\n\tif value, ok := driver.GetOptionStr(\"data.connection.Addr\"); ok {\n\t\topts.Addr = value\n\t}\n\tif value, ok := driver.GetOptionStr(\"data.connection.Username\"); ok {\n\t\topts.Username = value\n\t}\n\tif value, ok := driver.GetOptionStr(\"data.connection.Password\"); ok {\n\t\topts.Password = value\n\t}\n\tif DB, ok := driver.GetOptionStr(\"data.connection.DB\"); ok {\n\t\topts.DB = dipper.Must(strconv.Atoi(DB)).(int)\n\t}\n\tif driver.CheckOption(\"data.connection.TLS.Enabled\") {\n\t\topts.TLSConfig = setupTLSConfig(driver)\n\t}\n\n\treturn &Options{\n\t\tOptions: opts,\n\t}\n}",
"func NewOpts() Opts {\n\treturn Opts{\n\t\tBroker: getenv(\"MQTT_TEST_CLIENT_HOST\", \"ssl://mqtt:8883\"),\n\t\tID: getenv(\"MQTT_TEST_CLIENT_ID\", \"test-client\"),\n\t\tSerial: \"1001\",\n\t\tMode: ModeAuto,\n\t\tQoS: 2,\n\t\tRetained: true,\n\t\tSetWill: false,\n\t\tTLS: false,\n\t}\n}",
"func (d *portworx) GetClusterOpts(n node.Node, options []string) (map[string]string, error) {\n\topts := node.ConnectionOpts{\n\t\tIgnoreError: false,\n\t\tTimeBeforeRetry: defaultRetryInterval,\n\t\tTimeout: defaultTimeout,\n\t}\n\tcmd := fmt.Sprintf(\"%s cluster options list -j json\", d.getPxctlPath(n))\n\tout, err := d.nodeDriver.RunCommand(n, cmd, opts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get pxctl cluster options on node [%s], Err: %v\", n.Name, err)\n\t}\n\tvar data = map[string]interface{}{}\n\terr = json.Unmarshal([]byte(out), &data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal pxctl cluster option on node [%s], Err: %v\", n.Name, err)\n\t}\n\tsort.Strings(options)\n\tvar options_map = make(map[string]string)\n\t//Values can be string, array or map\n\tfor key, val := range data {\n\t\tindex := sort.SearchStrings(options, key)\n\t\tif index < len(options) && options[index] == key {\n\t\t\toptions_map[key] = fmt.Sprint(val)\n\t\t}\n\t}\n\t//Make sure required options are available\n\tfor _, option := range options {\n\t\tif _, ok := options_map[option]; !ok {\n\t\t\treturn nil, fmt.Errorf(\"Failed to find option: %v\", option)\n\t\t}\n\t}\n\treturn options_map, nil\n}",
"func ToOptions(cfg Config, sess *session.Session, settings *awsutil.AWSSessionSettings) []Option {\n\tif !cfg.IncludeMetadata {\n\t\treturn nil\n\t}\n\tmetadataClient := ec2metadata.New(sess)\n\treturn []Option{\n\t\tWithHostname(getMetadata(\n\t\t\tsimpleMetadataProvider{metadata: cfg.Hostname},\n\t\t\tenvMetadataProvider{envKey: envAWSHostname},\n\t\t\tec2MetadataProvider{client: metadataClient, metadataKey: metadataHostname},\n\t\t)),\n\t\tWithInstanceID(getMetadata(\n\t\t\tsimpleMetadataProvider{metadata: cfg.InstanceID},\n\t\t\tenvMetadataProvider{envKey: envAWSInstanceID},\n\t\t\tec2MetadataProvider{client: metadataClient, metadataKey: metadataInstanceID},\n\t\t)),\n\t\tWithResourceARN(getMetadata(\n\t\t\tsimpleMetadataProvider{metadata: cfg.ResourceARN},\n\t\t\tsimpleMetadataProvider{metadata: settings.ResourceARN},\n\t\t)),\n\t}\n}",
"func (r *root) opts() kivik.Options {\n\treturn r.options\n}",
"func (p *P) Options() []gnomock.Option {\n\tp.setDefaults()\n\n\topts := []gnomock.Option{\n\t\tgnomock.WithEnv(\"discovery.type=single-node\"),\n\t\tgnomock.WithEnv(\"xpack.security.enabled=false\"),\n\t\tgnomock.WithEnv(\"ES_JAVA_OPTS=-Xms256m -Xmx256m\"),\n\t\tgnomock.WithHealthCheck(p.healthcheck),\n\t}\n\n\tif len(p.Inputs) > 0 {\n\t\topts = append(opts, gnomock.WithInit(p.initf))\n\t}\n\n\treturn opts\n}",
"func (p *P) Options() []gnomock.Option {\n\tp.setDefaults()\n\n\topts := []gnomock.Option{\n\t\tgnomock.WithHealthCheck(p.healthcheck),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_MODE=setup\"),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_USERNAME=\" + p.Username),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_PASSWORD=\" + p.Password),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_ORG=\" + p.Org),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_BUCKET=\" + p.Bucket),\n\t\tgnomock.WithEnv(\"DOCKER_INFLUXDB_INIT_ADMIN_TOKEN=\" + p.AuthToken),\n\t}\n\n\treturn opts\n}",
"func (c *Client) Options() *Options {\n\treturn c.opt\n}",
"func (c *Caches) Option() Options {\n\treturn c.options\n}",
"func customizedOption(viper *viper.Viper, rwType RWType) *Options {\n\n\tvar opt = Options{}\n\tletOldEnvSupportViper(viper, rwType)\n\thosts := addrStructure(viper.GetStringSlice(rwType.FmtSuffix(\"REDIS_PORT\")),\n\t\tviper.GetStringSlice(rwType.FmtSuffix(\"REDIS_HOST\")))\n\topt.Type = ClientType(viper.GetString(rwType.FmtSuffix(\"REDIS_TYPE\")))\n\topt.Hosts = hosts\n\topt.ReadOnly = rwType.IsReadOnly()\n\topt.Database = viper.GetInt(rwType.FmtSuffix(\"REDIS_DB_NAME\"))\n\topt.Password = viper.GetString(rwType.FmtSuffix(\"REDIS_DB_PASSWORD\"))\n\topt.KeyPrefix = viper.GetString(rwType.FmtSuffix(\"REDIS_KEY_PREFIX\"))\n\t// various timeout setting\n\topt.DialTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.ReadTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.WriteTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\t// REDIS_MAX_CONNECTIONS\n\topt.PoolSize = viper.GetInt(rwType.FmtSuffix(\"REDIS_MAX_CONNECTIONS\"))\n\topt.PoolTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.IdleTimeout = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.IdleCheckFrequency = viper.GetDuration(rwType.FmtSuffix(\"REDIS_TIMEOUT\")) * time.Second\n\topt.TLSConfig = nil\n\treturn &opt\n}",
"func (c *Container) Options() (*dockertest.RunOptions, error) {\n\n\tstrPort := strconv.Itoa(c.getBrokerPort())\n\tpb := map[docker.Port][]docker.PortBinding{}\n\tpb[docker.Port(fmt.Sprintf(\"%d/tcp\", brokerPort))] = []docker.PortBinding{{\n\t\tHostIP: \"0.0.0.0\",\n\t\tHostPort: strPort,\n\t}}\n\n\tstrPort = strconv.Itoa(c.getClientPort())\n\tpb[docker.Port(fmt.Sprintf(\"%d/tcp\", clientPort))] = []docker.PortBinding{{\n\t\tHostIP: \"0.0.0.0\",\n\t\tHostPort: strPort,\n\t}}\n\n\trepo, tag := c.params.GetRepoTag(\"confluentinc/cp-kafka\", \"5.3.0\")\n\tenv := c.params.MergeEnv([]string{\n\t\t\"KAFKA_BROKER_ID=1\",\n\t\tfmt.Sprintf(\"KAFKA_LISTENERS=\\\"PLAINTEXT://0.0.0.0:%d,BROKER://0.0.0.0:%d\", c.getClientPort(), c.getBrokerPort()),\n\t\t\"KAFKA_LISTENER_SECURITY_PROTOCOL_MAP=\\\"BROKER:PLAINTEXT,PLAINTEXT:PLAINTEXT\\\"\",\n\t\t\"KAFKA_INTER_BROKER_LISTENER_NAME=BROKER\",\n\t\t\"KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1\",\n\t})\n\n\treturn &dockertest.RunOptions{\n\t\tRepository: repo,\n\t\tTag: tag,\n\t\tEnv: env,\n\t\tPortBindings: pb,\n\t}, nil\n}",
"func ClusterAuthenticationOpts() *shttp.AuthenticationOpts {\n\treturn &shttp.AuthenticationOpts{\n\t\tUsername: config.GetString(\"analyzer.auth.cluster.username\"),\n\t\tPassword: config.GetString(\"analyzer.auth.cluster.password\"),\n\t\tCookie: config.GetStringMapString(\"http.cookie\"),\n\t}\n}",
"func NewWithOpts(options ...Option) *Config {\n\topts := &Options{}\n\n\tfor _, opt := range options {\n\t\topt(opts)\n\t}\n\n\tv := viper.New()\n\tv.AutomaticEnv()\n\tv.SetEnvKeyReplacer(strings.NewReplacer(\"-\", \"_\", \".\", \"_\"))\n\n\tflagSet := new(pflag.FlagSet)\n\n\tc := &Config{\n\t\tKstream: KstreamConfig{},\n\t\tFilament: FilamentConfig{},\n\t\tAPI: APIConfig{},\n\t\tPE: pe.Config{},\n\t\tLog: log.Config{},\n\t\tAggregator: aggregator.Config{},\n\t\tFilters: &Filters{},\n\t\tviper: v,\n\t\tflags: flagSet,\n\t\topts: opts,\n\t}\n\n\tif opts.run || opts.replay {\n\t\taggregator.AddFlags(flagSet)\n\t\tconsole.AddFlags(flagSet)\n\t\tamqp.AddFlags(flagSet)\n\t\telasticsearch.AddFlags(flagSet)\n\t\thttp.AddFlags(flagSet)\n\t\teventlog.AddFlags(flagSet)\n\t\tremovet.AddFlags(flagSet)\n\t\treplacet.AddFlags(flagSet)\n\t\trenamet.AddFlags(flagSet)\n\t\ttrimt.AddFlags(flagSet)\n\t\ttagst.AddFlags(flagSet)\n\t\tmailsender.AddFlags(flagSet)\n\t\tslacksender.AddFlags(flagSet)\n\t\tyara.AddFlags(flagSet)\n\t}\n\n\tif opts.run || opts.capture {\n\t\tpe.AddFlags(flagSet)\n\t}\n\n\tc.addFlags()\n\n\treturn c\n}",
"func GetAllOpts() []Opt { return Conf.GetAllOpts() }",
"func NewOpts() TransactionOptions {\n\treturn TransactionOptions{\n\t\tDescription: \"\",\n\t\tCurrency: \"EUR\",\n\t}\n}",
"func (c *Client) Opts() *account.PlasmaTransactOpts {\n\treturn c.opts\n}",
"func (n *natsBroker) Options() broker.Options {\n\treturn n.options\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ApplyOpts applies any values set in t to opts, if a value is not set on t, then we don't update opts and allow redispipe to use its defaults.
|
func (t Timeouts) ApplyOpts(opts *redisconn.Opts) {
if t.Dial > 0 {
opts.DialTimeout = t.Dial
}
if t.IO > 0 {
opts.IOTimeout = t.IO
}
}
|
[
"func (cfg *Config) Apply(opts ...Option) error {\n\tfor _, opt := range opts {\n\t\tif opt == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif err := opt(cfg); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *config) apply(opts []Option) {\n\tfor _, v := range opts {\n\t\tv(c)\n\t}\n}",
"func applyOptions(c *Container, opts ...Option) error {\n\tfor _, opt := range opts {\n\t\tif err := opt.set(c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (m *Message) applyOpts(opts []Option) *Message {\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func (ro *RequesterOptions) apply(opts ...RequesterOption) {\n\tfor _, opt := range opts {\n\t\topt(ro)\n\t}\n}",
"func (o *PatchOptions) ApplyOptions(opts []PatchOption) {\n\tfor _, opt := range opts {\n\t\topt.ApplyToHelper(o)\n\t}\n}",
"func (o *MatchOptions) ApplyOptions(opts []MatchOption) *MatchOptions {\n\tfor _, opt := range opts {\n\t\topt.ApplyToMatcher(o)\n\t}\n\treturn o\n}",
"func (opts *Options) Apply(options ...Option) error {\n\tfor _, o := range options {\n\t\tif err := o(opts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *TestOptions) ApplyOptions(opts []TestOption) *TestOptions {\n\tfor _, opt := range opts {\n\t\topt.ApplyToTestOptions(o)\n\t}\n\treturn o\n}",
"func (opts *ExtractTarOptions) ApplyOptions(options []ExtractTarOption) {\n\tfor _, opt := range options {\n\t\topt.ApplyOption(opts)\n\t}\n}",
"func (of optionFunc) apply(c *config) { of(c) }",
"func (o *IteratorOptions) apply(opts ...IteratorOption) {\n\tfor _, opt := range opts {\n\t\topt(o)\n\t}\n}",
"func (o *ListImplementationRevisionsOptions) Apply(opts ...GetImplementationOption) {\n\tfor _, opt := range opts {\n\t\topt(o)\n\t}\n}",
"func apply(\n\ttest func(t *testing.T, conf interface{}, opts ...option),\n\tconf interface{}, opts ...option,\n) func(*testing.T) {\n\treturn func(t *testing.T) {\n\t\ttest(t, conf, opts...)\n\t}\n}",
"func (o Options) Apply(session *Session) error {\n\tif len(o) == 0 {\n\t\treturn nil\n\t}\n\tfor _, opt := range o {\n\t\tif err := opt(session); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (o Options) Apply(i *Important) {\n\tfor _, opt := range o {\n\t\topt(i)\n\t}\n}",
"func (c *AppConfig) Apply(opts []AppOption) error {\r\n\tfor _, o := range opts {\r\n\t\tif err := o(c); err != nil {\r\n\t\t\treturn err\r\n\t\t}\r\n\t}\r\n\treturn nil\r\n}",
"func BindOptions(cmd *cobra.Command, opts []Opt) {\n\tfor _, o := range opts {\n\t\tflagset := cmd.Flags()\n\t\tif o.Persistent {\n\t\t\tflagset = cmd.PersistentFlags()\n\t\t}\n\n\t\tif o.Required {\n\t\t\tcmd.MarkFlagRequired(o.Flag)\n\t\t}\n\n\t\tenvVar := o.Flag\n\t\tif o.EnvVar != \"\" {\n\t\t\tenvVar = o.EnvVar\n\t\t}\n\n\t\thasShort := o.Short != 0\n\n\t\tswitch destP := o.DestP.(type) {\n\t\tcase *string:\n\t\t\tvar d string\n\t\t\tif o.Default != nil {\n\t\t\t\td = o.Default.(string)\n\t\t\t}\n\t\t\tif hasShort {\n\t\t\t\tflagset.StringVarP(destP, o.Flag, string(o.Short), d, o.Desc)\n\t\t\t} else {\n\t\t\t\tflagset.StringVar(destP, o.Flag, d, o.Desc)\n\t\t\t}\n\t\t\tmustBindPFlag(o.Flag, flagset)\n\t\t\t*destP = viper.GetString(envVar)\n\t\tcase *int:\n\t\t\tvar d int\n\t\t\tif o.Default != nil {\n\t\t\t\td = o.Default.(int)\n\t\t\t}\n\t\t\tif hasShort {\n\t\t\t\tflagset.IntVarP(destP, o.Flag, string(o.Short), d, o.Desc)\n\t\t\t} else {\n\t\t\t\tflagset.IntVar(destP, o.Flag, d, o.Desc)\n\t\t\t}\n\t\t\tmustBindPFlag(o.Flag, flagset)\n\t\t\t*destP = viper.GetInt(envVar)\n\t\tcase *bool:\n\t\t\tvar d bool\n\t\t\tif o.Default != nil {\n\t\t\t\td = o.Default.(bool)\n\t\t\t}\n\t\t\tif hasShort {\n\t\t\t\tflagset.BoolVarP(destP, o.Flag, string(o.Short), d, o.Desc)\n\t\t\t} else {\n\t\t\t\tflagset.BoolVar(destP, o.Flag, d, o.Desc)\n\t\t\t}\n\t\t\tmustBindPFlag(o.Flag, flagset)\n\t\t\t*destP = viper.GetBool(envVar)\n\t\tcase *time.Duration:\n\t\t\tvar d time.Duration\n\t\t\tif o.Default != nil {\n\t\t\t\td = o.Default.(time.Duration)\n\t\t\t}\n\t\t\tif hasShort {\n\t\t\t\tflagset.DurationVarP(destP, o.Flag, string(o.Short), d, o.Desc)\n\t\t\t} else {\n\t\t\t\tflagset.DurationVar(destP, o.Flag, d, o.Desc)\n\t\t\t}\n\t\t\tmustBindPFlag(o.Flag, flagset)\n\t\t\t*destP = viper.GetDuration(envVar)\n\t\tcase *[]string:\n\t\t\tvar d []string\n\t\t\tif o.Default != nil {\n\t\t\t\td = o.Default.([]string)\n\t\t\t}\n\t\t\tif hasShort {\n\t\t\t\tflagset.StringSliceVarP(destP, 
o.Flag, string(o.Short), d, o.Desc)\n\t\t\t} else {\n\t\t\t\tflagset.StringSliceVar(destP, o.Flag, d, o.Desc)\n\t\t\t}\n\t\t\tmustBindPFlag(o.Flag, flagset)\n\t\t\t*destP = viper.GetStringSlice(envVar)\n\t\tcase *map[string]string:\n\t\t\tvar d map[string]string\n\t\t\tif o.Default != nil {\n\t\t\t\td = o.Default.(map[string]string)\n\t\t\t}\n\t\t\tif hasShort {\n\t\t\t\tflagset.StringToStringVarP(destP, o.Flag, string(o.Short), d, o.Desc)\n\t\t\t} else {\n\t\t\t\tflagset.StringToStringVar(destP, o.Flag, d, o.Desc)\n\t\t\t}\n\t\t\tmustBindPFlag(o.Flag, flagset)\n\t\t\t*destP = viper.GetStringMapString(envVar)\n\t\tcase pflag.Value:\n\t\t\tif hasShort {\n\t\t\t\tflagset.VarP(destP, o.Flag, string(o.Short), o.Desc)\n\t\t\t} else {\n\t\t\t\tflagset.Var(destP, o.Flag, o.Desc)\n\t\t\t}\n\t\t\tif o.Default != nil {\n\t\t\t\tdestP.Set(o.Default.(string))\n\t\t\t}\n\t\t\tmustBindPFlag(o.Flag, flagset)\n\t\t\tdestP.Set(viper.GetString(envVar))\n\t\tdefault:\n\t\t\t// if you get a panic here, sorry about that!\n\t\t\t// anyway, go ahead and make a PR and add another type.\n\t\t\tpanic(fmt.Errorf(\"unknown destination type %t\", o.DestP))\n\t\t}\n\n\t\t// so weirdness with the flagset her, the flag must be set before marking it\n\t\t// hidden. This is in contrast to the MarkRequired, which can be set before...\n\t\tif o.Hidden {\n\t\t\tflagset.MarkHidden(o.Flag)\n\t\t}\n\t}\n}",
"func (opts *lateInitOptions) apply(opt ...LateInitOption) {\n\tfor _, o := range opt {\n\t\to.apply(opts)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
VersionString returns a TLS version string.
|
func VersionString(value uint16) string {
if str, found := tlsVersionString[value]; found {
return str
}
return fmt.Sprintf("TLS_VERSION_UNKNOWN_%d", value)
}
|
[
"func VersionString() string {\n\treturn C.GoString(C.LZ4_versionString())\n}",
"func TLSVersion(ver uint16) string {\n\tswitch ver {\n\tcase tls.VersionTLS10:\n\t\treturn \"1.0\"\n\tcase tls.VersionTLS11:\n\t\treturn \"1.1\"\n\tcase tls.VersionTLS12:\n\t\treturn \"1.2\"\n\t}\n\treturn fmt.Sprintf(\"Unknown [%x]\", ver)\n}",
"func VersionString() string {\n\t__ret := C.cairo_version_string()\n\t__v := packPCharString(__ret)\n\treturn __v\n}",
"func VersionString() string {\n\tif VersionStability != \"stable\" {\n\t\treturn fmt.Sprintf(\"%s-%s\", Version, VersionStability)\n\t}\n\treturn Version\n}",
"func GetVersionString() string {\n\tvar version = GetBuildVersion()\n\tvar commithash = GetCommitHash()\n\n\treturn \"version: v\" + *version + \"\\ncommithash: \" + *commithash + \"\\n\"\n}",
"func GetFormattedVersionString() string {\n\tif versionString == \"\" {\n\t\tversionString = \"Unknown\"\n\t}\n\treturn versionString\n}",
"func (pv ProtocolVersion) String() string {\n\tstr := fmt.Sprintf(\"%d.%d.%d\",\n\t\t(pv.Version>>24)&0xFF, // major\n\t\t(pv.Version>>16)&0xFF, // minor\n\t\t(pv.Version>>8)&0xFF, // patch\n\t)\n\n\t// optional build number, only printed when non-0\n\tif build := pv.Version & 0xFF; build != 0 {\n\t\tstr += fmt.Sprintf(\".%d\", build)\n\t}\n\n\t// optional prerelease\n\tif pv.Prerelease != nilPreRelease {\n\t\tindex := 0\n\t\tfor index < 8 && pv.Prerelease[index] != 0 {\n\t\t\tindex++\n\t\t}\n\t\tstr += \"-\" + string(pv.Prerelease[:index])\n\t}\n\n\treturn str\n}",
"func TLSVersionName(version uint16) string {\n\tswitch version {\n\tcase tls.VersionSSL30:\n\t\treturn \"SSL 3.0\"\n\tcase tls.VersionTLS10:\n\t\treturn \"TLS 1.0\"\n\tcase tls.VersionTLS11:\n\t\treturn \"TLS 1.1\"\n\tcase tls.VersionTLS12:\n\t\treturn \"TLS 1.2\"\n\tcase tls.VersionTLS13:\n\t\treturn \"TLS 1.3\"\n\t}\n\n\treturn \"Unknown\"\n}",
"func GetTLSVersion(tr *http.Transport) string {\n switch tr.TLSClientConfig.MinVersion {\n case tls.VersionTLS10:\n return \"TLS 1.0\"\n case tls.VersionTLS11:\n return \"TLS 1.1\"\n case tls.VersionTLS12:\n return \"TLS 1.2\"\n case tls.VersionTLS13:\n return \"TLS 1.3\"\n }\n\n return \"Unknown\"\n}",
"func GetLibuvVersionString() string {\n\treturn C.GoString(C.uv_version_string())\n}",
"func VersionString() string {\n\tif GitVersion != \"\" {\n\t\treturn GitVersion\n\t}\n\treturn Version\n}",
"func GetVersionString() string {\n\treturn fmt.Sprintf(\"%s version %s\", os.Args[0], config.Version)\n}",
"func (v Info) GetVersionString() string {\n\treturn v.versionString\n}",
"func Version(message string) string {\n\treturn Encode(VERSION, message)\n}",
"func OpensslVersion() (ver string) {\n\tver = C.OPENSSL_VERSION_TEXT\n\treturn\n}",
"func (h *ProtocolVersion) Version() string {\n\treturn h.VersionStr\n}",
"func (v IPVSVersion) String() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", v.Major, v.Minor, v.Patch)\n}",
"func (v Version) String() (s string) {\n\tif v.epoch != 0 {\n\t\ts = strconv.Itoa(v.epoch) + \":\"\n\t}\n\ts += v.version\n\tif v.revision != \"\" {\n\t\ts += \"-\" + v.revision\n\t}\n\treturn\n}",
"func (m *MongoCrypt) CryptSharedLibVersionString() string {\n\t// Pass in a pointer for \"len\", but ignore the value because C.GoString can determine the string\n\t// length without it.\n\tlen := C.uint(0)\n\tstr := C.GoString(C.mongocrypt_crypt_shared_lib_version_string(m.wrapped, &len))\n\treturn str\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CipherSuiteString returns the TLS cipher suite as a string.
|
// CipherSuiteString returns the human-readable name of the TLS cipher
// suite identified by value, or a formatted placeholder containing the
// numeric id when the suite is not present in tlsCipherSuiteString.
func CipherSuiteString(value uint16) string {
    name, known := tlsCipherSuiteString[value]
    if !known {
        return fmt.Sprintf("TLS_CIPHER_SUITE_UNKNOWN_%d", value)
    }
    return name
}
|
[
"func CipherSuiteName(suite uint16) string {\n\tswitch suite {\n\tcase tls.TLS_RSA_WITH_RC4_128_SHA:\n\t\treturn \"TLS_RSA_WITH_RC4_128_SHA\"\n\tcase tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:\n\t\treturn \"TLS_RSA_WITH_3DES_EDE_CBC_SHA\"\n\tcase tls.TLS_RSA_WITH_AES_128_CBC_SHA:\n\t\treturn \"TLS_RSA_WITH_AES_128_CBC_SHA\"\n\tcase tls.TLS_RSA_WITH_AES_256_CBC_SHA:\n\t\treturn \"TLS_RSA_WITH_AES_256_CBC_SHA\"\n\tcase tls.TLS_RSA_WITH_AES_128_CBC_SHA256:\n\t\treturn \"TLS_RSA_WITH_AES_128_CBC_SHA256\"\n\tcase tls.TLS_RSA_WITH_AES_128_GCM_SHA256:\n\t\treturn \"TLS_RSA_WITH_AES_128_GCM_SHA256\"\n\tcase tls.TLS_RSA_WITH_AES_256_GCM_SHA384:\n\t\treturn \"TLS_RSA_WITH_AES_256_GCM_SHA384\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:\n\t\treturn \"TLS_ECDHE_RSA_WITH_RC4_128_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:\n\t\treturn \"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:\n\t\treturn 
\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\"\n\tcase tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:\n\t\treturn \"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256\"\n\tcase tls.TLS_AES_128_GCM_SHA256:\n\t\treturn \"TLS_AES_128_GCM_SHA256\"\n\tcase tls.TLS_AES_256_GCM_SHA384:\n\t\treturn \"TLS_AES_256_GCM_SHA384\"\n\tcase tls.TLS_CHACHA20_POLY1305_SHA256:\n\t\treturn \"TLS_CHACHA20_POLY1305_SHA256\"\n\tcase tls.TLS_FALLBACK_SCSV:\n\t\treturn \"TLS_FALLBACK_SCSV\"\n\t}\n\n\treturn \"Unknown\"\n}",
"func getEncryptionAlg(ciphersuite string) string {\n\tswitch ciphersuite {\n\tcase \"XSTREAM_X25519_HKDF_SHA256_AES128_SIV\":\n\t\treturn \"AES-SIV\"\n\tcase \"XSTREAM_X25519_HKDF_SHA256_AES128_PMAC_SIV\":\n\t\treturn \"AES-PMAC-SIV\"\n\tdefault:\n\t\tpanic(\"XSTREAM: unknown ciphersuite\")\n\t}\n}",
"func CipherSuites() []*tls.CipherSuite",
"func cipherToString(cipher packet.CipherFunction) string {\n\tswitch cipher {\n\tcase 2:\n\t\treturn \"3DES\"\n\tcase 3:\n\t\treturn \"CAST5\"\n\tcase 7:\n\t\treturn \"AES128\"\n\tcase 8:\n\t\treturn \"AES192\"\n\tcase 9:\n\t\treturn \"AES256\"\n\tdefault:\n\t\treturn \"NotKnown\"\n\t}\n}",
"func InsecureCipherSuites() []*tls.CipherSuite",
"func (c Cipher) String() string {\n\tswitch c {\n\tcase AES128CCM:\n\t\treturn \"AES-128-CCM\"\n\tcase AES128GCM:\n\t\treturn \"AES-128-GCM\"\n\tdefault:\n\t\treturn \"Cipher-\" + strconv.Itoa(int(c))\n\t}\n}",
"func (c *Provider) CryptoSuite() core.CryptoSuite {\n\treturn c.cryptoSuite\n}",
"func (s SecurityMode) String() string {\n\tswitch s {\n\tcase UnsecureOnly:\n\t\treturn UnsecureOnlyStr\n\tcase SecureOnly:\n\t\treturn SecureOnlyStr\n\tcase SecureAndUnsecure:\n\t\treturn SecureAndUnsecureStr\n\tcase UpgradeSecurity:\n\t\treturn UpgradeSecurityStr\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"invalid security mode value %d\", int(s)))\n\t}\n}",
"func GetTLSVersion(tr *http.Transport) string {\n switch tr.TLSClientConfig.MinVersion {\n case tls.VersionTLS10:\n return \"TLS 1.0\"\n case tls.VersionTLS11:\n return \"TLS 1.1\"\n case tls.VersionTLS12:\n return \"TLS 1.2\"\n case tls.VersionTLS13:\n return \"TLS 1.3\"\n }\n\n return \"Unknown\"\n}",
"func TLSCipher(cs uint16) string {\n\tswitch cs {\n\tcase 0x0005:\n\t\treturn \"TLS_RSA_WITH_RC4_128_SHA\"\n\tcase 0x000a:\n\t\treturn \"TLS_RSA_WITH_3DES_EDE_CBC_SHA\"\n\tcase 0x002f:\n\t\treturn \"TLS_RSA_WITH_AES_128_CBC_SHA\"\n\tcase 0x0035:\n\t\treturn \"TLS_RSA_WITH_AES_256_CBC_SHA\"\n\tcase 0xc007:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\"\n\tcase 0xc009:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\"\n\tcase 0xc00a:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\"\n\tcase 0xc011:\n\t\treturn \"TLS_ECDHE_RSA_WITH_RC4_128_SHA\"\n\tcase 0xc012:\n\t\treturn \"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\"\n\tcase 0xc013:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\"\n\tcase 0xc014:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\"\n\tcase 0xc02f:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\"\n\tcase 0xc02b:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\"\n\tcase 0xc030:\n\t\treturn \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\"\n\tcase 0xc02c:\n\t\treturn \"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\"\n\t}\n\treturn fmt.Sprintf(\"Unknown [%x]\", cs)\n}",
"func defaultCipherSuite() []uint16 {\n\treturn []uint16{\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,\n\t\ttls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_AES_128_GCM_SHA256,\n\t\ttls.TLS_RSA_WITH_AES_256_GCM_SHA384,\n\t\ttls.TLS_RSA_WITH_AES_128_CBC_SHA,\n\t\ttls.TLS_RSA_WITH_AES_256_CBC_SHA,\n\t}\n}",
"func GetTLSName(rawVersion uint16) string {\n\tvar version string\n\tswitch rawVersion {\n\tcase tls.VersionSSL30:\n\t\tversion = \"SSLv3.0\"\n\tcase tls.VersionTLS10:\n\t\tversion = \"TLSv1.0\"\n\tcase tls.VersionTLS11:\n\t\tversion = \"TLSv1.1\"\n\tcase tls.VersionTLS12:\n\t\tversion = \"TLSv1.2\"\n\tdefault:\n\t\tversion = \"unknown\"\n\t}\n\treturn version\n}",
"func GetCipherName(rawCipher uint16) string {\n\tvar cipher string\n\tswitch rawCipher {\n\tcase tls.TLS_RSA_WITH_RC4_128_SHA:\n\t\tcipher = \"TLS_RSA_WITH_RC4_128_SHA\"\n\tcase tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:\n\t\tcipher = \"TLS_RSA_WITH_3DES_EDE_CBC_SHA\"\n\tcase tls.TLS_RSA_WITH_AES_128_CBC_SHA:\n\t\tcipher = \"TLS_RSA_WITH_AES_128_CBC_SHA\"\n\tcase tls.TLS_RSA_WITH_AES_256_CBC_SHA:\n\t\tcipher = \"TLS_RSA_WITH_AES_256_CBC_SHA\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:\n\t\tcipher = \"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:\n\t\tcipher = \"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:\n\t\tcipher = \"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:\n\t\tcipher = \"TLS_ECDHE_RSA_WITH_RC4_128_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:\n\t\tcipher = \"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:\n\t\tcipher = \"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:\n\t\tcipher = \"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:\n\t\tcipher = \"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:\n\t\tcipher = \"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\"\n\tcase tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:\n\t\tcipher = \"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\"\n\tcase tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:\n\t\tcipher = \"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\"\n\tdefault:\n\t\tcipher = \"unknown\"\n\t}\n\treturn cipher\n}",
"func weakCipherSuites(details scan.LabsEndpointDetails) string {\n\t//Will require update as more vulnerabilities discovered, display results for TLS v1.2\n\t//https://github.com/ssllabs/research/wiki/SSL-and-TLS-Deployment-Best-Practices#23-use-secure-cipher-suites\n\tvar vulnSuites string\n\tfor _, suite := range details.Suites {\n\t\tfor _, suiteList := range suite.List {\n\t\t\tif !strings.Contains(suiteList.Name, \"DHE_\") {\n\t\t\t\tif suite.Protocol == 771 {\n\t\t\t\t\tvulnSuites += suiteList.Name + \"\\n\"\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn (vulnSuites)\n}",
"func (pc *MockProviderContext) CryptoSuite() apicryptosuite.CryptoSuite {\n\treturn pc.cryptoSuite\n}",
"func getCipherSuiteNames(ids []uint16) []string {\n\tif len(ids) == 0 {\n\t\treturn nil\n\t}\n\tnames := make([]string, len(ids))\n\tfor i, id := range ids {\n\t\tnames[i] = tls.CipherSuiteName(id)\n\t}\n\treturn names\n}",
"func (cl *CommandLineInterface) SuiteStringFlag(name string, shorthand *string, defaultValue *string, description string, validationFn validator) {\n\tcl.StringFlagOnFlagSet(cl.suiteFlags, name, shorthand, defaultValue, description, nil, validationFn)\n}",
"func EncodeTLSInfoToText(tcs *tls.ConnectionState, cri *tls.CertificateRequestInfo) string {\n\tversion := lookup(tlsVersions, tcs.Version)\n\tcipher := lookup(cipherSuites, tcs.CipherSuite)\n\tdescription := TLSDescription{\n\t\tVersion: tlscolor(version),\n\t\tCipher: tlscolor(explainCipher(cipher)),\n\t}\n\ttlsInfoContext := tlsInfoContext{\n\t\tConn: &description,\n\t}\n\tif cri != nil {\n\t\tcriDesc, err := EncodeCRIToObject(cri)\n\t\tif err == nil {\n\t\t\ttlsInfoContext.CRI = criDesc.(*CertificateRequestInfo)\n\t\t}\n\t}\n\n\tfuncMap := sprig.TxtFuncMap()\n\textras := template.FuncMap{\n\t\t\"printCommonName\": PrintCommonName,\n\t\t\"printShortName\": PrintShortName,\n\t\t\"greenify\": greenify,\n\t}\n\tfor k, v := range extras {\n\t\tfuncMap[k] = v\n\t}\n\n\tt := template.New(\"TLS template\").Funcs(funcMap)\n\tt, err := t.Parse(tlsLayout)\n\tif err != nil {\n\t\t// Should never happen\n\t\tpanic(err)\n\t}\n\tvar buffer bytes.Buffer\n\tw := bufio.NewWriter(&buffer)\n\terr = t.Execute(w, tlsInfoContext)\n\tif err != nil {\n\t\t// Should never happen\n\t\tpanic(err)\n\t}\n\tw.Flush()\n\treturn string(buffer.Bytes())\n}",
"func (p *Provider) String() string {\n\tcert, err := p.GetLeafCertificate()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn cert.Subject.CommonName\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
New creates a new engine with the given options. Options can change the timeout, register a signal, execute a prehook callback and many other behaviours.
|
// New creates a new engine bound to the given parent context and applies
// each option in order. It returns the first error produced by an option,
// in which case no engine is returned.
func New(ctx context.Context, options ...Option) (*Engine, error) {
    engine := &Engine{parent: ctx}
    engine.init()
    for _, opt := range options {
        if err := opt.apply(engine); err != nil {
            return nil, err
        }
    }
    return engine, nil
}
|
[
"func New(opts ...Option) *Engine {\n\toptions := defaultEngineOptions()\n\n\tfor _, opt := range opts {\n\t\topt.apply(&options)\n\t}\n\n\te := &Engine{\n\t\tsimStepper: options.simStepper,\n\t}\n\n\te.World = NewWorld()\n\n\treturn e\n}",
"func NewEngineWithOptions(opt ...EngineOption) *Engine {\n\tengine := &Engine{\n\t\tRWMutex: new(sync.RWMutex),\n\t\tTemplateCode: make(map[string]string),\n\t}\n\n\tengine.applyOptions(opt...)\n\treturn engine\n}",
"func NewEngine(cfg *config.Config, options ...func(*Engine)) *Engine {\n\tengine := &Engine{\n\t\tconfig: cfg,\n\t\tstop: make(chan struct{}),\n\t}\n\n\tfor _, opt := range options {\n\t\topt(engine)\n\t}\n\n\tif engine.timeManager == nil {\n\t\tengine.timeManager = defaultTimeManager{}\n\t}\n\n\treturn engine\n}",
"func New(app dogma.Application, options ...EngineOption) *Engine {\n\tif app != nil {\n\t\toptions = append(options, WithApplication(app))\n\t}\n\n\topts := resolveEngineOptions(options...)\n\n\treturn &Engine{\n\t\topts: opts,\n\t\tdataStores: &persistence.DataStoreSet{\n\t\t\tProvider: opts.PersistenceProvider,\n\t\t},\n\t\tsemaphore: semaphore.NewWeighted(int64(opts.ConcurrencyLimit)),\n\t\tlogger: loggingx.WithPrefix(\n\t\t\topts.Logger,\n\t\t\t\"engine \",\n\t\t),\n\t\tready: make(chan struct{}),\n\t}\n}",
"func New(o *Options, queues ...Queue) (*Engine, error) {\n\tstore, err := o.GetStore()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgraph, err := o.GetGraphBackend()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Engine{\n\t\topt: o,\n\t\tstore: store,\n\t\tgraph: graph,\n\t\tqueues: queues,\n\t}, nil\n}",
"func New(subscriber Subscriber, consumer Consumer, opts ...Option) *Engine {\n\treturn newEngine(subscriber, nil, consumer, opts...)\n}",
"func NewEngine(signalClient *signal.Client, mgmClient *mgm.Client, config *EngineConfig, cancel context.CancelFunc) *Engine {\n\treturn &Engine{\n\t\tsignal: signalClient,\n\t\tmgmClient: mgmClient,\n\t\tconns: map[string]*Connection{},\n\t\tpeerMux: &sync.Mutex{},\n\t\tsyncMsgMux: &sync.Mutex{},\n\t\tconfig: config,\n\t\tSTUNs: []*ice.URL{},\n\t\tTURNs: []*ice.URL{},\n\t\tcancel: cancel,\n\t}\n}",
"func New() Engine {\n\treturn new(engine)\n}",
"func CreateEngine(opts Options) (*Engine, error) {\n\t// set default interval\n\tif opts.ExpireInterval == 0 {\n\t\topts.ExpireInterval = 60 * time.Second\n\t}\n\n\t// set default min and max oplog size\n\tif opts.MinOplogSize == 0 {\n\t\topts.MinOplogSize = 100\n\t}\n\tif opts.MaxOplogSize == 0 {\n\t\topts.MaxOplogSize = 1000\n\t}\n\n\t// set default min and max oplog age\n\tif opts.MinOplogAge == 0 {\n\t\topts.MinOplogAge = 5 * time.Minute\n\t}\n\tif opts.MaxOplogAge == 0 {\n\t\topts.MaxOplogAge = time.Hour\n\t}\n\n\t// create engine\n\te := &Engine{\n\t\topts: opts,\n\t\tstore: opts.Store,\n\t\tstreams: map[*Stream]struct{}{},\n\t\ttoken: dbkit.NewSemaphore(1),\n\t}\n\n\t// load catalog\n\tdata, err := e.store.Load()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// set catalog\n\te.catalog = data\n\n\t// run expiry\n\tgo e.expire(opts.ExpireInterval, opts.ExpireErrors)\n\n\treturn e, nil\n}",
"func New() *Engine {\n\treturn &Engine{router: newRouter()}\n}",
"func New(c *config.Config, cc *Clients, l hclog.Logger) *Engine {\n\tp := generateProviders(c, cc, l)\n\n\treturn &Engine{\n\t\tproviders: p,\n\t\tclients: cc,\n\t\tconfig: c,\n\t\tlog: l,\n\t}\n}",
"func NewEngine(opts ...EngineOption) (*Engine, error) {\n\te := &Engine{\n\t\tevalOpts: []cel.ProgramOption{},\n\t\tselectors: []Selector{},\n\t\tlimits: limits.NewLimits(),\n\t\tenvs: map[string]*cel.Env{\n\t\t\t\"\": stdEnv,\n\t\t},\n\t\tschemas: map[string]*model.OpenAPISchema{\n\t\t\t\"#openAPISchema\": model.SchemaDef,\n\t\t\t\"#instanceSchema\": model.InstanceSchema,\n\t\t\t\"#templateSchema\": model.TemplateSchema,\n\t\t},\n\t\ttemplates: map[string]*model.Template{},\n\t\tinstances: map[string][]*model.Instance{},\n\t\truntimes: map[string]*runtime.Template{},\n\t\tactPool: newActivationPool(),\n\t}\n\tvar err error\n\tfor _, opt := range opts {\n\t\te, err = opt(e)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn e, nil\n}",
"func New() (*Engine, error) {\n\n\tvar (\n\t\terr error\n\t\tdefaultOutputDeviceInfo *portaudio.DeviceInfo\n\t\tstreamParameters portaudio.StreamParameters\n\t)\n\n\t// initialize portaudio (this must be done to use *any* of portaudio's API)\n\tif err = portaudio.Initialize(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get device info of the default output device\n\tif defaultOutputDeviceInfo, err = portaudio.DefaultOutputDevice(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get stream parameters for the default devices\n\t// we're requesting low latency parameters (gotta go fast)\n\tstreamParameters = portaudio.LowLatencyParameters(nil, defaultOutputDeviceInfo)\n\n\t// stereo output is required for anything to work. If it doesn't\n\t// support stereo... well you'll find out when Start() is called.\n\tstreamParameters.Output.Channels = 2\n\n\treturn &Engine{\n\t\tstreamParameters: streamParameters, // <--- default configuration\n\t\tstream: nil,\n\t\ttables: map[int]*table{},\n\t\tactivePlaybackEvents: map[*playbackEvent]bool{},\n\t\tnewPlaybackEvents: make(chan *playbackEvent, 128), // <--- magic number\n\t\tinitialized: true,\n\t\tstarted: false,\n\t\tinputAmplitude: float32(1.0), // 0db gain for audio input\n\t}, nil\n}",
"func New() *PhysicsEngine {\n\tp := new(PhysicsEngine)\n\n\t//Entities\n\tp.entities = make(map[int]*Entity, conEntityLimit)\n\n\t//Time Step\n\tp.timePreviousStep = time.Now()\n\n\t//Event Management\n\tp.eventChannel = make(chan *Note, conEventChannelLimit)\n\tp.eventMux = &sync.Mutex{}\n\n\t//Apply Forces\n\tp.forcesChannel = make(chan *Note, conForcesChannelLimit)\n\n\treturn p\n}",
"func New(c config.Template) *Engine {\n\ts := &Engine{Config: &c, templateCache: make(map[string]*raymond.Template, 0)}\n\treturn s\n}",
"func New() DefaultEngine {\n\treturn DefaultEngine{}\n}",
"func New(exec Executor, opts ...Opt) *Routine {\n\tmopts := &options{\n\t\targs: []interface{}{},\n\t\tsigtraps: []*SigTrap{},\n\t\tcsignals: []os.Signal{},\n\t}\n\tfor _, opt := range opts {\n\t\topt(mopts)\n\t}\n\troutine := &Routine{\n\t\topts: mopts,\n\t\texec: exec,\n\t\tclose: make(chan struct{}),\n\t\tserving: event.New(),\n\t\tstopped: event.New(),\n\t}\n\treturn routine\n}",
"func New(db *storage.Storage) *Engine {\n\teng := &Engine{\n\t\tGames: make(map[string]game.Game),\n\t\tPlayers: make(map[string]player.Player),\n\t\tEvents: make(chan event.Event),\n\t\tHandlers: make(map[string]Handler),\n\t\tStorage: db,\n\t}\n\n\teng.registerDefaultHandlers()\n\treturn eng\n}",
"func New(s interface{}, options ...Option) *Agent {\n\tvar opts agentOptions\n\tfor _, option := range options {\n\t\toption(&opts)\n\t}\n\n\tvar atomOptions []atom.Option\n\tif opts.equalityFn != nil {\n\t\tatomOptions = append(atomOptions,\n\t\t\tatom.EqualityFunc(opts.equalityFn))\n\t}\n\n\treturn &Agent{\n\t\tstate: atom.New(s, atomOptions...),\n\t\tqueue: jobq.New(runAction),\n\t\topts: opts,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
launch will start the given hook.
|
// launch starts the given Hook in its own goroutine, registered on the
// engine's wait group so shutdown can wait for it to finish.
func (e *Engine) launch(h Hook) {
    e.wait.Add(1)
    go func() {
        defer e.wait.Done()
        runtime := &HookRuntime{}
        // Wait for an event to notify this goroutine that a shutdown is required.
        // It could either be from engine's context or during Hook startup if an error has occurred.
        // NOTE: If HookRuntime returns an error, we have to shutdown every Hook...
        err := runtime.WaitForEvent(e.ctx, h)
        if err != nil {
            // Record the failure as the engine's shutdown cause and cancel the
            // shared context so all other hooks begin shutting down too.
            // NOTE(review): a later failing hook overwrites e.cause here —
            // confirm whether first-error-wins was intended.
            e.mutex.Lock()
            e.log(err)
            e.cancel()
            e.cause = err
            e.mutex.Unlock()
        }
        // Wait for hook to gracefully shutdown, or kill it after timeout.
        // This is handled by HookRuntime.
        for _, err := range runtime.Shutdown(e.timeout) {
            e.mutex.Lock()
            e.log(err)
            e.mutex.Unlock()
        }
    }()
}
|
[
"func (m *Manager) Launch(exec string, key string, name plugin.Name, options *types.Any) error {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\n\t// check that the plugin is not currently running\n\trunning, err := m.scope.Plugins().List()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlookup, _ := name.GetLookupAndType()\n\tif countMatches([]string{lookup}, running) > 0 {\n\t\tlog.Debug(\"already running\", \"lookup\", lookup, \"name\", name)\n\t\tm.started <- name\n\t\treturn nil\n\t}\n\tm.wgStartAll.Add(1)\n\n\tlog.Debug(\"starting\", \"key\", key, \"name\", name, \"exec\", exec, \"options\", options)\n\tif m.startPlugin == nil {\n\t\tlog.Info(\"monitor not running anymore\")\n\t\treturn nil\n\t}\n\n\tm.startPlugin <- launch.StartPlugin{\n\t\tKey: key,\n\t\tName: name,\n\t\tExec: launch.ExecName(exec),\n\t\tOptions: options,\n\t\tStarted: func(key string, n plugin.Name, config *types.Any) {\n\t\t\tm.started <- n\n\t\t\tm.wgStartAll.Done()\n\t\t\tlog.Debug(\"started\", \"key\", key, \"name\", name, \"exec\", exec, \"options\", options)\n\t\t},\n\t\tError: func(key string, n plugin.Name, config *types.Any, err error) {\n\t\t\tlog.Error(\"error starting\", \"key\", key, \"name\", name, \"exec\", exec, \"options\", options)\n\t\t\tif m.mustAll {\n\t\t\t\tlog.Crit(\"Terminating due to error starting plugin\", \"err\", err,\n\t\t\t\t\t\"key\", key, \"name\", n, \"config\", stringFrom(config))\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tm.wgStartAll.Done()\n\t\t},\n\t}\n\treturn nil\n}",
"func Launch(addr, localDir, filename string,\n\targs []string, logDir string, retry int) error {\n\n\tfields := strings.Split(addr, \":\")\n\tif len(fields) != 2 || len(fields[0]) <= 0 || len(fields[1]) <= 0 {\n\t\treturn fmt.Errorf(\"Launch addr %s not in form of host:port\")\n\t}\n\n\tc, e := connect(fields[0])\n\tif e != nil {\n\t\treturn e\n\t}\n\tdefer c.Close()\n\n\te = c.Call(\"Prism.Launch\",\n\t\t&Cmd{addr, localDir, filename, args, logDir, retry}, nil)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Prism.Launch failed: %v\", e)\n\t}\n\treturn nil\n}",
"func executeLaunch() {\n\tfmt.Println(\"Launching ...\")\n}",
"func agentLifecycleHook(hookName string, log logger.Logger, cfg AgentStartConfig) error {\n\t// search for hook (including .bat & .ps1 files on Windows)\n\tp, err := hook.Find(cfg.HooksPath, hookName)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\tlog.Error(\"Error finding %q hook: %v\", hookName, err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tsh, err := shell.New()\n\tif err != nil {\n\t\tlog.Error(\"creating shell for %q hook: %v\", hookName, err)\n\t\treturn err\n\t}\n\n\t// pipe from hook output to logger\n\tr, w := io.Pipe()\n\tsh.Logger = &shell.WriterLogger{Writer: w, Ansi: !cfg.NoColor} // for Promptf\n\tsh.Writer = w // for stdout+stderr\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tscan := bufio.NewScanner(r) // log each line separately\n\t\tlog = log.WithFields(logger.StringField(\"hook\", hookName))\n\t\tfor scan.Scan() {\n\t\t\tlog.Info(scan.Text())\n\t\t}\n\t}()\n\n\t// run hook\n\tsh.Promptf(\"%s\", p)\n\tif err = sh.RunScript(context.Background(), p, nil); err != nil {\n\t\tlog.Error(\"%q hook: %v\", hookName, err)\n\t\treturn err\n\t}\n\tw.Close() // goroutine scans until pipe is closed\n\n\t// wait for hook to finish and output to flush to logger\n\twg.Wait()\n\treturn nil\n}",
"func (l *Launcher) Launch(self string, cmd []string) error {\n\tprocess, err := l.ProcessFor(cmd)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"determine start command\")\n\t}\n\treturn l.LaunchProcess(self, process)\n}",
"func (tr *TaskRunner) prestart() error {\n\t// Determine if the allocation is terminal and we should avoid running\n\t// prestart hooks.\n\tif tr.shouldShutdown() {\n\t\ttr.logger.Trace(\"skipping prestart hooks since allocation is terminal\")\n\t\treturn nil\n\t}\n\n\tif tr.logger.IsTrace() {\n\t\tstart := time.Now()\n\t\ttr.logger.Trace(\"running prestart hooks\", \"start\", start)\n\t\tdefer func() {\n\t\t\tend := time.Now()\n\t\t\ttr.logger.Trace(\"finished prestart hooks\", \"end\", end, \"duration\", end.Sub(start))\n\t\t}()\n\t}\n\n\t// use a join context to allow any blocking pre-start hooks\n\t// to be canceled by either killCtx or shutdownCtx\n\tjoinedCtx, joinedCancel := joincontext.Join(tr.killCtx, tr.shutdownCtx)\n\tdefer joinedCancel()\n\n\tfor _, hook := range tr.runnerHooks {\n\t\tpre, ok := hook.(interfaces.TaskPrestartHook)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := pre.Name()\n\n\t\t// Build the request\n\t\treq := interfaces.TaskPrestartRequest{\n\t\t\tTask: tr.Task(),\n\t\t\tTaskDir: tr.taskDir,\n\t\t\tTaskEnv: tr.envBuilder.Build(),\n\t\t\tTaskResources: tr.taskResources,\n\t\t}\n\n\t\torigHookState := tr.hookState(name)\n\t\tif origHookState != nil {\n\t\t\tif origHookState.PrestartDone {\n\t\t\t\ttr.logger.Trace(\"skipping done prestart hook\", \"name\", pre.Name())\n\n\t\t\t\t// Always set env vars from hooks\n\t\t\t\tif name == HookNameDevices {\n\t\t\t\t\ttr.envBuilder.SetDeviceHookEnv(name, origHookState.Env)\n\t\t\t\t} else {\n\t\t\t\t\ttr.envBuilder.SetHookEnv(name, origHookState.Env)\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Give the hook it's old data\n\t\t\treq.PreviousState = origHookState.Data\n\t\t}\n\n\t\treq.VaultToken = tr.getVaultToken()\n\t\treq.NomadToken = tr.getNomadToken()\n\n\t\t// Time the prestart hook\n\t\tvar start time.Time\n\t\tif tr.logger.IsTrace() {\n\t\t\tstart = time.Now()\n\t\t\ttr.logger.Trace(\"running prestart hook\", \"name\", name, \"start\", start)\n\t\t}\n\n\t\t// Run the prestart 
hook\n\t\tvar resp interfaces.TaskPrestartResponse\n\t\tif err := pre.Prestart(joinedCtx, &req, &resp); err != nil {\n\t\t\ttr.emitHookError(err, name)\n\t\t\treturn structs.WrapRecoverable(fmt.Sprintf(\"prestart hook %q failed: %v\", name, err), err)\n\t\t}\n\n\t\t// Store the hook state\n\t\t{\n\t\t\thookState := &state.HookState{\n\t\t\t\tData: resp.State,\n\t\t\t\tPrestartDone: resp.Done,\n\t\t\t\tEnv: resp.Env,\n\t\t\t}\n\n\t\t\t// Store and persist local state if the hook state has changed\n\t\t\tif !hookState.Equal(origHookState) {\n\t\t\t\ttr.stateLock.Lock()\n\t\t\t\ttr.localState.Hooks[name] = hookState\n\t\t\t\ttr.stateLock.Unlock()\n\n\t\t\t\tif err := tr.persistLocalState(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Store the environment variables returned by the hook\n\t\tif name == HookNameDevices {\n\t\t\ttr.envBuilder.SetDeviceHookEnv(name, resp.Env)\n\t\t} else {\n\t\t\ttr.envBuilder.SetHookEnv(name, resp.Env)\n\t\t}\n\n\t\t// Store the resources\n\t\tif len(resp.Devices) != 0 {\n\t\t\ttr.hookResources.setDevices(resp.Devices)\n\t\t}\n\t\tif len(resp.Mounts) != 0 {\n\t\t\ttr.hookResources.setMounts(resp.Mounts)\n\t\t}\n\n\t\tif tr.logger.IsTrace() {\n\t\t\tend := time.Now()\n\t\t\ttr.logger.Trace(\"finished prestart hook\", \"name\", name, \"end\", end, \"duration\", end.Sub(start))\n\t\t}\n\t}\n\n\treturn nil\n}",
"func HookOnJobStart(hooks []string, j common.Job) {\n\tlog.WithField(\"id\", j.UUID).Debug(\"Executing hooks against job start.\")\n\n\tdata := copyJobToHookJob(j)\n\n\thooksRun(hooks, data)\n\n}",
"func (h *Handler) OnLaunch(ctx context.Context, request *alexa.Request, session *alexa.Session, ctxPtr *alexa.Context, response *alexa.Response) error {\n\tlog.Printf(\"OnLaunch requestId=%s, sessionId=%s\", request.RequestID, session.SessionID)\n\treturn nil\n}",
"func (b *Bootstrap) executeHook(name string, hookPath string, extraEnviron *env.Environment) error {\n\tif !fileExists(hookPath) {\n\t\tif b.Debug {\n\t\t\tb.shell.Commentf(\"Skipping %s hook, no script at \\\"%s\\\"\", name, hookPath)\n\t\t}\n\t\treturn nil\n\t}\n\n\tb.shell.Headerf(\"Running %s hook\", name)\n\n\tif redactor := b.setupRedactor(); redactor != nil {\n\t\tdefer redactor.Flush()\n\t}\n\n\t// We need a script to wrap the hook script so that we can snaffle the changed\n\t// environment variables\n\tscript, err := newHookScriptWrapper(hookPath)\n\tif err != nil {\n\t\tb.shell.Errorf(\"Error creating hook script: %v\", err)\n\t\treturn err\n\t}\n\tdefer script.Close()\n\n\tcleanHookPath := hookPath\n\n\t// Show a relative path if we can\n\tif strings.HasPrefix(hookPath, b.shell.Getwd()) {\n\t\tvar err error\n\t\tif cleanHookPath, err = filepath.Rel(b.shell.Getwd(), hookPath); err != nil {\n\t\t\tcleanHookPath = hookPath\n\t\t}\n\t}\n\n\t// Show the hook runner in debug, but the thing being run otherwise 💅🏻\n\tif b.Debug {\n\t\tb.shell.Commentf(\"A hook runner was written to \\\"%s\\\" with the following:\", script.Path())\n\t\tb.shell.Promptf(\"%s\", process.FormatCommand(script.Path(), nil))\n\t} else {\n\t\tb.shell.Promptf(\"%s\", process.FormatCommand(cleanHookPath, []string{}))\n\t}\n\n\t// Run the wrapper script\n\tif err := b.shell.RunScript(script.Path(), extraEnviron); err != nil {\n\t\texitCode := shell.GetExitCode(err)\n\t\tb.shell.Env.Set(\"BUILDKITE_LAST_HOOK_EXIT_STATUS\", fmt.Sprintf(\"%d\", exitCode))\n\n\t\t// Give a simpler error if it's just a shell exit error\n\t\tif shell.IsExitError(err) {\n\t\t\treturn &shell.ExitError{\n\t\t\t\tCode: exitCode,\n\t\t\t\tMessage: fmt.Sprintf(\"The %s hook exited with status %d\", name, exitCode),\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\n\t// Store the last hook exit code for subsequent steps\n\tb.shell.Env.Set(\"BUILDKITE_LAST_HOOK_EXIT_STATUS\", \"0\")\n\n\t// Get changed environment\n\tchanges, err 
:= script.Changes()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to get environment\")\n\t}\n\n\t// Finally, apply changes to the current shell and config\n\tb.applyEnvironmentChanges(changes.Env, changes.Dir)\n\treturn nil\n}",
"func mstart_stub()",
"func Launch(eventBus eventbus.Broker, rpcBus *rpcbus.RPCBus, k ristretto.Scalar, keys key.ConsensusKeys, publicKey *key.PublicKey, gen Generator, blockGen BlockGenerator, db database.DB) error {\n\tbroker, err := newBroker(eventBus, rpcBus, k, gen, blockGen, keys, publicKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstate := NewState(eventBus)\n\tstate.Wire(broker)\n\treturn nil\n}",
"func (m *TestMod) Hook() error {\n\tm.RegisterCommand(NewTestCommand(m))\n\t//m.RegisterCommand(NewMonkeyCommand(m))\n\n\treturn nil\n}",
"func (gitHubHook *GitHubHook) StartGitHubHook() {\n\trouter := httprouter.New()\n\trouter.POST(gitHubHook.GitHubHookEndPoint, gitHubHook.receiveGiHubDelivery)\n\tlog.Fatal(http.ListenAndServe(\":\"+gitHubHook.GitHubHookPort, router))\n}",
"func (fl failedLauncher) Launch() (TaskHandle, error) {\n\treturn nil, errLaunchFailed\n}",
"func (act *testappActivity) launch(displayID int) error {\n\tinnerAct, err := arc.NewActivityOnDisplay(act.a, dispPkg, act.activityName(), displayID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = innerAct.Start(act.ctx, act.tconn)\n\tif err != nil {\n\t\tinnerAct.Close()\n\t\treturn err\n\t}\n\tact.activity = innerAct\n\n\treturn ensureActivityReady(act.ctx, act.tconn, innerAct)\n}",
"func Launch(th ThreadWithRole) error {\n\tif e := AllocThread(th); e != nil {\n\t\treturn e\n\t}\n\tth.Launch()\n\treturn nil\n}",
"func (s *BaseGShellListener) EnterStart(ctx *StartContext) {}",
"func (b *BIOS) dispatch(hookName string, args []string, f func() error) error {\n\tb.Log.Printf(\"---- BEGIN HOOK %q ----\\n\", hookName)\n\n\texecutable := fmt.Sprintf(\"./%s.sh\", hookName)\n\n\tcmd := exec.Command(executable, args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdin = os.Stdin\n\tcmd.Env = os.Environ()\n\n\t//fmt.Printf(\" Executing hook: %q\\n\", cmd.Args)\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.Log.Printf(\"---- END HOOK %q ----\\n\", hookName)\n\n\treturn nil\n}",
"func MainStart(args []string) {\n startCmd(getMainCmd, args)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
init configures default parameters for the engine.
|
func (e *Engine) init() {
e.mutex.Lock()
defer e.mutex.Unlock()
if e.ctx == nil || e.cancel == nil {
e.ctx, e.cancel = context.WithCancel(e.parent)
}
if e.timeout == 0 {
e.timeout = DefaultTimeout
}
if len(e.signals) == 0 && !e.noSignal {
e.signals = Signals
}
if e.interrupt == nil {
e.interrupt = make(chan os.Signal, 1)
}
}
|
[
"func Init() {\n\tEngine = gin.Default()\n}",
"func InitEngine(t string, hosts []string) {\n\tvar err error\n //currently we only use one host, we will switch to using\n //2 alternatively in the future\n\tEngine, err = xorm.NewEngine(t, hosts[0])\n\tlogger := xorm.NewSimpleLogger(os.Stdout)\n\tlogger.SetLevel(core.LOG_OFF)\n\tEngine.SetLogger(logger)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func init() {\n\t// set reasonable defaults\n\tsetDefaults()\n\n\t// override defaults with configuration read from configuration file\n\tviper.AddConfigPath(\"$GOPATH/src/github.com/xlab-si/emmy/config\")\n\terr := loadConfig(\"defaults\", \"yml\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}",
"func init() {\n\ttrigger = runtime.GetTrigger()\n\ttoken = newTokenConfig()\n\tctx = storage.GetContext()\n}",
"func init() {\n\tinitconf(configLocation)\n}",
"func init() {\n\tenv.Load()\n\tdatabase.Build()\n\trouter.Build()\n\tinitializeServiceModules()\n}",
"func init() {\n\tflag.Parse()\n\t// Init the models and backend redis store.\n\trs := libstore.NewStore(*redisServer)\n\tuser.Setup(rs)\n\tfeed.Setup(rs)\n\t// Init feeder.\n\tfd = feeder.NewFeeder(\"http://localhost:\" + *keywordServerEndPoint)\n}",
"func (e *AlertEngine) Init() error {\n\te.ticker = NewTicker(time.Now(), time.Second*0, clock.New())\n\te.execQueue = make(chan *Job, 1000)\n\te.scheduler = newScheduler()\n\te.evalHandler = NewEvalHandler()\n\te.ruleReader = newRuleReader()\n\te.log = log.New(\"alerting.engine\")\n\te.resultHandler = newResultHandler(e.RenderService)\n\treturn nil\n}",
"func init() {\n\n\tctx := context.Background()\n\n\tif err := envconfig.Process(ctx, &c); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar err error\n\tdsn := fmt.Sprintf(\"user=%v password=%v dbname=%v port=%v\", c.Dbusername, c.Dbpassword, c.Dbdatabase, c.Dbport)\n\tmodels.Db, err = gorm.Open(postgres.Open(dsn), &gorm.Config{})\n\tif err != nil {\n\t\tfmt.Println(\"Gorm open error : \", err)\n\t\treturn\n\t}\n\tfmt.Println(\"Connected to database\")\n\tmodels.Db.AutoMigrate(&models.User{})\n}",
"func (templateEngine *TemplateEngine) Init(env interface{}) {\n\ttemplateEngine.Init1(env, nil, false)\n}",
"func (c *Config) init() {\n\n\tc.logger = logrus.New()\n\n\t// Connect slots\n\tc.ConnectStringSet(func(key string, val string) {\n\t\tc.SetString(key, val)\n\t})\n\tc.ConnectBoolSet(func(key string, val bool) {\n\t\tc.SetBool(key, val)\n\t})\n\tc.ConnectStringValue(func(key string) string {\n\t\treturn c.GetString(key)\n\t})\n\tc.ConnectBoolValue(func(key string) bool {\n\t\treturn c.GetBool(key)\n\t})\n\tc.ConnectSave(func() {\n\t\tc.save()\n\t})\n\tc.ConnectDefaults(func() {\n\t\tc.SetDefaults()\n\t})\n}",
"func (c *AuthConfig) init() {\n\tif c.Provisioners == nil {\n\t\tc.Provisioners = provisioner.List{}\n\t}\n\tif c.Template == nil {\n\t\tc.Template = &ASN1DN{}\n\t}\n\tif c.Backdate == nil {\n\t\tc.Backdate = &provisioner.Duration{\n\t\t\tDuration: DefaultBackdate,\n\t\t}\n\t}\n}",
"func init() {\n\tinitCfgDir()\n\tinitCreds()\n}",
"func init() {\n\tfor group, values := range defaultConfigs {\n\t\tcore.RegisterConfig(group, values)\n\t}\n\tcore.RegisterService(\"indicator\", indicator.Configs, &indicator.IndicatorServiceFactory{})\n\tcore.RegisterService(\"executor\", executor.Configs, &executor.ExecutorServiceFactory{})\n}",
"func (e *EventSourcedEntity) init() error {\n\te.SnapshotEvery = snapshotEveryDefault\n\treturn nil\n}",
"func init() {\n\tc.getConf()\n\trotateLogger()\n}",
"func init() {\n\tdsqle.AddDoltSystemVariables()\n}",
"func init() {\n\tSetup()\n}",
"func init() {\n\tInitSystemVariables()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Start will launch the engine and start registered hooks. It will block until every hooks has shutdown, gracefully or with force...
|
func (e *Engine) Start() error {
e.init()
go e.waitShutdownNotification()
for _, h := range e.hooks {
e.launch(h)
}
e.wait.Wait()
if e.afterShutdown != nil {
e.afterShutdown()
}
e.mutex.Lock()
defer e.mutex.Unlock()
return e.cause
}
|
[
"func (e *EngineConfig) Start() error {\n\tlogger.Info(\"Engine: Starting...\")\n\n\t// Todo document RunnerType for engine configuration\n\trunnerType := config.GetRunnerType()\n\te.Init(runnerType == \"DIRECT\")\n\n\tactionRunner := e.actionRunner.(interface{})\n\n\tif managedRunner, ok := actionRunner.(util.Managed); ok {\n\t\tutil.StartManaged(\"ActionRunner Service\", managedRunner)\n\t}\n\n\tlogger.Info(\"Engine: Starting Services...\")\n\n\terr := e.serviceManager.Start()\n\n\tif err != nil {\n\t\tlogger.Error(\"Engine: Error Starting Services - \" + err.Error())\n\t} else {\n\t\tlogger.Info(\"Engine: Started Services\")\n\t}\n\n\t// Start the triggers\n\tfor key, value := range e.triggers {\n\t\terr := util.StartManaged(fmt.Sprintf(\"Trigger [ '%s' ]\", key), value.Interf)\n\t\tif err != nil {\n\t\t\tlogger.Infof(\"Trigger [%s] failed to start due to error [%s]\", key, err.Error())\n\t\t\tvalue.Status = trigger.Failed\n\t\t\tvalue.Error = err\n\t\t\tlogger.Debugf(\"StackTrace: %s\", debug.Stack())\n\t\t\tif config.StopEngineOnError() {\n\t\t\t\tlogger.Debugf(\"{%s=true}. Stopping engine\", config.STOP_ENGINE_ON_ERROR_KEY)\n\t\t\t\tlogger.Info(\"Engine: Stopped\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Infof(\"Trigger [%s] started\", key)\n\t\t\tvalue.Status = trigger.Started\n\t\t}\n\t}\n\n\tlogger.Info(\"Engine: Started\")\n\treturn nil\n}",
"func (l *Lifecycle) Start(ctx context.Context) error {\n\tfor _, hook := range l.hooks {\n\t\tif hook.OnStart != nil {\n\t\t\tl.logger.Printf(\"START\\t\\t%s()\", hook.caller)\n\t\t\tif err := hook.OnStart(ctx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tl.numStarted++\n\t}\n\treturn nil\n}",
"func (l *Lifecycle) Start(ctx context.Context) error {\n\tl.mu.Lock()\n\tl.startRecords = make(HookRecords, 0, len(l.hooks))\n\tl.mu.Unlock()\n\n\tfor _, hook := range l.hooks {\n\t\tif hook.OnStart != nil {\n\t\t\tl.mu.Lock()\n\t\t\tl.runningHook = hook\n\t\t\tl.mu.Unlock()\n\n\t\t\truntime, err := l.runStartHook(ctx, hook)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tl.mu.Lock()\n\t\t\tl.startRecords = append(l.startRecords, HookRecord{\n\t\t\t\tCallerFrame: hook.callerFrame,\n\t\t\t\tFunc: hook.OnStart,\n\t\t\t\tRuntime: runtime,\n\t\t\t})\n\t\t\tl.mu.Unlock()\n\t\t}\n\t\tl.numStarted++\n\t}\n\n\treturn nil\n}",
"func (lr *LogicRunner) Start(ctx context.Context) error {\n\tif lr.Cfg.BuiltIn != nil {\n\t\tif err := lr.initializeBuiltin(ctx); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to initialize builtin VM\")\n\t\t}\n\t}\n\n\tif lr.Cfg.GoPlugin != nil {\n\t\tif err := lr.initializeGoPlugin(ctx); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to initialize goplugin VM\")\n\t\t}\n\t}\n\n\tif lr.Cfg.RPCListen != \"\" {\n\t\tlr.rpc.Start(ctx)\n\t}\n\n\tlr.ArtifactManager.InjectFinish()\n\n\treturn nil\n}",
"func Start(cfg *config.Config, s *storage.Engine, c *cloud.Engine, m *job.Manager) error {\n\n\tloop, err := newPhoenixLoop(cfg, s, c, m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tloop.start()\n\treturn nil\n}",
"func (o *FakeObjectTrackers) Start() {\n\tgo func() {\n\t\terr := o.ControlMachine.Start()\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"failed to start machine object tracker, Err: %v\", err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\terr := o.TargetCore.Start()\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"failed to start target core object tracker, Err: %v\", err)\n\t\t}\n\t}()\n}",
"func (e *Engine) Start() error {\n\n\twgIface := e.config.WgIface\n\twgAddr := e.config.WgAddr\n\tmyPrivateKey := e.config.WgPrivateKey\n\n\terr := iface.Create(wgIface, wgAddr)\n\tif err != nil {\n\t\tlog.Errorf(\"failed creating interface %s: [%s]\", wgIface, err.Error())\n\t\treturn err\n\t}\n\n\terr = iface.Configure(wgIface, myPrivateKey.String())\n\tif err != nil {\n\t\tlog.Errorf(\"failed configuring Wireguard interface [%s]: %s\", wgIface, err.Error())\n\t\treturn err\n\t}\n\n\tport, err := iface.GetListenPort(wgIface)\n\tif err != nil {\n\t\tlog.Errorf(\"failed getting Wireguard listen port [%s]: %s\", wgIface, err.Error())\n\t\treturn err\n\t}\n\te.wgPort = *port\n\n\te.receiveSignalEvents()\n\te.receiveManagementEvents()\n\n\treturn nil\n}",
"func (e *Engine) launch(h Hook) {\n\n\te.wait.Add(1)\n\n\tgo func() {\n\n\t\tdefer e.wait.Done()\n\n\t\truntime := &HookRuntime{}\n\n\t\t// Wait for an event to notify this goroutine that a shutdown is required.\n\t\t// It could either be from engine's context or during Hook startup if an error has occurred.\n\t\t// NOTE: If HookRuntime returns an error, we have to shutdown every Hook...\n\t\terr := runtime.WaitForEvent(e.ctx, h)\n\t\tif err != nil {\n\t\t\te.mutex.Lock()\n\t\t\te.log(err)\n\t\t\te.cancel()\n\t\t\te.cause = err\n\t\t\te.mutex.Unlock()\n\t\t}\n\n\t\t// Wait for hook to gracefully shutdown, or kill it after timeout.\n\t\t// This is handled by HookRuntime.\n\t\tfor _, err := range runtime.Shutdown(e.timeout) {\n\t\t\te.mutex.Lock()\n\t\t\te.log(err)\n\t\t\te.mutex.Unlock()\n\t\t}\n\n\t}()\n}",
"func (s *Server) Start(ctx context.Context) {\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tlogger := log.FromContext(ctx)\n\t\tlogger.Info(\"I have to go...\")\n\t\tlogger.Info(\"Stopping server gracefully\")\n\t\ts.Stop()\n\t}()\n\n\ts.tcpEntryPoints.Start()\n\ts.udpEntryPoints.Start()\n\ts.watcher.Start()\n\n\ts.routinesPool.GoCtx(s.listenSignals)\n}",
"func (engine *Engine) Start() {\n\tfor _, pinger := range engine.Pingers {\n\t\tgo pinger.Start()\n\t}\n}",
"func (mgr *Manager) Start(register string) error {\n\terr := mgr.Register(register)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mgr.tagsMap == nil || mgr.tagsMap[\"pindex\"] {\n\t\tmldd := mgr.options[\"managerLoadDataDir\"]\n\t\tif mldd == \"sync\" || mldd == \"async\" || mldd == \"\" {\n\t\t\terr := mgr.LoadDataDir()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif mgr.tagsMap == nil || mgr.tagsMap[\"planner\"] {\n\t\tgo mgr.PlannerLoop()\n\t\tgo mgr.PlannerKick(\"start\")\n\t}\n\n\tif mgr.tagsMap == nil ||\n\t\t(mgr.tagsMap[\"pindex\"] && mgr.tagsMap[\"janitor\"]) {\n\t\tgo mgr.JanitorLoop()\n\t\tgo mgr.JanitorKick(\"start\")\n\t}\n\n\treturn mgr.StartCfg()\n}",
"func (lr *LogicRunner) Start(ctx context.Context) error {\n\tlr.ArtifactManager = lr.Ledger.GetArtifactManager()\n\n\tif lr.Cfg.BuiltIn != nil {\n\t\tbi := builtin.NewBuiltIn(lr.MessageBus, lr.ArtifactManager)\n\t\tif err := lr.RegisterExecutor(core.MachineTypeBuiltin, bi); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlr.machinePrefs = append(lr.machinePrefs, core.MachineTypeBuiltin)\n\t}\n\n\tif lr.Cfg.GoPlugin != nil {\n\t\tif lr.Cfg.RPCListen != \"\" {\n\t\t\tStartRPC(ctx, lr)\n\t\t}\n\n\t\tgp, err := goplugin.NewGoPlugin(lr.Cfg, lr.MessageBus, lr.ArtifactManager)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := lr.RegisterExecutor(core.MachineTypeGoPlugin, gp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlr.machinePrefs = append(lr.machinePrefs, core.MachineTypeGoPlugin)\n\t}\n\n\t// TODO: use separate handlers\n\tif err := lr.MessageBus.Register(core.TypeCallMethod, lr.Execute); err != nil {\n\t\treturn err\n\t}\n\tif err := lr.MessageBus.Register(core.TypeCallConstructor, lr.Execute); err != nil {\n\t\treturn err\n\t}\n\n\tif err := lr.MessageBus.Register(core.TypeExecutorResults, lr.ExecutorResults); err != nil {\n\t\treturn err\n\t}\n\tif err := lr.MessageBus.Register(core.TypeValidateCaseBind, lr.ValidateCaseBind); err != nil {\n\t\treturn err\n\t}\n\tif err := lr.MessageBus.Register(core.TypeValidationResults, lr.ProcessValidationResults); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (_m *MockHistoryEngine) Start() {\n\t_m.Called()\n}",
"func (rt *Runtime) Start() error {\n\tch := make(chan struct{})\n\tgo func() {\n\t\tch <- struct{}{}\n\t\trt.Worker.Start()\n\t}()\n\t<-ch\n\terr := rt.runLifecycle(rt.Port)\n\trt.Worker.Stop()\n\treturn err\n}",
"func (s *ExtSupervisor) Start(ctx context.Context, registry map[string]extension.RunFunc) error {\n\ts.runmu.Lock()\n\tdefer s.runmu.Unlock()\n\tfor name, runf := range registry {\n\t\ts.startService(ctx, name, runf)\n\t}\n\treturn nil\n}",
"func (bot *Bot) Start() (err error) {\n\tif bot.opts.Webhook != nil {\n\t\treturn bot.startWebhook()\n\t}\n\treturn nil\n}",
"func (p *Init) Start(ctx context.Context) error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\treturn p.initState.Start(ctx)\n}",
"func (c *Concentrator) Start() {\n\tc.exitWG.Add(1)\n\tgo func() {\n\t\tdefer watchdog.LogOnPanic()\n\t\tdefer c.exitWG.Done()\n\t\tc.Run()\n\t}()\n}",
"func (m *PluginMonitor) Start(ctx context.Context) {\n\tgo m.run(ctx)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NEW: This method is only available in solanacore v1.7 or newer. Please use getConfirmedBlocks for solanacore v1.6 GetBlocks returns a list of confirmed blocks between two slots Max range allowed is 500,000 slot
|
func (c *RpcClient) GetBlocks(ctx context.Context, startSlot uint64, endSlot uint64) (GetBlocksResponse, error) {
return c.processGetBlocks(c.Call(ctx, "getBlocks", startSlot, endSlot))
}
|
[
"func (s *RpcClient) GetConfirmedBlocks(ctx context.Context, startSlot uint64, endSlot uint64) ([]uint64, error) {\n\tres := struct {\n\t\tGeneralResponse\n\t\tResult []uint64 `json:\"result\"`\n\t}{}\n\terr := s.request(ctx, \"getConfirmedBlocks\", []interface{}{startSlot, endSlot}, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res.Result, nil\n}",
"func (s *RpcClient) GetConfirmedBlocksWithLimit(ctx context.Context, startSlot uint64, limit uint64) ([]uint64, error) {\n\tres := struct {\n\t\tGeneralResponse\n\t\tResult []uint64 `json:\"result\"`\n\t}{}\n\terr := s.request(ctx, \"getConfirmedBlocksWithLimit\", []interface{}{startSlot, limit}, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res.Result, nil\n}",
"func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts ...pg.QOpt) ([]LogPollerBlock, error) {\n\tvar blocks []LogPollerBlock\n\n\t// Do nothing if no blocks are requested.\n\tif len(numbers) == 0 {\n\t\treturn blocks, nil\n\t}\n\n\t// Assign the requested blocks to a mapping.\n\tblocksRequested := make(map[uint64]struct{})\n\tfor _, b := range numbers {\n\t\tblocksRequested[b] = struct{}{}\n\t}\n\n\t// Retrieve all blocks within this range from the log poller.\n\tblocksFound := make(map[uint64]LogPollerBlock)\n\tqopts = append(qopts, pg.WithParentCtx(ctx))\n\tminRequestedBlock := mathutil.Min(numbers[0], numbers[1:]...)\n\tmaxRequestedBlock := mathutil.Max(numbers[0], numbers[1:]...)\n\tlpBlocks, err := lp.orm.GetBlocksRange(minRequestedBlock, maxRequestedBlock, qopts...)\n\tif err != nil {\n\t\tlp.lggr.Warnw(\"Error while retrieving blocks from log pollers blocks table. Falling back to RPC...\", \"requestedBlocks\", numbers, \"err\", err)\n\t} else {\n\t\tfor _, b := range lpBlocks {\n\t\t\tif _, ok := blocksRequested[uint64(b.BlockNumber)]; ok {\n\t\t\t\t// Only fill requested blocks.\n\t\t\t\tblocksFound[uint64(b.BlockNumber)] = b\n\t\t\t}\n\t\t}\n\t\tlp.lggr.Debugw(\"Got blocks from log poller\", \"blockNumbers\", maps.Keys(blocksFound))\n\t}\n\n\t// Fill any remaining blocks from the client.\n\tblocksFoundFromRPC, err := lp.fillRemainingBlocksFromRPC(ctx, numbers, blocksFound)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor num, b := range blocksFoundFromRPC {\n\t\tblocksFound[num] = b\n\t}\n\n\tvar blocksNotFound []uint64\n\tfor _, num := range numbers {\n\t\tb, ok := blocksFound[num]\n\t\tif !ok {\n\t\t\tblocksNotFound = append(blocksNotFound, num)\n\t\t}\n\t\tblocks = append(blocks, b)\n\t}\n\n\tif len(blocksNotFound) > 0 {\n\t\treturn nil, errors.Errorf(\"blocks were not found in db or RPC call: %v\", blocksNotFound)\n\t}\n\n\treturn blocks, nil\n}",
"func getBlocks() {\n\tURL := \"https://api.sendgrid.com/v3/suppression/blocks?start_time=\" + strconv.Itoa(config.LastTimestamp)\n\n\treq, err := http.NewRequest(\"GET\", URL, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+config.SendGridToken)\n\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed to build Request: %s\", err)\n\t\treturn\n\t}\n\n\tclient := &http.Client{}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tlogger.Errorf(\"HTTP Call failed: %s\", err.Error())\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tdecoder := json.NewDecoder(res.Body)\n\tvar blocks []Block\n\terr = decoder.Decode(&blocks)\n\tif err != nil {\n\t\tlogger.Errorf(\"Failed parsing json: %T\\n%s\\n%#v\\n\", err, err, err)\n\t} else {\n\t\tcheckBlocks(blocks)\n\t}\n}",
"func (nc *NSBClient) GetBlocks(rangeL, rangeR int64) (*BlocksInfo, error) {\n\tb, err := nc.handler.Group(\"/blockchain\").GetWithParams(request.Param{\n\t\t\"minHeight\": rangeL,\n\t\t\"maxHeight\": rangeR,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar bb []byte\n\tbb, err = nc.preloadJSONResponse(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar a BlocksInfo\n\terr = json.Unmarshal(bb, &a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &a, nil\n}",
"func (bc *Blockchain) GenerateBlock(miner string, shard uint32, validators *ValidatorsBook) (*protobufs.Block, error) {\n\thash := []byte{}\n\n\tif bc.CurrentBlock != 0 {\n\t\tcurrBlocks := []byte{}\n\t\tvar err error\n\t\t// There may be holes in the blockchain. Keep going till you find a block\n\t\tfor i := bc.CurrentBlock - 1; i > 0; i-- {\n\t\t\tcurrBlocks, err = bc.GetBlock(i)\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tbhash := sha256.Sum256(currBlocks)\n\t\thash = bhash[:]\n\t}\n\n\tblock := protobufs.Block{\n\t\tIndex: bc.CurrentBlock,\n\t\tTimestamp: uint64(time.Now().Unix()),\n\t\tMiner: miner,\n\t\tPrevHash: hash,\n\t\tShard: shard,\n\t}\n\n\tblockHeader, err := proto.Marshal(&block)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar transactions []*protobufs.Transaction\n\n\tcurrentLen := len(blockHeader)\n\n\t// Check that the len is smaller than the max\n\tfor currentLen < bc.Mempool.maxBlockBytes {\n\t\ttx, err := bc.Mempool.queue.Pop()\n\n\t\t// The mempool is empty, that's all the transactions we can include\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\trtx := interface{}(tx).(*protobufs.Transaction)\n\n\t\t// check if the transazion is form my shard\n\t\tsenderWallet := wallet.BytesToAddress(rtx.GetSender(), rtx.GetShard())\n\t\tshardSender, err := validators.GetShard(senderWallet)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tcurrentShard, err := validators.GetShard(miner)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tif shardSender != currentShard {\n\t\t\tcontinue\n\t\t}\n\n\t\trawTx, err := proto.Marshal(rtx)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// check if the hash of this transaction is inside bc.TransactionArrived that contain all the hash of the prev n transaction\n\t\tbhash := sha256.Sum256(rawTx)\n\t\thash := bhash[:]\n\t\talreadyReceived := false\n\t\tfor _, h := range bc.TransactionArrived {\n\t\t\tequal := reflect.DeepEqual(h, hash)\n\t\t\tif equal 
{\n\t\t\t\talreadyReceived = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif alreadyReceived {\n\t\t\tcontinue\n\t\t}\n\n\t\ttransactions = append(transactions, rtx)\n\t\tcurrentLen += len(rawTx)\n\t}\n\n\tblock.Transactions = transactions\n\n\tmerkleRootTransaction := []byte{}\n\tmerkleRootReceipt := []byte{}\n\tif len(transactions) != 0 {\n\t\tmerkleRootTransaction, merkleRootReceipt, err = GenerateMerkleTree(transactions)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tblock.MerkleRootTransaction = merkleRootTransaction\n\tblock.MerkleRootReceipt = merkleRootReceipt\n\n\treturn &block, nil\n}",
"func (core *coreService) RawBlocks(startHeight uint64, count uint64, withReceipts bool, withTransactionLogs bool) ([]*iotexapi.BlockInfo, error) {\n\tif count == 0 || count > core.cfg.RangeQueryLimit {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"range exceeds the limit\")\n\t}\n\n\ttipHeight := core.bc.TipHeight()\n\tif startHeight > tipHeight {\n\t\treturn nil, status.Error(codes.InvalidArgument, \"start height should not exceed tip height\")\n\t}\n\tendHeight := startHeight + count - 1\n\tif endHeight > tipHeight {\n\t\tendHeight = tipHeight\n\t}\n\tvar res []*iotexapi.BlockInfo\n\tfor height := startHeight; height <= endHeight; height++ {\n\t\tblk, err := core.dao.GetBlockByHeight(height)\n\t\tif err != nil {\n\t\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t\t}\n\t\tvar receiptsPb []*iotextypes.Receipt\n\t\tif withReceipts && height > 0 {\n\t\t\treceipts, err := core.dao.GetReceipts(height)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t\t\t}\n\t\t\tfor _, receipt := range receipts {\n\t\t\t\treceiptsPb = append(receiptsPb, receipt.ConvertToReceiptPb())\n\t\t\t}\n\t\t}\n\t\tvar transactionLogs *iotextypes.TransactionLogs\n\t\tif withTransactionLogs {\n\t\t\tif transactionLogs, err = core.dao.TransactionLogs(height); err != nil {\n\t\t\t\treturn nil, status.Error(codes.NotFound, err.Error())\n\t\t\t}\n\t\t}\n\t\tres = append(res, &iotexapi.BlockInfo{\n\t\t\tBlock: blk.ConvertToBlockPb(),\n\t\t\tReceipts: receiptsPb,\n\t\t\tTransactionLogs: transactionLogs,\n\t\t})\n\t}\n\treturn res, nil\n}",
"func (synckerManager *SynckerManager) GetS2BBlocksForBeaconValidator(bestViewShardHash map[byte]common.Hash, list map[byte][]common.Hash) (map[byte][]interface{}, error) {\n\ts2bPoolLists := synckerManager.GetS2BBlocksForBeaconProducer(bestViewShardHash, list)\n\n\tmissingBlocks := compareLists(s2bPoolLists, list)\n\t// synckerManager.config.Server.\n\tif len(missingBlocks) > 0 {\n\t\tctx, _ := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tsynckerManager.StreamMissingShardToBeaconBlock(ctx, missingBlocks)\n\t\t//fmt.Println(\"debug finish stream missing s2b block\")\n\n\t\ts2bPoolLists = synckerManager.GetS2BBlocksForBeaconProducer(bestViewShardHash, list)\n\t\tmissingBlocks = compareLists(s2bPoolLists, list)\n\t\tif len(missingBlocks) > 0 {\n\t\t\treturn nil, errors.New(\"Unable to sync required block in time\")\n\t\t}\n\t}\n\n\tfor sid, heights := range list {\n\t\tif len(s2bPoolLists[sid]) != len(heights) {\n\t\t\treturn nil, fmt.Errorf(\"S2BPoolLists not match sid:%v pool:%v producer:%v\", sid, len(s2bPoolLists[sid]), len(heights))\n\t\t}\n\t}\n\n\treturn s2bPoolLists, nil\n}",
"func (gw *Gateway) GetBlocksInRange(start, end uint64) ([]coin.SignedBlock, error) {\n\tvar blocks []coin.SignedBlock\n\tvar err error\n\tgw.strand(\"GetBlocksInRange\", func() {\n\t\tblocks, err = gw.v.GetBlocksInRange(start, end)\n\t})\n\treturn blocks, err\n}",
"func (db *DB) blocksForInterval(mint, maxt int64) []Block {\n\tvar bs []Block\n\n\tfor _, b := range db.blocks {\n\t\tm := b.Meta()\n\t\tif intervalOverlap(mint, maxt, m.MinTime, m.MaxTime) {\n\t\t\tbs = append(bs, b)\n\t\t}\n\t}\n\n\treturn bs\n}",
"func consensusBlocksGetFromBlock(b types.Block, h types.BlockHeight) ConsensusBlocksGet {\n\ttxns := make([]ConsensusBlocksGetTxn, 0, len(b.Transactions))\n\tfor _, t := range b.Transactions {\n\t\t// Get the transaction's SiacoinOutputs.\n\t\tscos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(t.SiacoinOutputs))\n\t\tfor i, sco := range t.SiacoinOutputs {\n\t\t\tscos = append(scos, ConsensusBlocksGetSiacoinOutput{\n\t\t\t\tID: t.SiacoinOutputID(uint64(i)),\n\t\t\t\tValue: sco.Value,\n\t\t\t\tUnlockHash: sco.UnlockHash,\n\t\t\t})\n\t\t}\n\t\t// Get the transaction's SiafundOutputs.\n\t\tsfos := make([]ConsensusBlocksGetSiafundOutput, 0, len(t.SiafundOutputs))\n\t\tfor i, sfo := range t.SiafundOutputs {\n\t\t\tsfos = append(sfos, ConsensusBlocksGetSiafundOutput{\n\t\t\t\tID: t.SiafundOutputID(uint64(i)),\n\t\t\t\tValue: sfo.Value,\n\t\t\t\tUnlockHash: sfo.UnlockHash,\n\t\t\t})\n\t\t}\n\t\t// Get the transaction's FileContracts.\n\t\tfcos := make([]ConsensusBlocksGetFileContract, 0, len(t.FileContracts))\n\t\tfor i, fc := range t.FileContracts {\n\t\t\t// Get the FileContract's valid proof outputs.\n\t\t\tfcid := t.FileContractID(uint64(i))\n\t\t\tvpos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(fc.ValidProofOutputs))\n\t\t\tfor j, vpo := range fc.ValidProofOutputs {\n\t\t\t\tvpos = append(vpos, ConsensusBlocksGetSiacoinOutput{\n\t\t\t\t\tID: fcid.StorageProofOutputID(types.ProofValid, uint64(j)),\n\t\t\t\t\tValue: vpo.Value,\n\t\t\t\t\tUnlockHash: vpo.UnlockHash,\n\t\t\t\t})\n\t\t\t}\n\t\t\t// Get the FileContract's missed proof outputs.\n\t\t\tmpos := make([]ConsensusBlocksGetSiacoinOutput, 0, len(fc.MissedProofOutputs))\n\t\t\tfor j, mpo := range fc.MissedProofOutputs {\n\t\t\t\tmpos = append(mpos, ConsensusBlocksGetSiacoinOutput{\n\t\t\t\t\tID: fcid.StorageProofOutputID(types.ProofMissed, uint64(j)),\n\t\t\t\t\tValue: mpo.Value,\n\t\t\t\t\tUnlockHash: mpo.UnlockHash,\n\t\t\t\t})\n\t\t\t}\n\t\t\tfcos = append(fcos, 
ConsensusBlocksGetFileContract{\n\t\t\t\tID: fcid,\n\t\t\t\tFileSize: fc.FileSize,\n\t\t\t\tFileMerkleRoot: fc.FileMerkleRoot,\n\t\t\t\tWindowStart: fc.WindowStart,\n\t\t\t\tWindowEnd: fc.WindowEnd,\n\t\t\t\tPayout: fc.Payout,\n\t\t\t\tValidProofOutputs: vpos,\n\t\t\t\tMissedProofOutputs: mpos,\n\t\t\t\tUnlockHash: fc.UnlockHash,\n\t\t\t\tRevisionNumber: fc.RevisionNumber,\n\t\t\t})\n\t\t}\n\t\ttxns = append(txns, ConsensusBlocksGetTxn{\n\t\t\tID: t.ID(),\n\t\t\tSiacoinInputs: t.SiacoinInputs,\n\t\t\tSiacoinOutputs: scos,\n\t\t\tFileContracts: fcos,\n\t\t\tFileContractRevisions: t.FileContractRevisions,\n\t\t\tStorageProofs: t.StorageProofs,\n\t\t\tSiafundInputs: t.SiafundInputs,\n\t\t\tSiafundOutputs: sfos,\n\t\t\tMinerFees: t.MinerFees,\n\t\t\tArbitraryData: t.ArbitraryData,\n\t\t\tTransactionSignatures: t.TransactionSignatures,\n\t\t})\n\t}\n\treturn ConsensusBlocksGet{\n\t\tID: b.ID(),\n\t\tHeight: h,\n\t\tParentID: b.ParentID,\n\t\tNonce: b.Nonce,\n\t\tTimestamp: b.Timestamp,\n\t\tMinerPayouts: b.MinerPayouts,\n\t\tTransactions: txns,\n\t}\n}",
"func GetBlockConfirmations() uint64 {\n\n\tconfirmationCount := Get(\"BlockConfirmations\")\n\tif confirmationCount == \"\" {\n\t\treturn 0\n\t}\n\n\tparsedConfirmationCount, err := strconv.ParseUint(confirmationCount, 10, 64)\n\tif err != nil {\n\t\tlog.Printf(\"[!] Failed to parse block confirmations : %s\\n\", err.Error())\n\t\treturn 0\n\t}\n\n\treturn parsedConfirmationCount\n\n}",
"func TestGetBlocks(t *testing.T) {\n\tpver := ProtocolVersion\n\n\t// Block 99500 hash.\n\thashStr := \"000000000002e7ad7b9eef9479e4aabc65cb831269cc20d2632c13684406dee0\"\n\tlocatorHash, err := chainhash.NewHashFromStr(hashStr)\n\tif err != nil {\n\t\tt.Errorf(\"NewHashFromStr: %v\", err)\n\t}\n\n\t// Block 100000 hash.\n\thashStr = \"3ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506\"\n\thashStop, err := chainhash.NewHashFromStr(hashStr)\n\tif err != nil {\n\t\tt.Errorf(\"NewHashFromStr: %v\", err)\n\t}\n\n\t// Ensure we get the same data back out.\n\tmsg := NewMsgGetBlocks(hashStop)\n\tif !msg.HashStop.IsEqual(hashStop) {\n\t\tt.Errorf(\"NewMsgGetBlocks: wrong stop hash - got %v, want %v\",\n\t\t\tmsg.HashStop, hashStop)\n\t}\n\n\t// Ensure the command is expected value.\n\twantCmd := \"getblocks\"\n\tif cmd := msg.Command(); cmd != wantCmd {\n\t\tt.Errorf(\"NewMsgGetBlocks: wrong command - got %v want %v\",\n\t\t\tcmd, wantCmd)\n\t}\n\n\t// Ensure max payload is expected value for latest protocol version.\n\t// Protocol version 4 bytes + num hashes (varInt) + max block locator\n\t// hashes + hash stop.\n\twantPayload := uint32(16045)\n\tmaxPayload := msg.MaxPayloadLength(pver)\n\tif maxPayload != wantPayload {\n\t\tt.Errorf(\"MaxPayloadLength: wrong max payload length for \"+\n\t\t\t\"protocol version %d - got %v, want %v\", pver,\n\t\t\tmaxPayload, wantPayload)\n\t}\n\n\t// Ensure block locator hashes are added properly.\n\terr = msg.AddBlockLocatorHash(locatorHash)\n\tif err != nil {\n\t\tt.Errorf(\"AddBlockLocatorHash: %v\", err)\n\t}\n\tif msg.BlockLocatorHashes[0] != locatorHash {\n\t\tt.Errorf(\"AddBlockLocatorHash: wrong block locator added - \"+\n\t\t\t\"got %v, want %v\",\n\t\t\tspew.Sprint(msg.BlockLocatorHashes[0]),\n\t\t\tspew.Sprint(locatorHash))\n\t}\n\n\t// Ensure adding more than the max allowed block locator hashes per\n\t// message returns an error.\n\tfor i := 0; i < MaxBlockLocatorsPerMsg; i++ {\n\t\terr = 
msg.AddBlockLocatorHash(locatorHash)\n\t}\n\tif err == nil {\n\t\tt.Errorf(\"AddBlockLocatorHash: expected error on too many \" +\n\t\t\t\"block locator hashes not received\")\n\t}\n}",
"func (db *Database) QueryBlocks(before int, after int, limit int) ([]schema.Block, error) {\n\tblocks := make([]schema.Block, 0)\n\n\tvar err error\n\n\tswitch {\n\tcase before > 0:\n\t\terr = db.Model(&blocks).\n\t\t\tWhere(\"height < ?\", before).\n\t\t\tLimit(limit).\n\t\t\tOrder(\"id DESC\").\n\t\t\tSelect()\n\tcase after >= 0:\n\t\terr = db.Model(&blocks).\n\t\t\tWhere(\"height > ?\", after).\n\t\t\tLimit(limit).\n\t\t\tOrder(\"id ASC\").\n\t\t\tSelect()\n\tdefault:\n\t\terr = db.Model(&blocks).\n\t\t\tLimit(limit).\n\t\t\tOrder(\"id DESC\").\n\t\t\tSelect()\n\t}\n\n\tif err == pg.ErrNoRows {\n\t\treturn blocks, fmt.Errorf(\"no rows in block table: %s\", err)\n\t}\n\n\tif err != nil {\n\t\treturn blocks, fmt.Errorf(\"unexpected database error: %s\", err)\n\t}\n\n\treturn blocks, nil\n}",
"func (p *Piece) PendingBlocks() int {\n\treturn p.blocks - (p.unrequestedBlocks.Len() + p.completeBlocks.Len())\n}",
"func (b *logEventBuffer) getBlocksInRange(start, end int) []fetchedBlock {\n\tvar blocksInRange []fetchedBlock\n\tstart, end = b.normalRange(start, end)\n\tif start == -1 || end == -1 {\n\t\t// invalid range\n\t\treturn blocksInRange\n\t}\n\tif start < end {\n\t\treturn b.blocks[start:end]\n\t}\n\t// in case we get circular range such as [0, 1, end, ... , start, ..., size-1]\n\t// we need to return the blocks in two ranges: [start, size-1] and [0, end]\n\tblocksInRange = append(blocksInRange, b.blocks[start:]...)\n\tblocksInRange = append(blocksInRange, b.blocks[:end]...)\n\n\treturn blocksInRange\n}",
"func (c *RPCClient) FilterBlocks(\n\treq *FilterBlocksRequest) (*FilterBlocksResponse, er.R) {\n\n\tblockFilterer := NewBlockFilterer(c.chainParams, req)\n\n\t// Construct the watchlist using the addresses and outpoints contained\n\t// in the filter blocks request.\n\twatchList, err := buildFilterBlocksWatchList(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Iterate over the requested blocks, fetching the compact filter for\n\t// each one, and matching it against the watchlist generated above. If\n\t// the filter returns a positive match, the full block is then requested\n\t// and scanned for addresses using the block filterer.\n\tfor i, blk := range req.Blocks {\n\t\trawFilter, err := c.GetCFilter(&blk.Hash, wire.GCSFilterRegular)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Ensure the filter is large enough to be deserialized.\n\t\tif len(rawFilter.Data) < 4 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilter, err := gcs.FromNBytes(\n\t\t\tbuilder.DefaultP, builder.DefaultM, rawFilter.Data,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Skip any empty filters.\n\t\tif filter.N() == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := builder.DeriveKey(&blk.Hash)\n\t\tmatched, err := filter.MatchAny(key, watchList)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if !matched {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Infof(\"Fetching block height=%d hash=%v\",\n\t\t\tblk.Height, blk.Hash)\n\n\t\trawBlock, err := c.GetBlock(&blk.Hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !blockFilterer.FilterBlock(rawBlock) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// If any external or internal addresses were detected in this\n\t\t// block, we return them to the caller so that the rescan\n\t\t// windows can widened with subsequent addresses. 
The\n\t\t// `BatchIndex` is returned so that the caller can compute the\n\t\t// *next* block from which to begin again.\n\t\tresp := &FilterBlocksResponse{\n\t\t\tBatchIndex: uint32(i),\n\t\t\tBlockMeta: blk,\n\t\t\tFoundExternalAddrs: blockFilterer.FoundExternal,\n\t\t\tFoundInternalAddrs: blockFilterer.FoundInternal,\n\t\t\tFoundOutPoints: blockFilterer.FoundOutPoints,\n\t\t\tRelevantTxns: blockFilterer.RelevantTxns,\n\t\t}\n\n\t\treturn resp, nil\n\t}\n\n\t// No addresses were found for this range.\n\treturn nil, nil\n}",
"func (m *GetBlocksMessage) GetBlockLocator() []*bc.Hash {\n\tblockLocator := []*bc.Hash{}\n\tfor _, rawHash := range m.RawBlockLocator {\n\t\thash := bc.NewHash(rawHash)\n\t\tblockLocator = append(blockLocator, &hash)\n\t}\n\treturn blockLocator\n}",
"func (dcr *ExchangeWallet) checkForNewBlocks() {\n\tctx, cancel := context.WithTimeout(dcr.ctx, 2*time.Second)\n\tdefer cancel()\n\tnewTip, err := dcr.getBestBlock(ctx)\n\tif err != nil {\n\t\tdcr.tipChange(fmt.Errorf(\"failed to get best block: %w\", err))\n\t\treturn\n\t}\n\n\t// This method is called frequently. Don't hold write lock\n\t// unless tip has changed.\n\tdcr.tipMtx.RLock()\n\tsameTip := dcr.currentTip.hash.IsEqual(newTip.hash)\n\tdcr.tipMtx.RUnlock()\n\tif sameTip {\n\t\treturn\n\t}\n\n\tdcr.tipMtx.Lock()\n\tdefer dcr.tipMtx.Unlock()\n\n\tprevTip := dcr.currentTip\n\tdcr.currentTip = newTip\n\tdcr.log.Debugf(\"tip change: %d (%s) => %d (%s)\", prevTip.height, prevTip.hash, newTip.height, newTip.hash)\n\tdcr.tipChange(nil)\n\n\t// Search for contract redemption in new blocks if there\n\t// are contracts pending redemption.\n\tdcr.findRedemptionMtx.RLock()\n\tpendingContractsCount := len(dcr.findRedemptionQueue)\n\tcontractOutpoints := make([]outPoint, 0, pendingContractsCount)\n\tfor contractOutpoint := range dcr.findRedemptionQueue {\n\t\tcontractOutpoints = append(contractOutpoints, contractOutpoint)\n\t}\n\tdcr.findRedemptionMtx.RUnlock()\n\tif pendingContractsCount == 0 {\n\t\treturn\n\t}\n\n\t// Use the previous tip hash to determine the starting point for\n\t// the redemption search. If there was a re-org, the starting point\n\t// would be the common ancestor of the previous tip and the new tip.\n\t// Otherwise, the starting point would be the block at previous tip\n\t// height + 1.\n\tvar startPoint *block\n\tvar startPointErr error\n\tprevTipBlock, err := dcr.node.GetBlockVerbose(dcr.ctx, prevTip.hash, false)\n\tswitch {\n\tcase err != nil:\n\t\tstartPointErr = fmt.Errorf(\"getBlockHeader error for prev tip hash %s: %w\", prevTip.hash, translateRPCCancelErr(err))\n\tcase prevTipBlock.Confirmations < 0:\n\t\t// There's been a re-org, common ancestor will be height\n\t\t// plus negative confirmation e.g. 
155 + (-3) = 152.\n\t\treorgHeight := prevTipBlock.Height + prevTipBlock.Confirmations\n\t\tdcr.log.Debugf(\"reorg detected from height %d to %d\", reorgHeight, newTip.height)\n\t\treorgHash, err := dcr.node.GetBlockHash(dcr.ctx, reorgHeight)\n\t\tif err != nil {\n\t\t\tstartPointErr = fmt.Errorf(\"getBlockHash error for reorg height %d: %w\", reorgHeight, translateRPCCancelErr(err))\n\t\t} else {\n\t\t\tstartPoint = &block{hash: reorgHash, height: reorgHeight}\n\t\t}\n\tcase newTip.height-prevTipBlock.Height > 1:\n\t\t// 2 or more blocks mined since last tip, start at prevTip height + 1.\n\t\tafterPrivTip := prevTipBlock.Height + 1\n\t\thashAfterPrevTip, err := dcr.node.GetBlockHash(dcr.ctx, afterPrivTip)\n\t\tif err != nil {\n\t\t\tstartPointErr = fmt.Errorf(\"getBlockHash error for height %d: %w\", afterPrivTip, translateRPCCancelErr(err))\n\t\t} else {\n\t\t\tstartPoint = &block{hash: hashAfterPrevTip, height: afterPrivTip}\n\t\t}\n\tdefault:\n\t\t// Just 1 new block since last tip report, search the lone block.\n\t\tstartPoint = newTip\n\t}\n\n\t// Redemption search would be compromised if the starting point cannot\n\t// be determined, as searching just the new tip might result in blocks\n\t// being omitted from the search operation. If that happens, cancel all\n\t// find redemption requests in queue.\n\tif startPointErr != nil {\n\t\tdcr.fatalFindRedemptionsError(fmt.Errorf(\"new blocks handler error: %w\", startPointErr), contractOutpoints)\n\t} else {\n\t\tgo dcr.findRedemptionsInBlockRange(startPoint, newTip, contractOutpoints)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
initDNSServer creates an instance of the dnsforward.Server. Please note that we must do it even if we don't start it, so that we have access to the query log and the stats
|
// initDNSServer creates the dnsforward.Server instance backed by baseDir.
// The data directory is created first; if that fails the process exits,
// since neither the query log nor the stats can work without it.
func initDNSServer(baseDir string) {
	if err := os.MkdirAll(baseDir, 0755); err != nil {
		log.Fatalf("Cannot create DNS data dir at %s: %s", baseDir, err)
	}
	dnsServer = dnsforward.NewServer(baseDir)
}
|
[
"func (s *Server) initDNSServer(args *PilotArgs) {\n\tif dns.DNSAddr.Get() != \"\" {\n\t\tlog.Info(\"initializing DNS server\")\n\t\tif err := s.initDNSTLSListener(dns.DNSAddr.Get(), args.ServerOptions.TLSOptions); err != nil {\n\t\t\tlog.Warna(\"error initializing DNS-over-TLS listener \", err)\n\t\t}\n\n\t\t// Respond to CoreDNS gRPC queries.\n\t\ts.addStartFunc(func(stop <-chan struct{}) error {\n\t\t\tif s.DNSListener != nil {\n\t\t\t\tdnsSvc := dns.InitDNS()\n\t\t\t\tdnsSvc.StartDNS(dns.DNSAddr.Get(), s.DNSListener)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n}",
"func (s *Server) initDNSTLSListener(dns string, tlsOptions TLSOptions) error {\n\tif dns == \"\" {\n\t\treturn nil\n\t}\n\t// Mainly for tests.\n\tif !hasCustomTLSCerts(tlsOptions) && s.ca == nil {\n\t\treturn nil\n\t}\n\n\troot, err := s.getRootCertificate(tlsOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: check if client certs can be used with coredns or others.\n\t// If yes - we may require or optionally use them\n\tcfg := &tls.Config{\n\t\tGetCertificate: s.getIstiodCertificate,\n\t\tClientAuth: tls.NoClientCert,\n\t\tClientCAs: root,\n\t}\n\n\t// create secure grpc listener\n\tl, err := net.Listen(\"tcp\", dns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttl := tls.NewListener(l, cfg)\n\ts.DNSListener = tl\n\n\treturn nil\n}",
"func runDNSServer() {\n\n\t// load the blocked domains\n\tblacklist := LoadBlacklistOrFail(blacklistPath)\n\tfmt.Printf(\"Loading list of %d blocked domains...\\n\", blacklist.Size())\n\n\t// make the custom handler function to reply to DNS queries\n\tupstream := getEnvOrDefault(\"UPSTREAM_DNS\", \"1.1.1.1:53\")\n\tlogging := getEnvOrDefault(\"DEBUG\", \"\") == \"true\"\n\thandler := makeDNSHandler(blacklist, upstream, logging)\n\n\t// start the server\n\tport := getEnvOrDefault(\"DNS_PORT\", \"53\")\n\tfmt.Printf(\"Starting DNS server on UDP port %s (logging = %t)...\\n\", port, logging)\n\tserver := &dns.Server{Addr: \":\" + port, Net: \"udp\"}\n\tdns.HandleFunc(\".\", handler)\n\terr := server.ListenAndServe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func StartDNSHTTPSServer(newListener ListOptions, aesKey []byte) (bool, *dns.Server) {\n\tlogging.Logger.Println(\"Starting DNS Server\")\n\tm := newListener.Advanced.(map[string]interface{})\n\tif !validation.ValidateMap(m, []string{\"firsttime\", \"checkin\", \"successResponse\", \"failureResponse\", \"jobExists\"}) {\n\t\treturn false, nil\n\t}\n\t//Set the values\n\tfirsttime = m[\"firsttime\"].(string)\n\tcheckin = m[\"checkin\"].(string)\n\tsuccessDNSResponse = m[\"successResponse\"].(string)\n\tfailureDNSResponse = m[\"failureResponse\"].(string)\n\tjobExistDNSResponse = m[\"jobExists\"].(string)\n\n\t//first i need to start the server up we will only allow one\n\t//Doing it all here so ther variables are able to be access by the handle function\n\tdns.HandleFunc(\".\", func(w dns.ResponseWriter, r *dns.Msg) {\n\t\tlogging.Logger.Println(\"Function Called\")\n\t\tmsg := dns.Msg{}\n\t\tmsg.SetReply(r)\n\n\t\t// Setup the response we will send. By default we assume everything\n\t\t// will be successful and flip to failure as needed.\n\t\tmsg.Authoritative = true\n\t\tdomain := msg.Question[0].Name\n\t\tlogging.Logger.Println(\"Domain is:\", domain)\n\t\taRecordResponse := successDNSResponse\n\t\ttxtRecordResponse := noCmdTxtResponse\n\n\t\t// Now, depending on the question we got, parse, split and do what is needed.\n\t\tlogging.Logger.Println(\"The Question is: \", r.Question[0])\n\t\tswitch r.Question[0].Qtype {\n\t\tcase dns.TypeA:\n\t\t\tident, streamType, seq, transferProtocol, byteData, err := parseARRLabels(r)\n\t\t\tif err != nil {\n\t\t\t\taRecordResponse = err.Error()\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t//now that the data is coming in i need to combine it?\n\t\t\t// A few things can happen here. Many of the code paths rely on\n\t\t\t// knowing whether we have an existing stream for this ident. 
So\n\t\t\t// get the status of that and save the DNSSteam if we have it.\n\t\t\tbufferRecord, ok := streamSpool[ident]\n\n\t\t\t// Handle new streams by taking note and starting them\n\t\t\tif (streamType == streamStart) && !ok {\n\n\t\t\t\tDNSBuf := &DNSBuffer{\n\t\t\t\t\tIdentifier: ident,\n\t\t\t\t\tSeq: seq,\n\t\t\t\t\tStarted: true,\n\t\t\t\t\tFinished: false,\n\t\t\t\t\tProtocol: transferProtocol,\n\t\t\t\t}\n\n\t\t\t\t// Add this new stream identifier\n\t\t\t\tstreamSpool[ident] = *DNSBuf\n\t\t\t\tlogging.Logger.Println(\"New incoming DNS stream started\")\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Error cases for a new stream request\n\t\t\tif (streamType == streamStart) && ok {\n\t\t\t\tlogging.Logger.Println(\"Tried to start a new stream for an already recorded identifier. Bailing\")\n\t\t\t\taRecordResponse = failureDNSResponse\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Handle appending data to streams\n\t\t\tif (streamType == streamData) && ok && !bufferRecord.Finished {\n\n\t\t\t\tbufferRecord.Data = append(bufferRecord.Data, byteData...)\n\t\t\t\tbufferRecord.Seq = seq\n\n\t\t\t\t// update the buffer for this client\n\t\t\t\tstreamSpool[ident] = bufferRecord\n\n\t\t\t\t//logging.Logger.Println(\"Wrote new data chunk\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Handle errors for data appends\n\t\t\tif (streamType == streamData) && !ok {\n\t\t\t\tlogging.Logger.Println(\"Tried to append to a steam that is not registered. Bailing\")\n\t\t\t\taRecordResponse = failureDNSResponse\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif (streamType == streamData) && ok && bufferRecord.Finished {\n\t\t\t\tlogging.Logger.Println(\"Tried to append to a steam that is already finished. 
Bailing\")\n\t\t\t\taRecordResponse = failureDNSResponse\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Handle closing Streams\n\t\t\tif (streamType == streamEnd) && ok && !bufferRecord.Finished {\n\t\t\t\tbufferRecord.Finished = true\n\t\t\t\tbufferRecord.Started = false\n\t\t\t\tbufferRecord.Seq = seq\n\n\t\t\t\t// update the buffer for this client\n\t\t\t\tstreamSpool[ident] = bufferRecord\n\n\t\t\t\tdecMsg := utils.HandleIncomingData(bufferRecord.Data, aesKey)\n\n\t\t\t\tresponse := handleData(bufferRecord.Protocol, string(decMsg[36:]), string(decMsg[0:36]), newListener.Key)\n\n\t\t\t\taRecordResponse = response\n\t\t\t}\n\n\t\t\t// Handle closing errors\n\t\t\tif (streamType == streamEnd) && !ok {\n\t\t\t\tlogging.Logger.Println(\"Tried to append to a steam that is not known. Bailing\")\n\t\t\t\taRecordResponse = failureDNSResponse\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tbreak\n\n\t\tcase dns.TypeTXT:\n\t\t\tident, err := parseTxtRRLabels(r)\n\t\t\tif err != nil {\n\t\t\t\tlogging.Logger.Println(\"Failed to parse identifer: \", err)\n\t\t\t\ttxtRecordResponse = errorTxtResponse\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tswitch ident {\n\t\t\tcase firsttime:\n\t\t\t\tnName := register(\"\", newListener.Key, true, \"\", \"\")\n\t\t\t\tencMsg := crypto.Encrypt([]byte(nName), aesKey)\n\t\t\t\ttxtRecordResponse = []string{hex.EncodeToString(encMsg)}\n\t\t\tdefault:\n\t\t\t\tif len(ident) == 36 {\n\t\t\t\t\tj := utils.PrepData(\"\", agents.GetJobs(ident), aesKey)\n\t\t\t\t\tvar newResponse []string\n\t\t\t\t\thexJ := hex.EncodeToString(j)\n\n\t\t\t\t\tq, r := len(hexJ)/255, len(hexJ)%255\n\t\t\t\t\tif r != 0 {\n\t\t\t\t\t\tq++\n\t\t\t\t\t}\n\t\t\t\t\tif q > 1 {\n\t\t\t\t\t\tfor i := 0; i < q; i++ {\n\t\t\t\t\t\t\tx := i * 255\n\t\t\t\t\t\t\tif len(hexJ) < x+255 {\n\t\t\t\t\t\t\t\ty := len(hexJ) - x\n\t\t\t\t\t\t\t\tnewResponse = append(newResponse, hexJ[x:(x+y)])\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tnewResponse = append(newResponse, 
hexJ[x:(x+255)])\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnewResponse = []string{hexJ}\n\t\t\t\t\t}\n\n\t\t\t\t\ttxtRecordResponse = newResponse\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tswitch r.Question[0].Qtype {\n\t\tcase dns.TypeA:\n\t\t\tmsg.Answer = append(msg.Answer, &dns.A{\n\t\t\t\tHdr: dns.RR_Header{Name: domain, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},\n\t\t\t\tA: net.ParseIP(aRecordResponse),\n\t\t\t})\n\t\t\tbreak\n\t\tcase dns.TypeTXT:\n\t\t\tmsg.Answer = append(msg.Answer, &dns.TXT{\n\t\t\t\tHdr: dns.RR_Header{Name: domain, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 1},\n\t\t\t\tTxt: txtRecordResponse,\n\t\t\t})\n\t\t}\n\t\tw.WriteMsg(&msg)\n\t})\n\n\tdnsServer := &dns.Server{\n\t\tAddr: \":53\",\n\t\tNet: \"udp\",\n\t}\n\n\tgo func() bool {\n\t\tlogging.Logger.Println(\"Starting the DNS Server...\")\n\t\tif err := dnsServer.ListenAndServe(); err != nil {\n\t\t\tlogging.Logger.Println(\"Failed to set udp listener\\n\", err.Error())\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}()\n\n\treturn true, dnsServer\n}",
"func NewDNSServerDefault() (srv *dns.Server) {\n\tsrv = &dns.Server{Addr: \":\" + strconv.Itoa(53), Net: \"udp\"}\n\tconfig, _ := dns.ClientConfigFromFile(resolvFile)\n\n\tsrv.Handler = &server{config}\n\n\tlog.Info().Msgf(\"Successful load local \" + resolvFile)\n\tfor _, server := range config.Servers {\n\t\tlog.Info().Msgf(\"Success load nameserver %s\\n\", server)\n\t}\n\tfor _, domain := range config.Search {\n\t\tlog.Info().Msgf(\"Success load search %s\\n\", domain)\n\t}\n\treturn\n}",
"func (s *DNSServer) Start() error {\n\tInfo.Printf(\"Using mDNS on %s\", s.IfaceName)\n\terr := s.mdnsCli.Start(s.iface)\n\tCheckFatal(err)\n\terr = s.mdnsSrv.Start(s.iface, s.Domain)\n\tCheckFatal(err)\n\n\twg := new(sync.WaitGroup)\n\twg.Add(2)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tDebug.Printf(\"Listening for DNS on %s (UDP)\", s.ListenAddr)\n\t\terr = s.udpSrv.ListenAndServe()\n\t\tCheckFatal(err)\n\t\tDebug.Printf(\"DNS UDP server exiting...\")\n\t}()\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tDebug.Printf(\"Listening for DNS on %s (TCP)\", s.ListenAddr)\n\t\terr = s.tcpSrv.ListenAndServe()\n\t\tCheckFatal(err)\n\t\tDebug.Printf(\"DNS TCP server exiting...\")\n\t}()\n\n\t// Waiting for all goroutines to finish (otherwise they die as main routine dies)\n\twg.Wait()\n\n\tInfo.Printf(\"WeaveDNS server exiting...\")\n\treturn nil\n}",
"func startProxyServer(s *ProxyServer) error {\n\t// SDNMISSING: upstream handles the --oom-score-adj flag here\n\n\tif s.Broadcaster != nil {\n\t\tstopCh := make(chan struct{})\n\t\ts.Broadcaster.StartRecordingToSink(stopCh)\n\t}\n\n\tvar errCh chan error\n\t// SDNMISSING: upstream handles the --bind-address-hard-fail flag here\n\n\t// Start up a healthz server if requested\n\tserveHealthz(s.HealthzServer, errCh)\n\n\t// Start up a metrics server if requested\n\tserveMetrics(s.MetricsBindAddress, s.ProxyMode, s.EnableProfiling, errCh)\n\n\t// SDNMISSING: upstream handles the --conntrack-max-per-core, --conntrack-min,\n\t// --conntrack-tcp-timeout-close-wait, and --conntrack-tcp-timeout-close-wait\n\t// flags here.\n\n\tnoProxyName, err := labels.NewRequirement(apis.LabelServiceProxyName, selection.DoesNotExist, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnoHeadlessEndpoints, err := labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlabelSelector := labels.NewSelector()\n\tlabelSelector = labelSelector.Add(*noProxyName, *noHeadlessEndpoints)\n\n\t// Make informers that filter out objects that want a non-default service proxy.\n\tinformerFactory := informers.NewSharedInformerFactoryWithOptions(s.Client, s.ConfigSyncPeriod,\n\t\tinformers.WithTweakListOptions(func(options *metav1.ListOptions) {\n\t\t\toptions.LabelSelector = labelSelector.String()\n\t\t}))\n\n\t// Create configs (i.e. 
Watches for Services and Endpoints or EndpointSlices)\n\t// Note: RegisterHandler() calls need to happen before creation of Sources because sources\n\t// only notify on changes, and the initial update (on process start) may be lost if no handlers\n\t// are registered yet.\n\tserviceConfig := config.NewServiceConfig(informerFactory.Core().V1().Services(), s.ConfigSyncPeriod)\n\tserviceConfig.RegisterEventHandler(s.Proxier)\n\tgo serviceConfig.Run(wait.NeverStop)\n\n\tif s.UseEndpointSlices {\n\t\tendpointSliceConfig := config.NewEndpointSliceConfig(informerFactory.Discovery().V1().EndpointSlices(), s.ConfigSyncPeriod)\n\t\tendpointSliceConfig.RegisterEventHandler(s.Proxier)\n\t\tgo endpointSliceConfig.Run(wait.NeverStop)\n\t} else {\n\t\tendpointsConfig := config.NewEndpointsConfig(informerFactory.Core().V1().Endpoints(), s.ConfigSyncPeriod)\n\t\tendpointsConfig.RegisterEventHandler(s.Proxier)\n\t\tgo endpointsConfig.Run(wait.NeverStop)\n\t}\n\n\t// This has to start after the calls to NewServiceConfig and NewEndpointsConfig because those\n\t// functions must configure their shared informer event handlers first.\n\tinformerFactory.Start(wait.NeverStop)\n\n\t// SDNMISSING: upstream handles features.TopologyAwareHints here\n\n\tgo s.Proxier.SyncLoop()\n\n\treturn nil\n}",
"func InitHttpSrv(addr string) {\n go func() {\n err := http.ListenAndServe(addr, nil)\n if err != nil {\n log.Error(err.Error())\n }\n }()\n}",
"func NewServer(\n\thttpHost string,\n\thttpPort int,\n\thttpReadTimeout int,\n\thttpWriteTimeout int,\n\thttpIdleTimeout int,\n\tstatsdHost string,\n\tstatsdPort int,\n\ttlsCert string,\n\ttlsKey string,\n\tmetricPrefix string,\n\ttokenSecret string,\n\tverbose bool,\n\thttpRouterName string,\n\tstatsdClientName string,\n) *Server {\n\t// configure logging\n\tvar logOutput io.Writer\n\tif verbose == true {\n\t\tlogOutput = os.Stderr\n\t} else {\n\t\tlogOutput = ioutil.Discard\n\t}\n\n\tlog.SetOutput(logOutput)\n\n\tlogger := log.New(logOutput, \"\", log.LstdFlags)\n\n\t// create StatsD Client\n\tvar statsdClient statsdclient.StatsdClientInterface\n\tswitch statsdClientName {\n\tcase \"Cactus\":\n\t\tstatsdClient = statsdclient.NewCactusClient(statsdHost, statsdPort)\n\tcase \"GoMetric\":\n\t\tstatsdClient = statsdclient.NewGoMetricClient(statsdHost, statsdPort)\n\tdefault:\n\t\tpanic(\"Passed statsd client not supported\")\n\t}\n\n\t// build route handler\n\trouteHandler := routehandler.NewRouteHandler(\n\t\tstatsdClient,\n\t\tmetricPrefix,\n\t)\n\n\t// build router\n\tvar httpServerHandler http.Handler\n\tswitch httpRouterName {\n\tcase \"HttpRouter\":\n\t\thttpServerHandler = router.NewHTTPRouter(routeHandler, tokenSecret)\n\tcase \"GorillaMux\":\n\t\thttpServerHandler = router.NewGorillaMuxRouter(routeHandler, tokenSecret)\n\n\tdefault:\n\t\tpanic(\"Passed HTTP router not supported\")\n\t}\n\n\t// get HTTP server address to bind\n\thttpAddress := fmt.Sprintf(\"%s:%d\", httpHost, httpPort)\n\n\t// create http server\n\thttpServer := &http.Server{\n\t\tAddr: httpAddress,\n\t\tHandler: httpServerHandler,\n\t\tErrorLog: logger,\n\t\tReadTimeout: time.Duration(httpReadTimeout) * time.Second,\n\t\tWriteTimeout: time.Duration(httpWriteTimeout) * time.Second,\n\t\tIdleTimeout: time.Duration(httpIdleTimeout) * time.Second,\n\t\tMaxHeaderBytes: 1 << 11,\n\t}\n\n\tstatsdHTTPProxyServer := 
Server{\n\t\thttpAddress,\n\t\thttpServer,\n\t\tstatsdClient,\n\t\ttlsCert,\n\t\ttlsKey,\n\t}\n\n\treturn &statsdHTTPProxyServer\n}",
"func InitServer() *Server {\n\ts := &Server{\n\t\t&goserver.GoServer{},\n\t\t\"logs\",\n\t\tmake(map[string]*logHolder),\n\t\t&sync.Mutex{},\n\t\ttime.Now(),\n\t\t&sync.Mutex{},\n\t\t0,\n\t\t0,\n\t\tmake(map[string]int),\n\t\t&sync.Mutex{},\n\t\t&pb.Config{},\n\t}\n\ts.Register = s\n\treturn s\n}",
"func InitServer(addr string) *http.Server {\n\trouter := mux.NewRouter()\n\trouter.HandleFunc(\"/pokemon/{name}\", endpoints.PokemonHandler)\n\trouter.HandleFunc(\"/health\", endpoints.HealthHandler)\n\n\trouter.Use(middleware.LoggingMiddleware)\n\n\tsrv := &http.Server{\n\t\tHandler: router,\n\t\tAddr: \"0.0.0.0:8080\",\n\t\tWriteTimeout: 15 * time.Second,\n\t\tReadTimeout: 15 * time.Second,\n\t}\n\n\treturn srv\n}",
"func StartDNSDaemon() (err error) {\n\tsrv := &dns.Server{Addr: \":\" + strconv.Itoa(53), Net: \"udp\"}\n\tsrv.Handler = &dnsHandler{}\n\n\tconfig, _ := dns.ClientConfigFromFile(\"/etc/resolv.conf\")\n\n\tlog.Info().Msgf(\"Successful load local /etc/resolv.conf\")\n\tfor _, server := range config.Servers {\n\t\tlog.Info().Msgf(\"Success load nameserver %s\\n\", server)\n\t}\n\n\tfmt.Printf(\"DNS Server Start At 53...\\n\")\n\terr = srv.ListenAndServe()\n\tif err != nil {\n\t\tlog.Error().Msgf(\"Failed to set udp listener %s\\n\", err.Error())\n\t}\n\treturn\n}",
"func (s *StanServer) startNATSServer() error {\n\tif err := s.configureClusterOpts(); err != nil {\n\t\treturn err\n\t}\n\tif err := s.configureNATSServerTLS(); err != nil {\n\t\treturn err\n\t}\n\topts := s.natsOpts\n\ts.natsServer = server.New(opts)\n\tif s.natsServer == nil {\n\t\treturn fmt.Errorf(\"no NATS Server object returned\")\n\t}\n\ts.log.SetNATSServer(s.natsServer)\n\t// Run server in Go routine.\n\tgo s.natsServer.Start()\n\t// Wait for accept loop(s) to be started\n\tif !s.natsServer.ReadyForConnections(10 * time.Second) {\n\t\treturn fmt.Errorf(\"unable to start a NATS Server on %s:%d\", opts.Host, opts.Port)\n\t}\n\treturn nil\n}",
"func newDNSTestServer(server *dns.Server) *dnsTestServer {\n\treturn &dnsTestServer{Server: server, DNSDatabase: make(dnsDatabase), DNSDatabaseRetry: make(dnsDatabase)}\n}",
"func InitServer() *Server {\n\tvar server = new(Server)\n\tserver.subjects = make(map[string]map[string]float64)\n\tserver.students = make(map[string]map[string]float64)\n\treturn server\n}",
"func runTestDNSServer(t *testing.T, port string) *dnsTestServer {\n\tlistener, err := net.ListenPacket(\"udp\", \"127.0.0.1:\"+port)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmux := dns.NewServeMux()\n\tserver := &dns.Server{PacketConn: listener, Net: \"udp\", Handler: mux}\n\n\tgo func() {\n\t\tif err := server.ActivateAndServe(); err != nil {\n\t\t\tlog.Printf(\"Error in local DNS server: %s\", err)\n\t\t}\n\t}()\n\n\treturn newDNSTestServer(server)\n}",
"func NewServer(baseDN, configFile string, port int) *Server {\n\tserver := &Server{baseDN: baseDN, configFile: configFile, port: port, Logger: log.Output(zerolog.ConsoleWriter{Out: os.Stderr}).Level(zerolog.InfoLevel)}\n\ts := ldap.NewServer()\n\tserver.s = s\n\n\t// register Bind and Search function handlers\n\ts.BindFunc(baseDN, server)\n\ts.SearchFunc(baseDN, server)\n\n\treturn server\n}",
"func InitServer(ip string) Server {\n\tserver := Server{IP: ip, Port: 8090}\n\treturn server\n}",
"func (s *UDPServer) Init(settings *ServerSettings) error {\n\turl := settings.URL\n\tport := settings.Port\n\tif port == \"\" {\n\t\treturn errors.New(\"udp_server_init: port required\")\n\t}\n\n\taddress := url + \":\" + port\n\n\tudpAddr, err := net.ResolveUDPAddr(\"udp4\", address)\n\tif err != nil {\n\t\treturn errors.New(\"udp_server_init: could not resolve address. err: \" + err.Error())\n\t}\n\n\ts.Conn, err = net.ListenUDP(\"udp\", udpAddr)\n\tif err != nil {\n\t\treturn errors.New(\"udp_server_init: could not start connection. err: \" + err.Error())\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
checkIfBackupInNewOrProgress checks whether there are backups created by this schedule still in New or InProgress state
|
// checkIfBackupInNewOrProgress reports whether any backup created by the given
// schedule is still in the New or InProgress phase. Backups are matched by the
// schedule-name label. If listing fails, it conservatively returns true so the
// caller does not act as if all backups were settled.
func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Schedule) bool {
	log := c.logger.WithField("schedule", kube.NamespaceAndName(schedule))

	// Select only backups labeled with this schedule's name.
	listOpts := &client.ListOptions{
		Namespace: schedule.Namespace,
		LabelSelector: labels.Set(map[string]string{
			velerov1.ScheduleNameLabel: schedule.Name,
		}).AsSelector(),
	}

	backups := &velerov1.BackupList{}
	if err := c.List(context.Background(), backups, listOpts); err != nil {
		log.Errorf("fail to list backup for schedule %s/%s: %s", schedule.Namespace, schedule.Name, err.Error())
		return true
	}

	for i := range backups.Items {
		phase := backups.Items[i].Status.Phase
		if phase == velerov1.BackupPhaseNew || phase == velerov1.BackupPhaseInProgress {
			log.Debugf("%s/%s still has backups that are in InProgress or New...", schedule.Namespace, schedule.Name)
			return true
		}
	}
	return false
}
|
[
"func (t *Task) hasBackupCompleted(backup *velero.Backup) (bool, []string) {\n\tcompleted := false\n\treasons := []string{}\n\tprogress := []string{}\n\n\tpvbs := t.getPodVolumeBackupsForBackup(backup)\n\n\tswitch backup.Status.Phase {\n\tcase velero.BackupPhaseNew:\n\t\tprogress = append(\n\t\t\tprogress,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Backup %s/%s: Not started\",\n\t\t\t\tbackup.Namespace,\n\t\t\t\tbackup.Name))\n\tcase velero.BackupPhaseInProgress:\n\t\titemsBackedUp, totalItems := getBackupStats(backup)\n\t\tprogress = append(\n\t\t\tprogress,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Backup %s/%s: %d out of estimated total of %d objects backed up%s\",\n\t\t\t\tbackup.Namespace,\n\t\t\t\tbackup.Name,\n\t\t\t\titemsBackedUp,\n\t\t\t\ttotalItems,\n\t\t\t\tgetBackupDuration(backup)))\n\t\tprogress = append(\n\t\t\tprogress,\n\t\t\tgetPodVolumeBackupsProgress(pvbs)...)\n\tcase velero.BackupPhaseCompleted:\n\t\tcompleted = true\n\t\titemsBackedUp, totalItems := getBackupStats(backup)\n\t\tprogress = append(\n\t\t\tprogress,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Backup %s/%s: %d out of estimated total of %d objects backed up%s\",\n\t\t\t\tbackup.Namespace,\n\t\t\t\tbackup.Name,\n\t\t\t\titemsBackedUp,\n\t\t\t\ttotalItems,\n\t\t\t\tgetBackupDuration(backup)))\n\t\tprogress = append(\n\t\t\tprogress,\n\t\t\tgetPodVolumeBackupsProgress(pvbs)...)\n\tcase velero.BackupPhaseFailed:\n\t\tcompleted = true\n\t\tmessage := fmt.Sprintf(\n\t\t\t\"Backup %s/%s: failed.\",\n\t\t\tbackup.Namespace,\n\t\t\tbackup.Name)\n\t\treasons = append(reasons, message)\n\t\titemsBackedUp, totalItems := getBackupStats(backup)\n\t\tmessage = fmt.Sprintf(\n\t\t\t\"%s %d out of estimated total of %d objects backed up%s\",\n\t\t\tmessage,\n\t\t\titemsBackedUp,\n\t\t\ttotalItems,\n\t\t\tgetBackupDuration(backup))\n\t\tprogress = append(progress, message)\n\t\tprogress = append(\n\t\t\tprogress,\n\t\t\tgetPodVolumeBackupsProgress(pvbs)...)\n\tcase velero.BackupPhasePartiallyFailed:\n\t\tcompleted = 
true\n\t\titemsBackedUp, totalItems := getBackupStats(backup)\n\t\tmessage := fmt.Sprintf(\n\t\t\t\"Backup %s/%s: partially failed. %d out of estimated total of %d objects backed up%s\",\n\t\t\tbackup.Namespace,\n\t\t\tbackup.Name,\n\t\t\titemsBackedUp,\n\t\t\ttotalItems,\n\t\t\tgetBackupDuration(backup))\n\t\tprogress = append(progress, message)\n\t\tprogress = append(\n\t\t\tprogress,\n\t\t\tgetPodVolumeBackupsProgress(pvbs)...)\n\tcase velero.BackupPhaseFailedValidation:\n\t\treasons = backup.Status.ValidationErrors\n\t\treasons = append(\n\t\t\treasons,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Backup %s/%s: validation failed.\",\n\t\t\t\tbackup.Namespace,\n\t\t\t\tbackup.Name))\n\t\tcompleted = true\n\t}\n\tt.Log.Info(\"Velero Backup progress report\",\n\t\t\"backup\", path.Join(backup.Namespace, backup.Name),\n\t\t\"backupProgress\", progress)\n\n\tt.setProgress(progress)\n\treturn completed, reasons\n}",
"func (o *RequestsCreateProjectDeploymentRequest) HasScheduledBackup() bool {\n\tif o != nil && o.ScheduledBackup != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func backupScheduleFound(repo v1beta1.PGBackRestRepo, backupType string) bool {\n\tif repo.BackupSchedules != nil {\n\t\tswitch backupType {\n\t\tcase full:\n\t\t\treturn repo.BackupSchedules.Full != nil\n\t\tcase differential:\n\t\t\treturn repo.BackupSchedules.Differential != nil\n\t\tcase incremental:\n\t\t\treturn repo.BackupSchedules.Incremental != nil\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}",
"func isRequireRecentBackup(config *configv1.ClusterVersion, clusterOperatorStatus *configv1.ClusterOperatorStatus) bool {\n\tfor _, condition := range config.Status.Conditions {\n\t\t// Check if ReleaseAccepted is false and Message field containers the string RecentBackup.\n\t\tif condition.Type == \"ReleaseAccepted\" && condition.Status == configv1.ConditionFalse {\n\t\t\tif strings.Contains(condition.Message, backupConditionType) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\t// consecutive upgrades case\n\tif backupRequired, err := isNewBackupRequired(config, clusterOperatorStatus); err == nil && backupRequired {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (c *Context) IsBackup() bool {\n\treturn c.MyIndex >= 0 && !c.IsPrimary()\n}",
"func (t *Task) isBackupReplicated(backup *velero.Backup) (bool, error) {\n\tclient, err := t.getDestinationClient()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treplicated := velero.Backup{}\n\tt.Log.Info(\"Checking if Velero Backup has been replicated to destination cluster\",\n\t\t\"backup\", path.Join(backup.Namespace, backup.Name))\n\terr = client.Get(\n\t\tcontext.TODO(),\n\t\ttypes.NamespacedName{\n\t\t\tNamespace: backup.Namespace,\n\t\t\tName: backup.Name,\n\t\t},\n\t\t&replicated)\n\tif err == nil {\n\t\tt.Log.Info(\"FOUND Velero Backup has been replicated to destination cluster\",\n\t\t\t\"backup\", path.Join(replicated.Namespace, replicated.Name))\n\t\treturn true, nil\n\t}\n\tif k8serrors.IsNotFound(err) {\n\t\terr = nil\n\t}\n\treturn false, err\n}",
"func IsInProgressStatus(bkp *apis.CStorBackup) bool {\n\tif string(bkp.Status) == string(apis.BKPCStorStatusInProgress) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (backupWrapper *v1BackupWrapper) isBackupCompleted() bool {\n\tif backupWrapper.backup.IsFailed() ||\n\t\tbackupWrapper.backup.IsSucceeded() {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (c *CleanupStatusTracker) InProgress(bdName string) bool {\n\treturn c.JobController.IsCleaningJobRunning(bdName)\n}",
"func (r *ReconcileBackup) isAllCreated(bkp *v1alpha1.Backup) error {\n\n\t// Check if was possible found the DB Pod\n\tif !r.isDbPodFound() {\n\t\terr := fmt.Errorf(\"Error: Database Pod is missing\")\n\t\treturn err\n\t}\n\n\t// Check if was possible found the DB Service\n\tif !r.isDbServiceFound() {\n\t\terr := fmt.Errorf(\"Error: Database Service is missing\")\n\t\treturn err\n\t}\n\n\t// Check if DB secret was created\n\tdbSecretName := utils.DbSecretPrefix + bkp.Name\n\t_, err := service.FetchSecret(bkp.Namespace, dbSecretName, r.client)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error: DB Secret is missing. (%v)\", dbSecretName)\n\t\treturn err\n\t}\n\n\t// Check if AWS secret was created\n\tawsSecretName := utils.GetAWSSecretName(bkp)\n\tawsSecretNamespace := utils.GetAwsSecretNamespace(bkp)\n\t_, err = service.FetchSecret(awsSecretNamespace, awsSecretName, r.client)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Error: AWS Secret is missing. (name:%v,namespace:%v)\", awsSecretName, awsSecretNamespace)\n\t\treturn err\n\t}\n\n\t// Check if Enc secret was created (if was configured to be used)\n\tif utils.IsEncryptionKeyOptionConfig(bkp) {\n\t\tencSecretName := utils.GetEncSecretName(bkp)\n\t\tencSecretNamespace := utils.GetEncSecretNamespace(bkp)\n\t\t_, err := service.FetchSecret(encSecretNamespace, encSecretName, r.client)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Error: Encript Key Secret is missing. (name:%v,namespace:%v)\", encSecretName, encSecretNamespace)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t//check if the cronJob was created\n\t_, err = service.FetchCronJob(bkp.Name, bkp.Namespace, r.client)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Error: CronJob is missing\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func DoesVeleroBackupScheduleExist(apiClient apiextensionsclientset.Interface, namespace string) (bool, error) {\n\n\tif doesVeleroSchedulesResourceExist(apiClient) {\n\t\t// kubectl get schedules.velero.io -n velero -o json\n\t\targs := []string{\"get\", veleroSchedulesResource, \"-n\", namespace, \"-o\", \"json\"}\n\t\tcmd := util.Command{\n\t\t\tName: \"kubectl\",\n\t\t\tArgs: args,\n\t\t}\n\n\t\toutput, err := cmd.RunWithoutRetry()\n\t\tif err != nil {\n\t\t\treturn false, errors.Wrap(err, fmt.Sprintf(\"executing kubectl get %s command\", veleroSchedulesResource))\n\t\t}\n\n\t\tvar veleroShedules veleroScheduleList\n\t\terr = json.Unmarshal([]byte(output), &veleroShedules)\n\t\tif err != nil {\n\t\t\treturn false, errors.Wrap(err, \"unmarshalling kubectl response\")\n\t\t}\n\n\t\tif len(veleroShedules.Items) > 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn false, nil\n}",
"func IsBackup(options []*commonpb.KeyValuePair) bool {\n\tisBackup, err := funcutil.GetAttrByKeyFromRepeatedKV(BackupFlag, options)\n\tif err != nil || strings.ToLower(isBackup) != \"true\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (proc *syncProcess) IsSyncInProgress() {}",
"func (w *CrawlerWorker) HasPendingJobs() bool { return len(w.pending) > 0 }",
"func isNewWorker(worker *workerv1.Worker, currentDeployment *appsv1.Deployment) bool {\n\treturn currentDeployment == nil && worker.DeletionTimestamp == nil\n}",
"func (s StackStatus) InProgress() bool {\n\treturn strings.HasSuffix(string(s), \"IN_PROGRESS\")\n}",
"func (o *RequestsCreateProjectDeploymentRequest) GetScheduledBackupOk() (*RequestsDeploymentScheduledBackup, bool) {\n\tif o == nil || o.ScheduledBackup == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ScheduledBackup, true\n}",
"func (a *actionRebuildOutSyncedShards) CheckProgress(ctx context.Context) (bool, bool, error) {\n\tif !features.RebuildOutSyncedShards().Enabled() {\n\t\t// RebuildOutSyncedShards feature is not enabled\n\t\treturn true, false, nil\n\t}\n\n\tclientSync, err := a.actionCtx.GetMembersState().GetMemberClient(a.action.MemberID)\n\tif err != nil {\n\t\treturn false, false, errors.Wrapf(err, \"Unable to create client (SyncMode)\")\n\t}\n\n\tclientAsync, err := a.actionCtx.GetServerAsyncClient(a.action.MemberID)\n\tif err != nil {\n\t\treturn false, false, errors.Wrapf(err, \"Unable to create client (AsyncMode)\")\n\t}\n\n\tjobID, ok := a.actionCtx.Get(a.action, actionRebuildOutSyncedShardsLocalJobID)\n\tif !ok {\n\t\treturn false, true, errors.Newf(\"Local Key is missing in action: %s\", actionRebuildOutSyncedShardsLocalJobID)\n\t}\n\n\tbatchID, ok := a.actionCtx.Get(a.action, actionRebuildOutSyncedShardsBatchID)\n\tif !ok {\n\t\treturn false, true, errors.Newf(\"Local Key is missing in action: %s\", actionRebuildOutSyncedShardsBatchID)\n\t}\n\n\tdatabase, ok := a.actionCtx.Get(a.action, actionRebuildOutSyncedShardsLocalDatabase)\n\tif !ok {\n\t\treturn false, true, errors.Newf(\"Local Key is missing in action: %s\", actionRebuildOutSyncedShardsLocalDatabase)\n\t}\n\n\tshardID, ok := a.actionCtx.Get(a.action, actionRebuildOutSyncedShardsLocalShard)\n\tif !ok {\n\t\treturn false, true, errors.Newf(\"Local Key is missing in action: %s\", actionRebuildOutSyncedShardsLocalShard)\n\t}\n\n\t// check first if there is rebuild job running\n\trebuildInProgress, err := a.checkRebuildShardProgress(ctx, clientAsync, clientSync, shardID, database, jobID, batchID)\n\tif err != nil {\n\t\tif rebuildInProgress {\n\t\t\ta.log.Err(err).Error(\"Rebuild job failed but we will retry\", shardID, database, a.action.MemberID)\n\t\t\treturn false, false, err\n\t\t} else {\n\t\t\ta.log.Err(err).Error(\"Rebuild job failed\", shardID, database, a.action.MemberID)\n\t\t\treturn false, true, 
err\n\t\t}\n\n\t}\n\tif rebuildInProgress {\n\t\ta.log.Debug(\"Rebuild job is still in progress\", shardID, database, a.action.MemberID)\n\t\treturn false, false, nil\n\t}\n\n\t// rebuild job is done\n\ta.log.Info(\"Rebuild Shard Tree is done\", shardID, database, a.action.MemberID)\n\treturn true, false, nil\n}",
"func (d *Deploy) IsUpdateInProgress() bool {\n\treturn d.object.Status.AvailableReplicas < d.object.Status.UpdatedReplicas\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ifDue check whether schedule is due to create a new backup.
|
func (c *scheduleReconciler) ifDue(schedule *velerov1.Schedule, cronSchedule cron.Schedule) bool {
isDue, nextRunTime := getNextRunTime(schedule, cronSchedule, c.clock.Now())
log := c.logger.WithField("schedule", kube.NamespaceAndName(schedule))
if !isDue {
log.WithField("nextRunTime", nextRunTime).Debug("Schedule is not due, skipping")
return false
}
return true
}
|
[
"func (t *task) IsDue() bool {\n\treturn t.NextDue() <= 0\n}",
"func isDue(i *invoice) bool {\n\tif i.Status == quicka.InvoiceAwaitingTerms && time.Now().After(i.DueDate) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func isDue(now time.Time, rule Rule) bool {\n\trules := [][]int{\n\t\trule.Second,\n\t\trule.Minute,\n\t\trule.Hour,\n\t\trule.DayOfMonth,\n\t\trule.Month,\n\t\trule.DayOfWeek,\n\t}\n\n\ttoCheck := []int{}\n\tfor k, v := range rules {\n\t\tif len(v) > 0 {\n\t\t\ttoCheck = append(toCheck, k)\n\t\t}\n\t}\n\n\tif len(toCheck) == 0 {\n\t\treturn true\n\t}\n\n\t_, m, d := now.Date()\n\th, i, s := now.Clock()\n\tw := now.Weekday()\n\n\tnows := []int{s, i, h, d, int(m), int(w)}\n\tfor _, k := range toCheck {\n\t\tif !assert.IsContains(rules[k], nows[k]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (c *scheduleReconciler) checkIfBackupInNewOrProgress(schedule *velerov1.Schedule) bool {\n\tlog := c.logger.WithField(\"schedule\", kube.NamespaceAndName(schedule))\n\tbackupList := &velerov1.BackupList{}\n\toptions := &client.ListOptions{\n\t\tNamespace: schedule.Namespace,\n\t\tLabelSelector: labels.Set(map[string]string{\n\t\t\tvelerov1.ScheduleNameLabel: schedule.Name,\n\t\t}).AsSelector(),\n\t}\n\n\terr := c.List(context.Background(), backupList, options)\n\tif err != nil {\n\t\tlog.Errorf(\"fail to list backup for schedule %s/%s: %s\", schedule.Namespace, schedule.Name, err.Error())\n\t\treturn true\n\t}\n\n\tfor _, backup := range backupList.Items {\n\t\tif backup.Status.Phase == velerov1.BackupPhaseNew || backup.Status.Phase == velerov1.BackupPhaseInProgress {\n\t\t\tlog.Debugf(\"%s/%s still has backups that are in InProgress or New...\", schedule.Namespace, schedule.Name)\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func backupScheduleFound(repo v1beta1.PGBackRestRepo, backupType string) bool {\n\tif repo.BackupSchedules != nil {\n\t\tswitch backupType {\n\t\tcase full:\n\t\t\treturn repo.BackupSchedules.Full != nil\n\t\tcase differential:\n\t\t\treturn repo.BackupSchedules.Differential != nil\n\t\tcase incremental:\n\t\t\treturn repo.BackupSchedules.Incremental != nil\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}",
"func (s *Schedule) NextDue() time.Duration {\n\n\tif s.standByMode {\n\t\treturn s.standByTask.NextDue()\n\t}\n\td := 1000000 * time.Hour\n\n\tfor _, t := range s.tasks {\n\t\tif due := t.NextDue(); due < d {\n\t\t\td = due\n\t\t}\n\t}\n\n\treturn d\n}",
"func AssignmentIsScheduled(allocatedResources *sproto.ResourcesAllocated) bool {\n\treturn allocatedResources != nil\n}",
"func DueDate(project_name, project_owner, task_name, dueDate string, db *sql.DB) bool {\n\tsqlStatement1 := `SELECT id FROM projects WHERE owner = $1 AND name = $2;`\n\n\tvar parentID string\n\terr = db.QueryRow(sqlStatement1, project_owner, project_name).Scan(&parentID)\n\n\tif err == sql.ErrNoRows {\n\t\treturn false\n\t} else if err != nil {\n\t\treturn false\n\t}\n\n\tsqlStatement := `UPDATE tasks\n \tSET due_date = $1\n \tWHERE project = $2 AND name = $3;`\n\n\t_, err = db.Exec(sqlStatement, dueDate, parentID, task_name)\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (o *RequestsCreateProjectDeploymentRequest) HasScheduledBackup() bool {\n\tif o != nil && o.ScheduledBackup != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (solver *MapSolver)HasScheduled(arc *data.Arc) bool{\n\tfor _, cap := range arc.Capacity {\n\t\tif cap > 0 { // if dstNode is not machineNode, we can assume that if capacity > 0, then this task still not scheduled\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (o *TransactionSplit) HasDueDate() bool {\n\tif o != nil && o.DueDate.IsSet() {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (ScheduledForLaterError) IsScheduledForLater() bool {\n\treturn true\n}",
"func CheckSchedule(schedule string) bool {\n\t_, err := cron.Parse(schedule)\n\treturn err == nil\n}",
"func (o *RequestsCreateProjectDeploymentRequest) GetScheduledBackupOk() (*RequestsDeploymentScheduledBackup, bool) {\n\tif o == nil || o.ScheduledBackup == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ScheduledBackup, true\n}",
"func (t *task) NextDue() time.Duration {\n\treturn t.interval - time.Since(t.timer)\n}",
"func (c *Circuit) allowNewRun(now time.Time) bool {\n\tif !c.IsOpen() {\n\t\treturn true\n\t}\n\tif c.OpenToClose.Allow(now) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (e *ExternalServiceStore) SyncDue(ctx context.Context, intIDs []int64, d time.Duration) (bool, error) {\n\tif len(intIDs) == 0 {\n\t\treturn false, nil\n\t}\n\tids := make([]*sqlf.Query, 0, len(intIDs))\n\tfor _, id := range intIDs {\n\t\tids = append(ids, sqlf.Sprintf(\"%s\", id))\n\t}\n\tidFilter := sqlf.Sprintf(\"IN (%s)\", sqlf.Join(ids, \",\"))\n\tdeadline := time.Now().Add(d)\n\n\tq := sqlf.Sprintf(`\nSELECT TRUE\nWHERE EXISTS(\n SELECT\n FROM external_services\n WHERE id %s\n AND (\n next_sync_at IS NULL\n OR next_sync_at <= %s)\n )\n OR EXISTS(\n SELECT\n FROM external_service_sync_jobs\n WHERE external_service_id %s\n AND state IN ('queued', 'processing')\n );\n`, idFilter, deadline, idFilter)\n\n\tv, exists, err := basestore.ScanFirstBool(e.Query(ctx, q))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn v && exists, nil\n}",
"func (c *scheduleReconciler) submitBackup(ctx context.Context, schedule *velerov1.Schedule) error {\n\tc.logger.WithField(\"schedule\", schedule.Namespace+\"/\"+schedule.Name).Info(\"Schedule is due, going to submit backup.\")\n\n\tnow := c.clock.Now()\n\t// Don't attempt to \"catch up\" if there are any missed or failed runs - simply\n\t// trigger a Backup if it's time.\n\tbackup := getBackup(schedule, now)\n\tif err := c.Create(ctx, backup); err != nil {\n\t\treturn errors.Wrap(err, \"error creating Backup\")\n\t}\n\n\toriginal := schedule.DeepCopy()\n\tschedule.Status.LastBackup = &metav1.Time{Time: now}\n\n\tif err := c.Patch(ctx, schedule, client.MergeFrom(original)); err != nil {\n\t\treturn errors.Wrapf(err, \"error updating Schedule's LastBackup time to %v\", schedule.Status.LastBackup)\n\t}\n\n\treturn nil\n}",
"func (repo *DBRepo) ScheduledCheck(hostServiceID int) {\n\tvar hs models.HostService\n\tvar err error\n\n\ths, err = repo.DB.GetHostServiceById(hostServiceID)\n\tif err != nil {\n\t\tif err != nil {\n\t\t\tlog.Error(fmt.Errorf(\"ERROR - Could not find host-service with provided id - %v\", err))\n\t\t\treturn\n\t\t}\n\t}\n\n\tmsg, newStatus, err := repo.testServiceForHost(hs)\n\tif err != nil {\n\t\tlog.Error(fmt.Errorf(\"ERROR - Encountered error when testing service - %v\", err))\n\t\treturn\n\t}\n\tif newStatus != hs.Status {\n\t\trepo.updateHostServiceStatusCount(hs, newStatus, msg)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
submitBackup create a backup from schedule.
|
func (c *scheduleReconciler) submitBackup(ctx context.Context, schedule *velerov1.Schedule) error {
c.logger.WithField("schedule", schedule.Namespace+"/"+schedule.Name).Info("Schedule is due, going to submit backup.")
now := c.clock.Now()
// Don't attempt to "catch up" if there are any missed or failed runs - simply
// trigger a Backup if it's time.
backup := getBackup(schedule, now)
if err := c.Create(ctx, backup); err != nil {
return errors.Wrap(err, "error creating Backup")
}
original := schedule.DeepCopy()
schedule.Status.LastBackup = &metav1.Time{Time: now}
if err := c.Patch(ctx, schedule, client.MergeFrom(original)); err != nil {
return errors.Wrapf(err, "error updating Schedule's LastBackup time to %v", schedule.Status.LastBackup)
}
return nil
}
|
[
"func createBackup(w http.ResponseWriter, r *http.Request) {\n\tlogrus.Infof(\">>>>CreateBackup r=%s\", r)\n\n\tif RunningBackupAPIID != \"\" {\n\t\tlogrus.Infof(\"Another backup id %s is already running. Aborting.\", RunningBackupAPIID)\n\t\thttp.Error(w, fmt.Sprintf(\"Another backup id %s is already running. Aborting.\", RunningBackupAPIID), http.StatusConflict)\n\t\treturn\n\t}\n\n\tRunningBackupAPIID = createAPIID()\n\tCurrentBackupStartTime = time.Now()\n\n\t//run backup assyncronouslly\n\tgo runBackup(RunningBackupAPIID)\n\n\tsendSchellyResponse(RunningBackupAPIID, \"\", \"running\", \"backup triggered\", -1, http.StatusAccepted, w)\n}",
"func CreateScheduledBackup(backupScheduleName, backupScheduleUID, schedulePolicyName, schedulePolicyUID string,\n\tinterval time.Duration, namespaces []string) (err error) {\n\tvar ctx context1.Context\n\tlabelSelectors := make(map[string]string)\n\tStep(fmt.Sprintf(\"Create scheduled backup %s of namespaces %v on cluster %s in organization %s\",\n\t\tbackupScheduleNamePrefix+backupScheduleName, namespaces, sourceClusterName, OrgID), func() {\n\t\tbackupDriver := Inst().Backup\n\n\t\t// Create a schedule policy\n\t\tschedulePolicyCreateRequest := &api.SchedulePolicyCreateRequest{\n\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\tName: schedulePolicyName,\n\t\t\t\tUid: schedulePolicyUID,\n\t\t\t\tOrgId: OrgID,\n\t\t\t},\n\n\t\t\tSchedulePolicy: &api.SchedulePolicyInfo{\n\t\t\t\tInterval: &api.SchedulePolicyInfo_IntervalPolicy{\n\t\t\t\t\t// Retain 5 backups at a time for ease of inspection\n\t\t\t\t\tRetain: 5,\n\t\t\t\t\tMinutes: int64(interval / time.Minute),\n\t\t\t\t\tIncrementalCount: &api.SchedulePolicyInfo_IncrementalCount{\n\t\t\t\t\t\tCount: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\t//ctx, err = backup.GetPxCentralAdminCtx()\n\t\tctx, err = backup.GetAdminCtxFromSecret()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t_, err = backupDriver.CreateSchedulePolicy(ctx, schedulePolicyCreateRequest)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Create a backup schedule\n\t\tbkpScheduleCreateRequest := &api.BackupScheduleCreateRequest{\n\t\t\tCreateMetadata: &api.CreateMetadata{\n\t\t\t\tName: backupScheduleNamePrefix + backupScheduleName,\n\t\t\t\tUid: backupScheduleUID,\n\t\t\t\tOrgId: OrgID,\n\t\t\t},\n\n\t\t\tNamespaces: namespaces,\n\n\t\t\tReclaimPolicy: api.BackupScheduleInfo_Delete,\n\t\t\t// Name of Cluster\n\t\t\tCluster: sourceClusterName,\n\t\t\t// Label selectors to choose resources\n\t\t\tLabelSelectors: labelSelectors,\n\n\t\t\tSchedulePolicyRef: &api.ObjectRef{\n\t\t\t\tName: schedulePolicyName,\n\t\t\t\tUid: 
schedulePolicyUID,\n\t\t\t},\n\t\t\tBackupLocationRef: &api.ObjectRef{\n\t\t\t\tName: backupLocationName,\n\t\t\t\tUid: BackupLocationUID,\n\t\t\t},\n\t\t}\n\t\t//ctx, err = backup.GetPxCentralAdminCtx()\n\t\tctx, err = backup.GetAdminCtxFromSecret()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t_, err = backupDriver.CreateBackupSchedule(ctx, bkpScheduleCreateRequest)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t})\n\treturn err\n}",
"func (f *FakeInstance) SetBackupSchedule(_ context.Context, _ string, _ *govultr.BackupScheduleReq) (*http.Response, error) {\n\tpanic(\"implement me\")\n}",
"func CreateBackup(serviceID string, settings *models.Settings) *models.Task {\n\tbackup := map[string]string{\n\t\t\"archiveType\": \"cf\",\n\t\t\"encryptionType\": \"aes\",\n\t}\n\tb, err := json.Marshal(backup)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\tresp := httpclient.Post(b, fmt.Sprintf(\"%s/v1/environments/%s/services/%s/backup\", settings.PaasHost, settings.EnvironmentID, serviceID), true, settings)\n\tvar m map[string]string\n\tjson.Unmarshal(resp, &m)\n\treturn &models.Task{\n\t\tID: m[\"taskId\"],\n\t}\n}",
"func (i *InstanceServiceHandler) SetBackupSchedule(ctx context.Context, instanceID string, backup *BackupScheduleReq) error {\n\turi := fmt.Sprintf(\"%s/%s/backup-schedule\", instancePath, instanceID)\n\treq, err := i.client.NewRequest(ctx, http.MethodPost, uri, backup)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn i.client.DoWithContext(ctx, req, nil)\n}",
"func NewBackupCronJob(bkp *v1alpha1.Backup, db *v1alpha1.MariaDB, scheme *runtime.Scheme) *v1beta1.CronJob {\n\n\tbkpPVClaimName := GetMariadbBkpVolumeClaimName(bkp)\n\tdbPort := db.Spec.Port\n\n\thostname := mariadbBkpServiceName(bkp) + \".\" + bkp.Namespace\n\t// currentTime := time.Now()\n\t//formatedDate := currentTime.Format(\"2006-01-02_15:04:05\")\n\t// filename := \"/var/lib/mysql/backup/backup_\" + formatedDate + \".sql\"\n\tfilename := \"/var/lib/mysql/backup_`date +%F_%T`.sql\"\n\tbackupCommand := \"echo 'Starting DB Backup' && \" +\n\t\t\"mysqldump -P \" + fmt.Sprint(dbPort) + \" -h '\" + hostname +\n\t\t\"' --lock-tables --all-databases > \" + filename +\n\t\t\"&& echo 'Completed DB Backup'\"\n\n\tcron := &v1beta1.CronJob{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: bkp.Name,\n\t\t\tNamespace: bkp.Namespace,\n\t\t\tLabels: utils.Labels(db, \"mariadb\"),\n\t\t},\n\t\tSpec: v1beta1.CronJobSpec{\n\t\t\tSchedule: bkp.Spec.Schedule,\n\t\t\tJobTemplate: v1beta1.JobTemplateSpec{\n\t\t\t\tSpec: batchv1.JobSpec{\n\t\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\t\tServiceAccountName: \"mariadb-operator\",\n\t\t\t\t\t\t\tVolumes: []corev1.Volume{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: pvStorageName,\n\t\t\t\t\t\t\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\t\t\t\t\t\t\tPersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\t\t\t\t\t\t\tClaimName: bkpPVClaimName,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: bkp.Name,\n\t\t\t\t\t\t\t\t\tImage: db.Spec.Image,\n\t\t\t\t\t\t\t\t\tCommand: []string{\"/bin/sh\", \"-c\"},\n\t\t\t\t\t\t\t\t\tArgs: []string{backupCommand},\n\t\t\t\t\t\t\t\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName: pvStorageName,\n\t\t\t\t\t\t\t\t\t\t\tMountPath: 
\"/var/lib/mysql\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tEnv: []corev1.EnvVar{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName: \"MYSQL_PWD\",\n\t\t\t\t\t\t\t\t\t\t\tValue: db.Spec.Rootpwd,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tName: \"USER\",\n\t\t\t\t\t\t\t\t\t\t\tValue: \"root\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRestartPolicy: corev1.RestartPolicyOnFailure,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tcontrollerutil.SetControllerReference(bkp, cron, scheme)\n\treturn cron\n}",
"func (c *Client) CreateBackup(instanceId string) error {\n\n\tresult := &bce.BceResponse{}\n\terr := bce.NewRequestBuilder(c).\n\t\tWithMethod(http.POST).\n\t\tWithURL(getDdcUriWithInstanceId(instanceId)+\"/snapshot\").\n\t\tWithHeader(http.CONTENT_TYPE, bce.DEFAULT_CONTENT_TYPE).\n\t\tWithResult(result).\n\t\tDo()\n\n\treturn err\n}",
"func (esop *ElasticsearchOperation) Backup(backupCtx Context, _ ObjectManifest, progChan chan OperationProgress) error {\n\tbackupID := backupCtx.backupTask.TaskID()\n\n\tbackupCtxDeadline, _ := backupCtx.ctx.Deadline()\n\tlog := logrus.WithFields(logrus.Fields{\n\t\t\"backup_id\": backupID,\n\t\t\"service_name\": esop.ServiceName,\n\t\t\"multi_index_spec\": esop.MultiIndexSpec,\n\t\t\"operation\": \"elasticsearch_snapshot\",\n\t\t\"action\": \"backup\",\n\t\t\"deadline\": backupCtxDeadline.Format(time.RFC3339),\n\t})\n\tlog.Info(\"Running backup operation\")\n\n\tconn, err := backupCtx.connFactory.DialContext(backupCtx.ctx, \"es-sidecar-service\", backupCtx.esSidecarInfo.Address())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not connect to es-sidecar-service for backup operation\")\n\t}\n\tdefer conn.Close()\n\n\tclient := es_sidecar.NewEsSidecarServiceClient(conn)\n\n\t_, err = client.CreateSnapshot(backupCtx.ctx, &es_sidecar.CreateSnapshotRequest{\n\t\tServiceName: esop.ServiceName,\n\t\tMultiIndexSpecification: esop.MultiIndexSpec,\n\t\tBackupId: backupID,\n\t})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"es-sidecar-service failed to create snapshot\")\n\t}\n\n\treturn monitorEsProgress(backupCtx.ctx, esop.String(), log, progChan, func() (esProgress, error) {\n\t\treturn client.CreateSnapshotStatus(backupCtx.ctx, &es_sidecar.CreateSnapshotStatusRequest{\n\t\t\tServiceName: esop.ServiceName,\n\t\t\tBackupId: backupID,\n\t\t})\n\t})\n}",
"func (x DynamoDBAccessor) CreateBackup(ctx context.Context, in1 *dynamodb.CreateBackupInput) (*dynamodb.CreateBackupOutput, error) {\n action := func(rawPeer interface{}, span *TracingSpan) ([]interface{}, error) {\n client, peer, err := x.Client(rawPeer)\n if err != nil {\n return nil, err\n }\n in1.TableName = &peer.Name\n res := make([]interface{}, 1)\n res[0], err = client.CreateBackup(in1)\n return res, err\n }\n execRes, execErr := execute(ctx, x.info.peers, \"CreateBackup\", action)\n if execErr != nil {\n return nil, execErr\n }\n return execRes[0].(*dynamodb.CreateBackupOutput), nil\n}",
"func (a *backupAction) Execute(ctx context.Context, syndesis *synapi.Syndesis, operatorNamespace string) error {\n\tentries := c.Entries()\n\n\tif s := syndesis.Spec.Backup.Schedule; s != \"\" {\n\t\tif len(entries) == 0 {\n\t\t\ta.log.Info(\"scheduling backup job\", \"frequency\", string(s))\n\t\t\tc.AddFunc(strings.Join([]string{\"@\", string(s)}, \"\"), func() {\n\t\t\t\tb, err := backup.NewBackup(ctx, a.clientTools, syndesis, \"/tmp/foo\")\n\t\t\t\tif err != nil {\n\t\t\t\t\ta.log.Error(err, \"backup initialisation failed with error\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tb.SetDelete(true)\n\t\t\t\tb.Run()\n\t\t\t})\n\n\t\t\tc.Start()\n\t\t} else if len(entries) == 1 {\n\t\t\tsyndesis.Status.Backup.Next = entries[0].Next.String()\n\t\t\tsyndesis.Status.Backup.Previous = entries[0].Prev.String()\n\n\t\t\tclient, _ := a.clientTools.RuntimeClient()\n\t\t\treturn client.Status().Update(ctx, syndesis)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unsupported number of entries for cron instance, cron %v\", c)\n\t\t}\n\t} else {\n\t\tif len(entries) == 1 {\n\t\t\te := entries[0]\n\n\t\t\ta.log.Info(\"removing backup job from scheduler\", \"job\", e.ID)\n\t\t\tc.Remove(e.ID)\n\t\t\tc.Stop()\n\t\t} else if len(entries) > 1 {\n\t\t\treturn fmt.Errorf(\"unsupported number of entries for cron instance, cron %v\", c)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func toBackup(backup *ps.Backup) *Backup {\n\treturn &Backup{\n\t\tPublicID: backup.PublicID,\n\t\tName: backup.Name,\n\t\tState: backup.State,\n\t\tSize: backup.Size,\n\t\tCreatedAt: backup.CreatedAt.UTC().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)),\n\t\tUpdatedAt: backup.UpdatedAt.UTC().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)),\n\t\tStartedAt: backup.StartedAt.UTC().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)),\n\t\tExpiresAt: backup.ExpiresAt.UTC().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)),\n\t\tCompletedAt: backup.CompletedAt.UTC().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)),\n\t\torig: backup,\n\t}\n}",
"func (client CloudEndpointsClient) PostBackupSender(req *http.Request) (future CloudEndpointsPostBackupFuture, err error) {\n\tvar resp *http.Response\n\tfuture.FutureAPI = &azure.Future{}\n\tresp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))\n\tif err != nil {\n\t\treturn\n\t}\n\tvar azf azure.Future\n\tazf, err = azure.NewFutureFromResponse(resp)\n\tfuture.FutureAPI = &azf\n\tfuture.Result = future.result\n\treturn\n}",
"func (sb Backy2Backuper) CreateNewBackup(apiID string, timeout time.Duration, shellContext *schellyhook.ShellContext) error {\n\tlogrus.Infof(\"CreateNewBackup() apiID=%s timeout=%d s\", apiID, timeout.Seconds)\n\n\tlogrus.Infof(\"Running Backy2 backup\")\n\tout, err := schellyhook.ExecShellTimeout(\"backy2 backup \"+*sourcePath+\" \"+*sourcePath, timeout, shellContext)\n\tif err != nil {\n\t\tstatus := (*shellContext).CmdRef.Status()\n\t\tif status.Exit == -1 {\n\t\t\tlogrus.Warnf(\"Backy2 command timeout enforced (%d seconds)\", (status.StopTs-status.StartTs)/1000000000)\n\t\t}\n\t\tlogrus.Debugf(\"Backy2 error. out=%s; err=%s\", out, err.Error())\n\t\treturn err\n\t} else {\n\t\tlogrus.Debug(\"Backy2 backup success\")\n\t}\n\n\trex, _ := regexp.Compile(\"New version\\\\: ([\\\\-a-z0-9]+) \\\\(Tags\")\n\tid := rex.FindStringSubmatch(out)\n\tif len(id) == 2 && strings.Contains(out, \"Backy complete\") {\n\t\tbackyID := id[1]\n\t\tlogrus.Infof(\"Backup success\")\n\t\tsaveDataID(apiID, backyID)\n\t} else {\n\t\tlogrus.Errorf(\"Couldn't find 'Backy complete' or id in command output. out=%s\", out)\n\t\treturn fmt.Errorf(\"Couldn't find 'Backy complete' or id in command output. out=%s\", out)\n\t}\n\n\tlogrus.Infof(\"Backy2 backup finished\")\n\treturn nil\n}",
"func (v *VirtualMachine) CreateBackup(req *CreateBackupRequest, dest *OmniStackCluster) (*Backup, error) {\n\tvar (\n\t\tpath = fmt.Sprintf(\"/virtual_machines/%s/backup\", v.Id)\n\t)\n\n\tif dest != nil {\n\t\treq.Destination = dest.Id\n\t}\n\n\tresp, err := commonClient.DoRequest(\"POST\", path, \"\", req, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\ttask, err := commonClient.Tasks.WaitForTask(resp)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, err\n\t}\n\n\tresources := task.AffectedResources\n\tif len(resources) < 1 {\n\t\terr_message := \"Backup was not successful. Error code:\" + string(task.ErrorCode)\n\t\treturn nil, errors.New(err_message)\n\t}\n\n\tresource_id := resources[0].ObjectId\n\tbackup, err := commonClient.Backups.GetById(resource_id)\n\n\treturn backup, nil\n}",
"func (f *FakeInstance) GetBackupSchedule(_ context.Context, _ string) (*govultr.BackupSchedule, *http.Response, error) {\n\tpanic(\"implement me\")\n}",
"func (client CloudEndpointsClient) PostBackup(ctx context.Context, resourceGroupName string, storageSyncServiceName string, syncGroupName string, cloudEndpointName string, parameters BackupRequest) (result CloudEndpointsPostBackupFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/CloudEndpointsClient.PostBackup\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.FutureAPI != nil && result.FutureAPI.Response() != nil {\n\t\t\t\tsc = result.FutureAPI.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: client.SubscriptionID,\n\t\t\tConstraints: []validation.Constraint{{Target: \"client.SubscriptionID\", Name: validation.MinLength, Rule: 1, Chain: nil}}},\n\t\t{TargetValue: resourceGroupName,\n\t\t\tConstraints: []validation.Constraint{{Target: \"resourceGroupName\", Name: validation.MaxLength, Rule: 90, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.MinLength, Rule: 1, Chain: nil},\n\t\t\t\t{Target: \"resourceGroupName\", Name: validation.Pattern, Rule: `^[-\\w\\._\\(\\)]+$`, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"storagesync.CloudEndpointsClient\", \"PostBackup\", err.Error())\n\t}\n\n\treq, err := client.PostBackupPreparer(ctx, resourceGroupName, storageSyncServiceName, syncGroupName, cloudEndpointName, parameters)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"storagesync.CloudEndpointsClient\", \"PostBackup\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.PostBackupSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"storagesync.CloudEndpointsClient\", \"PostBackup\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func AddBackupBase(clientset *kubernetes.Clientset, client *rest.RESTClient, job *crv1.Pgbackup, namespace string) {\n\tvar err error\n\n\tif job.Spec.BackupStatus == crv1.UpgradeCompletedStatus {\n\t\tlog.Warn(\"pgbackup \" + job.Spec.Name + \" already completed, not recreating it\")\n\t\treturn\n\t}\n\n\tlog.Info(\"creating Pgbackup object\" + \" in namespace \" + namespace)\n\tlog.Info(\"created with Name=\" + job.Spec.Name + \" in namespace \" + namespace)\n\n\t//create the PVC if necessary\n\tvar pvcName string\n\tpvcName, err = pvc.CreatePVC(clientset, job.Spec.Name+\"-backup\", &job.Spec.StorageSpec, namespace)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t} else {\n\t\tlog.Info(\"created backup PVC =\" + pvcName + \" in namespace \" + namespace)\n\t}\n\n\t//update the pvc name in the CRD\n\terr = util.Patch(client, \"/spec/storagespec/name\", pvcName, \"pgbackups\", job.Spec.Name, namespace)\n\n\t//create the job -\n\tjobFields := jobTemplateFields{\n\t\tName: job.Spec.Name,\n\t\tPvcName: util.CreatePVCSnippet(job.Spec.StorageSpec.StorageType, pvcName),\n\t\tCCPImagePrefix: operator.CCPImagePrefix,\n\t\tCCPImageTag: job.Spec.CCPImageTag,\n\t\tSecurityContext: util.CreateSecContext(job.Spec.StorageSpec.Fsgroup, job.Spec.StorageSpec.SupplementalGroups),\n\t\tBackupHost: job.Spec.BackupHost,\n\t\tBackupUser: job.Spec.BackupUser,\n\t\tBackupPass: job.Spec.BackupPass,\n\t\tBackupPort: job.Spec.BackupPort,\n\t}\n\n\tvar doc2 bytes.Buffer\n\terr = jobTemplate.Execute(&doc2, jobFields)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\tjobDocString := doc2.String()\n\tlog.Debug(jobDocString)\n\n\tnewjob := v1batch.Job{}\n\terr = json.Unmarshal(doc2.Bytes(), &newjob)\n\tif err != nil {\n\t\tlog.Error(\"error unmarshalling json into Job \" + err.Error())\n\t\treturn\n\t}\n\n\tresultJob, err := clientset.Batch().Jobs(namespace).Create(&newjob)\n\tif err != nil {\n\t\tlog.Error(\"error creating Job \" + 
err.Error())\n\t\treturn\n\t}\n\tlog.Info(\"created Job \" + resultJob.Name)\n\n\t//update the backup CRD status to submitted\n\terr = util.Patch(client, \"/spec/backupstatus\", crv1.UpgradeSubmittedStatus, \"pgbackups\", job.Spec.Name, namespace)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t}\n\n}",
"func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io.Writer) error {\n\tgzippedData := gzip.NewWriter(backupFile)\n\tdefer gzippedData.Close()\n\n\ttw := tar.NewWriter(gzippedData)\n\tdefer tw.Close()\n\n\tgzippedLog := gzip.NewWriter(logFile)\n\tdefer gzippedLog.Close()\n\n\tvar errs []error\n\n\tctx := &backupContext{\n\t\tbackup: backup,\n\t\tw: tw,\n\t\tlogger: &logger{w: gzippedLog},\n\t\tnamespaceIncludesExcludes: getNamespaceIncludesExcludes(backup),\n\t}\n\n\tctx.log(\"Starting backup\")\n\n\tctx.resourceIncludesExcludes = ctx.getResourceIncludesExcludes(kb.discoveryHelper, backup.Spec.IncludedResources, backup.Spec.ExcludedResources)\n\n\tfor _, group := range kb.discoveryHelper.Resources() {\n\t\tctx.log(\"Processing group %s\", group.GroupVersion)\n\t\tif err := kb.backupGroup(ctx, group); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\n\terr := kuberrs.NewAggregate(errs)\n\tif err == nil {\n\t\tctx.log(\"Backup completed successfully\")\n\t} else {\n\t\tctx.log(\"Backup completed with errors: %v\", err)\n\t}\n\n\treturn err\n}",
"func SetupBackup(testName string) {\n\tlogrus.Infof(\"Backup driver: %v\", Inst().Backup)\n\tprovider := GetProvider()\n\tlogrus.Infof(\"Run Setup backup with object store provider: %s\", provider)\n\tOrgID = \"default\"\n\tBucketName = fmt.Sprintf(\"%s-%s\", BucketNamePrefix, Inst().InstanceID)\n\tCloudCredUID = uuid.New()\n\t//cloudCredUID = \"5a48be84-4f63-40ae-b7f1-4e4039ab7477\"\n\tBackupLocationUID = uuid.New()\n\t//backupLocationUID = \"64d908e7-40cf-4c9e-a5cf-672e955fd0ca\"\n\n\tCreateBucket(provider, BucketName)\n\tCreateOrganization(OrgID)\n\tCreateCloudCredential(provider, CredName, CloudCredUID, OrgID)\n\tCreateBackupLocation(provider, backupLocationName, BackupLocationUID, CredName, CloudCredUID, BucketName, OrgID)\n\tCreateSourceAndDestClusters(CredName, OrgID)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetUserList get user list from paas
|
func (m *publicUser) GetUserList(c *gin.Context) (int, interface{}) {
user := plugins.CurrentPlugin(c, m.config.LoginVersion)
userList, err := user.GetUserList(c, m.config.ConfigMap)
rspBody := metadata.LonginSystemUserListResult{}
if nil != err {
rspBody.Code = common.CCErrCommHTTPDoRequestFailed
rspBody.ErrMsg = err.Error()
rspBody.Result = false
}
rspBody.Result = true
rspBody.Data = userList
return 200, rspBody
}
|
[
"func getUserList() string {\n\tvar userlist string\n\tfmt.Println(len(Users))\n\tfor key, value := range Users {\n\t\tfmt.Println(\"key\", key, \"value\", value)\n\t\tuserlist = userlist + key + \"|\"\n\n\t}\n\treturn strings.TrimRight(userlist, \"|\")\n}",
"func GetUserList(currentPage, lineSize uint, term string) (num int64, userListJSON []User, err error) {\n\to := orm.NewOrm()\n\tvar sql = `SELECT \n\t\t\t\t\tT0.i_d,\n\t\t\t\t\tT0.name,\n\t\t\t\t\tT0.lock\t,\n\t\t\t\t\tT0.username,\n\t\t\t\t\tT0.tel,\n\t\t\t\t\tT0.depart, \n\t\t\t\t\tT0.active\t\t\t\t\t \n\t\t\t FROM \"user\" T0\t JOIN \"role\" T1 ON T0.role_id = T1.i_d\n\t\t\t WHERE (lower(T0.name) like lower(?)\n\t\t\t or lower(T0.depart) like lower(?) \n\t\t\t ) order by T0.name`\n\tnum, _ = o.Raw(sql, \"%\"+term+\"%\", \"%\"+term+\"%\").QueryRows(&userListJSON)\n\tif lineSize+currentPage > uint(num) {\n\t\tlineSize = uint(num)\n\t} else if currentPage > 0 {\n\t\tlineSize = lineSize + currentPage\n\t}\n\tif currentPage > lineSize {\n\t\tcurrentPage = 0\n\t}\n\tuserListJSON = userListJSON[currentPage:lineSize]\n\treturn num, userListJSON, err\n}",
"func (m *Mgr) list(ctx context.Context) (users []*User, err error) {\n\trows, err := m.db.QueryContext(ctx, `SELECT username, password FROM users`)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tvar u User\n\t\tif err = rows.Scan(&u.Username, &u.Password); err != nil {\n\t\t\treturn\n\t\t}\n\t\tusers = append(users, &u)\n\t}\n\treturn users, rows.Err()\n}",
"func (db *UserStorage) List() ([]socialnet.User, error) {\n\tq := \"SELECT * FROM users\"\n\tvar uu []socialnet.User\n\n\terr := db.Select(&uu, q)\n\tif err != nil {\n\t\treturn []socialnet.User{}, err\n\t}\n\n\treturn uu, nil\n}",
"func (u *User) List() (boiled.UserSlice, error) {\n\treturn boiled.Users().All(context.Background(), u.db)\n}",
"func List() []string {\n\tvar list []string\n\tfor user, _ := range usertable {\n\t\tlist = append(list, user)\n\t}\n\tsort.Strings(list)\n\n\treturn list\n}",
"func (h *ServiceUsersHandler) List(ctx context.Context, project, serviceName string) ([]*ServiceUser, error) {\n\t// Aiven API does not provide list operation for service users, need to get them via service info instead\n\tservice, err := h.client.Services.Get(ctx, project, serviceName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn service.Users, nil\n}",
"func (c *UsersClient) List(ctx context.Context, filter string) (*[]models.User, int, error) {\n\tparams := url.Values{}\n\tif filter != \"\" {\n\t\tparams.Add(\"$filter\", filter)\n\t}\n\tresp, status, _, err := c.BaseClient.Get(ctx, base.GetHttpRequestInput{\n\t\tValidStatusCodes: []int{http.StatusOK},\n\t\tUri: base.Uri{\n\t\t\tEntity: \"/users\",\n\t\t\tParams: params,\n\t\t\tHasTenantId: true,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, status, err\n\t}\n\tdefer resp.Body.Close()\n\trespBody, _ := ioutil.ReadAll(resp.Body)\n\tvar data struct {\n\t\tUsers []models.User `json:\"value\"`\n\t}\n\tif err := json.Unmarshal(respBody, &data); err != nil {\n\t\treturn nil, status, err\n\t}\n\treturn &data.Users, status, nil\n}",
"func (remoteAccessVpnUserApi *RemoteAccessVpnUserApi) List() ([]RemoteAccessVpnUser, error) {\n\tdata, err := remoteAccessVpnUserApi.entityService.List(map[string]string{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseRemoteAccessVpnUserList(data), nil\n}",
"func (manager *UsersManager) UserIDList() []string {\n\ts := set.New(set.ThreadSafe)\n\t_ = manager.EachEntry(func(authyID string, publicKey string) {\n\t\ts.Add(authyID)\n\t})\n\n\treturn set.StringSlice(s)\n}",
"func GetUserList() ([]User, error) {\n\tInfo.Println(\"GetUserList()\")\n\tvar users []User\n\terr := db.Select(&users, \"select * from \\\"user\\\"\")\n\tif err != nil {\n\t\treturn []User{}, err\n\t}\n\treturn users, nil\n}",
"func (s *Service) List(w http.ResponseWriter, r *http.Request) {\n\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\n\t// Retrieve the full list of users\n\tresources, err, statusCode := s.GetResources(ResourceParser)\n\n\tif err != nil {\n\t\tw.WriteHeader(statusCode)\n\t\tw.Write([]byte(`{\"error\":\"could not read users\"}`))\n\t\treturn\n\t}\n\n\t// Get the list and assert them to the proper type\n\tresourceList := resources.([]interface{})\n\tuserList := make([]models.User, len(resourceList))\n\tfor i, r := range resourceList {\n\t\tuserList[i] = r.(models.User)\n\t}\n\n\t// Retrieve any query parameters\n\tparams := r.URL.Query()\n\n\t// If the query URI is used and query parameters exist, then apply the filters\n\tif len(params) > 0 && strings.Index(r.RequestURI, \"/users/query?\") != -1 {\n\t\tuserList, err = s.filter(userList, params)\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(fmt.Sprintf(`{\"error\":\"%s\"}`, err.Error())))\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Convert the list of users to byte data\n\trespData, err := json.Marshal(userList)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{\"error\":\"user data error\"}`))\n\t}\n\n\t// Write the response; no need to write the response code as 200 is the default\n\t_, err = w.Write(respData)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{\"error\":\"unknown user error\"}`))\n\t}\n}",
"func (m *manager) List(ctx context.Context, query *q.Query) (models.Users, error) {\n\tquery = q.MustClone(query)\n\tif query.Sorting == \"\" {\n\t\tquery.Sorting = \"username\"\n\t}\n\n\texcludeAdmin := true\n\tfor key := range query.Keywords {\n\t\tstr := strings.ToLower(key)\n\t\tif str == \"user_id__in\" {\n\t\t\texcludeAdmin = false\n\t\t\tbreak\n\t\t} else if str == \"user_id\" {\n\t\t\texcludeAdmin = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif excludeAdmin {\n\t\t// Exclude admin account when not filter by UserIDs, see https://github.com/goharbor/harbor/issues/2527\n\t\tquery.Keywords[\"user_id__gt\"] = 1\n\t}\n\n\treturn m.dao.List(ctx, query)\n}",
"func (cli *OpsGenieUserClient) List(req user.ListUsersRequest) (*user.ListUsersResponse, error) {\n\treq.APIKey = cli.apiKey\n\tresp, err := cli.sendRequest(cli.buildGetRequest(userURL, req))\n\n\tif resp == nil {\n\t\treturn nil, errors.New(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\n\tvar listUsersResp user.ListUsersResponse\n\n\tif err = resp.Body.FromJsonTo(&listUsersResp); err != nil {\n\t\tmessage := \"Server response can not be parsed, \" + err.Error()\n\t\tlogging.Logger().Warn(message)\n\t\treturn nil, errors.New(message)\n\t}\n\treturn &listUsersResp, nil\n}",
"func (us *UserService) List(ctx context.Context) ([]*resources.User, error) {\n\tdoc, err := us.list(ctx, \"one.userpool.info\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\telements := doc.FindElements(\"USER_POOL/USER\")\n\n\tusers := make([]*resources.User, len(elements))\n\tfor i, e := range elements {\n\t\tusers[i] = resources.CreateUserFromXML(e)\n\t}\n\n\treturn users, nil\n}",
"func (s *RepoUserService) List(path string) ([]*api.User, error) {\n\treturn s.ListFunc(path)\n}",
"func GetUsers(req *http.Request, render render.Render, account services.Account) {\n qs := req.URL.Query()\n userIDs := qs[\"userId\"]\n var users []models.User\n for _, userID := range userIDs {\n if user, err := account.GetUser(userID); err != nil {\n render.JSON(err.HttpCode, err)\n return\n } else {\n users = append(users, *user)\n }\n }\n render.JSON(http.StatusOK, users)\n}",
"func UserListAll(w http.ResponseWriter, r *http.Request) {\n\n\tvar err error\n\tvar pageSize int\n\tvar paginatedUsers auth.PaginatedUsers\n\n\t// Init output\n\toutput := []byte(\"\")\n\n\t// Add content type header to the response\n\tcontentType := \"application/json\"\n\tcharset := \"utf-8\"\n\tw.Header().Add(\"Content-Type\", fmt.Sprintf(\"%s; charset=%s\", contentType, charset))\n\n\t// Grab context references\n\trefStr := gorillaContext.Get(r, \"str\").(stores.Store)\n\trefRoles := gorillaContext.Get(r, \"auth_roles\").([]string)\n\n\t// Grab url path variables\n\turlValues := r.URL.Query()\n\tpageToken := urlValues.Get(\"pageToken\")\n\tstrPageSize := urlValues.Get(\"pageSize\")\n\tprojectName := urlValues.Get(\"project\")\n\tprojectUUID := \"\"\n\n\tif projectName != \"\" {\n\t\tprojectUUID = projects.GetUUIDByName(projectName, refStr)\n\t\tif projectUUID == \"\" {\n\t\t\terr := APIErrorNotFound(\"ProjectUUID\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif strPageSize != \"\" {\n\t\tif pageSize, err = strconv.Atoi(strPageSize); err != nil {\n\t\t\tlog.Errorf(\"Pagesize %v produced an error while being converted to int: %v\", strPageSize, err.Error())\n\t\t\terr := APIErrorInvalidData(\"Invalid page size\")\n\t\t\trespondErr(w, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// check that user is indeed a service admin in order to be priviledged to see full user info\n\tpriviledged := auth.IsServiceAdmin(refRoles)\n\n\t// Get Results Object - call is always priviledged because this handler is only accessible by service admins\n\tif paginatedUsers, err = auth.PaginatedFindUsers(pageToken, int32(pageSize), projectUUID, priviledged, refStr); err != nil {\n\t\terr := APIErrorInvalidData(\"Invalid page token\")\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Output result to JSON\n\tresJSON, err := paginatedUsers.ExportJSON()\n\n\tif err != nil {\n\t\terr := APIErrExportJSON()\n\t\trespondErr(w, err)\n\t\treturn\n\t}\n\n\t// Write response\n\toutput = 
[]byte(resJSON)\n\trespondOK(w, output)\n\n}",
"func (s *initServer) GetUserListFilter(ctx context.Context, in *pb.UserListRequest) (*pb.UserListResponse, error) {\t\n\treturn userListTempl(ctx, in, \"userListFilter:user\", true)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CgoEform Earth reference ellipsoids. E f o r m This function is part of the International Astronomical Union's SOFA (Standards of Fundamental Astronomy) software collection. Status: canonical. Given: n int ellipsoid identifier (Note 1) Returned: a double equatorial radius (meters, Note 2) f double flattening (Note 2) Returned (function value): err error nil = OK errEformE1 = illegal identifier (Note 3) Notes: 1) The identifier n is a number that specifies the choice of reference ellipsoid. The following are supported: n ellipsoid 1 WGS84 2 GRS80 3 WGS72 The n value has no significance outside the SOFA software. For convenience, symbols WGS84 etc. are defined in sofam.h. 2) The ellipsoid parameters are returned in the form of equatorial radius in meters (a) and flattening (f). The latter is a number around 0.00335, i.e. around 1/298. 3) For the case where an unsupported n value is supplied, zero a and f are returned, as well as error status. References: Department of Defense World Geodetic System 1984, National Imagery and Mapping Agency Technical Report 8350.2, Third Edition, p32. Moritz, H., Bull. Geodesique 662, 187 (1992). The Department of Defense World Geodetic System 1972, World Geodetic System Committee, May 1974. Explanatory Supplement to the Astronomical Almanac, P. Kenneth Seidelmann (ed), University Science Books (1992), p220. This revision: 2013 June 18 SOFA release 20200721 Copyright (C) 2020 IAU SOFA Board. See notes at end. CgoEform Earth reference ellipsoids.
|
func CgoEform(n int) (a, f float64, err en.ErrNum) {
var cA, cF C.double
var cStatus C.int
cStatus = C.iauEform(C.int(n), &cA, &cF)
switch int(cStatus) {
case 0:
case -1:
err = errEform.Set(-1)
default:
err = errEform.Set(0)
}
return float64(cA), float64(cF), err
}
|
[
"func GoEform(n int) (a, f float64, err en.ErrNum) {\n\n\t// Look up a and f for the specified reference ellipsoid.\n\tswitch n {\n\tcase WGS84:\n\t\ta = 6378137.0\n\t\tf = 1.0 / 298.257223563\n\tcase GRS80:\n\t\ta = 6378137.0\n\t\tf = 1.0 / 298.257222101\n\tcase WGS72:\n\t\ta = 6378135.0\n\t\tf = 1.0 / 298.26\n\tdefault:\n\t\t// Invalid identifier.\n\t\ta = 0.0\n\t\tf = 0.0\n\t\terr = errEform.Set(-1)\n\t}\n\treturn\n}",
"func SunEcef(t time.Time) (ecef [6]float64) {\n\tlatitude, longitude, radiusVector := SunLlr(t)\n\tecef = sunLlrToEcef(latitude, longitude, radiusVector)\n\treturn\n}",
"func FromLatLonF(lat, lon float64) (easting, northing float64) {\n\tif !(-80.0 <= lat && lat <= 84.0) {\n\t\tpanic(\"latitude out of range (must be between 80 deg S and 84 deg N)\")\n\t}\n\tif !(-180.0 <= lon && lon <= 180.0) {\n\t\tpanic(\"longitude out of range (must be between 180 deg W and 180 deg E)\")\n\t}\n\n\tlat_rad := rad(lat)\n\tlat_sin := math.Sin(lat_rad)\n\tlat_cos := math.Cos(lat_rad)\n\n\tlat_tan := lat_sin / lat_cos\n\tlat_tan2 := lat_tan * lat_tan\n\tlat_tan4 := lat_tan2 * lat_tan2\n\tzoneNumber := int((lon + 180) / 6) + 1\n\tif 56 <= lat && lat <= 64 && 3 <= lon && lon <= 12 {\n\t\tzoneNumber = 32\n\t}\n\tif 72 <= lat && lat <= 84 && lon >= 0 {\n\t\tif lon <= 9 {\n\t\t\tzoneNumber = 31\n\t\t} else if lon <= 21 {\n\t\t\tzoneNumber = 33\n\t\t} else if lon <= 33 {\n\t\t\tzoneNumber = 35\n\t\t} else if lon <= 42 {\n\t\t\tzoneNumber = 37\n\t\t}\n\t}\n\tcentral_lon := (zoneNumber - 1) * 6 - 180 + 3\n\n\tlon_rad := rad(lon)\n\tcentral_lon_rad := rad(float64(central_lon))\n\n\tn := r / math.Sqrt(1 - e * lat_sin * lat_sin)\n\tc := e_p2 * lat_cos * lat_cos\n\n\ta := lat_cos * (lon_rad - central_lon_rad)\n\ta2 := a * a\n\ta3 := a2 * a\n\ta4 := a3 * a\n\ta5 := a4 * a\n\ta6 := a5 * a\n\tm := r * (m1 * lat_rad -\n\t\tm2 * math.Sin(2 * lat_rad) +\n\t\tm3 * math.Sin(4 * lat_rad) -\n\t\tm4 * math.Sin(6 * lat_rad))\n\teasting = k0 * n * (a +\n\t\ta3 / 6 * (1 - lat_tan2 + c) +\n\t\ta5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * e_p2)) + 500000\n\tnorthing = k0 * (m + n * lat_tan * (a2 / 2 +\n\t\ta4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c * c) +\n\t\ta6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * e_p2)))\n\n\tif lat < 0 {\n\t\tnorthing += 10000000\n\t}\n\n\treturn\n}",
"func NewEllipsoid(radius, flattening float64) *Ellipsoid {\n\te := new(Ellipsoid)\n\tC.geod_init(&e.g, C.double(radius), C.double(flattening))\n\treturn e\n}",
"func GetEquationOfCenter(anomaly float64) float64 {\n\t// The numbers being multiplied below are coefficients for the Equation of Center for Earth\n\tvar anomalyInRad float64\n\tanomalyInRad = anomaly * (math.Pi/180)\n\treturn 1.9148 * (math.Sin(anomalyInRad)) + 0.0200 * math.Sin(2 * anomalyInRad) + 0.0003 * math.Sin(3 * anomalyInRad)\n}",
"func earthRadius(x float64) float64 {\n\tmasm := 6378137.0 // Major semiaxis [m]\n\tmism := 6356752.3 // Minor semiaxis [m]\n\n\tan := masm * masm * math.Cos(x)\n\tbn := mism * mism * math.Sin(x)\n\tad := masm * math.Cos(x)\n\tbd := mism * math.Sin(x)\n\treturn math.Sqrt((an*an + bn*bn) / (ad*ad + bd*bd))\n}",
"func calcEccentEarthOrbit(julianCentury []float64) (eccentEarthOrbit []float64) {\n\tfor index := 0; index < len(julianCentury); index++ {\n\t\ttemp := 0.016708634 - julianCentury[index]*(0.000042037+0.0000001267*julianCentury[index])\n\t\teccentEarthOrbit = append(eccentEarthOrbit, temp)\n\t}\n\treturn\n}",
"func TestEllipticE(t *testing.T) {\n\tt.Parallel()\n\tconst tol = 1.0e-14\n\trnd := rand.New(rand.NewSource(1))\n\n\t// The following EllipticE(pi/3,m), m=0.1(0.1)0.9 was computed in Maxima 5.38.0 using Bigfloat arithmetic.\n\tvE := [...]float64{\n\t\t1.0316510822817691068014397636905610074934300946730,\n\t\t1.0156973658341766636288643556414001451527597364432,\n\t\t9.9929636467826398814855428365155224243586391115108e-1,\n\t\t9.8240033979859736941287149003648737502960015189033e-1,\n\t\t9.6495145764299257550956863602992167490195750321518e-1,\n\t\t9.4687829659158090935158610908054896203271861698355e-1,\n\t\t9.2809053417715769009517654522979827392794124845027e-1,\n\t\t9.0847044378047233264777277954768245721857017157916e-1,\n\t\t8.8785835036531301307661603341327881634688308777383e-1,\n\t}\n\tphi := math.Pi / 3\n\tfor m := 1; m <= 9; m++ {\n\t\tmf := float64(m) / 10\n\t\tdelta := math.Abs(EllipticE(phi, mf) - vE[m-1])\n\t\tif delta > tol {\n\t\t\tt.Fatalf(\"EllipticE(pi/3,m) test fail for m=%v\", mf)\n\t\t}\n\t}\n\n\tfor test := 0; test < 100; test++ {\n\t\talpha := rnd.Float64() * math.Pi / 4\n\t\tbeta := rnd.Float64() * math.Pi / 4\n\t\tfor mi := 0; mi < 9999; mi++ {\n\t\t\tm := float64(mi) / 10000\n\t\t\tEa := EllipticE(alpha, m)\n\t\t\tEb := EllipticE(beta, m)\n\t\t\tsina, cosa := math.Sincos(alpha)\n\t\t\tsinb, cosb := math.Sincos(beta)\n\t\t\ttan := (sina*math.Sqrt(1-m*sinb*sinb) + sinb*math.Sqrt(1-m*sina*sina)) / (cosa + cosb)\n\t\t\tgamma := 2 * math.Atan(tan)\n\t\t\tEg := EllipticE(gamma, m)\n\t\t\tdelta := math.Abs(Ea + Eb - Eg - m*sina*sinb*math.Sin(gamma))\n\t\t\tif delta > tol {\n\t\t\t\tt.Fatalf(\"EllipticE test fail for m=%v, alpha=%v, beta=%v\", m, alpha, beta)\n\t\t\t}\n\t\t}\n\t}\n}",
"func East(value float64) *SimpleElement { return newSEFloat(\"east\", value) }",
"func E3(a float64) Euler {\n\te := Euler{}\n\ta = ToRadians(a)\n\ts := math.Sin(a)\n\tc := math.Cos(a)\n\te[0] = [3]float64{c, -s, 0}\n\te[1] = [3]float64{s, c, 0}\n\te[2] = [3]float64{0.0, 0.0, 1.0}\n\treturn e\n}",
"func NewOrbitFromOE(a, e, i, Ω, ω, ν float64, c CelestialObject) *Orbit {\n\t// Convert angles to radians\n\ti = i * deg2rad\n\tΩ = Ω * deg2rad\n\tω = ω * deg2rad\n\tν = ν * deg2rad\n\n\t// Algorithm from Vallado, 4th edition, page 118 (COE2RV).\n\tif e < eccentricityε {\n\t\t// Circular...\n\t\tif i < angleε {\n\t\t\t// ... equatorial\n\t\t\tΩ = 0\n\t\t\tω = 0\n\t\t\tν = math.Mod(ω+Ω+ν, 2*math.Pi)\n\t\t} else {\n\t\t\t// ... inclined\n\t\t\tω = 0\n\t\t\tν = math.Mod(ν+ω, 2*math.Pi)\n\t\t}\n\t} else if i < angleε && !(c.Equals(Sun) && config.meeus) {\n\t\t// Meeus breaks if doing this correction by Vallado\n\t\t// Elliptical equatorial\n\t\tΩ = 0\n\t\tω = math.Mod(ω+Ω, 2*math.Pi)\n\t}\n\tp := a * (1 - e*e)\n\tif floats.EqualWithinAbs(e, 1, eccentricityε) || e > 1 {\n\t\tpanic(\"[ERROR] should initialize parabolic or hyperbolic orbits with R, V\")\n\t}\n\tμOp := math.Sqrt(c.μ / p)\n\tsinν, cosν := math.Sincos(ν)\n\trPQW := []float64{p * cosν / (1 + e*cosν), p * sinν / (1 + e*cosν), 0}\n\tvPQW := []float64{-μOp * sinν, μOp * (e + cosν), 0}\n\trIJK := Rot313Vec(-ω, -i, -Ω, rPQW)\n\tvIJK := Rot313Vec(-ω, -i, -Ω, vPQW)\n\torbit := Orbit{rIJK, vIJK, c, a, e, i, Ω, ω, ν, 0, 0, 0, 0.0}\n\torbit.Elements()\n\treturn &orbit\n}",
"func E1(a float64) Euler {\n\te := Euler{}\n\ta = ToRadians(a)\n\ts := math.Sin(a)\n\tc := math.Cos(a)\n\te[0] = [3]float64{1.0, 0.0, 0.0}\n\te[1] = [3]float64{0.0, c, -s}\n\te[2] = [3]float64{0.0, s, c}\n\treturn e\n}",
"func (o Orbit) SinCosE() (sinE, cosE float64) {\n\t_, e, _, _, _, ν, _, _, _ := o.Elements()\n\tsinν, cosν := math.Sincos(ν)\n\tdenom := 1 + e*cosν\n\tif e > 1 {\n\t\t// Hyperbolic orbit\n\t\tsinE = math.Sqrt(e*e-1) * sinν / denom\n\t} else {\n\t\tsinE = math.Sqrt(1-e*e) * sinν / denom\n\t}\n\tcosE = (e + cosν) / denom\n\treturn\n}",
"func ae0(slat, slon, lat, lon float64, useArc bool) (azimuth, elevation float64) {\n\n\tomega := lon - slon\n\tsinlat := math.Sin(lat)\n\tcoslat := math.Cos(lat)\n\tsinslat := math.Sin(slat)\n\tcosslat := math.Cos(slat)\n\n\tcsz := sinlat*sinslat + coslat*cosslat*math.Cos(omega)\n\n\tif csz > 1.0 {\n\t\tcsz = 1.0\n\t} else {\n\t\tif csz < -1.0 {\n\t\t\tcsz = -1.0\n\t\t}\n\t}\n\n\televation = math.Asin(csz)\n\tazDenom := coslat * math.Cos(elevation)\n\tazimuth = (sinlat*csz - sinslat) / azDenom\n\n\tif math.IsInf(azimuth, 0) {\n\t\tif lat > 0.0 {\n\t\t\tazimuth = math.Pi\n\t\t} else {\n\t\t\tazimuth = 0.0\n\t\t}\n\t} else {\n\t\tif math.Abs(azimuth) > 1.0 {\n\t\t\tif azimuth < 0.0 {\n\t\t\t\tazimuth = -1.0\n\t\t\t} else {\n\t\t\t\tazimuth = 1.0\n\t\t\t}\n\t\t}\n\n\t\tazimuth = math.Pi - math.Acos(azimuth)\n\t\tif omega > 0.0 {\n\t\t\tazimuth = -azimuth\n\t\t}\n\t}\n\n\tif azimuth < 0.0 {\n\t\tazimuth += 2.0 * math.Pi\n\t}\n\n\tif useArc {\n\t\televation += atmoRefractionCorrection(elevation)\n\t}\n\n\t// if elevation < degToRad*(-18.0) {\n\t// \tputs(\"A Night at the Roxbury\")\n\t// }\n\n\treturn\n}",
"func fnETransform(ctx Context, doc *JDoc, params []string) interface{} {\n\tstats := ctx.Value(EelTotalStats).(*ServiceStats)\n\tif params == nil || len(params) == 0 || len(params) > 1 {\n\t\tctx.Log().Error(\"error_type\", \"func_etransform\", \"op\", \"etransform\", \"cause\", \"wrong_number_of_parameters\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"wrong number of parameters in call to etransform function\"), \"etransform\", params})\n\t\treturn nil\n\t}\n\t// prepare event\n\tevent, err := NewJDocFromString(extractStringParam(params[0]))\n\tif err != nil {\n\t\tctx.Log().Error(\"error_type\", \"func_etransform\", \"op\", \"etransform\", \"cause\", \"invalid_json\", \"params\", params, \"error\", err.Error())\n\t\tstats.IncErrors()\n\t\tAddError(ctx, SyntaxError{fmt.Sprintf(\"non json parameters in call to etransform function\"), \"etransform\", params})\n\t\treturn nil\n\t}\n\t// pick handler\n\thandlers := GetHandlerFactory(ctx).GetHandlersForEvent(ctx, event)\n\tif len(handlers) == 0 {\n\t\tctx.Log().Error(\"error_type\", \"func_etransform\", \"op\", \"etransform\", \"cause\", \"no_matching_handler\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"no matching handler found in call to etransform function\"), \"etransform\", params})\n\t\treturn nil\n\t}\n\tif len(handlers) > 1 {\n\t\tctx.Log().Error(\"error_type\", \"func_etransform\", \"op\", \"etransform\", \"cause\", \"too_many_matching_handlers\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"too many matching handlers found in call to etransform function\"), \"etransform\", params})\n\t\treturn nil\n\t}\n\t// apply debug logs\n\tlogParams := GetConfig(ctx).LogParams\n\tif logParams != nil {\n\t\tfor k, v := range logParams {\n\t\t\tev := event.ParseExpression(ctx, v)\n\t\t\tctx.AddLogValue(k, ev)\n\t\t}\n\t}\n\t// apply handler / transformation\n\teps, err := 
handlers[0].ProcessEvent(Gctx.SubContext(), event)\n\tif err != nil {\n\t\tctx.Log().Error(\"error_type\", \"func_etransform\", \"op\", \"etransform\", \"cause\", \"bad_transformation\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"failed to process external transformation in call to etransform function\"), \"etransform\", params})\n\t\treturn nil\n\t}\n\tif len(eps) == 0 {\n\t\tctx.Log().Error(\"error_type\", \"func_etransform\", \"op\", \"etransform\", \"cause\", \"no_results\", \"params\", params)\n\t\tstats.IncErrors()\n\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"no results found in call to etransform function\"), \"etransform\", params})\n\t\treturn nil\n\t}\n\t// if this check is present some unit tests will fail\n\t/*if len(eps) > 1 {\n\t\tctx.Log().Error(\"error_type\", \"func_etransform\", \"op\", \"etransform\", \"cause\", \"too_many_results\", \"params\", params, \"count\", len(eps))\n\t\tstats.IncErrors()\n\t\tAddError(ctx, RuntimeError{fmt.Sprintf(\"too many results found in call to etransform function\"), \"etransform\", params})\n\t\treturn nil\n\t}*/\n\tresult := eps[0].GetPayloadParsed().GetOriginalObject()\n\treturn result\n}",
"func EquationOfCenter(solarAnomaly float64) float64 {\n\tvar (\n\t\tanomalyInRad = solarAnomaly * (math.Pi / 180)\n\t\tanomalySin = math.Sin(anomalyInRad)\n\t\tanomaly2Sin = math.Sin(2 * anomalyInRad)\n\t\tanomaly3Sin = math.Sin(3 * anomalyInRad)\n\t)\n\treturn 1.9148*anomalySin + 0.0200*anomaly2Sin + 0.0003*anomaly3Sin\n}",
"func NewEllipsoid(sys *System) (*Ellipsoid, error) {\n\tellipsoid := &Ellipsoid{}\n\n\terr := ellipsoid.initialize(sys)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ellipsoid, nil\n}",
"func get_arc_center(x1, y1, x2, y2, fa, fs, rx, ry, sin_phi, cos_phi float64) []float64 {\n\t// Step 1.\n\t//\n\t// Moving an ellipse so origin will be the middlepoint between our two\n\t// points. After that, rotate it to line up ellipse axes with coordinate\n\t// axes.\n\t//\n\tx1p := cos_phi*(x1-x2)/2 + sin_phi*(y1-y2)/2\n\ty1p := -sin_phi*(x1-x2)/2 + cos_phi*(y1-y2)/2\n\n\trx_sq := rx * rx\n\try_sq := ry * ry\n\tx1p_sq := x1p * x1p\n\ty1p_sq := y1p * y1p\n\n\t// Step 2.\n\t//\n\t// Compute coordinates of the centre of this ellipse (cx', cy')\n\t// in the new coordinate system.\n\t//\n\tradicant := (rx_sq * ry_sq) - (rx_sq * y1p_sq) - (ry_sq * x1p_sq)\n\n\tif radicant < 0 {\n\t\t// due to rounding errors it might be e.g. -1.3877787807814457e-17\n\t\tradicant = 0\n\t}\n\n\tradicant /= (rx_sq * y1p_sq) + (ry_sq * x1p_sq)\n\tsign := 1.0\n\tif fa == fs {\n\t\tsign = -1.0\n\t}\n\tradicant = math.Sqrt(radicant) * sign\n\n\tcxp := radicant * rx / ry * y1p\n\tcyp := radicant * -ry / rx * x1p\n\n\t// Step 3.\n\t//\n\t// Transform back to get centre coordinates (cx, cy) in the original\n\t// coordinate system.\n\t//\n\tcx := cos_phi*cxp - sin_phi*cyp + (x1+x2)/2\n\tcy := sin_phi*cxp + cos_phi*cyp + (y1+y2)/2\n\n\t// Step 4.\n\t//\n\t// Compute angles (theta1, delta_theta).\n\t//\n\tv1x := (x1p - cxp) / rx\n\tv1y := (y1p - cyp) / ry\n\tv2x := (-x1p - cxp) / rx\n\tv2y := (-y1p - cyp) / ry\n\n\ttheta1 := unit_vector_angle(1, 0, v1x, v1y)\n\tdelta_theta := unit_vector_angle(v1x, v1y, v2x, v2y)\n\n\tif fs == 0 && delta_theta > 0 {\n\t\tdelta_theta -= TAU\n\t}\n\tif fs == 1 && delta_theta < 0 {\n\t\tdelta_theta += TAU\n\t}\n\n\treturn []float64{cx, cy, theta1, delta_theta}\n}",
"func Erf(value gcv.Value) (gcv.Value, error) {\n\tif value.Type() == gcv.Complex {\n\t\treturn nil, errors.New(\"Erf is not supported for Complex numbers\")\n\t}\n\treturn gcv.MakeValue(math.Erf(value.Real())), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GoEform Earth reference ellipsoids.
|
func GoEform(n int) (a, f float64, err en.ErrNum) {
// Look up a and f for the specified reference ellipsoid.
switch n {
case WGS84:
a = 6378137.0
f = 1.0 / 298.257223563
case GRS80:
a = 6378137.0
f = 1.0 / 298.257222101
case WGS72:
a = 6378135.0
f = 1.0 / 298.26
default:
// Invalid identifier.
a = 0.0
f = 0.0
err = errEform.Set(-1)
}
return
}
|
[
"func NewEllipsoid(radius, flattening float64) *Ellipsoid {\n\te := new(Ellipsoid)\n\tC.geod_init(&e.g, C.double(radius), C.double(flattening))\n\treturn e\n}",
"func (e *Ellipsoid) Inverse(\n\tlat1, lon1, lat2, lon2 float64,\n\ts12, azi1, azi2 *float64,\n) {\n\tC.geod_inverse(&e.g,\n\t\tC.double(lat1), C.double(lon1), C.double(lat2), C.double(lon2),\n\t\t(*C.double)(s12), (*C.double)(azi1), (*C.double)(azi2))\n}",
"func calcEccentEarthOrbit(julianCentury []float64) (eccentEarthOrbit []float64) {\n\tfor index := 0; index < len(julianCentury); index++ {\n\t\ttemp := 0.016708634 - julianCentury[index]*(0.000042037+0.0000001267*julianCentury[index])\n\t\teccentEarthOrbit = append(eccentEarthOrbit, temp)\n\t}\n\treturn\n}",
"func eastDelta(e, w float64) float64 {\n\te = normEasting(e)\n\tw = normEasting(w)\n\tif e < w {\n\t\treturn 360 + e - w\n\t}\n\treturn e - w\n}",
"func SunEcef(t time.Time) (ecef [6]float64) {\n\tlatitude, longitude, radiusVector := SunLlr(t)\n\tecef = sunLlrToEcef(latitude, longitude, radiusVector)\n\treturn\n}",
"func TestEarthOrbit(t *testing.T) {\n\ts := NewIntegrator(SunGravity(), 1e-3)\n\tstart := Vec{EarthSMA, 0}\n\ts.P = start\n\ts.V = Vec{0, OrbitalV(SunMu, EarthSMA)}\n\ts.Advance(EarthP / 2)\n\thave := s.P\n\twant := Vec{-EarthSMA, 0}\n\n\tif have.Sub(want).Len() > EarthSMA/1e4 {\n\t\tt.Errorf(\"have: %v, want %v\", have, want)\n\t}\n}",
"func NewEllipsoid(sys *System) (*Ellipsoid, error) {\n\tellipsoid := &Ellipsoid{}\n\n\terr := ellipsoid.initialize(sys)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ellipsoid, nil\n}",
"func GetEquationOfCenter(anomaly float64) float64 {\n\t// The numbers being multiplied below are coefficients for the Equation of Center for Earth\n\tvar anomalyInRad float64\n\tanomalyInRad = anomaly * (math.Pi/180)\n\treturn 1.9148 * (math.Sin(anomalyInRad)) + 0.0200 * math.Sin(2 * anomalyInRad) + 0.0003 * math.Sin(3 * anomalyInRad)\n}",
"func East(value float64) *SimpleElement { return newSEFloat(\"east\", value) }",
"func (doc *DOC) Eig() []float64 {\n\n\treturn doc.eig\n}",
"func Project(zone int, south bool, latitude, longitude float64) (float64, float64) {\n\n\t// False northing\n\tfn := 0.\n\tif south {\n\t\tfn = utmSouthernHemisphereFalseNorthing\n\t}\n\n\th1 := n/2 - n2*2/3 + n3*5/16 + n4*41/180\n\th2 := n2*13/48 - n3*3/5 + n4*557/1440\n\th3 := n3*61/240 - n4*103/140\n\th4 := n4 * 49561 / 161280\n\n\tq := math.Asinh(math.Tan(latitude)) - e*math.Atanh(e*math.Sin(latitude))\n\tβ := math.Atan(math.Sinh(q))\n\n\tη0 := math.Atanh(math.Cos(β) * math.Sin(longitude-λO(zone)))\n\tξ0 := math.Asin(math.Sin(β) * math.Cosh(η0))\n\n\tη1 := h1 * math.Cos(2*ξ0) * math.Sinh(2*η0)\n\tη2 := h2 * math.Cos(4*ξ0) * math.Sinh(4*η0)\n\tη3 := h3 * math.Cos(6*ξ0) * math.Sinh(6*η0)\n\tη4 := h4 * math.Cos(8*ξ0) * math.Sinh(8*η0)\n\n\tξ1 := h1 * math.Sin(2*ξ0) * math.Cosh(2*η0)\n\tξ2 := h2 * math.Sin(4*ξ0) * math.Cosh(4*η0)\n\tξ3 := h3 * math.Sin(6*ξ0) * math.Cosh(6*η0)\n\tξ4 := h4 * math.Sin(8*ξ0) * math.Cosh(8*η0)\n\n\tξ := ξ0 + ξ1 + ξ2 + ξ3 + ξ4\n\tη := η0 + η1 + η2 + η3 + η4\n\n\te := fe + kO*b*η\n\tn := fn + kO*b*ξ\n\treturn e, n\n}",
"func earthRadius(x float64) float64 {\n\tmasm := 6378137.0 // Major semiaxis [m]\n\tmism := 6356752.3 // Minor semiaxis [m]\n\n\tan := masm * masm * math.Cos(x)\n\tbn := mism * mism * math.Sin(x)\n\tad := masm * math.Cos(x)\n\tbd := mism * math.Sin(x)\n\treturn math.Sqrt((an*an + bn*bn) / (ad*ad + bd*bd))\n}",
"func (me XsdGoPkgHasElem_East) EastDefault() Tangle180Type {\r\n\tvar x = new(Tangle180Type)\r\n\tx.Set(\"180.0\")\r\n\treturn *x\r\n}",
"func (me XsdGoPkgHasElems_East) EastDefault() Tangle180Type {\r\n\tvar x = new(Tangle180Type)\r\n\tx.Set(\"180.0\")\r\n\treturn *x\r\n}",
"func (b *Bound) East() float64 {\n\treturn b.ne[0]\n}",
"func (o *Orbit) Elements() (a, e, i, Ω, ω, ν, λ, tildeω, u float64) {\n\tif o.hashValid() {\n\t\treturn o.ccha, o.cche, o.cchi, o.cchΩ, o.cchω, o.cchν, o.cchλ, o.cchtildeω, o.cchu\n\t}\n\t// Algorithm from Vallado, 4th edition, page 113 (RV2COE).\n\thVec := Cross(o.rVec, o.vVec)\n\tn := Cross([]float64{0, 0, 1}, hVec)\n\tv := Norm(o.vVec)\n\tr := Norm(o.rVec)\n\tξ := (v*v)/2 - o.Origin.μ/r\n\ta = -o.Origin.μ / (2 * ξ)\n\teVec := make([]float64, 3, 3)\n\tfor i := 0; i < 3; i++ {\n\t\teVec[i] = ((v*v-o.Origin.μ/r)*o.rVec[i] - Dot(o.rVec, o.vVec)*o.vVec[i]) / o.Origin.μ\n\t}\n\te = Norm(eVec)\n\t// Prevent nil values for e\n\tif e < eccentricityε {\n\t\te = eccentricityε\n\t}\n\ti = math.Acos(hVec[2] / Norm(hVec))\n\tif i < angleε {\n\t\ti = angleε\n\t}\n\tω = math.Acos(Dot(n, eVec) / (Norm(n) * e))\n\tif math.IsNaN(ω) {\n\t\tω = 0\n\t}\n\tif eVec[2] < 0 {\n\t\tω = 2*math.Pi - ω\n\t}\n\tΩ = math.Acos(n[0] / Norm(n))\n\tif math.IsNaN(Ω) {\n\t\tΩ = angleε\n\t}\n\tif n[1] < 0 {\n\t\tΩ = 2*math.Pi - Ω\n\t}\n\tcosν := Dot(eVec, o.rVec) / (e * r)\n\tif abscosν := math.Abs(cosν); abscosν > 1 && floats.EqualWithinAbs(abscosν, 1, 1e-12) {\n\t\t// Welcome to the edge case which took about 1.5 hours of my time.\n\t\tcosν = Sign(cosν) // GTFO NaN!\n\t}\n\tν = math.Acos(cosν)\n\tif math.IsNaN(ν) {\n\t\tν = 0\n\t}\n\tif Dot(o.rVec, o.vVec) < 0 {\n\t\tν = 2*math.Pi - ν\n\t}\n\t// Fix rounding errors.\n\ti = math.Mod(i, 2*math.Pi)\n\tΩ = math.Mod(Ω, 2*math.Pi)\n\tω = math.Mod(ω, 2*math.Pi)\n\tν = math.Mod(ν, 2*math.Pi)\n\tλ = math.Mod(ω+Ω+ν, 2*math.Pi)\n\ttildeω = math.Mod(ω+Ω, 2*math.Pi)\n\tif e < eccentricityε {\n\t\t// Circular\n\t\tu = math.Acos(Dot(n, o.rVec) / (Norm(n) * r))\n\t} else {\n\t\tu = math.Mod(ν+ω, 2*math.Pi)\n\t}\n\t// Cache values\n\to.ccha = a\n\to.cche = e\n\to.cchi = i\n\to.cchΩ = Ω\n\to.cchω = ω\n\to.cchν = ν\n\to.cchλ = λ\n\to.cchtildeω = tildeω\n\to.cchu = u\n\to.computeHash()\n\treturn\n}",
"func NewOrbitFromOE(a, e, i, Ω, ω, ν float64, c CelestialObject) *Orbit {\n\t// Convert angles to radians\n\ti = i * deg2rad\n\tΩ = Ω * deg2rad\n\tω = ω * deg2rad\n\tν = ν * deg2rad\n\n\t// Algorithm from Vallado, 4th edition, page 118 (COE2RV).\n\tif e < eccentricityε {\n\t\t// Circular...\n\t\tif i < angleε {\n\t\t\t// ... equatorial\n\t\t\tΩ = 0\n\t\t\tω = 0\n\t\t\tν = math.Mod(ω+Ω+ν, 2*math.Pi)\n\t\t} else {\n\t\t\t// ... inclined\n\t\t\tω = 0\n\t\t\tν = math.Mod(ν+ω, 2*math.Pi)\n\t\t}\n\t} else if i < angleε && !(c.Equals(Sun) && config.meeus) {\n\t\t// Meeus breaks if doing this correction by Vallado\n\t\t// Elliptical equatorial\n\t\tΩ = 0\n\t\tω = math.Mod(ω+Ω, 2*math.Pi)\n\t}\n\tp := a * (1 - e*e)\n\tif floats.EqualWithinAbs(e, 1, eccentricityε) || e > 1 {\n\t\tpanic(\"[ERROR] should initialize parabolic or hyperbolic orbits with R, V\")\n\t}\n\tμOp := math.Sqrt(c.μ / p)\n\tsinν, cosν := math.Sincos(ν)\n\trPQW := []float64{p * cosν / (1 + e*cosν), p * sinν / (1 + e*cosν), 0}\n\tvPQW := []float64{-μOp * sinν, μOp * (e + cosν), 0}\n\trIJK := Rot313Vec(-ω, -i, -Ω, rPQW)\n\tvIJK := Rot313Vec(-ω, -i, -Ω, vPQW)\n\torbit := Orbit{rIJK, vIJK, c, a, e, i, Ω, ω, ν, 0, 0, 0, 0.0}\n\torbit.Elements()\n\treturn &orbit\n}",
"func el(y float64, a *ca, t, e [][]float64) (jde float64, elongation unit.Angle) {\n\tJ, M, T := mean(y, micA)\n\treturn J + sum(T, M, t), unit.AngleFromDeg(sum(T, M, e))\n}",
"func FromLatLonF(lat, lon float64) (easting, northing float64) {\n\tif !(-80.0 <= lat && lat <= 84.0) {\n\t\tpanic(\"latitude out of range (must be between 80 deg S and 84 deg N)\")\n\t}\n\tif !(-180.0 <= lon && lon <= 180.0) {\n\t\tpanic(\"longitude out of range (must be between 180 deg W and 180 deg E)\")\n\t}\n\n\tlat_rad := rad(lat)\n\tlat_sin := math.Sin(lat_rad)\n\tlat_cos := math.Cos(lat_rad)\n\n\tlat_tan := lat_sin / lat_cos\n\tlat_tan2 := lat_tan * lat_tan\n\tlat_tan4 := lat_tan2 * lat_tan2\n\tzoneNumber := int((lon + 180) / 6) + 1\n\tif 56 <= lat && lat <= 64 && 3 <= lon && lon <= 12 {\n\t\tzoneNumber = 32\n\t}\n\tif 72 <= lat && lat <= 84 && lon >= 0 {\n\t\tif lon <= 9 {\n\t\t\tzoneNumber = 31\n\t\t} else if lon <= 21 {\n\t\t\tzoneNumber = 33\n\t\t} else if lon <= 33 {\n\t\t\tzoneNumber = 35\n\t\t} else if lon <= 42 {\n\t\t\tzoneNumber = 37\n\t\t}\n\t}\n\tcentral_lon := (zoneNumber - 1) * 6 - 180 + 3\n\n\tlon_rad := rad(lon)\n\tcentral_lon_rad := rad(float64(central_lon))\n\n\tn := r / math.Sqrt(1 - e * lat_sin * lat_sin)\n\tc := e_p2 * lat_cos * lat_cos\n\n\ta := lat_cos * (lon_rad - central_lon_rad)\n\ta2 := a * a\n\ta3 := a2 * a\n\ta4 := a3 * a\n\ta5 := a4 * a\n\ta6 := a5 * a\n\tm := r * (m1 * lat_rad -\n\t\tm2 * math.Sin(2 * lat_rad) +\n\t\tm3 * math.Sin(4 * lat_rad) -\n\t\tm4 * math.Sin(6 * lat_rad))\n\teasting = k0 * n * (a +\n\t\ta3 / 6 * (1 - lat_tan2 + c) +\n\t\ta5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * e_p2)) + 500000\n\tnorthing = k0 * (m + n * lat_tan * (a2 / 2 +\n\t\ta4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c * c) +\n\t\ta6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * e_p2)))\n\n\tif lat < 0 {\n\t\tnorthing += 10000000\n\t}\n\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register a new tunnel on this control connection
|
// registerTunnel registers one tunnel for each protocol named in the
// request's Protocol field (multiple protocols are joined with "+").
// On the first failure it acknowledges with an error: if this control
// connection has no tunnels yet, the entire connection is terminated;
// otherwise only the client is informed. Each success is acknowledged
// back to the client, and the request's Hostname is rewritten from the
// newly assigned URL so subsequent protocols in the list reuse it.
func (c *Control) registerTunnel(rawTunnelReq *msg.ReqTunnel) {
	for _, proto := range strings.Split(rawTunnelReq.Protocol, "+") {
		// work on a per-protocol copy so the original request survives the loop
		req := *rawTunnelReq
		req.Protocol = proto
		c.conn.Debug("Registering new tunnel")

		tun, err := NewTunnel(&req, c)
		if err != nil {
			failure := &msg.NewTunnel{Error: err.Error()}
			if len(c.tunnels) > 0 {
				// inform client of failure
				c.out <- failure
			} else {
				// you can't fail your first tunnel registration
				// terminate the control connection
				c.stop <- failure
			}
			// we're done
			return
		}

		// add it to the list of tunnels
		c.tunnels = append(c.tunnels, tun)

		// acknowledge success
		c.out <- &msg.NewTunnel{
			Url:      tun.url,
			Protocol: proto,
			ReqId:    rawTunnelReq.ReqId,
		}

		rawTunnelReq.Hostname = strings.Replace(tun.url, proto+"://", "", 1)
	}
}
|
[
"func (r *TunnelRegistry) Register(url string, t *Tunnel) error {\n\tr.Lock()\n\tdefer r.Unlock()\n\n\tif r.tunnels[url] != nil {\n\t\treturn fmt.Errorf(\"The tunnel %s is already registered.\", url)\n\t}\n\n\tr.tunnels[url] = t\n\n\treturn nil\n}",
"func (rndr *Renderer) AddGreTunnel(serviceInfo *common.ServiceInfo, sp *sasemodel.IPSecVpnTunnel, reSync bool) error {\n\tvppGreTunnel := &vpp_interfaces.GreLink{\n\t\tTunnelType: vpp_interfaces.GreLink_L3,\n\t\tSrcAddr: sp.TunnelSourceIp,\n\t\tDstAddr: sp.TunnelDestinationIp,\n\t}\n\n\tvppGreInterface := &vpp_interfaces.Interface{\n\t\tName: sp.TunnelName,\n\t\tType: vpp_interfaces.Interface_GRE_TUNNEL,\n\t\tEnabled: true,\n\t\tLink: &vpp_interfaces.Interface_Gre{\n\t\t\tGre: vppGreTunnel,\n\t\t},\n\t}\n\n\t// Check for Tunnel Interface IP configuration\n\tif sp.InterfaceType == config.UnnumberedIP {\n\t\tintfName := rndr.GetInterfaceNameWithIP(serviceInfo, sp.TunnelSourceIp)\n\t\trndr.Log.Debug(\"AddGreTunnel: unnummbered Interface: \", intfName)\n\t\tif intfName != config.Invalid {\n\t\t\tvppGreInterface.Unnumbered = &vpp_interfaces.Interface_Unnumbered{\n\t\t\t\tInterfaceWithIp: intfName,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvppGreInterface.IpAddresses = append(vppGreInterface.IpAddresses, sp.TunnelSourceIp)\n\t}\n\n\trndr.Log.Info(\"AddGreTunnel: vppGreInterface: \", vppGreInterface)\n\n\t// Test Purpose\n\tif rndr.MockTest {\n\t\treturn renderer.MockCommit(serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppGreInterface.Name), vppGreInterface, config.Add)\n\t}\n\n\t// Commit is for local base vpp vswitch\n\tif serviceInfo.GetServicePodLabel() == common.GetBaseServiceLabel() {\n\t\trndr.Log.Info(\" AddGreTunnel: Post txn to local vpp agent\",\n\t\t\t\"Key: \", vpp_interfaces.InterfaceKey(vppGreInterface.Name), \"Value: \", vppGreInterface)\n\t\tif reSync == true {\n\t\t\ttxn := rndr.ResyncTxnFactory()\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppGreInterface.Name), vppGreInterface)\n\t\t} else {\n\t\t\ttxn := rndr.UpdateTxnFactory(fmt.Sprintf(\"GreTunnel %s\", vpp_interfaces.InterfaceKey(vppGreInterface.Name)))\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppGreInterface.Name), vppGreInterface)\n\t\t}\n\t} else {\n\t\treturn 
renderer.Commit(rndr.RemoteDB, serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppGreInterface.Name), vppGreInterface, config.Add)\n\t}\n\treturn nil\n}",
"func (na *Nagent) CreateTunnel(tun *netproto.Tunnel) error {\n\terr := na.validateMeta(tun.Kind, tun.ObjectMeta)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// check if tunnel already exists\n\toldTun, err := na.FindTunnel(tun.ObjectMeta)\n\tif err == nil {\n\t\t// check if tunnel contents are same\n\t\tif !proto.Equal(&oldTun.Spec, &tun.Spec) {\n\t\t\tlog.Errorf(\"Tunnel %+v already exists\", oldTun)\n\t\t\treturn errors.New(\"tunnel already exists\")\n\t\t}\n\n\t\tlog.Infof(\"Received duplicate tunnel create for {%+v}\", tun)\n\t\treturn nil\n\t}\n\t// find the corresponding namespace\n\tns, err := na.FindNamespace(tun.ObjectMeta)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// find the corresponding vrf for the route\n\tvrf, err := na.ValidateVrf(tun.Tenant, tun.Namespace, tun.Spec.VrfName)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to find the vrf %v\", tun.Spec.VrfName)\n\t\treturn err\n\t}\n\t// Allocate ID only on first object creates and use existing ones during config replay\n\tif tun.Status.TunnelID == 0 {\n\t\t// Tunnel IDs and Interface IDs must be unique in the datapath as tunnel is modeled as an interface in HAL.\n\t\ttunnelID, err := na.Store.GetNextID(types.InterfaceID, 0)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not allocate tunnel id. {%+v}\", err)\n\t\t\treturn err\n\t\t}\n\t\ttun.Status.TunnelID = tunnelID + types.UplinkOffset + types.TunnelOffset\n\t}\n\n\t// create it in datapath\n\terr = na.Datapath.CreateTunnel(tun, vrf)\n\tif err != nil {\n\t\tlog.Errorf(\"Error creating tunnel in datapath. Nw {%+v}. Err: %v\", tun, err)\n\t\treturn err\n\t}\n\n\t// Add the current tunnel as a dependency to the namespace.\n\terr = na.Solver.Add(ns, tun)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not add dependency. Parent: %v. Child: %v\", ns, tun)\n\t\treturn err\n\t}\n\n\t// Add the current tunnel as a dependency to the vrf.\n\terr = na.Solver.Add(vrf, tun)\n\tif err != nil {\n\t\tlog.Errorf(\"Could not add dependency. Parent: %v. 
Child: %v\", vrf, tun)\n\t\treturn err\n\t}\n\n\t// save it in db\n\tkey := na.Solver.ObjectKey(tun.ObjectMeta, tun.TypeMeta)\n\tna.Lock()\n\tna.TunnelDB[key] = tun\n\tna.Unlock()\n\tdat, _ := tun.Marshal()\n\terr = na.Store.RawWrite(tun.GetKind(), tun.GetKey(), dat)\n\n\treturn err\n}",
"func (db *DB) NewTunnel(tunnel *Tunnel) error {\n\tq := `\nINSERT INTO tunnel (port, forward_port, forward_address) VALUES(?, ?, ?);\n `\n\t_, err := db.Exec(q, tunnel.Port, tunnel.ForwardPort, tunnel.ForwardAddress)\n\treturn err\n}",
"func (hd *Datapath) CreateTunnel(tun *netproto.Tunnel, vrf *netproto.Vrf) error {\n\t// This will ensure that only one datapath config will be active at a time. This is a temporary restriction\n\t// to ensure that HAL will use a single config thread , this will be removed prior to FCS to allow parallel configs to go through.\n\t// TODO Remove Global Locking\n\thd.Lock()\n\tdefer hd.Unlock()\n\n\tvrfKey := &halproto.VrfKeyHandle{\n\t\tKeyOrHandle: &halproto.VrfKeyHandle_VrfId{\n\t\t\tVrfId: vrf.Status.VrfID,\n\t\t},\n\t}\n\n\tifInfo, err := convertIfInfo(&tun.Spec, vrfKey)\n\tif err != nil {\n\t\tlog.Errorf(\"invalid tunnel interface spec. Tunnel: {%v} Err: %v\", tun.Spec, err)\n\t\treturn err\n\t}\n\n\tstatus, err := hd.convertIfAdminStatus(tun.Spec.AdminStatus)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttunReqMsg := &halproto.InterfaceRequestMsg{\n\t\tRequest: []*halproto.InterfaceSpec{\n\t\t\t{\n\t\t\t\tKeyOrHandle: &halproto.InterfaceKeyHandle{\n\t\t\t\t\tKeyOrHandle: &halproto.InterfaceKeyHandle_InterfaceId{\n\t\t\t\t\t\tInterfaceId: tun.Status.TunnelID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tType: halproto.IfType_IF_TYPE_TUNNEL,\n\t\t\t\tAdminStatus: status,\n\t\t\t\tIfInfo: ifInfo,\n\t\t\t},\n\t\t},\n\t}\n\n\t// create route object\n\tif hd.Kind == \"hal\" {\n\t\tresp, err := hd.Hal.Ifclient.InterfaceCreate(context.Background(), tunReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error creating tunnel interface. Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif !(resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_OK || resp.Response[0].ApiStatus == halproto.ApiStatus_API_STATUS_EXISTS_ALREADY) {\n\t\t\tlog.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t\treturn fmt.Errorf(\"HAL returned non OK status. %v\", resp.Response[0].ApiStatus.String())\n\t\t}\n\t} else {\n\t\t_, err := hd.Hal.Ifclient.InterfaceCreate(context.Background(), tunReqMsg)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error creating tunnel interface. 
Err: %v\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n\n}",
"func newTunnel(opts ...Option) *tun {\n\toptions := DefaultOptions()\n\tfor _, o := range opts {\n\t\to(&options)\n\t}\n\n\treturn &tun{\n\t\toptions: options,\n\t\tsend: make(chan *message, 128),\n\t\tclosed: make(chan bool),\n\t\tsockets: make(map[string]*socket),\n\t\tlinks: make(map[string]*link),\n\t}\n}",
"func newTunnel(nc *nats.Conn, subject string, readTimeout time.Duration, respHandler func(response *Response)) *Tunnel {\n\treturn &Tunnel{\n\t\tsubject: subject,\n\t\tnc: nc,\n\t\tdone: make(chan bool),\n\t\trespHandler: respHandler,\n\t\trandSuffix: &RandomSuffix{\n\t\t\trandomGenerator: rand.New(rand.NewSource(time.Now().UnixNano())), //nolint gosec\n\t\t},\n\t\tmon: tunnelMon{\n\t\t\treadTimeout: readTimeout,\n\t\t},\n\t}\n}",
"func (c *service) OpenTunnel(localAddress, remoteAddress string) error {\n\tlocal, err := net.Listen(\"tcp\", localAddress)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to listen on local: %v %v\", localAddress, err)\n\t}\n\tvar forwarding = NewForwarding(c.client, remoteAddress, local)\n\tif len(c.forwarding) == 0 {\n\t\tc.forwarding = make([]*Tunnel, 0)\n\t}\n\tc.forwarding = append(c.forwarding, forwarding)\n\tgo forwarding.Handle()\n\treturn nil\n}",
"func (c *service) OpenTunnel(localAddress, remoteAddress string) error {\n\tlocal, err := net.Listen(\"tcp\", localAddress)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"failed to listen on local: %v\", localAddress))\n\t}\n\tvar forwarding = NewForwarding(c.client, remoteAddress, local)\n\tif len(c.forwarding) == 0 {\n\t\tc.forwarding = make([]*Tunnel, 0)\n\t}\n\tc.forwarding = append(c.forwarding, forwarding)\n\tgo forwarding.Handle()\n\treturn nil\n}",
"func (sc stakingClient) RegisterProxy(fromInfo keys.Info, passWd, memo string, accNum, seqNum uint64) (\n\tresp sdk.TxResponse, err error) {\n\tif err = params.CheckKeyParams(fromInfo, passWd); err != nil {\n\t\treturn\n\t}\n\n\tmsg := types.NewMsgRegProxy(fromInfo.GetAddress(), true)\n\n\treturn sc.BuildAndBroadcast(fromInfo.GetName(), passWd, memo, []sdk.Msg{msg}, accNum, seqNum)\n\n}",
"func NewTunnel(dst, src string) *Tunnel {\n\ttunnel := &Tunnel{Src: src, Dst: dst}\n\treturn tunnel\n}",
"func (rp *HTTPReverseProxy) Register(routeCfg RouteConfig) error {\n\terr := rp.vhostRouter.Add(routeCfg.Domain, routeCfg.Location, routeCfg.RouteByHTTPUser, &routeCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (rndr *Renderer) AddIPSecIPinIPVpnTunnel(serviceInfo *common.ServiceInfo, sp *sasemodel.IPSecVpnTunnel, reSync bool) error {\n\tvppIPIPTunnel := &vpp_interfaces.IPIPLink{\n\t\tTunnelMode: vpp_interfaces.IPIPLink_POINT_TO_POINT,\n\t\tSrcAddr: sp.TunnelSourceIp,\n\t\tDstAddr: sp.TunnelDestinationIp,\n\t}\n\n\tvppIPinIPInterface := &vpp_interfaces.Interface{\n\t\tName: sp.TunnelName,\n\t\tType: vpp_interfaces.Interface_IPIP_TUNNEL,\n\t\tEnabled: true,\n\t\tLink: &vpp_interfaces.Interface_Ipip{\n\t\t\tIpip: vppIPIPTunnel,\n\t\t},\n\t}\n\n\t// Check for Tunnel Interface IP configuration\n\tif sp.InterfaceType == config.UnnumberedIP {\n\t\tintfName := rndr.GetInterfaceNameWithIP(serviceInfo, sp.TunnelSourceIp)\n\t\trndr.Log.Debug(\"AddIPSecIPinIPVpnTunnel: unnummbered Interface: \", intfName)\n\t\tif intfName != config.Invalid {\n\t\t\tvppIPinIPInterface.Unnumbered = &vpp_interfaces.Interface_Unnumbered{\n\t\t\t\tInterfaceWithIp: intfName,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tvppIPinIPInterface.IpAddresses = append(vppIPinIPInterface.IpAddresses, sp.TunnelSourceIp)\n\t}\n\n\trndr.Log.Info(\"AddIPSecIPinIPVpnTunnel: vppIPinIPInterface: \", vppIPinIPInterface)\n\n\t// Test Purpose\n\tif rndr.MockTest {\n\t\treturn renderer.MockCommit(serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface, config.Add)\n\t}\n\n\t// Commit is for local base vpp vswitch\n\tif serviceInfo.GetServicePodLabel() == common.GetBaseServiceLabel() {\n\t\trndr.Log.Info(\" AddIPSecIPinIPVpnTunnel: Post txn to local vpp agent\",\n\t\t\t\"Key: \", vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), \"Value: \", vppIPinIPInterface)\n\t\tif reSync == true {\n\t\t\ttxn := rndr.ResyncTxnFactory()\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface)\n\t\t} else {\n\t\t\ttxn := rndr.UpdateTxnFactory(fmt.Sprintf(\"IPinIPVpnTunnel %s\", 
vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name)))\n\t\t\ttxn.Put(vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface)\n\t\t}\n\t} else {\n\t\trenderer.Commit(rndr.RemoteDB, serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPinIPInterface.Name), vppIPinIPInterface, config.Add)\n\t}\n\n\t// Get Security Association Information from the SA Name reference\n\tsa, err := rndr.CacheSAConfigGet(sp.SecurityAssociation)\n\tif err != nil {\n\t\trndr.Log.Debug(\"AddIPSecIPinIPVpnTunnel: Security Association Not Found: \", sp.SecurityAssociation)\n\n\t\t// Add the dependency in the pending tunnel protect list\n\t\trndr.AddTunnelToPendingTunnelProtectList(sp.TunnelName, sp.SecurityAssociation)\n\t\treturn nil\n\t}\n\n\tvar saIn, saOut []uint32\n\tsaIn = append(saIn, uint32(sa.InboundID))\n\tsaOut = append(saOut, uint32(sa.OutboundID))\n\n\trndr.Log.Info(\"AddIPSecIPinIPVpnTunnel: Protect the Tunnel with SA: \")\n\treturn rndr.IPSecTunnelProtectionAdd(serviceInfo, sp.TunnelName, saIn, saOut, reSync)\n}",
"func (s *replayService) OpenTunnel(localAddress, remoteAddress string) error {\n\treturn nil\n}",
"func (node *Proxy) Register() error {\n\tnode.session.Register()\n\tmetrics.NumNodes.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), typeutil.ProxyRole).Inc()\n\tlog.Info(\"Proxy Register Finished\")\n\tnode.session.LivenessCheck(node.ctx, func() {\n\t\tlog.Error(\"Proxy disconnected from etcd, process will exit\", zap.Int64(\"Server Id\", node.session.ServerID))\n\t\tif err := node.Stop(); err != nil {\n\t\t\tlog.Fatal(\"failed to stop server\", zap.Error(err))\n\t\t}\n\t\tmetrics.NumNodes.WithLabelValues(fmt.Sprint(paramtable.GetNodeID()), typeutil.ProxyRole).Dec()\n\t\tif node.session.TriggerKill {\n\t\t\tif p, err := os.FindProcess(os.Getpid()); err == nil {\n\t\t\t\tp.Signal(syscall.SIGINT)\n\t\t\t}\n\t\t}\n\t})\n\t// TODO Reset the logger\n\t//Params.initLogCfg()\n\treturn nil\n}",
"func (rndr *Renderer) AddIPSecVpnTunnel(serviceInfo *common.ServiceInfo, sp *sasemodel.IPSecVpnTunnel) error {\n\tvppIPSecTunnel := &vpp_interfaces.IPSecLink{\n\t\tLocalIp: sp.TunnelSourceIp,\n\t\tRemoteIp: sp.TunnelDestinationIp,\n\t\tLocalSpi: config.DefaultOutboundSPIIndex,\n\t\tRemoteSpi: config.DefaultInboundSPIIndex,\n\t}\n\n\tvppIPSecInterface := &vpp_interfaces.Interface{\n\t\tName: sp.TunnelName,\n\t\tType: vpp_interfaces.Interface_IPSEC_TUNNEL,\n\t\tEnabled: true,\n\t\tIpAddresses: []string{sp.TunnelSourceIp},\n\t\tLink: &vpp_interfaces.Interface_Ipsec{\n\t\t\tIpsec: vppIPSecTunnel,\n\t\t},\n\t}\n\n\trndr.Log.Infof(\"AddIPSecVpnTunnel: vppIPSecInterface: %v\", vppIPSecInterface)\n\n\t// Test Purpose\n\tif rndr.MockTest {\n\t\treturn renderer.MockCommit(serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPSecInterface.Name), vppIPSecInterface, config.Add)\n\t}\n\n\treturn renderer.Commit(rndr.RemoteDB, serviceInfo.GetServicePodLabel(), vpp_interfaces.InterfaceKey(vppIPSecInterface.Name), vppIPSecInterface, config.Add)\n}",
"func (rndr *Renderer) IPSecTunnelProtectionAdd(serviceInfo *common.ServiceInfo, tunnelName string, saIn, saOut []uint32, reSync bool) error {\n\n\ttunnelProtect := &vpp_ipsec.TunnelProtection{\n\t\tInterface: tunnelName,\n\t\tSaIn: saIn,\n\t\tSaOut: saOut,\n\t}\n\n\trndr.Log.Info(\"IPSecTunnelProtectionAdd: tunnelProtect: \", tunnelProtect)\n\n\t// Test Purpose\n\tif rndr.MockTest {\n\t\treturn renderer.MockCommit(serviceInfo.GetServicePodLabel(), models.Key(tunnelProtect), tunnelProtect, config.Add)\n\t}\n\n\t// Commit is for local base vpp vswitch\n\tif serviceInfo.GetServicePodLabel() == common.GetBaseServiceLabel() {\n\t\trndr.Log.Info(\" IPSecTunnelProtectionAdd: Post txn to local vpp agent\",\n\t\t\t\"Key: \", models.Key(tunnelProtect), \"Value: \", tunnelProtect)\n\t\tif reSync == true {\n\t\t\ttxn := rndr.ResyncTxnFactory()\n\t\t\ttxn.Put(models.Key(tunnelProtect), tunnelProtect)\n\t\t} else {\n\t\t\ttxn := rndr.UpdateTxnFactory(fmt.Sprintf(\"IPSecTunnelProtectionAdd %s\", models.Key(tunnelProtect)))\n\t\t\ttxn.Put(models.Key(tunnelProtect), tunnelProtect)\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn renderer.Commit(rndr.RemoteDB, serviceInfo.GetServicePodLabel(), models.Key(tunnelProtect), tunnelProtect, config.Add)\n\n}",
"func (rp *HTTPReverseProxy) Register(routeCfg RouteConfig) error {\n\terr := rp.vhostRouter.Add(routeCfg.Domain, routeCfg.Location, &routeCfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (p *Pool) Tunnel(host string, local, remote string) (*Tunnel, error) {\n\n\tlistener, err := net.Listen(\"tcp\", local)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttunnel := &Tunnel{\n\t\tlistener: listener,\n\t\thost: host,\n\t\tremote: remote,\n\t\tpool: p,\n\t}\n\n\tgo tunnel.accept()\n\n\treturn tunnel, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove a proxy connection from the pool and return it. If no proxy connections are in the pool, request one and wait until it is available. Returns an error if we couldn't get a proxy because it took too long or the tunnel is closing
|
// GetProxy removes a proxy connection from the pool and returns it.
// If no proxy connections are in the pool, it requests one over the
// control channel and waits until one becomes available. It returns an
// error if the control connection is closing before a proxy arrives.
func (c *Control) GetProxy() (proxyConn conn.Conn, err error) {
	// initial timeout is zero to try to get a proxy connection without asking for one
	timeout := time.NewTimer(0)
	// stop the timer on exit so we don't leave a pending timer running
	// after a proxy has been obtained
	defer timeout.Stop()

	// get a proxy connection. if we timeout, request one over the control channel
	for proxyConn == nil {
		var ok bool
		select {
		case proxyConn, ok = <-c.proxies:
			if !ok {
				err = fmt.Errorf("No proxy connections available, control is closing")
				return
			}
			continue
		case <-timeout.C:
			c.conn.Debug("Requesting new proxy connection")
			// request a proxy connection
			c.out <- &msg.ReqProxy{}
			// timeout after 1 second if we don't get one
			timeout.Reset(1 * time.Second)
		}
	}

	// To try to reduce latency handling tunnel connections, we employ
	// the following crude heuristic:
	// If the proxy connection pool is empty, request a new one.
	// The idea is to always have at least one proxy connection available for immediate use.
	// There are two major issues with this strategy: it's not thread safe and it's not predictive.
	// It should be a good start though.
	if len(c.proxies) == 0 {
		c.out <- &msg.ReqProxy{}
	}
	return
}
|
[
"func (router *Router) GetTunnel() *Tunnel {\n\trouter.mu.Lock()\n\tdefer router.mu.Unlock()\n\n\t// Keep trying to find a healthy tunnel until the pool exhausted and\n\t// actively remove unhealthy tunnels from the pool.\n\tfor {\n\t\tif len(router.pool) < 1 {\n\t\t\treturn nil\n\t\t}\n\t\ti := rand.Intn(len(router.pool))\n\t\ttunnel := router.pool[i]\n\t\tif err := tunnel.Err(); err != nil {\n\t\t\trouter.pool = append(router.pool[:i], router.pool[i+1:]...)\n\t\t\tlog.Println(\"Tunnel removed:\", tunnel.conn.RemoteAddr().String()+\":\", err)\n\t\t\tcontinue\n\t\t}\n\t\treturn tunnel\n\t}\n}",
"func (nc *NodeController) getTunnel(reqBrokerMsg *ReqBroker) (tunConn net.Conn, err error) {\n\tvar ok bool\n\tfor {\n\t\tfor {\n\t\t\t// get a tunnel connection from the pool\n\t\t\tselect {\n\t\t\tcase tunConn, ok = <-nc.tunnels:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, errors.New(\"no tunnel connections available, control is closing\")\n\t\t\t\t}\n\t\t\t\tlog.Debug(\"NodeController::getTunnel get a tunnel connection from pool\")\n\t\t\t\tgoto end\n\t\t\tdefault:\n\t\t\t\t// no tunnel available in the pool, ask for one over the control channel\n\t\t\t\tlog.Debug(\"NodeController::getTunnel no tunnel in pool, send ReqTunnel message...\")\n\t\t\t\tif err = util.PanicToError(func() { nc.out <- new(ReqTunnel) }); err != nil {\n\t\t\t\t\t// c.out is closed, impossible to get a tunnel connection\n\t\t\t\t\tlog.Debug(\"NodeController::getTunnel send message to c.out error: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(dialTimeout):\n\t\t\t\t\t// try again, never stop believing\n\t\t\t\t\tcontinue\n\t\t\t\tcase tunConn, ok = <-nc.tunnels:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.New(\"no tunnel connections available, control is closing\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Debug(\"NodeController::getTunnel get a tunnel connection after sending ReqTunnel\")\n\t\t\t\tgoto end\n\t\t\t}\n\t\t}\n\n\tend:\n\t\t{\n\t\t\t// try to send StartTunnel message\n\t\t\tif err := WriteMsg(tunConn, &StartTunnel{*reqBrokerMsg}); err != nil {\n\t\t\t\t// this tunnel connection is reached deadline\n\t\t\t\tlog.Debug(\"NodeController::getTunnel failed to send ping: %v\", err)\n\t\t\t\ttunConn.Close()\n\t\t\t\ttunConn = nil\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// receive response message\n\t\t\tresp := new(AccessResp)\n\t\t\ttunConn.SetReadDeadline(time.Now().Add(rwTimeout))\n\t\t\tif err := ReadMsgInto(tunConn, resp); err != nil {\n\t\t\t\tlog.Debug(\"NodeController::getTunnel failed to receive response message: %v\", 
err)\n\t\t\t\ttunConn.Close()\n\t\t\t\ttunConn = nil\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif resp.Error != \"\" {\n\t\t\t\tlog.Debug(\"NodeController::getTunnel failed with response: %s\", resp.Error)\n\t\t\t\ttunConn.Close()\n\t\t\t\treturn nil, errors.New(resp.Error)\n\t\t\t}\n\n\t\t\ttunConn.SetDeadline(time.Time{})\n\n\t\t\tutil.PanicToError(func() { nc.out <- new(ReqTunnel) })\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}",
"func (p *Pool) Get() (*redis.Client, error) {\n\tselect {\n\tcase conn := <- p.pool:\n\t\treturn conn, nil\n\n\tdefault:\n\t\tselect {\n\t\tcase conn := <- p.pool:\n\t\t\treturn conn, nil\n\n\t\tcase addr := <- p.spare:\n\t\t\tvar conn *redis.Client\n\t\t\tvar err error\n\n\t\t\tdefer func() {\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.replenish(p.network, addr)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tconn, err = p.df(p.network, addr)\n\t\t\treturn conn, err\n\n\t\tcase <-time.After(time.Second * 5):\n\t\t\treturn nil, errors.New(\"pool exhausted\")\n\t\t}\n\t}\n}",
"func (pool *Pool) Get() (*Conn, error) {\n\n\tfor {\n\t\tif n := atomic.LoadInt32(&pool.numIdle); n > 0 {\n\t\t\tif atomic.CompareAndSwapInt32(&pool.numIdle, n, n-1) {\n\t\t\t\tdeadline := pool.Deadline()\n\t\t\t\treturn pool.get(deadline.UnixNano())\n\t\t\t}\n\t\t} else if n < 0 {\n\t\t\treturn nil, errPoolClosed\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tmax := pool.maxConnections()\n\tfor {\n\t\tif n := atomic.LoadInt32(&pool.numOpen); 0 <= n && n < max {\n\t\t\tif atomic.CompareAndSwapInt32(&pool.numOpen, n, n+1) {\n\t\t\t\tconn, err := pool.dial()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\treturn conn, nil\n\t\t\t}\n\t\t} else if n < 0 {\n\t\t\treturn nil, errPoolClosed\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tdeadline := pool.Deadline()\n\treturn pool.get(deadline.UnixNano())\n}",
"func (s *ProxyServer) Proxy(ctx context.Context, proxyCtx *common.ProxyContext, clientConn, serviceConn net.Conn) error {\n\t// Wrap a client connection into monitor that auto-terminates\n\t// idle connection and connection with expired cert.\n\ttc, err := monitorConn(ctx, monitorConnConfig{\n\t\tconn: clientConn,\n\t\tlockWatcher: s.cfg.LockWatcher,\n\t\tlockTargets: proxyCtx.AuthContext.LockTargets(),\n\t\tidentity: proxyCtx.AuthContext.Identity.GetIdentity(),\n\t\tchecker: proxyCtx.AuthContext.Checker,\n\t\tclock: s.cfg.Clock,\n\t\tserverID: s.cfg.ServerID,\n\t\tauthClient: s.cfg.AuthClient,\n\t\tteleportUser: proxyCtx.AuthContext.Identity.GetIdentity().Username,\n\t\temitter: s.cfg.Emitter,\n\t\tlog: s.log,\n\t\tctx: s.closeCtx,\n\t})\n\tif err != nil {\n\t\tclientConn.Close()\n\t\tserviceConn.Close()\n\t\treturn trace.Wrap(err)\n\t}\n\terrCh := make(chan error, 2)\n\tgo func() {\n\t\tdefer s.log.Debug(\"Stop proxying from client to service.\")\n\t\tdefer serviceConn.Close()\n\t\tdefer tc.Close()\n\t\t_, err := io.Copy(serviceConn, tc)\n\t\terrCh <- err\n\t}()\n\tgo func() {\n\t\tdefer s.log.Debug(\"Stop proxying from service to client.\")\n\t\tdefer serviceConn.Close()\n\t\tdefer tc.Close()\n\t\t_, err := io.Copy(tc, serviceConn)\n\t\terrCh <- err\n\t}()\n\tvar errs []error\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase err := <-errCh:\n\t\t\tif err != nil && !utils.IsOKNetworkError(err) {\n\t\t\t\ts.log.WithError(err).Warn(\"Connection problem.\")\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\tcase <-ctx.Done():\n\t\t\treturn trace.ConnectionProblem(nil, \"context is closing\")\n\t\t}\n\t}\n\treturn trace.NewAggregate(errs...)\n}",
"func (p *connPool) wait() *conn {\n\tdeadline := time.After(p.opt.getPoolTimeout())\n\tfor {\n\t\tselect {\n\t\tcase cn := <-p.freeConns:\n\t\t\tif p.isIdle(cn) {\n\t\t\t\tvar err error\n\t\t\t\tcn, err = p.replace(cn)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"redis: replace failed: %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn cn\n\t\tcase <-deadline:\n\t\t\treturn nil\n\t\t}\n\t}\n\tpanic(\"not reached\")\n}",
"func (p *Pool) Get() (*PooledConnection, error) {\n\t// Lock the pool to keep the kids out.\n\tp.mu.Lock()\n\n\t// Clean this place up.\n\tp.purge()\n\n\t// Wait loop\n\tfor {\n\t\t// Try to grab first available idle connection\n\t\tif conn := p.first(); conn != nil {\n\n\t\t\t// Remove the connection from the idle slice\n\t\t\tp.idle = append(p.idle[:0], p.idle[1:]...)\n\t\t\tp.active++\n\t\t\tp.mu.Unlock()\n\t\t\tpc := &PooledConnection{Pool: p, Client: conn.pc.Client}\n\t\t\treturn pc, nil\n\n\t\t}\n\n\t\t// No idle connections, try dialing a new one\n\t\tif p.MaxActive == 0 || p.active < p.MaxActive {\n\t\t\tp.active++\n\t\t\tdial := p.Dial\n\n\t\t\t// Unlock here so that any other connections that need to be\n\t\t\t// dialed do not have to wait.\n\t\t\tp.mu.Unlock()\n\n\t\t\tdc, err := dial()\n\t\t\tif err != nil {\n\t\t\t\tp.mu.Lock()\n\t\t\t\tp.release()\n\t\t\t\tp.mu.Unlock()\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tpc := &PooledConnection{Pool: p, Client: dc}\n\t\t\treturn pc, nil\n\t\t}\n\n\t\t//No idle connections and max active connections, let's wait.\n\t\tif p.cond == nil {\n\t\t\tp.cond = sync.NewCond(&p.mu)\n\t\t}\n\n\t\tp.cond.Wait()\n\t}\n}",
"func (p *pool) Fetch(w http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\tconn := <-p.connections\n\n\tctxAttempt := r.Context().Value(attemptsKey)\n\tvar attempt int\n\n\tif ctxAttempt != nil {\n\t\tattempt = ctxAttempt.(int) + 1\n\t}\n\n\tif attempt > p.maxRetries {\n\t\treturn\n\t}\n\n\tduration := time.Since(start).Seconds()\n\tstats.Durations.WithLabelValues(\"get_connection\").Observe(duration)\n\tstats.AvailableConnectionsGauge.WithLabelValues(\"in_use\").Add(1)\n\tdefer func() {\n\t\tstats.AvailableConnectionsGauge.WithLabelValues(\"in_use\").Sub(1)\n\t\tstats.Attempts.WithLabelValues().Observe(float64(attempt))\n\t\tduration = time.Since(start).Seconds()\n\t\tstats.Durations.WithLabelValues(\"return_connection\").Observe(duration)\n\n\t\tif !conn.Shut {\n\t\t\tp.connections <- conn\n\t\t}\n\t}()\n\n\tif p.cache != nil && r.Method == \"GET\" {\n\t\tvalue, found := p.cache.Get(r.URL.Path)\n\t\tif found {\n\t\t\tstats.CacheCounter.WithLabelValues(r.URL.Path, \"hit\").Add(1)\n\t\t\tres := value.(string)\n\t\t\t_, err := w.Write([]byte(res))\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Error writing: %s\", err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tstats.CacheCounter.WithLabelValues(r.URL.Path, \"miss\").Add(1)\n\t}\n\n\tusableProxy, err := conn.Get()\n\tctx := context.WithValue(r.Context(), attemptsKey, attempt)\n\n\tif err != nil {\n\t\tlog.Printf(\"retrying err with request: %s\", err.Error())\n\t\tp.Fetch(w, r.WithContext(ctx))\n\t} else {\n\t\tusableProxy.ServeHTTP(w, r)\n\t}\n}",
"func proxy(ctx context.Context, logger hclog.Logger, destAddr string, l net.Listener) {\n\t// Wait for all connections to be done before exiting to prevent\n\t// goroutine leaks.\n\twg := sync.WaitGroup{}\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer func() {\n\t\t// Must cancel context and close listener before waiting\n\t\tcancel()\n\t\t_ = l.Close()\n\t\twg.Wait()\n\t}()\n\n\t// Close Accept() when context is cancelled\n\tgo func() {\n\t\t<-ctx.Done()\n\t\t_ = l.Close()\n\t}()\n\n\tfor ctx.Err() == nil {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tif ctx.Err() != nil {\n\t\t\t\t// Accept errors during shutdown are to be expected\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogger.Error(\"error in socket proxy; shutting down proxy\", \"error\", err, \"dest\", destAddr)\n\t\t\treturn\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tproxyConn(ctx, logger, destAddr, conn)\n\t\t}()\n\t}\n}",
"func Proxy(listener *net.TCPListener, idleDuration time.Duration) loop.Option {\n\treturn loop.WithHandler(listener, func(ctx context.Context, incoming net.Conn) error {\n\t\t// An initial, happy-path backend will already be reserved\n\t\t// for us by the loop preflight.\n\t\ttoken := ctx.Value(balancer.KeyBackendToken).(*balancer.BackendToken)\n\n\t\tfor {\n\t\t\taddr := token.Entry().Addr()\n\t\t\tconn, err := net.Dial(addr.Network(), addr.String())\n\t\t\tif err != nil {\n\t\t\t\ttoken.Release()\n\t\t\t\ttoken, err = token.Balancer().Wait(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// CONTEXT BREAK: We want to isolate the copy operation from\n\t\t\t// any external context cancellation to allow for graceful\n\t\t\t// drain operations.\n\t\t\treturn proxy(context.Background(), token, incoming.(*net.TCPConn), conn.(*net.TCPConn), idleDuration)\n\t\t}\n\t})\n}",
"func (p *Pool) purge() {\n\tif timeout := p.IdleTimeout; timeout > 0 {\n\t\tvar valid []*idleConnection\n\t\tnow := time.Now()\n\t\tfor _, v := range p.idle {\n\t\t\t// If the client has an error then exclude it from the pool\n\t\t\tif v.pc.Client.Errored {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif v.t.Add(timeout).After(now) {\n\t\t\t\tvalid = append(valid, v)\n\t\t\t} else {\n\t\t\t\t// Force underlying connection closed\n\t\t\t\tv.pc.Client.Close()\n\t\t\t}\n\t\t}\n\t\tp.idle = valid\n\t}\n}",
"func DialProxy() (net.Conn, error) {\n\tvar d Dialer\n\treturn d.DialProxy()\n}",
"func (h *http1) Dial(network, addr string) (net.Conn, error) {\n\tswitch network {\n\tcase \"tcp\", \"tcp6\", \"tcp4\":\n\tdefault:\n\t\treturn nil, errors.New(\"proxy: no support for HTTP proxy connections of type \" + network)\n\t}\n\n\tconn, err := h.forward.Dial(h.network, h.addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcloseConn := &conn\n\tdefer func() {\n\t\tif closeConn != nil {\n\t\t\t(*closeConn).Close()\n\t\t}\n\t}()\n\n\thost, portStr, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tport, err := strconv.Atoi(portStr)\n\tif err != nil {\n\t\treturn nil, errors.New(\"proxy: failed to parse port number: \" + portStr)\n\t}\n\tif port < 1 || port > 0xffff {\n\t\treturn nil, errors.New(\"proxy: port number out of range: \" + portStr)\n\t}\n\n\tif h.resolver != nil {\n\t\thosts, err := h.resolver.LookupHost(host)\n\t\tif err == nil && len(hosts) > 0 {\n\t\t\thost = hosts[0]\n\t\t}\n\t}\n\n\tb := bufPool.Get().(*bytes.Buffer)\n\tb.Reset()\n\n\tfmt.Fprintf(b, \"CONNECT %s:%s HTTP/1.1\\r\\nHost: %s:%s\\r\\n\", host, portStr, host, portStr)\n\tif h.user != \"\" {\n\t\tfmt.Fprintf(b, \"Proxy-Authorization: Basic %s\\r\\n\", base64.StdEncoding.EncodeToString([]byte(h.user+\":\"+h.password)))\n\t}\n\tio.WriteString(b, \"\\r\\n\")\n\n\tbb := b.Bytes()\n\tbufPool.Put(b)\n\n\tif _, err := conn.Write(bb); err != nil {\n\t\treturn nil, errors.New(\"proxy: failed to write greeting to HTTP proxy at \" + h.addr + \": \" + err.Error())\n\t}\n\n\tbuf := make([]byte, 2048)\n\tb0 := buf\n\ttotal := 0\n\n\tfor {\n\t\tn, err := conn.Read(buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttotal += n\n\t\tbuf = buf[n:]\n\n\t\tif i := bytes.Index(b0, CRLFCRLF); i > 0 {\n\t\t\tconn = &preReaderConn{conn, b0[i+4 : total]}\n\t\t\tb0 = b0[:i+4]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tresp, err := http.ReadResponse(bufio.NewReader(bytes.NewReader(b0)), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK 
{\n\t\treturn nil, errors.New(\"proxy: failed to read greeting from HTTP proxy at \" + h.addr + \": \" + resp.Status)\n\t}\n\n\tcloseConn = nil\n\treturn conn, nil\n}",
"func (p *Pool) Get(ctx context.Context) (*PoolConn, error) {\n\tselect {\n\tcase conn := <-p.ch:\n\t\tnow := p.nowfunc()\n\t\tif (p.maxIdleTime > 0 && now.Sub(conn.freedAt) > p.maxIdleTime) ||\n\t\t\t(p.maxConnTime > 0 && now.Sub(conn.CreatedAt()) > p.maxConnTime) {\n\t\t\tp.closeconn(conn)\n\t\t\treturn p.Get(ctx)\n\t\t}\n\t\tconn.p = p\n\t\treturn conn, nil\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\tdefault:\n\t}\n\tactive := atomic.AddInt64(&p.active, 1)\n\tif p.maxActive > 0 && active > p.maxActive {\n\t\tatomic.AddInt64(&p.active, -1)\n\t\treturn nil, ErrMaxActive\n\t}\n\tc, err := p.dial(ctx)\n\tif err != nil {\n\t\tatomic.AddInt64(&p.active, -1)\n\t\treturn nil, err\n\t}\n\treturn &PoolConn{Conn: c, p: p, createdAt: p.nowfunc()}, nil\n}",
"func connectPool() *http.Client {\n\tif clientConnect == nil {\n\t\tclientConnect = new(httpClient)\n\t\thttptr := &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\n\t\t\tMaxIdleConns: 50,\n\t\t\tMaxIdleConnsPerHost: 50,\n\t\t}\n\t\tclientConnect.Client = &http.Client{\n\t\t\tTransport: httptr,\n\t\t}\n\t}\n\treturn clientConnect.Client\n}",
"func establishConn(p *pool.Pool, dest pool.Destination) (net.Conn, error) {\n\tretry := 0\n\tretryMax := 3\n\n\tfor {\n\t\t// If it's not registered, abort.\n\t\tif _, destinationRegistered := p.Registered[dest.Name]; !destinationRegistered {\n\t\t\treturn nil, errors.New(\"Destination not registered\")\n\t\t}\n\n\t\t_, connectionIsInPool := p.Conns[dest.Name]\n\n\t\t// Are we retrying a previously established connection that failed?\n\t\tif retry >= retryMax && connectionIsInPool {\n\t\t\tlog.Printf(\"Exceeded retry count (%d) for destination %s\\n\", retryMax, dest.Name)\n\t\t\tp.RemoveConn(dest)\n\t\t}\n\n\t\t// Try a connection every 10s.\n\t\tconn, err := net.DialTimeout(\"tcp\", dest.Addr, time.Duration(3*time.Second))\n\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Destination error: %s, retrying in 10s\\n\", err)\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t// Increment failure count.\n\t\t\tretry++\n\t\t\tcontinue\n\t\t} else {\n\t\t\t// If this connection succeeds and is not in the pool\n\t\t\tif !connectionIsInPool {\n\t\t\t\tlog.Printf(\"Adding destination to connection pool: %s\\n\", dest.Name)\n\t\t\t\tp.AddConn(dest)\n\t\t\t} else {\n\t\t\t\t// If this connection is still in the pool, we're\n\t\t\t\t// likely here due to a temporary disconnect.\n\t\t\t\tlog.Printf(\"Reconnected to destination: %s\\n\", dest.Name)\n\t\t\t}\n\n\t\t\treturn conn, nil\n\t\t}\n\n\t}\n\n}",
"func (pool *ComplexPool) New() (Proxy, error) {\n\tlength := pool.SizeUnused()\n\n\tif length == 0 {\n\t\tif !pool.Config.ReloadWhenEmpty {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select proxy, no unused proxies left in pool\", pool)\n\t\t}\n\n\t\terr := pool.Load()\n\t\tif err != nil {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select unused proxy, error occurred while reloading pool: %v\", pool, err)\n\t\t}\n\n\t\tlength = pool.SizeUnused()\n\t\tif length == 0 {\n\t\t\treturn Proxy{}, fmt.Errorf(\"prox (%p): cannot select proxy, no unused proxies even after reload\", pool)\n\t\t}\n\t}\n\n\trawProxy := pool.Unused.Random()\n\tpool.Unused.Remove(rawProxy)\n\n\treturn *CastProxy(rawProxy), nil\n}",
"func TestSourcePool_connsReturning(t *testing.T) {\n\tsrc := &testSource{}\n\texpires := time.Millisecond\n\tpool := store.Pool(src, 1, expires)\n\tpool.Open()\n\n\tgetConn := func() <-chan store.Conn {\n\t\tout := make(chan store.Conn)\n\t\tgo func() {\n\t\t\tconn, _ := pool.Open()\n\t\t\tout <- conn\n\t\t\tclose(out)\n\t\t}()\n\t\treturn out\n\t}\n\n\tselect {\n\tcase <-getConn():\n\t\t// skip\n\tcase <-time.After((expires * 15) / 10):\n\t\t// wait some more to see if get new connection\n\t\tt.Errorf(\"failed to get connection after conn expires\")\n\t}\n}",
"func proxyListener() {\n\t// A pool of concurrent proxy requests. Doesn't have to be threadsafe\n\t// because it's protected by a single channel recieve.\n\tactiveFiles := make(map[string]chan *proxiedFile)\n\n\tfor {\n\t\treq := <-proxyRequests\n\t\tif pipe, active := activeFiles[req.Filename]; active {\n\t\t\t// If the one sending the pipe is a client, then the server got\n\t\t\t// here first. Send the pipe that the server left behind so that\n\t\t\t// the client can send the file to the server.\n\t\t\t// If it was a server, then the client got here first. Send the\n\t\t\t// server the pipe so that it can recieve the file and start\n\t\t\t// sending it to the remote end.\n\t\t\t//\n\t\t\t// In either case, this case means the negotiation was successful.\n\t\t\t// Delete the transaction from the pool.\n\t\t\treq.Pipe.File <- pipe\n\t\t\tdelete(activeFiles, req.Filename)\n\t\t} else {\n\t\t\t// If the one sending the pipe is a client, then we're storing the\n\t\t\t// pipe so that the server can recieve the file from it later. The\n\t\t\t// client was first in this case.\n\t\t\t// If it was a server, that means the server got here first. We're\n\t\t\t// storing the pipe here so that the client can send on it when it\n\t\t\t// arrives.\n\t\t\tpipe = make(chan *proxiedFile)\n\t\t\tactiveFiles[req.Filename] = pipe\n\t\t\treq.Pipe.File <- pipe\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Up20200929144301 adds Visual Studio Code as a workspace template.
|
// Up20200929144301 registers Visual Studio Code ("Open source code editor")
// as a workspace template in every Onepanel-enabled namespace. It is a
// no-op when this migration has already been recorded as run.
func Up20200929144301(tx *sql.Tx) error {
	client, err := getClient()
	if err != nil {
		return err
	}
	defer client.DB.Close()

	migrationsRan, err := getRanSQLMigrations(client)
	if err != nil {
		return err
	}
	if _, alreadyRan := migrationsRan[20200929144301]; alreadyRan {
		// Nothing to do: migration was applied previously.
		return nil
	}

	namespaces, err := client.ListOnepanelEnabledNamespaces()
	if err != nil {
		return err
	}

	// Template definition, including its user-facing description.
	template := &v1.WorkspaceTemplate{
		Name:        vscodeWorkspaceTemplateName,
		Manifest:    vscodeWorkspaceTemplate,
		Description: "Open source code editor",
	}

	for _, ns := range namespaces {
		if _, createErr := client.CreateWorkspaceTemplate(ns.Name, template); createErr != nil {
			return createErr
		}
	}
	return nil
}
|
[
"func main() {\n\tfmt.Println(\"/// Go Template Creator v0.1 ///\")\n\tprojectName := getProjectName()\n\tcreateFolders(projectName)\n\tcopy(dwgFilePath, basePath+projectName+\".sys/Blank_100_100.dwg\")\n\tcopy(cnfFilePath, basePath+projectName+\".p8k/\"+projectName+\".cnf\")\n\ttime.Sleep(5 * time.Second)\n}",
"func createWorkspace(client secrethub.ClientInterface, io ui.IO, org string, orgDescription string, progressPrinter progress.Printer) error {\n\tif org == \"\" {\n\t\tcreateWorkspace, err := ui.AskYesNo(io, \"Do you want to create a shared workspace for your team?\", ui.DefaultYes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(io.Output())\n\t\tif !createWorkspace {\n\t\t\tfmt.Fprint(io.Output(), \"You can create a shared workspace later using `secrethub org init`.\\n\\n\")\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tvar err error\n\tif org == \"\" {\n\t\torg, err = ui.AskAndValidate(io, \"Workspace name (e.g. your company name): \", 2, api.ValidateOrgName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif orgDescription == \"\" {\n\t\torgDescription, err = ui.AskAndValidate(io, \"A description (max 144 chars) for your team workspace so others will recognize it:\\n\", 2, api.ValidateOrgDescription)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfmt.Fprint(io.Output(), \"Creating your shared workspace...\")\n\tprogressPrinter.Start()\n\n\t_, err = client.Orgs().Create(org, orgDescription)\n\tprogressPrinter.Stop()\n\tif err == api.ErrOrgAlreadyExists {\n\t\tfmt.Fprintf(io.Output(), \"The workspace %s already exists. If it is your organization, ask a colleague to invite you to the workspace. You can also create a new one using `secrethub org init`.\\n\", org)\n\t} else if err != nil {\n\t\treturn err\n\t} else {\n\t\tfmt.Fprint(io.Output(), \"Created your shared workspace.\\n\\n\")\n\t}\n\treturn nil\n}",
"func (a *App) VSCode(ctx context.Context, browser browser, codespaceName string, useInsiders bool) error {\n\tif codespaceName == \"\" {\n\t\tcodespace, err := chooseCodespace(ctx, a.apiClient)\n\t\tif err != nil {\n\t\t\tif err == errNoCodespaces {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"error choosing codespace: %w\", err)\n\t\t}\n\t\tcodespaceName = codespace.Name\n\t}\n\n\turl := vscodeProtocolURL(codespaceName, useInsiders)\n\tif err := browser.Browse(url); err != nil {\n\t\treturn fmt.Errorf(\"error opening Visual Studio Code: %w\", err)\n\t}\n\n\treturn nil\n}",
"func ProjectAddTemplateHook() ent.Hook {\n\thk := func(next ent.Mutator) ent.Mutator {\n\t\treturn hook.ProjectFunc(func(ctx context.Context, mutation *ent.ProjectMutation) (ent.Value, error) {\n\t\t\tclient := mutation.Client()\n\t\t\ttypeID, exists := mutation.TypeID()\n\t\t\tif !exists {\n\t\t\t\treturn nil, errors.New(\"project must have type\")\n\t\t\t}\n\t\t\tprojectTemplate, err := addProjectTemplate(ctx, client, typeID)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to create project template: %w\", err)\n\t\t\t}\n\t\t\tmutation.SetTemplateID(projectTemplate.ID)\n\t\t\treturn next.Mutate(ctx, mutation)\n\t\t})\n\t}\n\treturn hook.On(hk, ent.OpCreate)\n}",
"func NewWorkspace(ctx *pulumi.Context,\n\tname string, args *WorkspaceArgs, opts ...pulumi.ResourceOption) (*Workspace, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.ResourceGroupName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ResourceGroupName'\")\n\t}\n\tif args.AllowPublicAccessWhenBehindVnet == nil {\n\t\targs.AllowPublicAccessWhenBehindVnet = pulumi.BoolPtr(false)\n\t}\n\tif args.HbiWorkspace == nil {\n\t\targs.HbiWorkspace = pulumi.BoolPtr(false)\n\t}\n\taliases := pulumi.Aliases([]pulumi.Alias{\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200901preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20180301preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20180301preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20181119:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20181119:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20190501:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20190501:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20190601:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20190601:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20191101:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-nextgen:machinelearningservices/v20191101:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200101:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200101:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200218preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200218preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200301:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200301:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200401:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200401:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200501preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200501preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200515preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200515preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200601:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200601:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20200801:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20200801:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20210101:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: 
pulumi.String(\"azure-nextgen:machinelearningservices/v20210101:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20210301preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20210301preview:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-native:machinelearningservices/v20210401:Workspace\"),\n\t\t},\n\t\t{\n\t\t\tType: pulumi.String(\"azure-nextgen:machinelearningservices/v20210401:Workspace\"),\n\t\t},\n\t})\n\topts = append(opts, aliases)\n\tvar resource Workspace\n\terr := ctx.RegisterResource(\"azure-native:machinelearningservices/v20200901preview:Workspace\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func InitProject() {\n\tfmt.Println(\"\\nDownloading template...\")\n\tresp, err := http.Get(\"https://github.com/ryanlbrown/spapp/archive/master.zip\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tr, err := zip.NewReader(bytes.NewReader(b), int64(len(b)))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor _, f := range r.File {\n\t\t// Ignore dot files.\n\t\tif path.Base(f.Name)[0] == '.' {\n\t\t\tcontinue\n\t\t}\n\t\tpathTokens := strings.Split(f.Name, \"/\")\n\t\tsubName := strings.Join(pathTokens[1:], \"/\")\n\t\tif subName == \"\" || subName == \"README.md\" {\n\t\t\tcontinue\n\t\t}\n\t\tif subName[len(subName)-1] == '/' {\n\t\t\tdirName := subName[:len(subName)-1]\n\t\t\tfmt.Println(\"Created:\", dirName)\n\t\t\terr = os.Mkdir(dirName, 0755)\n\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tfa, err := f.Open()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer fa.Close()\n\t\t\tfb, err := os.OpenFile(subName, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer fb.Close()\n\t\t\t_, err = io.Copy(fb, fa)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tfmt.Println(\"Created:\", subName)\n\t\t}\n\t}\n\tfmt.Println()\n}",
"func GetNewGitIgnoreTemplate(cc CodeConfig) string {\n\treturn `\n# See http://help.github.com/ignore-files/ for more about ignoring files.\n#\n# If you find yourself ignoring temporary files generated by your text editor\n# or operating system, you probably want to add a global ignore instead:\n# git config --global core.excludesfile '~/.gitignore_global'\n\n# Ignore tags\n/tags\n\n# Ignore tmp\n/tmp\n\n# Ignore test coverage files\n*.coverprofile\n*coverage.out\n\n# Ignore swap files\n*.swp\n*.swo\n\n# Ignore config files\n# /config.json`\n}",
"func createNewSolutionScriptEntry(solution string, flags *flag.FlagSet) (store.Script, error) {\n\ttimeNow := time.Now()\n\n\tcomment, err := flags.GetString(\"comment\")\n\tif err != nil {\n\t\treturn store.Script{}, fmt.Errorf(\"could not parse `comment` flag: %s\", err)\n\t}\n\tpath, err := flags.GetString(\"path\")\n\tif err != nil {\n\t\treturn store.Script{}, fmt.Errorf(\"could not parse `path` flag: %s\", err)\n\t}\n\n\tif path != \"\" {\n\t\tbytes, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn store.Script{}, fmt.Errorf(\"failed to read %s: %s\", path, err)\n\t\t}\n\t\tsolution = string(bytes)\n\t}\n\n\t//Generate script entry unique id\n\tid, err := store.GenereateIdempotentID(\"\", comment, \"\", solution)\n\tif err != nil {\n\t\treturn store.Script{}, fmt.Errorf(\"failed to generate idem potent id: %s\", err)\n\t}\n\n\treturn store.Script{\n\t\tID: id,\n\t\tComment: comment,\n\t\tCreationTime: timeNow,\n\t\tUpdateTime: timeNow,\n\t\tSolution: store.Solution{\n\t\t\tContent: solution,\n\t\t},\n\t}, nil\n}",
"func Create(t *testing.T, knFunc *TestShellCmdRunner, project FunctionTestProject) {\n\tvar result TestShellCmdResult\n\tif project.RemoteRepository == \"\" {\n\t\tresult = knFunc.Exec(\"create\", project.ProjectPath, \"--language\", project.Runtime, \"--template\", project.Template)\n\t} else {\n\t\tresult = knFunc.Exec(\"create\", project.ProjectPath, \"--language\", project.Runtime, \"--template\", project.Template, \"--repository\", project.RemoteRepository)\n\t}\n\tif result.Error != nil {\n\t\tt.Fatal()\n\t}\n}",
"func defaultTemplateDir() string {\n return filepath.Join(\"contrib\", \"templates\", \"default\")\n}",
"func CreateProjects(t *testing.T, appFS afero.Fs) {\n\tname := t.Name()\n\n\t// projects outside GOPATH\n\tappFS.MkdirAll(\"/code/\"+name+\"/invalid/repo/.git\", 0755)\n\n\t// projects under base\n\tappFS.MkdirAll(\"/code/\"+name+\"/base/src/github.com/kalbasit/swm/.git\", 0755)\n\tappFS.MkdirAll(\"/code/\"+name+\"/base/src/github.com/kalbasit/dotfiles/.git\", 0755)\n\tappFS.MkdirAll(\"/code/\"+name+\"/base/src/github.com/kalbasit/workflow/.git\", 0755)\n\n\t// projects under STORY-123\n\tappFS.MkdirAll(\"/code/\"+name+\"/stories/STORY-123/src/github.com/kalbasit/dotfiles\", 0755)\n\tafero.WriteFile(appFS, \"/code/\"+name+\"/stories/STORY-123/src/github.com/kalbasit/dotfiles/.git\", []byte(\n\t\t\"gitdir: /code/\"+name+\"/base/src/github.com/kalbasit/.git/worktrees/dotfiles\",\n\t), 0644)\n\tappFS.MkdirAll(\"/code/\"+name+\"/stories/STORY-123/src/github.com/kalbasit/swm\", 0755)\n\tafero.WriteFile(appFS, \"/code/\"+name+\"/stories/STORY-123/src/github.com/kalbasit/swm/.git\", []byte(\n\t\t\"gitdir: /code/\"+name+\"/base/src/github.com/kalbasit/.git/worktrees/swm\",\n\t), 0644)\n\n\t// projects ignored\n\n\t// projects outside GOPATH\n\tappFS.MkdirAll(\"/code/.snapshots/\"+name+\"/invalid/repo/.git\", 0755)\n\n\t// projects under base\n\tappFS.MkdirAll(\"/code/.snapshots/\"+name+\"/base/src/github.com/kalbasit/swm/.git\", 0755)\n\tappFS.MkdirAll(\"/code/.snapshots/\"+name+\"/base/src/github.com/kalbasit/dotfiles/.git\", 0755)\n\n\t// projects under STORY-123\n\tappFS.MkdirAll(\"/code/.snapshots/\"+name+\"/stories/STORY-123/src/github.com/kalbasit/dotfiles\", 0755)\n\tafero.WriteFile(appFS, \"/code/.snapshots/\"+name+\"/stories/STORY-123/src/github.com/kalbasit/dotfiles/.git\", []byte(\n\t\t\"gitdir: /code/\"+name+\"/base/src/github.com/kalbasit/.git/worktrees/dotfiles\",\n\t), 0644)\n}",
"func (_ *Frontend) GenerateNewVueFile(ctx NewTemplateVueCtx) (FileContainer, error) {\n\tcontent, err := ExecuteTemplateFile(\"templates/ui/new-template.vue\", \"uiNewModel\", ctx) // TODO: magic strings\n\tif err != nil {\n\t\treturn FileContainer{}, err\n\t}\n\n\tout := FileContainer{\n\t\tContent: content,\n\t\tPath: NewPaths().UIComponents,\n\t\tFileName: ctx.FileName,\n\t}\n\treturn out, nil\n}",
"func (m *MigrationService) CreateTemplate(args []string) error {\n\tif m.existMigration() {\n\t\tfileName := args[0]\n\t\ttypes := args[1]\n\t\tprojectID := args[2]\n\t\ttargetBranch := args[3]\n\t\tnameBranch := args[4]\n\t\tcommitMessage := args[5]\n\t\tfolderToCreate, _ := m.getCurrentFolder()\n\n\t\tm.file.CreateFile(folderToCreate, fileName, projectID, targetBranch, nameBranch, commitMessage)\n\t\tm.file.WriteToFile(folderToCreate, fileName, types)\n\t} else {\n\t\tfmt.Println(\"doesn't exist migration. please run [fastshop migrations new] \")\n\t}\n\n\treturn nil\n}",
"func InfraProjectName() []Prompt {\n\treturn []Prompt{\n\t\t{\n\t\t\tName: \"projectName\",\n\t\t\tQuestion: \"Enter the name of your project (will be prefixed to most resources):\",\n\t\t},\n\t}\n}",
"func GenerateModifyLaunchTemplateInput(cr *svcapitypes.LaunchTemplate) *svcsdk.ModifyLaunchTemplateInput {\n\tres := &svcsdk.ModifyLaunchTemplateInput{}\n\n\tif cr.Spec.ForProvider.LaunchTemplateName != nil {\n\t\tres.SetLaunchTemplateName(*cr.Spec.ForProvider.LaunchTemplateName)\n\t}\n\n\treturn res\n}",
"func addSourceFile(t *Template, typ sourceType, file string) {}",
"func newCreateProjectCmd() *cobra.Command {\n\tvar (\n\t\toptions core.CreateProjectOptions\n\t)\n\n\tcreateProjectCmd := cobra.Command{\n\t\tUse: \"project NAME\",\n\t\tShort: `Create a new verless project`,\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tpath := args[0]\n\t\t\treturn core.CreateProject(path, options)\n\t\t},\n\t}\n\n\tcreateProjectCmd.Flags().BoolVar(&options.Overwrite, \"overwrite\",\n\t\tfalse, `overwrite the directory if it already exists`)\n\n\treturn &createProjectCmd\n}",
"func getTemplTest(c *gin.Context) {\n\tfmt.Println(\".............................\")\n\tfmt.Println(\"template test\")\n\tfmt.Println(\".............................\")\n\n\tc.HTML(http.StatusOK, \"templtest.tmpl.html\", gin.H{\n\t\t\"title\": \"Main website\",\n\t})\n}",
"func AddSourceAttributesForTemplate(sourceID string, template *dw.DevWorkspaceTemplateSpec) {\n\tfor idx, component := range template.Components {\n\t\tif component.Attributes == nil {\n\t\t\ttemplate.Components[idx].Attributes = attributes.Attributes{}\n\t\t}\n\t\ttemplate.Components[idx].Attributes.PutString(constants.PluginSourceAttribute, sourceID)\n\t}\n\tfor idx, command := range template.Commands {\n\t\tif command.Attributes == nil {\n\t\t\ttemplate.Commands[idx].Attributes = attributes.Attributes{}\n\t\t}\n\t\ttemplate.Commands[idx].Attributes.PutString(constants.PluginSourceAttribute, sourceID)\n\t}\n\tfor idx, project := range template.Projects {\n\t\tif project.Attributes == nil {\n\t\t\ttemplate.Projects[idx].Attributes = attributes.Attributes{}\n\t\t}\n\t\ttemplate.Projects[idx].Attributes.PutString(constants.PluginSourceAttribute, sourceID)\n\t}\n\tfor idx, project := range template.StarterProjects {\n\t\tif project.Attributes == nil {\n\t\t\ttemplate.StarterProjects[idx].Attributes = attributes.Attributes{}\n\t\t}\n\t\ttemplate.StarterProjects[idx].Attributes.PutString(constants.PluginSourceAttribute, sourceID)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Down20200929144301 removes Visual Studio Code from workspace templates.
|
// Down20200929144301 removes Visual Studio Code from workspace templates by
// archiving the template in every onepanel-enabled namespace.
func Down20200929144301(tx *sql.Tx) error {
	client, err := getClient()
	if err != nil {
		return err
	}
	// Release the DB connection when done; the original leaked it, unlike
	// the matching Up20200929144301 which closes it.
	defer client.DB.Close()

	namespaces, err := client.ListOnepanelEnabledNamespaces()
	if err != nil {
		return err
	}

	// Templates are archived by UID, which is derived from the template name.
	uid, err := uid2.GenerateUID(vscodeWorkspaceTemplateName, 30)
	if err != nil {
		return err
	}

	for _, namespace := range namespaces {
		if _, err := client.ArchiveWorkspaceTemplate(namespace.Name, uid); err != nil {
			return err
		}
	}
	return nil
}
|
[
"func Up20200929144301(tx *sql.Tx) error {\n\tclient, err := getClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.DB.Close()\n\n\tmigrationsRan, err := getRanSQLMigrations(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, ok := migrationsRan[20200929144301]; ok {\n\t\treturn nil\n\t}\n\n\tnamespaces, err := client.ListOnepanelEnabledNamespaces()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworkspaceTemplate := &v1.WorkspaceTemplate{\n\t\tName: vscodeWorkspaceTemplateName,\n\t\tManifest: vscodeWorkspaceTemplate,\n\t}\n\n\t// Adding description\n\tworkspaceTemplate.Description = \"Open source code editor\"\n\n\tfor _, namespace := range namespaces {\n\t\tif _, err := client.CreateWorkspaceTemplate(namespace.Name, workspaceTemplate); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func FixWorkspace(f *rule.File) {\n\tremoveLegacyGoRepository(f)\n}",
"func DropLegacyStatics(ctx context.Context) error {\n\n\tfrontRoot := config.Get(\"defaults\", \"frontRoot\").String(filepath.Join(config.ApplicationDataDir(), \"static\", \"pydio\"))\n\tif frontRoot != \"\" {\n\t\tif er := os.RemoveAll(frontRoot); er != nil {\n\t\t\tlog.Logger(ctx).Error(\"Could not remove old PHP data from \"+frontRoot+\". You may safely delete this folder. Error was\", zap.Error(er))\n\t\t} else {\n\t\t\tlog.Logger(ctx).Info(\"Successfully removed old PHP data from \" + frontRoot)\n\t\t}\n\t}\n\n\tlog.Logger(ctx).Info(\"Clearing unused configurations\")\n\tconfig.Del(\"defaults\", \"frontRoot\")\n\tconfig.Del(\"defaults\", \"fpm\")\n\tconfig.Del(\"defaults\", \"fronts\")\n\tconfig.Del(\"services\", \"pydio.frontends\")\n\tif config.Get(\"frontend\", \"plugin\", \"core.pydio\", \"APPLICATION_TITLE\").String(\"\") == \"\" {\n\t\tconfig.Set(\"Pydio Cells\", \"frontend\", \"plugin\", \"core.pydio\", \"APPLICATION_TITLE\")\n\t}\n\tif e := config.Save(common.PYDIO_SYSTEM_USERNAME, \"Upgrade to 1.2.0\"); e == nil {\n\t\tlog.Logger(ctx).Info(\"[Upgrade] Cleaned unused configurations\")\n\t}\n\n\treturn nil\n}",
"func fixTemplateNames(templates []string) {\n\tfor i := range templates {\n\t\ttemplates[i] = \"templates/\" + templates[i]\n\t}\n}",
"func RevertTemplate(name string) error {\n\treturn pathx.CreateTemplate(category, name, deploymentTemplate)\n}",
"func removeDoNotEdit(dir string) error {\n\tsrcDir := filepath.Join(dir, \"src\")\n\treturn filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() || filepath.Ext(path) != GoExtension {\n\t\t\treturn nil\n\t\t}\n\n\t\tset := token.NewFileSet()\n\t\tfile, err := parser.ParseFile(set, path, nil, parser.ParseComments)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf, err := os.OpenFile(path, os.O_RDWR, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tfor _, comment := range file.Comments {\n\t\t\tdata := make([]byte, comment.End()-comment.Pos())\n\t\t\tif _, err := f.Seek(int64(comment.Pos()-1), io.SeekStart); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := io.ReadFull(f, data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcommentStr := string(data)\n\t\t\tif strings.Contains(commentStr, \"DO NOT EDIT\") {\n\t\t\t\tcommentStr = strings.Replace(commentStr, \"DO NOT EDIT\", \"XXXXXXXXXXX\", -1)\n\t\t\t\tif _, err := f.WriteAt([]byte(commentStr), int64(comment.Pos()-1)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}",
"func projectCleanup(env environment.Environment, macro *model.Macro) error {\n\tpartialComparisonService := env.ServiceFactory().MustPartialComparisonService()\n\treturn partialComparisonService.DeleteFrom(macro)\n}",
"func deleteSchemaFiles(path string) {\n\tfor k, _ := range CapnpFileMap {\n\t\tos.Remove(path + \"/sandstorm/\" + k)\n\t}\n\tos.Remove(path + \"/sandstorm\")\n\tos.Remove(path)\n}",
"func undoUtilityChanges(prevProjPath, projPath string) {\n\tfiles, err := ioutil.ReadDir(prevProjPath) //ReadDir returns a slice of FileInfo structs\n\tif isError(err) {\n\t\treturn\n\t}\n\tfor _, file := range files { //loop through each files and directories from previous project\n\t\tvar fileName = file.Name()\n\t\tif file.IsDir() { //if directory...\n\t\t\tif fileName == \"Pods\" || fileName == \".git\" { //ignore Pods and .git directories\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprevProjPath = prevProjPath + \"/\" + fileName //update directory path by adding /fileName\n\t\t\tundoUtilityChanges(prevProjPath, projPath) //recursively call this function again\n\t\t\tprevProjPath = trimPathAfterLastSlash(prevProjPath) //reset path by removing the / + fileName\n\t\t} else { //for each .swift and .strings file in previous project\n\t\t\tvar fileExtension = filepath.Ext(strings.TrimSpace(fileName)) //gets the file extension from file name\n\t\t\tif fileExtension == \".swift\" || fileExtension == \".strings\" { //only undo .swift and .strings files\n\t\t\t\tvar prevProjPathToSearch = trimPathBeforeLastSlash(prevProjPath, false) + \"/\" + fileName //get the path of a language file e.g. \"en.lproj/Localize.strings\"\n\t\t\t\tprevProjPath = prevProjPath + \"/\" + fileName //update the prevProjPath like always\n\t\t\t\tvar isFound, filePath = searchForFilePath(projPath, prevProjPathToSearch) //search project for file with the same name as .swift file from previour version\n\t\t\t\tif isFound { //if found... 
read both file's content\n\t\t\t\t\tvar prevProjContents = readFile(prevProjPath)\n\t\t\t\t\tvar currentProjContents = readFile(filePath)\n\t\t\t\t\tif prevProjContents != currentProjContents { //if contents are not the same, replace project's file contents with the previous project's contents\n\t\t\t\t\t\t// fmt.Println(\"\\nCopying contents of \" + prevProjPath + \" to \" + filePath)\n\t\t\t\t\t\treplaceFile(filePath, prevProjContents)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Print(\"Error: Failed to find \", fileName, \" during undo. Please remove all changes using version control instead.\\n\")\n\t\t\t\t}\n\t\t\t\tprevProjPath = trimPathAfterLastSlash(prevProjPath) //reset path by removing the / + fileName\n\t\t\t}\n\t\t}\n\t}\n}",
"func CleanSidecar(instanceName string, deployment *appsv1.Deployment) {\n\tdelete(deployment.Labels, Label)\n\tfor c := 0; c < len(deployment.Spec.Template.Spec.Containers); c++ {\n\t\tif deployment.Spec.Template.Spec.Containers[c].Name == \"jaeger-agent\" {\n\t\t\t// delete jaeger-agent container\n\t\t\tdeployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers[:c], deployment.Spec.Template.Spec.Containers[c+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\tif autodetect.OperatorConfiguration.GetPlatform() == autodetect.OpenShiftPlatform {\n\t\tnames := map[string]bool{\n\t\t\tca.TrustedCANameFromString(instanceName): true,\n\t\t\tca.ServiceCANameFromString(instanceName): true,\n\t\t}\n\t\t// Remove the managed volumes, if present\n\t\tfor v := 0; v < len(deployment.Spec.Template.Spec.Volumes); v++ {\n\t\t\tif _, ok := names[deployment.Spec.Template.Spec.Volumes[v].Name]; ok {\n\t\t\t\t// delete managed volume\n\t\t\t\tdeployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes[:v], deployment.Spec.Template.Spec.Volumes[v+1:]...)\n\t\t\t\tv--\n\t\t\t}\n\t\t}\n\t}\n}",
"func cleanOldNodes(assets assets, target string) {\n\td, err := os.ReadDir(target)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Find deleted nodes by selecting one generated\n\t// file from standard templates (<T>_query.go).\n\tvar deleted []*Type\n\tfor _, f := range d {\n\t\tif !strings.HasSuffix(f.Name(), \"_query.go\") {\n\t\t\tcontinue\n\t\t}\n\t\ttyp := &Type{Name: strings.TrimSuffix(f.Name(), \"_query.go\")}\n\t\tpath := filepath.Join(target, typ.PackageDir())\n\t\tif _, ok := assets.dirs[path]; ok {\n\t\t\tcontinue\n\t\t}\n\t\t// If it is a node, it must have a model file and a dir (e.g. ent/t.go, ent/t).\n\t\t_, err1 := os.Stat(path + \".go\")\n\t\tf2, err2 := os.Stat(path)\n\t\tif err1 == nil && err2 == nil && f2.IsDir() {\n\t\t\tdeleted = append(deleted, typ)\n\t\t}\n\t}\n\tfor _, typ := range deleted {\n\t\tfor _, t := range Templates {\n\t\t\terr := os.Remove(filepath.Join(target, t.Format(typ)))\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\tlog.Printf(\"remove old file %s: %s\\n\", filepath.Join(target, t.Format(typ)), err)\n\t\t\t}\n\t\t}\n\t\terr := os.Remove(filepath.Join(target, typ.PackageDir()))\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\tlog.Printf(\"remove old dir %s: %s\\n\", filepath.Join(target, typ.PackageDir()), err)\n\t\t}\n\t}\n}",
"func CleanSDKGeneratedFiles(path string) error {\n\tlog.Printf(\"Removing all sdk generated files in '%s'...\", path)\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, file := range files {\n\t\tif strings.HasPrefix(file.Name(), sdk_generated_file_prefix) || strings.HasPrefix(file.Name(), sdk_example_file_prefix) || strings.HasPrefix(file.Name(), sdk_test_file_prefix) {\n\t\t\terr = os.Remove(filepath.Join(path, file.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func cleanUpAfterProjects(projects []string) {\n\tfor _, p := range projects {\n\t\todoDeleteProject(p)\n\t}\n}",
"func RemoveView(name string) {\n\tfmt.Println()\n\tRemoveFile(ViewFilename(name))\n\tRemoveFile(SassFilename(name))\n\tRemoveFile(TemplateFilename(name))\n\tRemoveFile(CssFilename(name))\n\tRemoveLinkTag(name)\n\tfmt.Println()\n}",
"func (k JsSDK) DefinitionsClean(context.Context) error {\n\terr := sh.Rm(filepath.Join(\"sdk\", \"js\", \"generated\", \"allowed-field-mask-paths.json\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn sh.Rm(filepath.Join(\"sdk\", \"js\", \"generated\", \"api-definition.json\"))\n}",
"func deleteClonedProject(path string) {\n\tos.RemoveAll(path)\n}",
"func (ct *ContentTypes) RemoveContent(fileName string) {\n\tfor i, part := range ct.ml.Overrides {\n\t\tif part.PartName == fileName {\n\t\t\tct.ml.Overrides = append(ct.ml.Overrides[:i], ct.ml.Overrides[i+1:]...)\n\t\t\tct.file.MarkAsUpdated()\n\t\t\treturn\n\t\t}\n\t}\n}",
"func CleanUp(ctx context.Context, cfg *config.Config, pipeline *pipelines.Pipeline, name names.Name) error {\n\tkubectlPath, err := cfg.Tools[config.Kubectl].Resolve()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := proc.GracefulCommandContext(ctx, kubectlPath, \"delete\",\n\t\t\"all\",\n\t\t\"-l\", k8s.StackLabel+\"=\"+name.DNSName(),\n\t)\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"could not delete k8s resources: %v\", err)\n\t}\n\treturn nil\n}",
"func removeText(fileName string) {\n\t//Skip main.go file\n\tif fileName != \"main.go\" {\n\t\t//Read file bytes from filename param, a success call return err==null, not err==EOF\n\t\tinput, err := ioutil.ReadFile(fileName)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\t//Convert content to string\n\t\ttext := string(input)\n\n\t\t//Replace keyword 'TODO' by regex\n\t\tre := regexp.MustCompile(\".*TODO.*\\r?\\n\")\n\t\tlines := re.ReplaceAllString(text, \"\")\n\n\t\t//Write string into a file\n\t\terr = WriteToFile(fileName, lines)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GenerateModels parses the magmagenmeta key of the given swagger YAML file, copies the files that the target file depends on into the current working directory, shells out to `swagger generate models`, then cleans up the dependency files.
|
// GenerateModels parses the magma-gen-meta key of the given swagger YAML file,
// copies the files that the target file depends on into a temporary directory,
// shells out to `swagger generate model`, then cleans up the dependency files.
//
// specs must contain an entry keyed by the absolute path of targetFilepath;
// the generated models are written under rootDir joined with the spec's
// configured output directory.
func GenerateModels(targetFilepath string, configFilepath string, rootDir string, specs map[string]MagmaSwaggerSpec) error {
	absTargetFilepath, err := filepath.Abs(targetFilepath)
	if err != nil {
		return fmt.Errorf("target filepath %s is invalid: %w", targetFilepath, err)
	}
	// Fail fast with a clear message if the target spec isn't registered;
	// the original read a zero-value spec from the map and produced a
	// confusing swagger/path error much later.
	targetSpec, ok := specs[absTargetFilepath]
	if !ok {
		return fmt.Errorf("no swagger spec registered for target filepath %s", absTargetFilepath)
	}

	tmpGenDir, err := ioutil.TempDir(".", "tmpgen")
	if err != nil {
		return fmt.Errorf("could not create temporary gen directory: %w", err)
	}
	defer os.RemoveAll(tmpGenDir)

	// For each dependency, strip the magma-gen-meta and write the result to
	// the filename specified by `dependent-filename`.
	err = StripAndWriteSwaggerSpecs(specs, tmpGenDir)
	if err != nil {
		return err
	}

	// Shell out to go-swagger to generate the models.
	outputDir := filepath.Join(rootDir, targetSpec.MagmaGenMeta.OutputDir)
	absConfigFilepath, err := filepath.Abs(configFilepath)
	if err != nil {
		return err
	}
	cmd := exec.Command(
		"swagger", "generate", "model",
		"--spec", filepath.Join(tmpGenDir, targetSpec.MagmaGenMeta.TempGenFilename),
		"--target", outputDir,
		"--config-file", absConfigFilepath,
	)
	// Capture both streams so a failure surfaces the tool's own diagnostics.
	stdoutBuf := &strings.Builder{}
	stderrBuf := &strings.Builder{}
	cmd.Stdout = stdoutBuf
	cmd.Stderr = stderrBuf

	err = cmd.Run()
	if err != nil {
		return fmt.Errorf("failed to generate models; stdout:\n%s\nstderr:\n%s: %w", stdoutBuf.String(), stderrBuf.String(), err)
	}
	return nil
}
|
[
"func (g *Generator) generateResources(api *design.APIDefinition) error {\n\tos.MkdirAll(modelDir(), 0755)\n\tapp := kingpin.New(\"Model generator\", \"model generator\")\n\tcodegen.RegisterFlags(app)\n\t_, err := app.Parse(os.Args[1:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgopath := filepath.SplitList(os.Getenv(\"GOPATH\"))[0]\n\n\tmainimp, err := filepath.Rel(filepath.Join(gopath, \"src\"), codegen.OutputDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmainimp = filepath.ToSlash(mainimp)\n\timp := path.Join(mainimp, \"app\")\n\timports := []*codegen.ImportSpec{\n\t\tcodegen.SimpleImport(imp),\n\t\tcodegen.SimpleImport(\"github.com/jinzhu/copier\"),\n\t}\n\t// get the imports for the app packages\n\tapi.IterateVersions(func(v *design.APIVersionDefinition) error {\n\t\tif v.IsDefault() {\n\t\t\treturn nil\n\t\t}\n\t\timports = append(imports, codegen.SimpleImport(filepath.Join(filepath.ToSlash(imp), codegen.Goify(codegen.VersionPackage(v.Version), false))))\n\t\treturn nil\n\t})\n\n\ttitle := fmt.Sprintf(\"%s: Media Helpers\", api.Name)\n\n\terr = api.IterateVersions(func(v *design.APIVersionDefinition) error {\n\t\terr = v.IterateResources(func(res *design.ResourceDefinition) error {\n\t\t\tactionable := false\n\t\t\terr = res.IterateActions(func(ad *design.ActionDefinition) error {\n\t\t\t\tif hasUserType(ad) {\n\t\t\t\t\tactionable = true\n\t\t\t\t}\n\t\t\t\treturn nil\n\n\t\t\t})\n\t\t\tif !actionable {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif !res.SupportsVersion(v.Version) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tprefix := \"_resource\"\n\t\t\tif v.Version != \"\" {\n\t\t\t\tprefix = prefix + \"_\" + codegen.Goify(v.Version, false)\n\n\t\t\t}\n\t\t\tname := strings.ToLower(codegen.Goify(res.Name, false))\n\n\t\t\terr := os.MkdirAll(filepath.Join(modelDir(), name), 0755)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tmediafilename := filepath.Join(modelDir(), name, name+prefix+\"_gen.go\")\n\t\t\tos.Remove(mediafilename)\n\n\t\t\tresw, err := 
NewResourceWriter(mediafilename)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error executing Gorma: \", err.Error())\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\trd := NewResourceData(v.Version, res)\n\t\t\tfor k, _ := range rd.RequiredPackages {\n\t\t\t\timports = append(imports, codegen.SimpleImport(path.Join(mainimp, \"models\", k)))\n\t\t\t}\n\t\t\tresw.WriteHeader(title, name, imports)\n\n\t\t\terr = resw.Execute(&rd)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Error executing Gorma: \", err.Error())\n\t\t\t\tg.Cleanup()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := resw.FormatCode(); err != nil {\n\t\t\t\tfmt.Println(\"Error executing Gorma: \", err.Error())\n\t\t\t\tg.Cleanup()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\tg.genfiles = append(g.genfiles, mediafilename)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\treturn nil\n\t})\n\treturn err\n}",
"func Generate() {\n\tfor _, minorVersion := range minorVersions {\n\t\tversionStrs = append(versionStrs, \"v\"+majorVersion+\".\"+minorVersion)\n\t}\n\n\tdefinitionFileName := \"full-v\" + majorVersion + \".yml\"\n\tfullBytes, err := ioutil.ReadFile(\"./\" + definitionFileName)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading %s: %s\", definitionFileName, err)\n\t}\n\n\tvar swagger map[interface{}]interface{}\n\tif err := yaml.Unmarshal(fullBytes, &swagger); err != nil {\n\t\tlog.Fatalf(\"Error unmarshaling swagger yml: %s\", err)\n\t}\n\n\tfor _, versionStr := range versionStrs {\n\t\tclientBytes, err := generateClientYml(swagger, versionStr)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error generating client yml: %s\", err)\n\t\t}\n\t\tif err := ioutil.WriteFile(versionStr+\"-client.yml\", clientBytes, 0644); err != nil {\n\t\t\tlog.Fatalf(\"Error writing client %s API: %s\", versionStr, err)\n\t\t}\n\n\t\tversionData, err := generateDataAPIYml(swagger, versionStr)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error generating data %s API: %s\", versionStr, err)\n\t\t}\n\t\tif err := ioutil.WriteFile(versionStr+\".yml\", versionData, 0644); err != nil {\n\t\t\tlog.Fatalf(\"Error writing data %s API: %s\", versionStr, err)\n\t\t}\n\n\t\tversionEvents, err := generateEventsAPIYml(swagger, versionStr)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error generating events %s API: %s\", versionStr, err)\n\t\t}\n\t\tif err := ioutil.WriteFile(versionStr+\"-events.yml\", versionEvents, 0644); err != nil {\n\t\t\tlog.Fatalf(\"Error writing events %s API: %s\", versionStr, err)\n\t\t}\n\t}\n}",
"func GenerateModels(w *Writer, packagename string, spec *swagme.Spec) error {\n\tw.WriteSinglelineComment(\"Code generated by swagme\")\n\tw.WriteBlankLine()\n\n\tif info := spec.Info; info != nil {\n\t\tw.WriteSinglelineComment(fmt.Sprintf(\"package %s defines models for %s %s\", packagename, info.Title, info.Version))\n\t}\n\n\tw.WritePackage(packagename)\n\n\t// generate enums\n\tfor _, enum := range buildEnums(spec.Definitions) {\n\t\tw.WriteBlankLine()\n\t\tif err := generateEnum(w, &enum); err != nil {\n\t\t\tw.WriteMultilineCommentf(`ERROR with enum %q:\\n%v`, enum.Name, err)\n\t\t}\n\t}\n\n\t// generate structs\n\tfor name, definition := range spec.Definitions {\n\t\tw.WriteBlankLine()\n\t\tif err := generateDefinition(w, name, definition); err != nil {\n\t\t\tw.WriteMultilineCommentf(`ERROR with definition %q:\\n%v`, name, err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (g *Generator) generateMedia(api *design.APIDefinition) error {\n\tos.MkdirAll(modelDir(), 0755)\n\tapp := kingpin.New(\"Model generator\", \"model generator\")\n\tcodegen.RegisterFlags(app)\n\t_, err := app.Parse(os.Args[1:])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgopath := filepath.SplitList(os.Getenv(\"GOPATH\"))[0]\n\n\tmainimp, err := filepath.Rel(filepath.Join(gopath, \"src\"), codegen.OutputDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmainimp = filepath.ToSlash(mainimp)\n\timp := path.Join(mainimp, \"app\")\n\timports := []*codegen.ImportSpec{\n\t\tcodegen.SimpleImport(imp),\n\t\tcodegen.SimpleImport(\"github.com/jinzhu/copier\"),\n\t}\n\t// get the imports for the app packages\n\tapi.IterateVersions(func(v *design.APIVersionDefinition) error {\n\t\tif v.IsDefault() {\n\t\t\treturn nil\n\t\t}\n\t\timports = append(imports, codegen.SimpleImport(imp+\"/\"+codegen.Goify(codegen.VersionPackage(v.Version), false)))\n\t\treturn nil\n\t})\n\n\ttitle := fmt.Sprintf(\"%s: Media Helpers\", api.Name)\n\n\terr = api.IterateVersions(func(v *design.APIVersionDefinition) error {\n\t\terr = v.IterateMediaTypes(func(res *design.MediaTypeDefinition) error {\n\t\t\tif res.Reference == nil {\n\t\t\t\t// not a mediatype that references a model\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif model, ok := res.Reference.(*design.UserTypeDefinition); ok {\n\t\t\t\tif !modelMetadata(model.Definition()) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif res.Type.IsObject() {\n\t\t\t\tif !res.SupportsVersion(v.Version) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tprefix := \"_media\"\n\t\t\t\tif v.Version != \"\" {\n\t\t\t\t\tprefix = prefix + \"_\" + codegen.Goify(v.Version, false)\n\n\t\t\t\t}\n\t\t\t\tname := strings.ToLower(codegen.Goify(res.TypeName, false))\n\n\t\t\t\terr := os.MkdirAll(filepath.Join(modelDir(), name), 0755)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\tmediafilename := filepath.Join(modelDir(), name, 
name+prefix+\"_gen.go\")\n\n\t\t\t\tos.Remove(mediafilename)\n\t\t\t\tresw, err := NewMediaWriter(mediafilename)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error executing Gorma: \", err.Error())\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\tmd := NewMediaData(v.Version, res)\n\t\t\t\tfor k, _ := range md.RequiredPackages {\n\t\t\t\t\timports = append(imports, codegen.SimpleImport(path.Join(mainimp, \"models\", k)))\n\t\t\t\t}\n\t\t\t\tresw.WriteHeader(title, name, imports)\n\n\t\t\t\terr = resw.Execute(&md)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"Error executing Gorma: \", err.Error())\n\t\t\t\t\tg.Cleanup()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := resw.FormatCode(); err != nil {\n\t\t\t\t\tfmt.Println(\"Error executing Gorma: \", err.Error())\n\t\t\t\t\tg.Cleanup()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err == nil {\n\t\t\t\t\tg.genfiles = append(g.genfiles, mediafilename)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\n\t\t})\n\t\treturn err\n\t})\n\treturn err\n}",
"func GenerateSchema(outputDir string, inputPackage string, inputBase string, title string, version string) error {\n\tschemaWriterSrc := filepath.Join(inputPackage, OpenApiDir, SchemaWriterSrcFileName)\n\tschemaWriterBinary, err := ioutil.TempFile(\"\", \"\")\n\toutputDir = filepath.Join(outputDir, \"openapi-spec\")\n\tdefer func() {\n\t\terr := util.DeleteFile(schemaWriterBinary.Name())\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Error cleaning up tempfile %s created to compile %s to %v\",\n\t\t\t\tschemaWriterBinary.Name(), SchemaWriterSrcFileName, err)\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"creating tempfile to compile %s to %v\", SchemaWriterSrcFileName, err)\n\t}\n\tcmd := util.Command{\n\t\tDir: inputBase,\n\t\tName: \"go\",\n\t\tArgs: []string{\n\t\t\t\"build\",\n\t\t\t\"-o\",\n\t\t\tschemaWriterBinary.Name(),\n\t\t\tschemaWriterSrc,\n\t\t},\n\t}\n\tout, err := cmd.RunWithoutRetry()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"running %s, output %s\", cmd.String(), out)\n\t}\n\tfileJSON := filepath.Join(outputDir, OpenApiV2JSON)\n\tfileYAML := filepath.Join(outputDir, OpenApiV2YAML)\n\tcmd = util.Command{\n\t\tName: schemaWriterBinary.Name(),\n\t\tArgs: []string{\n\t\t\t\"--output-directory\",\n\t\t\toutputDir,\n\t\t\t\"--title\",\n\t\t\ttitle,\n\t\t\t\"--version\",\n\t\t\tversion,\n\t\t},\n\t}\n\tout, err = cmd.RunWithoutRetry()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"running %s, output %s\", cmd.String(), out)\n\t}\n\t// Convert to YAML as well\n\tbytes, err := ioutil.ReadFile(fileJSON)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"reading %s\", fileJSON)\n\t}\n\tyamlBytes, err := yaml.JSONToYAML(bytes)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"converting %s to yaml\", fileJSON)\n\t}\n\terr = ioutil.WriteFile(fileYAML, yamlBytes, 0644)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"writing %s\", fileYAML)\n\t}\n\treturn nil\n}",
"func GenerateSchema(outputDir string, inputPackage string, inputBase string, title string, version string, gopath string) error {\n\tschemaWriterSrc := filepath.Join(inputPackage, OpenApiDir, SchemaWriterSrcFileName)\n\tschemaWriterBinary, err := ioutil.TempFile(\"\", \"\")\n\toutputDir = filepath.Join(outputDir, \"openapi-spec\")\n\tdefer func() {\n\t\terr := util.DeleteFile(schemaWriterBinary.Name())\n\t\tif err != nil {\n\t\t\tutil.AppLogger().Warnf(\"error cleaning up tempfile %s created to compile %s to %v\",\n\t\t\t\tschemaWriterBinary.Name(), SchemaWriterSrcFileName, err)\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"creating tempfile to compile %s to %v\", SchemaWriterSrcFileName, err)\n\t}\n\tcmd := util.Command{\n\t\tDir: inputBase,\n\t\tName: \"go\",\n\t\tArgs: []string{\n\t\t\t\"build\",\n\t\t\t\"-o\",\n\t\t\tschemaWriterBinary.Name(),\n\t\t\tschemaWriterSrc,\n\t\t},\n\t\tEnv: map[string]string{\n\t\t\t\"GO111MODULE\": \"on\",\n\t\t\t\"GOPATH\": gopath,\n\t\t},\n\t}\n\tout, err := cmd.RunWithoutRetry()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"running %s, output %s\", cmd.String(), out)\n\t}\n\tfileJSON := filepath.Join(outputDir, OpenApiV2JSON)\n\tfileYAML := filepath.Join(outputDir, OpenApiV2YAML)\n\tcmd = util.Command{\n\t\tName: schemaWriterBinary.Name(),\n\t\tArgs: []string{\n\t\t\t\"--output-directory\",\n\t\t\toutputDir,\n\t\t\t\"--title\",\n\t\t\ttitle,\n\t\t\t\"--version\",\n\t\t\tversion,\n\t\t},\n\t}\n\tout, err = cmd.RunWithoutRetry()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"running %s, output %s\", cmd.String(), out)\n\t}\n\t// Convert to YAML as well\n\tbytes, err := ioutil.ReadFile(fileJSON)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"reading %s\", fileJSON)\n\t}\n\tyamlBytes, err := yaml.JSONToYAML(bytes)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"converting %s to yaml\", fileJSON)\n\t}\n\terr = ioutil.WriteFile(fileYAML, yamlBytes, 0600)\n\tif err != nil {\n\t\treturn 
errors.Wrapf(err, \"writing %s\", fileYAML)\n\t}\n\treturn nil\n}",
"func (s *schg) Generate(schemaInBase, schemaOutBase string) (err error) {\n\ts.definitions = nil\n\ts.services = make(map[string]string, 0)\n\ts.pkg = filepath.Base(schemaOutBase)\n\tif schemaInBase, err = filepath.Abs(filepath.Clean(schemaInBase)); err != nil {\n\t\treturn\n\t}\n\tif schemaOutBase, err = filepath.Abs(filepath.Clean(schemaOutBase)); err != nil {\n\t\treturn\n\t}\n\ts.defFile = filepath.Join(schemaInBase, definitionsFile)\n\n\tif err = s.loadDefinitions(schemaInBase); err != nil {\n\t\tlog.Println(fmt.Sprintf(cannotReadFileErr, definitionsFile, err))\n\t}\n\tif err = filepath.Walk(schemaInBase, s.walkFunc()); err != nil {\n\t\treturn\n\t}\n\t// remove created temporary files/dirs at the end.\n\tdefer func() {\n\t\tif e := s.dropTmpDirs(); e != nil {\n\t\t\tlog.Println(fmt.Sprintf(cannotRemoveTempDirsErr, e))\n\t\t}\n\t}()\n\tif err = s.createPaths(schemaOutBase); err != nil {\n\t\treturn\n\t}\n\tif err = s.saveAsGoBinData(schemaOutBase); err != nil {\n\t\treturn\n\t}\n\tif err = s.createBindSchemaFiles(schemaOutBase); err != nil {\n\t\treturn\n\t}\n\treturn\n}",
"func GenerateCmd(cmd *cobra.Command, args []string) {\n\tselectedModels := utils.SelectMethodsModels(\"everything\")\n\n\t// Step 1: Generating controllers\n\tfor _, selectedModel := range selectedModels {\n\t\tutils.GenerateFile(\"controller.tmpl\", \"controllers/\"+strings.ToLower(selectedModel.ModelName)+\".go\", selectedModel)\n\t}\n\n\t// Step 2: Generating router\n\tutils.GenerateFile(\"router.tmpl\", \"server/router.go\", selectedModels)\n\n\t// Step 3.1: Generate store.go that indexes methods\n\tutils.GenerateFile(\"index.store.tmpl\", \"store/store.go\", selectedModels)\n\n\tfor _, selectedModel := range selectedModels {\n\t\t// Step 3.2: Generate interfaces in entity.go\n\t\tutils.GenerateFile(\"entity.store.tmpl\", \"store/\"+strings.ToLower(selectedModel.ModelName)+\".go\", selectedModel)\n\n\t\t// Step 3.3: Generate mongo methods in mongodb/entity.go\n\t\tutils.GenerateFile(\"mongo.store.tmpl\", \"store/mongodb/\"+strings.ToLower(selectedModel.ModelName)+\".go\", selectedModel)\n\t}\n\n\tos.Exit(1)\n\n}",
"func (gs *Server) Generate() error {\n\tif err := commons.CheckDuplicatedTitleTypes(gs.apiDef); err != nil {\n\t\treturn err\n\t}\n\tif gs.RootImportPath == \"\" {\n\t\treturn fmt.Errorf(\"invalid import path = empty. please set --import-path or set target dir under gopath\")\n\t}\n\t// helper package\n\tgh := goramlHelper{\n\t\trootImportPath: gs.RootImportPath,\n\t\tpackageName: \"goraml\",\n\t\tpackageDir: \"goraml\",\n\t}\n\tif err := gh.generate(gs.TargetDir); err != nil {\n\t\treturn err\n\t}\n\n\tif err := generateAllStructs(gs.apiDef, gs.TargetDir); err != nil {\n\t\treturn err\n\t}\n\n\t// security scheme\n\tif err := generateSecurity(gs.apiDef.SecuritySchemes, gs.TargetDir, gs.PackageName); err != nil {\n\t\tlog.Errorf(\"failed to generate security scheme:%v\", err)\n\t\treturn err\n\t}\n\n\t// genereate resources\n\trds, err := gs.generateServerResources(gs.TargetDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgs.ResourcesDef = rds\n\n\t// libraries\n\tif err := generateLibraries(gs.apiDef.Libraries, gs.TargetDir, gs.libsRootURLs); err != nil {\n\t\treturn err\n\t}\n\n\t// routes\n\tif err := commons.GenerateFile(gs, \"./templates/golang/server_routes.tmpl\", \"server_routes\", filepath.Join(gs.TargetDir, \"routes.go\"), true); err != nil {\n\t\treturn err\n\t}\n\n\t// generate main\n\tif gs.withMain {\n\t\t// HTML front page\n\t\tif err := commons.GenerateFile(gs, \"./templates/index.html.tmpl\", \"index.html\", filepath.Join(gs.TargetDir, \"index.html\"), false); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// main file\n\t\treturn commons.GenerateFile(gs, \"./templates/golang/server_main_go.tmpl\", \"server_main_go\", filepath.Join(gs.TargetDir, \"main.go\"), true)\n\t}\n\n\treturn nil\n}",
"func (m *MigrationParams) GenerateFiles() (err error) {\n\tvar forwardFile, reverseFile *os.File\n\n\tif forwardFile, err = newMigrationFile(m.Forward, m.Dirpath); err != nil {\n\t\treturn\n\t}\n\tfmt.Fprintln(os.Stderr, \"created forward file:\", forwardFile.Name())\n\tdefer func() { _ = forwardFile.Close() }()\n\n\tif !m.Reversible {\n\t\tfmt.Fprintln(os.Stderr, \"migration marked irreversible, did not create reverse file\")\n\t\treturn\n\t}\n\n\tif reverseFile, err = newMigrationFile(m.Reverse, m.Dirpath); err != nil {\n\t\treturn\n\t}\n\tfmt.Fprintln(os.Stderr, \"created reverse file:\", reverseFile.Name())\n\tdefer func() { _ = reverseFile.Close() }()\n\treturn\n}",
"func generateSchema(models map[string]SourceModel, objectName string, fileName string) {\n\tnode := formatHead(buildObject(models, objectName))\n\n\tschema, err := json.MarshalIndent(node, \"\", \" \")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\toutput, err := os.Create(filepath.Join(homeDir(), \"go\", \"src\", \"github.com\", \"twuillemin\", \"kuboxy\", \"docs\", \"json_schemas\", fileName))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tdefer func() {\n\t\terr = output.Close()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}()\n\n\t_, err = output.Write(schema)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}",
"func actionGenerate(ctx *cli.Context) (err error) {\n\tif err = autoUpdate(ctx); err != nil {\n\t\treturn\n\t}\n\terr = installDependencies()\n\tif err != nil {\n\t\treturn\n\t}\n\tf := ctx.Args().Get(0)\n\n\tgoPath := initGopath()\n\tif !fileExist(goPath) {\n\t\treturn cli.NewExitError(fmt.Sprintf(\"GOPATH not exist: \"+goPath), 1)\n\t}\n\n\tfilesToGenerate := []string{f}\n\tiPath := ctx.String(\"i\")\n\tif iPath != \"\" {\n\t\tiPath = goPath + \"/src/go-common/app/interface/\" + iPath\n\t\tif !fileExist(iPath) {\n\t\t\treturn cli.NewExitError(fmt.Sprintf(\"interface project not found: \"+iPath), 1)\n\t\t}\n\t\tpbs := filesWithSuffix(iPath+\"/api\", \".pb\", \".proto\")\n\t\tif len(pbs) == 0 {\n\t\t\treturn cli.NewExitError(fmt.Sprintf(\"no pbs found in path: \"+iPath+\"/api\"), 1)\n\t\t}\n\t\tfilesToGenerate = pbs\n\t\tfmt.Printf(\".pb files found %v\\n\", pbs)\n\t} else {\n\t\tif f == \"\" {\n\t\t\t// if is is empty, look up project that contains current dir\n\t\t\tabs, _ := filepath.Abs(\".\")\n\t\t\tproj := lookupProjPath(abs)\n\t\t\tif proj == \"\" {\n\t\t\t\treturn cli.NewExitError(\"current dir is not in any project : \"+abs, 1)\n\t\t\t}\n\t\t\tif proj != \"\" {\n\t\t\t\tpbs := filesWithSuffix(proj+\"/api\", \".pb\", \".proto\")\n\t\t\t\tif len(pbs) == 0 {\n\t\t\t\t\treturn cli.NewExitError(fmt.Sprintf(\"no pbs found in path: \"+proj+\"/api\"), 1)\n\t\t\t\t}\n\t\t\t\tfilesToGenerate = pbs\n\t\t\t\tfmt.Printf(\".pb files found %v\\n\", pbs)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, p := range filesToGenerate {\n\t\tif !fileExist(p) {\n\t\t\treturn cli.NewExitError(fmt.Sprintf(\"file not exist: \"+p), 1)\n\t\t}\n\t\tgenerateForFile(p, goPath)\n\t}\n\tif syncLiveDoc {\n\t\terr = actionSyncLiveDoc(ctx)\n\t}\n\treturn\n}",
"func (pg *ProjectGenerator) Generate(s Settings) {\n\n\tname := s.ProjectName\n\tmodule := s.ModuleName\n\tbase := s.BaseFolder\n\n\tprojectDir := filepath.Join(base, name)\n\tconfDir := filepath.Join(projectDir, \"config\")\n\tcompDir := filepath.Join(projectDir, \"comp-def\")\n\n\tpg.mkDir(projectDir)\n\tpg.mkDir(confDir)\n\tpg.mkDir(compDir)\n\n\tpg.CompWriterFunc(compDir, pg)\n\tpg.ConfWriterFunc(confDir, pg)\n\tpg.writeMainFile(base, name, module)\n\tpg.writeGitIgnore(base, name)\n\tpg.ModFileFunc(projectDir, module, pg)\n}",
"func (g *ModelGenerator) Generate() error {\n\tconst (\n\t\t// master = `Names:{{block \"list\" .}}{{\"\\n\"}}{{range .}}{{println \"-\" .}}{{end}}{{end}}`\n\t\tclassDeclaration = `class {{toCamel .Name}}Model {`\n\t\tfields = `{{range .}} {{\"\\n\"}}{{\"\\t\"}}String {{\"_\"}}{{lowerCamel .Name}}{{\";\"}}{{end}} `\n\t\tstaticFields = `{{range .}} {{\"\\n\"}}{{\"\\t\"}}static const {{toScreamingSnake .Name}}{{\" = \\\"\"}}{{lowerCamel .Name}}{{\"\\\";\"}}{{end}} `\n\t\tstaticImport = `import {{.}}`\n\t)\n\tvar (\n\t\tfuncs = template.FuncMap{\n\t\t\t\"lowerCamel\": strcase.ToLowerCamel,\n\t\t\t\"toCamel\": strcase.ToCamel,\n\t\t\t\"toScreamingSnake\": strcase.ToScreamingSnake,\n\t\t}\n\t\t// guardians = []string{\"Gamora\", \"Groot\", \"Nebula\", \"Rocket\", \"Star-Lord\"}\n\t)\n\tentity := g.Config.Entity\n\tdestination := g.Config.Destination\n\t// t := g.Config.Template\n\t// t.Funcs(funcs)\n\tfmt.Printf(\"\\tGenerating Model: %v%s...\\n\", entity.Name, \"_model\")\n\t// staticPath := filepath.Join(destination, entity.Name)\n\tstaticPath := filepath.Join(destination, \"lib\", \"features\", \"home\", \"models\")\n\tif err := os.MkdirAll(staticPath, os.ModePerm); err != nil {\n\t\treturn fmt.Errorf(\"error creating directory at %s: %v\", staticPath, err)\n\t}\n\timportBlock := bytes.Buffer{}\n\tWriteModelImport(&importBlock, g.Config)\n\tclassDeclTemplate, err := template.New(\"classDeclaration\").Funcs(funcs).Parse(classDeclaration)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating template %s: %v\", \"classDeclaration\", err)\n\t}\n\tfieldsTemplate, err := template.New(\"fields\").Funcs(funcs).Parse(fields)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating template %s: %v\", \"fields\", err)\n\t}\n\tstaticFieldsTemplate, err := template.New(\"static_fields\").Funcs(funcs).Parse(staticFields)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating template %s: %v\", \"static_fields\", err)\n\t}\n\tclassDeclBlock := bytes.Buffer{}\n\terr = 
classDeclTemplate.Execute(&classDeclBlock, entity)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error executing template %s: %v\", \"classDeclaration\", err)\n\t}\n\tfieldsBlock := bytes.Buffer{}\n\terr = fieldsTemplate.Execute(&fieldsBlock, entity.Attributes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error executing template %s: %v\", \"fields\", err)\n\t}\n\tstaticFieldsBlock := bytes.Buffer{}\n\terr = staticFieldsTemplate.Execute(&staticFieldsBlock, entity.Attributes)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error executing template %s: %v\", \"static_fields\", err)\n\t}\n\t// md := ModelData{\n\t// \tClassDeclaration: classDeclBlock.String(),\n\t// \tImportStatements: importBlock.String(),\n\t// \tStaticFields: staticFieldsBlock.String(),\n\t// \tFields: fieldsBlock.String(),\n\t// \tTransformer: \"transformer\",\n\t// \tName: \"User Model\",\n\t// }\n\tfilePath := filepath.Join(staticPath, fmt.Sprintf(\"%s%s\", entity.Name, \"_model.dart\"))\n\tf, err := os.Create(filePath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating file %s: %v\", filePath, err)\n\t}\n\tdefer f.Close()\n\tmodelWriter := &ModelWriter{\n\t\tModelName: entity.Name,\n\t}\n\tif err := modelWriter.WriteModel(staticPath, classDeclBlock.String(), importBlock.String(), staticFieldsBlock.String(), fieldsBlock.String(), \"transformers\", f, g.Config); err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"\\tFinished generating Model: %s...\\n\", entity.Name)\n\treturn nil\n}",
"func (s *Schema) Generate(root string) error {\n\treturn inSubDirectory(root, s.Package, func(dir string) error {\n\t\tk, p, err := s.getFirstEndpoint()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif len(s.Endpoints) == 1 && isDefaultEndpoint(k) {\n\t\t\tif err = s.genSingleHandler(dir, p); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn fixImports(dir, handlerDst)\n\t\t}\n\n\t\tif err = s.genEndpointsInterface(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = s.genHandler(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = s.genDispatcher(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = s.genHandlers(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn fixImports(dir, endpointsDst)\n\t})\n}",
"func Generate(c *goproject.Config, p *Project) error {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to get current working directory: %v\", err)\n\t}\n\n\tfullPath := filepath.Join(cwd, p.Name)\n\n\terr = os.Mkdir(fullPath, 0750)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create project directory: %v\", err)\n\t}\n\n\terr = copyFiles(p.Tpl.path, fullPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to copy template files: %v\", err)\n\t}\n\n\terr = applyProjectToTemplates(p, fullPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to execute templates: %v\", err)\n\t}\n\n\terr = fixCmdProjectFolderName(p, fullPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to rename cmd project folder: %v\", err)\n\t}\n\n\terr = gitCleanup(fullPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to initialize git: %v\", err)\n\t}\n\n\treturn nil\n}",
"func Generate(env environ.Values, cfg *Config) error {\n\tinfo, err := cfg.Driver.Parse(env.Log, cfg.ConnStr, cfg.Schemas, makeFilter(cfg.IncludeTables, cfg.ExcludeTables))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdb, err := makeData(env.Log, info, cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(cfg.SchemaPaths) == 0 {\n\t\tenv.Log.Println(\"No SchemaPaths specified, skipping schemas.\")\n\t} else {\n\t\tif err := generateSchemas(env, cfg, db); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif len(cfg.EnumPaths) == 0 {\n\t\tenv.Log.Println(\"No EnumPath specified, skipping enums.\")\n\t} else {\n\t\tif err := generateEnums(env, cfg, db); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif len(cfg.TablePaths) == 0 {\n\t\tenv.Log.Println(\"No table path specified, skipping tables.\")\n\t} else {\n\t\tif err := generateTables(env, cfg, db); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn copyStaticFiles(env, cfg.StaticDir, cfg.OutputDir)\n}",
"func Generate() error {\n\tflag.Parse()\n\n\tsrc, err := getQAPI(*inputSpec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttree, err := qapischema.Parse(string(src))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttree, err = completeParseTree(tree, *inputSpec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsymbols := lowerParseTree(tree)\n\n\tneed := neededTypes(symbols)\n\tbs, err := renderAPI(*templates, symbols, need)\n\tif err != nil {\n\t\treturn err\n\t}\n\tformatted, err := format.Source(bs)\n\tif err != nil {\n\t\t// Ignore the error, we're just trying to write out the\n\t\t// unformatted code to help debugging.\n\t\tioutil.WriteFile(*outputGo, bs, 0640)\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(*outputGo, formatted, 0640)\n}",
"func (p *OrmPlugin) CleanFiles(response *plugin.CodeGeneratorResponse) {\n\n\tfor i := 0; i < len(response.File); i++ {\n\t\tfile := response.File[i]\n\t\tfor _, skip := range p.EmptyFiles {\n\t\t\tif strings.Contains(file.GetName(), strings.Trim(skip, \".proto\")) {\n\t\t\t\tresponse.File = append(response.File[:i], response.File[i+1:]...)\n\t\t\t\ti--\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tfile.Content = CleanImports(file.Content)\n\n\t\tsections := strings.SplitAfterN(file.GetContent(), \"\\n\", 3)\n\t\tversionString := \"\"\n\t\tif ProtocGenGormVersion != \"\" {\n\t\t\tversionString = fmt.Sprintf(\"\\n// Generated with protoc-gen-gorm version: %s\\n\", ProtocGenGormVersion)\n\t\t}\n\t\tif AtlasAppToolkitVersion != \"\" {\n\t\t\tversionString = fmt.Sprintf(\"%s// Anticipating compatibility with atlas-app-toolkit version: %s\\n\", versionString, AtlasAppToolkitVersion)\n\t\t}\n\t\tfile.Content = proto.String(fmt.Sprintf(\"%s%s%s%s\", sections[0], sections[1], versionString, sections[2]))\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ParseSwaggerDependencyTree parses the entire dependency tree of a magma swagger spec file specified by the rootFilepath parameter. The returned value maps between the absolute specified dependency filepath and the parsed struct for the dependency file.
|
// ParseSwaggerDependencyTree walks the full dependency graph rooted at
// rootFilepath, reading each magma swagger spec file exactly once.
// Dependency paths listed in a spec's magma-gen-meta are resolved relative
// to rootDir. The returned map is keyed by the absolute path of each parsed
// spec file.
func ParseSwaggerDependencyTree(rootFilepath string, rootDir string) (map[string]MagmaSwaggerSpec, error) {
	absRoot, err := filepath.Abs(rootFilepath)
	if err != nil {
		return nil, fmt.Errorf("root filepath %s is invalid: %w", rootFilepath, err)
	}
	rootSpec, err := readSwaggerSpec(absRoot)
	if err != nil {
		return nil, err
	}

	// Pair each parsed spec with the absolute path it was loaded from so
	// the queue carries enough information to key the result map.
	type specAtPath struct {
		MagmaSwaggerSpec
		path string
	}

	seen := map[string]bool{absRoot: true}
	parsed := map[string]MagmaSwaggerSpec{}
	queue := []specAtPath{{MagmaSwaggerSpec: rootSpec, path: absRoot}}

	// Breadth-first traversal; `seen` guards against re-reading a spec that
	// appears in multiple dependency lists.
	for len(queue) > 0 {
		current := queue[0]
		queue = queue[1:]
		parsed[current.path] = current.MagmaSwaggerSpec

		for _, dep := range current.MagmaGenMeta.Dependencies {
			absDep, err := filepath.Abs(filepath.Join(rootDir, dep))
			if err != nil {
				return nil, fmt.Errorf("dependency filepath %s is invalid: %w", dep, err)
			}
			if seen[absDep] {
				continue
			}
			seen[absDep] = true

			depSpec, err := readSwaggerSpec(absDep)
			if err != nil {
				return nil, fmt.Errorf("failed to read dependency tree of swagger specs: %w", err)
			}
			queue = append(queue, specAtPath{MagmaSwaggerSpec: depSpec, path: absDep})
		}
	}
	return parsed, nil
}
|
[
"func (c *Context) ParseFileList(rootDir string, filePaths []string) (deps []string, errs []error) {\n\n\tif len(filePaths) < 1 {\n\t\treturn nil, []error{fmt.Errorf(\"no paths provided to parse\")}\n\t}\n\n\tc.dependenciesReady = false\n\n\tmoduleCh := make(chan *moduleInfo)\n\terrsCh := make(chan []error)\n\tdoneCh := make(chan struct{})\n\tvar numErrs uint32\n\tvar numGoroutines int32\n\n\t// handler must be reentrant\n\thandleOneFile := func(file *parser.File) {\n\t\tif atomic.LoadUint32(&numErrs) > maxErrors {\n\t\t\treturn\n\t\t}\n\n\t\tfor _, def := range file.Defs {\n\t\t\tvar module *moduleInfo\n\t\t\tvar errs []error\n\t\t\tswitch def := def.(type) {\n\t\t\tcase *parser.Module:\n\t\t\t\tmodule, errs = c.processModuleDef(def, file.Name)\n\t\t\tcase *parser.Assignment:\n\t\t\t\t// Already handled via Scope object\n\t\t\tdefault:\n\t\t\t\tpanic(\"unknown definition type\")\n\t\t\t}\n\n\t\t\tif len(errs) > 0 {\n\t\t\t\tatomic.AddUint32(&numErrs, uint32(len(errs)))\n\t\t\t\terrsCh <- errs\n\t\t\t} else if module != nil {\n\t\t\t\tmoduleCh <- module\n\t\t\t}\n\t\t}\n\t}\n\n\tatomic.AddInt32(&numGoroutines, 1)\n\tgo func() {\n\t\tvar errs []error\n\t\tdeps, errs = c.WalkBlueprintsFiles(rootDir, filePaths, handleOneFile)\n\t\tif len(errs) > 0 {\n\t\t\terrsCh <- errs\n\t\t}\n\t\tdoneCh <- struct{}{}\n\t}()\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase newErrs := <-errsCh:\n\t\t\terrs = append(errs, newErrs...)\n\t\tcase module := <-moduleCh:\n\t\t\tnewErrs := c.addModule(module)\n\t\t\tif len(newErrs) > 0 {\n\t\t\t\terrs = append(errs, newErrs...)\n\t\t\t}\n\t\tcase <-doneCh:\n\t\t\tn := atomic.AddInt32(&numGoroutines, -1)\n\t\t\tif n == 0 {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\n\treturn deps, errs\n}",
"func (m *Matcher) GeneratorDependencyTree(manifestFilePath string) string {\n\tlog.Debug().Msgf(\"Executing: Generate dependencies.txt\")\n\tmaven, err := exec.LookPath(\"mvn\")\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msgf(\"Please make sure Maven is installed. Hint: Check same by executing: mvn --version \\n\")\n\t}\n\ttreePath, _ := filepath.Abs(filepath.Join(os.TempDir(), m.DepsTreeFileName()))\n\toutcmd := fmt.Sprintf(\"-DoutputFile=%s\", treePath)\n\tcleanRepo := exec.Command(maven, \"--quiet\", \"clean\", \"-f\", manifestFilePath)\n\tdependencyTree := exec.Command(maven, \"--quiet\", \"org.apache.maven.plugins:maven-dependency-plugin:3.0.2:tree\", \"-f\", manifestFilePath, outcmd, \"-DoutputType=dot\", \"-DappendOutput=true\")\n\tlog.Debug().Msgf(\"Clean Repo Command: %s\", cleanRepo)\n\tlog.Debug().Msgf(\"dependencyTree Command: %s\", dependencyTree)\n\tif err := cleanRepo.Run(); err != nil {\n\t\tlog.Fatal().Err(err).Msgf(err.Error())\n\t}\n\tif err := dependencyTree.Run(); err != nil {\n\t\tlog.Fatal().Err(err).Msgf(err.Error())\n\t}\n\tlog.Debug().Msgf(\"Success: buildDepsTree\")\n\treturn treePath\n}",
"func GetDepRecursive(project string, getType string, returnType string) (DepMap, error) {\n\t// Handle path, if it don't exist, get it.\n\t// To get project as they come\n\t//GetPkg(project)\n\t// Convert slice to map, since it's fast in searching.\n\timportSlice, err := GetImports(project, getType)\n\tif err != nil {\n\t\tvar m DepMap\n\t\treturn m, err\n\t}\n\timportMap := SliceToMap(importSlice)\n\t// Remove standard libs from users libs\n\timportMap = RemoveMap(importMap, StdMap)\n\t// Convert importMap to slice again\n\timportSlice = MapToSlice(importMap)\n\t// Convert slice to DepMap now\n\timportDepMap := SliceToDepMap(importSlice)\n\n\tfor key := range importDepMap.deps {\n\t\tif VendorUsed {\n\t\t\tkey = strings.Replace(key, ProjectName+\"/vendor/\", \"\", -1)\n\t\t}\n\t\tswitch returnType {\n\t\tcase \"tree\":\n\t\t\timportDepMap.deps[key], _ = GetDepRecursive(key, getType, returnType)\n\t\tcase \"graph\":\n\t\t\tDepGraph.deps[key], _ = GetDepRecursive(key, getType, returnType)\n\t\tcase \"list\":\n\t\t\treturn importDepMap, nil\n\t\tdefault:\n\t\t\timportDepMap.deps[key], _ = GetDepRecursive(key, getType, returnType)\n\t\t}\n\t}\n\n\treturn importDepMap, nil\n}",
"func ParseDepFile(content []byte) ([]string, []string) {\n\tcontent = bytes.Replace(content, []byte(\"\\\\\\n\"), nil, -1)\n\tcomponents := bytes.Split(content, []byte(\":\"))\n\tif len(components) != 2 {\n\t\treturn nil, nil\n\t}\n\n\ttargetStrs := bytes.Split(components[0], []byte(\" \"))\n\tdepStrs := bytes.Split(components[1], []byte(\" \"))\n\n\tvar targets, deps []string\n\tfor _, t := range targetStrs {\n\t\tif len(t) > 0 {\n\t\t\ttargets = append(targets, string(t))\n\t\t}\n\t}\n\tfor _, d := range depStrs {\n\t\tif len(d) > 0 {\n\t\t\tdeps = append(deps, string(d))\n\t\t}\n\t}\n\n\treturn targets, deps\n}",
"func (c *Context) WalkBlueprintsFiles(rootDir string, filePaths []string, visitor FileHandler) (deps []string, errs []error) {\n\n\t// make a mapping from ancestors to their descendants to facilitate parsing ancestors first\n\tdescendantsMap, err := findBlueprintDescendants(filePaths)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t\treturn nil, []error{err}\n\t}\n\tblueprintsSet := make(map[string]bool)\n\n\t// Channels to receive data back from openAndParse goroutines\n\tblueprintsCh := make(chan fileParseContext)\n\terrsCh := make(chan []error)\n\tdepsCh := make(chan string)\n\n\t// Channel to notify main loop that a openAndParse goroutine has finished\n\tdoneParsingCh := make(chan fileParseContext)\n\n\t// Number of outstanding goroutines to wait for\n\tactiveCount := 0\n\tvar pending []fileParseContext\n\ttooManyErrors := false\n\n\t// Limit concurrent calls to parseBlueprintFiles to 200\n\t// Darwin has a default limit of 256 open files\n\tmaxActiveCount := 200\n\n\t// count the number of pending calls to visitor()\n\tvisitorWaitGroup := sync.WaitGroup{}\n\n\tstartParseBlueprintsFile := func(blueprint fileParseContext) {\n\t\tif blueprintsSet[blueprint.fileName] {\n\t\t\treturn\n\t\t}\n\t\tblueprintsSet[blueprint.fileName] = true\n\t\tactiveCount++\n\t\tdeps = append(deps, blueprint.fileName)\n\t\tvisitorWaitGroup.Add(1)\n\t\tgo func() {\n\t\t\tfile, blueprints, deps, errs := c.openAndParse(blueprint.fileName, blueprint.Scope, rootDir, &blueprint)\n\t\t\tif len(errs) > 0 {\n\t\t\t\terrsCh <- errs\n\t\t\t}\n\t\t\tfor _, blueprint := range blueprints {\n\t\t\t\tblueprintsCh <- blueprint\n\t\t\t}\n\t\t\tfor _, dep := range deps {\n\t\t\t\tdepsCh <- dep\n\t\t\t}\n\t\t\tdoneParsingCh <- blueprint\n\n\t\t\tif blueprint.parent != nil && blueprint.parent.doneVisiting != nil {\n\t\t\t\t// wait for visitor() of parent to complete\n\t\t\t\t<-blueprint.parent.doneVisiting\n\t\t\t}\n\n\t\t\tif len(errs) == 0 {\n\t\t\t\t// process this 
file\n\t\t\t\tvisitor(file)\n\t\t\t}\n\t\t\tif blueprint.doneVisiting != nil {\n\t\t\t\tclose(blueprint.doneVisiting)\n\t\t\t}\n\t\t\tvisitorWaitGroup.Done()\n\t\t}()\n\t}\n\n\tfoundParseableBlueprint := func(blueprint fileParseContext) {\n\t\tif activeCount >= maxActiveCount {\n\t\t\tpending = append(pending, blueprint)\n\t\t} else {\n\t\t\tstartParseBlueprintsFile(blueprint)\n\t\t}\n\t}\n\n\tstartParseDescendants := func(blueprint fileParseContext) {\n\t\tdescendants, hasDescendants := descendantsMap[blueprint.fileName]\n\t\tif hasDescendants {\n\t\t\tfor _, descendant := range descendants {\n\t\t\t\tfoundParseableBlueprint(fileParseContext{descendant, parser.NewScope(blueprint.Scope), &blueprint, make(chan struct{})})\n\t\t\t}\n\t\t}\n\t}\n\n\t// begin parsing any files that have no ancestors\n\tstartParseDescendants(fileParseContext{\"\", parser.NewScope(nil), nil, nil})\n\nloop:\n\tfor {\n\t\tif len(errs) > maxErrors {\n\t\t\ttooManyErrors = true\n\t\t}\n\n\t\tselect {\n\t\tcase newErrs := <-errsCh:\n\t\t\terrs = append(errs, newErrs...)\n\t\tcase dep := <-depsCh:\n\t\t\tdeps = append(deps, dep)\n\t\tcase blueprint := <-blueprintsCh:\n\t\t\tif tooManyErrors {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfoundParseableBlueprint(blueprint)\n\t\tcase blueprint := <-doneParsingCh:\n\t\t\tactiveCount--\n\t\t\tif !tooManyErrors {\n\t\t\t\tstartParseDescendants(blueprint)\n\t\t\t}\n\t\t\tif activeCount < maxActiveCount && len(pending) > 0 {\n\t\t\t\t// start to process the next one from the queue\n\t\t\t\tnext := pending[len(pending)-1]\n\t\t\t\tpending = pending[:len(pending)-1]\n\t\t\t\tstartParseBlueprintsFile(next)\n\t\t\t}\n\t\t\tif activeCount == 0 {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Strings(deps)\n\n\t// wait for every visitor() to complete\n\tvisitorWaitGroup.Wait()\n\n\treturn\n}",
"func DecomposeTree(genericJSON []byte) ([]*devicechange.PathValue, error) {\n\tvar f interface{}\n\terr := json.Unmarshal(genericJSON, &f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvalues := extractValuesIntermediate(f, \"\")\n\treturn values, nil\n}",
"func flattenDepsToRoot(manager gps.SourceManager, deps []Dependency) (map[string]string, error) {\n\tdepMap := make(map[string]string)\n\tfor _, d := range deps {\n\t\troot, err := manager.DeduceProjectRoot(d.ImportPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdepMap[string(root)] = d.Rev\n\t}\n\treturn depMap, nil\n}",
"func loadTree(root string) (*importTree, error) {\n\tvar f fileLoaderFunc\n\tswitch ext(root) {\n\tcase \".tf\", \".tf.json\":\n\t\tf = loadFileHcl\n\tdefault:\n\t}\n\n\tif f == nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%s: unknown configuration format. Use '.tf' or '.tf.json' extension\",\n\t\t\troot)\n\t}\n\n\tc, imps, err := f(root)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchildren := make([]*importTree, len(imps))\n\tfor i, imp := range imps {\n\t\tt, err := loadTree(imp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tchildren[i] = t\n\t}\n\n\treturn &importTree{\n\t\tPath: root,\n\t\tRaw: c,\n\t\tChildren: children,\n\t}, nil\n}",
"func dependencyCache(chartdir string) (map[string]*chart.Chartfile, error) {\n\tcache := map[string]*chart.Chartfile{}\n\tdir, err := os.Open(chartdir)\n\tif err != nil {\n\t\treturn cache, err\n\t}\n\tdefer dir.Close()\n\n\tfis, err := dir.Readdir(0)\n\tif err != nil {\n\t\treturn cache, err\n\t}\n\n\tfor _, fi := range fis {\n\t\tif !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tcf, err := chart.LoadChartfile(filepath.Join(chartdir, fi.Name(), \"Chart.yaml\"))\n\t\tif err != nil {\n\t\t\t// If the chartfile does not load, we ignore it.\n\t\t\tcontinue\n\t\t}\n\n\t\tcache[fi.Name()] = cf\n\t}\n\treturn cache, nil\n}",
"func (w *RootWalker) DeserializeFromRef(ref skyobject.Reference, p interface{}) error {\n\tif w.r == nil {\n\t\treturn ErrRootNotFound\n\t}\n\tdata, got := w.r.Get(ref)\n\tif !got {\n\t\treturn ErrObjNotFound\n\t}\n\treturn encoder.DeserializeRaw(data, p)\n}",
"func parseNpmDependenciesList(dependencies map[string]*npmutils.Dependency, packageInfo *npmutils.PackageInfo) (xrDependencyTree *services.GraphNode) {\n\ttreeMap := make(map[string][]string)\n\tfor dependencyId, dependency := range dependencies {\n\t\tdependencyId = npmPackageTypeIdentifier + dependencyId\n\t\tparent := npmPackageTypeIdentifier + dependency.GetPathToRoot()[0][0]\n\t\tif children, ok := treeMap[parent]; ok {\n\t\t\ttreeMap[parent] = append(children, dependencyId)\n\t\t} else {\n\t\t\ttreeMap[parent] = []string{dependencyId}\n\t\t}\n\t}\n\treturn buildXrayDependencyTree(treeMap, npmPackageTypeIdentifier+packageInfo.BuildInfoModuleId())\n}",
"func decodeDependencyBlocks(filename string, terragruntOptions *tgoptions.TerragruntOptions, dependencyOutputs *cty.Value, include *tgconfig.IncludeConfig) (map[string]tgconfig.Dependency, error) {\n\tparser := hclparse.NewParser()\n\tfile, diags := parser.ParseHCLFile(filename)\n\tif diags != nil && diags.HasErrors() {\n\t\treturn nil, fmt.Errorf(\"could not parse hcl file %s to decode dependency blocks %w\", filename, diags)\n\t}\n\n\tlocalsAsCty, trackInclude, err := tgconfig.DecodeBaseBlocks(terragruntOptions, parser, file, filename, include, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not parse base hcl blocks %w\", err)\n\t}\n\n\tcontextExtensions := tgconfig.EvalContextExtensions{\n\t\tLocals: localsAsCty,\n\t\tTrackInclude: trackInclude,\n\t\tDecodedDependencies: dependencyOutputs,\n\t}\n\n\tevalContext, err := tgconfig.CreateTerragruntEvalContext(filename, terragruntOptions, contextExtensions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar deps terragruntDependency\n\tdecodeDiagnostics := gohcl.DecodeBody(file.Body, evalContext, &deps)\n\tif decodeDiagnostics != nil && decodeDiagnostics.HasErrors() {\n\t\treturn nil, decodeDiagnostics\n\t}\n\n\tdepmap := make(map[string]tgconfig.Dependency)\n\tkeymap := make(map[string]struct{})\n\tfor _, dep := range deps.Dependencies {\n\t\tdepmap[getCleanedTargetConfigPath(dep.ConfigPath, filename)] = dep\n\t\tkeymap[dep.Name] = struct{}{}\n\t}\n\n\tif trackInclude != nil {\n\t\tfor _, includeConfig := range trackInclude.CurrentList {\n\t\t\tincludeConfig := includeConfig\n\t\t\tstrategy, _ := includeConfig.GetMergeStrategy()\n\t\t\tif strategy != tgconfig.NoMerge {\n\t\t\t\trawPath := getCleanedTargetConfigPath(includeConfig.Path, filename)\n\t\t\t\tincl, err := decodeDependencyBlocks(rawPath, terragruntOptions, dependencyOutputs, &includeConfig)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"could not decode dependency blocks for included config '%s' path: %s %w\", 
includeConfig.Name, includeConfig.Path, err)\n\t\t\t\t}\n\n\t\t\t\tfor _, dep := range incl {\n\t\t\t\t\tif _, includedInParent := keymap[dep.Name]; includedInParent {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tdepmap[getCleanedTargetConfigPath(dep.ConfigPath, filename)] = dep\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn depmap, nil\n}",
"func collectReferencesAndFilesForPath(root string) ([]string, []string, error) {\n\treferences := []string{}\n\tfiles := []string{}\n\terr := filepath.Walk(root,\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif strings.HasSuffix(path, \".proto\") {\n\t\t\t\tname := strings.TrimPrefix(path, root+\"/\")\n\t\t\t\tfiles = append(files, name)\n\t\t\t\treferences, err = collectReferencesForProto(references, path)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\treturn references, files, err\n}",
"func Parse(dir string) ([]*cfg.Dependency, error) {\n\tpath := filepath.Join(dir, \"Godeps/Godeps.json\")\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn []*cfg.Dependency{}, nil\n\t}\n\tmsg.Info(\"Found Godeps.json file in %s\", gpath.StripBasepath(dir))\n\tmsg.Info(\"--> Parsing Godeps metadata...\")\n\n\tbuf := []*cfg.Dependency{}\n\n\tgodeps := &Godeps{}\n\n\t// Get a handle to the file.\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn buf, err\n\t}\n\tdefer file.Close()\n\n\tdec := json.NewDecoder(file)\n\tif err := dec.Decode(godeps); err != nil {\n\t\treturn buf, err\n\t}\n\n\tseen := map[string]bool{}\n\tfor _, d := range godeps.Deps {\n\t\tpkg, _ := util.NormalizeName(d.ImportPath)\n\t\tif !seen[pkg] {\n\t\t\tseen[pkg] = true\n\t\t\tdep := &cfg.Dependency{Name: pkg, Version: d.Rev}\n\t\t\tbuf = append(buf, dep)\n\t\t}\n\t}\n\n\treturn buf, nil\n}",
"func parseGoListOutput(output, rootVersion string) (map[string]Module, error) {\n\tdependencies := map[string]Module{}\n\tdecoder := json.NewDecoder(strings.NewReader(output))\n\n\tfor {\n\t\tvar module jsonModule\n\t\tif err := decoder.Decode(&module); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Stash original name before applying replacement\n\t\timportPath := module.Name\n\n\t\t// If there's a replace directive, use that module instead\n\t\tif module.Replace != nil {\n\t\t\tmodule = *module.Replace\n\t\t}\n\n\t\t// Local file paths and root modules\n\t\tif module.Version == \"\" {\n\t\t\tmodule.Version = rootVersion\n\t\t}\n\n\t\tdependencies[importPath] = Module{\n\t\t\tName: module.Name,\n\t\t\tVersion: cleanVersion(module.Version),\n\t\t}\n\t}\n\n\treturn dependencies, nil\n}",
"func ParseDeps(fileName string) []string {\n\tfset := token.NewFileSet()\n\n\tf, err := parser.ParseFile(fset, fileName, nil, parser.ImportsOnly)\n\tCheck(err)\n\n\tdepsArray := make([]string, len(f.Imports))\n\tfor index, s := range f.Imports {\n\t\tdepName := strings.Replace(s.Path.Value, string('\"'), \" \", 2)\n\t\tdepName = strings.Replace(depName, \" \", \"\", 10)\n\t\tdepsArray[index] = depName\n\t}\n\n\treturn depsArray\n}",
"func ParsePackageJSON(rawFile []byte) map[string]string {\n\tv, _ := jason.NewObjectFromBytes(rawFile)\n\n\tdevDependecies, _ := v.GetObject(\"devDependencies\")\n\tdeps, _ := v.GetObject(\"dependencies\")\n\n\tversions := map[string]string{}\n\tassignVersionsFromDeps(devDependecies, versions)\n\tassignVersionsFromDeps(deps, versions)\n\n\treturn versions\n}",
"func Parse(r io.ReadSeeker, pkgpath string) (*Package, error)",
"func Decompose(root string) ([]byte, error) {\n\tm := make(map[string]interface{})\n\n\t// TODO: actually support arbitrary depths :)\n\t// TODO: yaml support would be nice as well\n\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbase, err := filepath.Rel(root, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !info.Mode().IsRegular() {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !strings.HasSuffix(path, \".json\") {\n\t\t\treturn nil\n\t\t}\n\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar v map[string]interface{}\n\t\tif err := json.NewDecoder(f).Decode(&v); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tname := strings.Replace(base, \".json\", \"\", -1)\n\n\t\tif \"index\" == name {\n\t\t\tfor k, v := range v {\n\t\t\t\tm[k] = v\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\tm[name] = v\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn json.MarshalIndent(m, \"\", \" \")\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
MarshalToYAML marshals a MagmaSwaggerSpec to a YAML-formatted string.
|
// MarshalToYAML serializes the receiver and returns the result as a
// YAML-formatted string. An empty string is returned alongside any
// marshaling error.
func (m *MagmaSwaggerSpec) MarshalToYAML() (string, error) {
	out, err := yaml.Marshal(&m)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
|
[
"func (s *K3OSConfigFile) MarshalYAML() ([]byte, error) {\n\treturn yaml.Marshal(s.Spec)\n}",
"func (h Hz) MarshalYAML() (interface{}, error) {\n\treturn h.String(), nil\n}",
"func (t *MetricSanitizationType) MarshalYAML() (interface{}, error) {\n\treturn t.String(), nil\n}",
"func (tr TimeRange) MarshalYAML() (out interface{}, err error) {\n\tstartHr := tr.StartMinute / 60\n\tendHr := tr.EndMinute / 60\n\tstartMin := tr.StartMinute % 60\n\tendMin := tr.EndMinute % 60\n\n\tstartStr := fmt.Sprintf(\"%02d:%02d\", startHr, startMin)\n\tendStr := fmt.Sprintf(\"%02d:%02d\", endHr, endMin)\n\n\tyTr := yamlTimeRange{startStr, endStr}\n\treturn interface{}(yTr), err\n}",
"func (ir InclusiveRange) MarshalYAML() (interface{}, error) {\n\tbytes, err := ir.MarshalText()\n\treturn string(bytes), err\n}",
"func (tz Location) MarshalYAML() (interface{}, error) {\n\tbytes, err := tz.MarshalText()\n\treturn string(bytes), err\n}",
"func (r ParseKind) MarshalYAML() ([]byte, error) {\n\tif s, ok := interface{}(r).(fmt.Stringer); ok {\n\t\treturn yaml.Marshal(s.String())\n\t}\n\ts, ok := _ParseKindValueToName[r]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid ParseKind: %d\", r)\n\t}\n\treturn yaml.Marshal(s)\n}",
"func (s *Spec020) Marshal() ([]byte, error) {\n\treturn yaml.Marshal(s)\n}",
"func (m Map) MarshalYAML() (interface{}, error) {\n\treturn yaml.MapSlice(m), nil\n}",
"func (r *Regexp) MarshalYAML() (interface{}, error) {\n\treturn r.String(), nil\n}",
"func (i SecVerb) MarshalYAML() (interface{}, error) {\n\treturn i.String(), nil\n}",
"func (r WeekdayRange) MarshalYAML() (interface{}, error) {\n\tbytes, err := r.MarshalText()\n\treturn string(bytes), err\n}",
"func (s GitEvent) MarshalYAML() (interface{}, error) {\n\treturn toString[s], nil\n}",
"func BuildSpec_FromObjectToYaml(value *map[string]interface{}) BuildSpec {\n\t_init_.Initialize()\n\n\tvar returns BuildSpec\n\n\t_jsii_.StaticInvoke(\n\t\t\"monocdk.aws_codebuild.BuildSpec\",\n\t\t\"fromObjectToYaml\",\n\t\t[]interface{}{value},\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func (i UOM) MarshalYAML() (interface{}, error) {\n\treturn i.String(), nil\n}",
"func (o *OAuthFlow) MarshalYAML() (interface{}, error) {\n\tnb := high.NewNodeBuilder(o, o.low)\n\treturn nb.Render(), nil\n}",
"func (o Op) MarshalYAML() (interface{}, error) {\n\treturn map[string]interface{}{\n\t\to.Type(): o.OpApplier,\n\t}, nil\n}",
"func (i ChannelName) MarshalYAML() (interface{}, error) {\n\treturn i.String(), nil\n}",
"func (f Field) MarshalYAML() (interface{}, error) {\n\treturn f.String(), nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The function NewObjectLength calls NewUnaryFunctionBase to create a function named OBJECT_LENGTH with an expression as input.
|
// NewObjectLength builds the OBJECT_LENGTH function over the given operand
// expression, wiring the unary function base and self-referencing expr.
func NewObjectLength(operand Expression) Function {
	base := NewUnaryFunctionBase("object_length", operand)
	fn := &ObjectLength{*base}
	fn.expr = fn
	return fn
}
|
[
"func NewLength(operand Expression) Function {\n\trv := &Length{\n\t\t*NewUnaryFunctionBase(\"length\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func (this *ObjectLength) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectLength(operands[0])\n\t}\n}",
"func (obj *BaseObject) Length() (int64, error) {\n\tret := C.PyObject_Length(c(obj))\n\treturn int64(ret), exception()\n}",
"func (o *FakeObject) Length() int { return reflect.ValueOf(o.Value).Len() }",
"func (this *Length) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewLength(operands[0])\n\t}\n}",
"func (object Object) Length() int {\n\treturn len(object)\n}",
"func BuiltinObjectSize(ptr []byte, theType int32) int32 {\n\treturn 5\n}",
"func (o *object) Len(ctx context.Context) Val {\n\tif v, ok := o.callMetaMethod(ctx, \"__len\"); ok {\n\t\treturn v\n\t}\n\treturn Number(len(o.m))\n}",
"func BlobLength32() LengthTypeInstance {\n return &blobLength32{}\n}",
"func BlobLength64() LengthTypeInstance {\n return &blobLength64{}\n}",
"func BlobLength16() LengthTypeInstance {\n return &blobLength16{}\n}",
"func NewLen(length int) string {\r\n\treturn NewLenChars(length, StdChars)\r\n}",
"func BlobLength8() LengthTypeInstance {\n return &blobLength8{}\n}",
"func JSONObjLen(conn redis.Conn, key, path string) (res interface{}, err error) {\n\tname, args, _ := CommandBuilder(\"JSON.OBJLEN\", key, path)\n\treturn conn.Do(name, args...)\n}",
"func (p Path) Length(reference, value interface{}) Path { return p.with(Length, reference, value) }",
"func NewObjectNames(operand Expression) Function {\n\trv := &ObjectNames{\n\t\t*NewUnaryFunctionBase(\"object_names\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func (registry *Registry) Length() int {\n\treturn len(registry.constructor)\n}",
"func ClosureNewObject(sizeofClosure uint32, object *Object) *Closure {\n\tc_sizeof_closure := (C.guint)(sizeofClosure)\n\n\tc_object := (*C.GObject)(C.NULL)\n\tif object != nil {\n\t\tc_object = (*C.GObject)(object.ToC())\n\t}\n\n\tretC := C.g_closure_new_object(c_sizeof_closure, c_object)\n\tretGo := ClosureNewFromC(unsafe.Pointer(retC))\n\n\treturn retGo\n}",
"func (this *ObjectRemove) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectRemove(operands[0], operands[1])\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The constructor returns a NewObjectLength with an operand cast to a Function as the FunctionConstructor.
|
// Constructor returns a FunctionConstructor that builds an OBJECT_LENGTH
// function from its single operand.
func (this *ObjectLength) Constructor() FunctionConstructor {
	return func(args ...Expression) Function {
		return NewObjectLength(args[0])
	}
}
|
[
"func (this *Length) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewLength(operands[0])\n\t}\n}",
"func (this *ObjectAdd) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectAdd(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectRemove) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectRemove(operands[0], operands[1])\n\t}\n}",
"func (this *ObjectUnwrap) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectUnwrap(operands[0])\n\t}\n}",
"func (this *ObjectPut) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPut(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectValues(operands[0])\n\t}\n}",
"func (this *ObjectNames) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectNames(operands[0])\n\t}\n}",
"func NewLength(operand Expression) Function {\n\trv := &Length{\n\t\t*NewUnaryFunctionBase(\"length\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func (this *DateAddMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *DateAddStr) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddStr(operands[0], operands[1], operands[2])\n\t}\n}",
"func NewObjectLength(operand Expression) Function {\n\trv := &ObjectLength{\n\t\t*NewUnaryFunctionBase(\"object_length\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func (this *DateDiffMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateDiffMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *Repeat) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewRepeat(operands[0], operands[1])\n\t}\n}",
"func (this *Mod) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewMod(operands[0], operands[1])\n\t}\n}",
"func (this *ObjectInnerValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerValues(operands[0])\n\t}\n}",
"func (this *ObjectPairs) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPairs(operands[0])\n\t}\n}",
"func (this *RTrim) Constructor() FunctionConstructor { return NewRTrim }",
"func (this *Sub) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewSub(operands[0], operands[1])\n\t}\n}",
"func NewConstructor(x interface{}) (*Constructor, error) {\n\tif x == nil {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tft := reflect.TypeOf(x)\n\tfv := reflect.ValueOf(x)\n\tif ft.Kind() != reflect.Func {\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function expected, %s given\", ft)\n\t}\n\tif fv.IsNil() {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tc := &Constructor{\n\t\tfunction: fv,\n\t}\n\tnumIn := ft.NumIn()\n\tif ft.IsVariadic() {\n\t\tnumIn--\n\t}\n\tc.inTypes = make([]reflect.Type, numIn)\n\tfor i := 0; i < numIn; i++ {\n\t\tc.inTypes[i] = ft.In(i)\n\t}\n\tswitch ft.NumOut() {\n\tdefault:\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\tcase 1:\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = -1\n\tcase 2:\n\t\tif ft.Out(1) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = 1\n\tcase 3:\n\t\tif ft.Out(1) != destructorType || ft.Out(2) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = 1\n\t\tc.errorOutIndex = 2\n\t}\n\treturn c, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The function NewObjectNames calls NewUnaryFunctionBase to create a function named OBJECT_NAMES with an expression as input.
|
func NewObjectNames(operand Expression) Function {
rv := &ObjectNames{
*NewUnaryFunctionBase("object_names", operand),
}
rv.expr = rv
return rv
}
|
[
"func (this *ObjectNames) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectNames(operands[0])\n\t}\n}",
"func newSymName(o types.Object) SymName {\n\tif fun, ok := o.(*types.Func); ok {\n\t\tsig := fun.Type().(*types.Signature)\n\t\tif recv := sig.Recv(); recv != nil {\n\t\t\t// Special case: disambiguate names for different types' methods.\n\t\t\ttyp := recv.Type()\n\t\t\tif ptr, ok := typ.(*types.Pointer); ok {\n\t\t\t\treturn SymName{\n\t\t\t\t\tPkgPath: o.Pkg().Path(),\n\t\t\t\t\tName: \"(*\" + ptr.Elem().(*types.Named).Obj().Name() + \").\" + o.Name(),\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn SymName{\n\t\t\t\tPkgPath: o.Pkg().Path(),\n\t\t\t\tName: typ.(*types.Named).Obj().Name() + \".\" + o.Name(),\n\t\t\t}\n\t\t}\n\t}\n\treturn SymName{\n\t\tPkgPath: o.Pkg().Path(),\n\t\tName: o.Name(),\n\t}\n}",
"func (s Obj_value) NewName(n int32) (capnp.TextList, error) {\n\ts.Struct.SetUint16(4, 0)\n\tl, err := capnp.NewTextList(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn capnp.TextList{}, err\n\t}\n\terr = s.Struct.SetPtr(0, l.List.ToPtr())\n\treturn l, err\n}",
"func NewListTableNames(ctx context.Context, opts ...func(Call) error) (*ListTableNames, error) {\n\ttn := &ListTableNames{\n\t\tbase: base{\n\t\t\tctx: ctx,\n\t\t\tresultch: make(chan RPCResult, 1),\n\t\t},\n\t\tregex: \".*\",\n\t}\n\tif err := applyOptions(tn, opts...); err != nil {\n\t\treturn nil, err\n\t}\n\treturn tn, nil\n}",
"func execNewNamed(_ int, p *gop.Context) {\n\targs := p.GetArgs(3)\n\tret := types.NewNamed(args[0].(*types.TypeName), args[1].(types.Type), args[2].([]*types.Func))\n\tp.Ret(3, ret)\n}",
"func NewObjectValues(operand Expression) Function {\n\trv := &ObjectValues{\n\t\t*NewUnaryFunctionBase(\"object_values\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func InitNames() {\n\tsyscall.Syscall(gpInitNames, 0, 0, 0, 0)\n}",
"func NewNameIn(vs ...string) predicate.User {\n\treturn predicate.User(sql.FieldIn(FieldNewName, vs...))\n}",
"func NewNameIn(vs ...string) predicate.User {\n\tv := make([]interface{}, len(vs))\n\tfor i := range v {\n\t\tv[i] = vs[i]\n\t}\n\treturn predicate.User(func(s *sql.Selector) {\n\t\t// if not arguments were provided, append the FALSE constants,\n\t\t// since we can't apply \"IN ()\". This will make this predicate falsy.\n\t\tif len(v) == 0 {\n\t\t\ts.Where(sql.False())\n\t\t\treturn\n\t\t}\n\t\ts.Where(sql.In(s.C(FieldNewName), v...))\n\t})\n}",
"func RegisterObject(name string, f NewObjectFct) {\n\tObjectList[name] = f\n}",
"func NewNameGTE(v string) predicate.User {\n\treturn predicate.User(sql.FieldGTE(FieldNewName, v))\n}",
"func NewOperationNamesStorage(\n\tsession cassandra.Session,\n\twriteCacheTTL time.Duration,\n\tmetricsFactory metrics.Factory,\n\tlogger *zap.Logger,\n) *OperationNamesStorage {\n\treturn &OperationNamesStorage{\n\t\tsession: session,\n\t\tInsertStmt: insertOperationName,\n\t\tQueryStmt: queryOperationNames,\n\t\tmetrics: casMetrics.NewTable(metricsFactory, \"OperationNames\"),\n\t\twriteCacheTTL: writeCacheTTL,\n\t\tlogger: logger,\n\t\toperationNames: cache.NewLRUWithOptions(\n\t\t\t100000,\n\t\t\t&cache.Options{\n\t\t\t\tTTL: writeCacheTTL,\n\t\t\t\tInitialCapacity: 0000,\n\t\t\t}),\n\t}\n}",
"func newnamel(pos src.XPos, s *types.Sym) *Node {\n\tif s == nil {\n\t\tFatalf(\"newnamel nil\")\n\t}\n\n\tvar x struct {\n\t\tn Node\n\t\tm Name\n\t\tp Param\n\t}\n\tn := &x.n\n\tn.Name = &x.m\n\tn.Name.Param = &x.p\n\n\tn.Op = ONAME\n\tn.Pos = pos\n\tn.Orig = n\n\n\tn.Sym = s\n\treturn n\n}",
"func objToNames(obj interface{}, names []string, tag string) []string {\n\tvar typ reflect.Type\n\n\tif sf, ok := obj.(reflect.StructField); ok {\n\t\ttyp = sf.Type\n\t} else {\n\t\ttyp = reflect.TypeOf(obj)\n\t\tif typ.Kind() == reflect.Ptr {\n\t\t\ttyp = typ.Elem()\n\t\t}\n\t}\n\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\n\t\tif field.Type.Kind() == reflect.Struct {\n\t\t\tnames = objToNames(field, names, tag)\n\t\t\tcontinue\n\t\t}\n\n\t\t// If tag is passed to the function, we only append if the field is tagged and that it matches tag.\n\t\tif tag == \"\" || field.Tag.Get(tag) != \"\" {\n\t\t\tnames = append(names, field.Name)\n\t\t}\n\t}\n\n\treturn names\n}",
"func NewObject(t ...[2]*Term) Object {\n\tobj := newobject(len(t))\n\tfor i := range t {\n\t\tobj.Insert(t[i][0], t[i][1])\n\t}\n\treturn obj\n}",
"func NewObjectRemove(first, second Expression) Function {\n\trv := &ObjectRemove{\n\t\t*NewBinaryFunctionBase(\"object_remove\", first, second),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func NewObjectLength(operand Expression) Function {\n\trv := &ObjectLength{\n\t\t*NewUnaryFunctionBase(\"object_length\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func GetNewRegisteredUserNames(miscTable *db.Table) ([]string, error) {\n\tnewRegisteredUserNames := []string{}\n\terr := miscTable.GetObj(keyNewRegisteredUsers, &newRegisteredUserNames)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newRegisteredUserNames, nil\n}",
"func (n NamedRuntimeFunc) NewFunction() Function { return n.fn() }"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The constructor returns a NewObjectNames with the an operand cast to a Function as the FunctionConstructor.
|
func (this *ObjectNames) Constructor() FunctionConstructor {
return func(operands ...Expression) Function {
return NewObjectNames(operands[0])
}
}
|
[
"func (this *ObjectAdd) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectAdd(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *StrToZoneName) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewStrToZoneName(operands[0], operands[1])\n\t}\n}",
"func (this *DateAddStr) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddStr(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectRemove) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectRemove(operands[0], operands[1])\n\t}\n}",
"func NewObjectNames(operand Expression) Function {\n\trv := &ObjectNames{\n\t\t*NewUnaryFunctionBase(\"object_names\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func (this *ObjectValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectValues(operands[0])\n\t}\n}",
"func (this *ObjectUnwrap) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectUnwrap(operands[0])\n\t}\n}",
"func (this *DatePartStr) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDatePartStr(operands[0], operands[1])\n\t}\n}",
"func (this *ObjectPairs) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPairs(operands[0])\n\t}\n}",
"func NewConstructor(x interface{}) (*Constructor, error) {\n\tif x == nil {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tft := reflect.TypeOf(x)\n\tfv := reflect.ValueOf(x)\n\tif ft.Kind() != reflect.Func {\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function expected, %s given\", ft)\n\t}\n\tif fv.IsNil() {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tc := &Constructor{\n\t\tfunction: fv,\n\t}\n\tnumIn := ft.NumIn()\n\tif ft.IsVariadic() {\n\t\tnumIn--\n\t}\n\tc.inTypes = make([]reflect.Type, numIn)\n\tfor i := 0; i < numIn; i++ {\n\t\tc.inTypes[i] = ft.In(i)\n\t}\n\tswitch ft.NumOut() {\n\tdefault:\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\tcase 1:\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = -1\n\tcase 2:\n\t\tif ft.Out(1) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = 1\n\tcase 3:\n\t\tif ft.Out(1) != destructorType || ft.Out(2) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = 1\n\t\tc.errorOutIndex = 2\n\t}\n\treturn c, nil\n}",
"func (this *DateAddMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectLength) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectLength(operands[0])\n\t}\n}",
"func (this *StrToMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewStrToMillis(operands[0])\n\t}\n}",
"func (this *NowStr) Constructor() FunctionConstructor { return NewNowStr }",
"func (this *ObjectPut) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPut(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectInnerValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerValues(operands[0])\n\t}\n}",
"func (this *RTrim) Constructor() FunctionConstructor { return NewRTrim }",
"func (this *DateDiffMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateDiffMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *Sub) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewSub(operands[0], operands[1])\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The function NewObjectPairs calls NewUnaryFunctionBase to create a function named OBJECT_PAIRS with an expression as input.
|
func NewObjectPairs(operand Expression) Function {
rv := &ObjectPairs{
*NewUnaryFunctionBase("object_pairs", operand),
}
rv.expr = rv
return rv
}
|
[
"func NewObjectInnerPairs(operand Expression) Function {\n\trv := &ObjectInnerPairs{\n\t\t*NewUnaryFunctionBase(\"object_innerpairs\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func NewPairs(array *expression.ArrayConstruct) (pairs Pairs, err error) {\n\toperands := array.Operands()\n\tpairs = make(Pairs, len(operands))\n\tfor i, op := range operands {\n\t\tpairs[i], err = NewPair(op)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn\n}",
"func NewPair(expr expression.Expression) (*Pair, error) {\n\tarray, ok := expr.(*expression.ArrayConstruct)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Invalid VALUES expression %s\", expr.String())\n\t}\n\n\toperands := array.Operands()\n\tif len(operands) != 2 {\n\t\treturn nil, fmt.Errorf(\"Invalid VALUES expression %s\", expr.String())\n\t}\n\n\tpair := &Pair{\n\t\tKey: operands[0],\n\t\tValue: operands[1],\n\t}\n\n\treturn pair, nil\n}",
"func (s Record) NewPairs(n int32) (Pair_List, error) {\n\tl, err := NewPair_List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn Pair_List{}, err\n\t}\n\terr = s.Struct.SetPtr(7, l.List.ToPtr())\n\treturn l, err\n}",
"func (this *ObjectPairs) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPairs(operands[0])\n\t}\n}",
"func NewObject(t ...[2]*Term) Object {\n\tobj := newobject(len(t))\n\tfor i := range t {\n\t\tobj.Insert(t[i][0], t[i][1])\n\t}\n\treturn obj\n}",
"func ProxyPairs(L *lua.State) int {\n\t// See Lua >=5.2 source code.\n\tif L.GetMetaField(1, \"__pairs\") {\n\t\tL.PushValue(1)\n\t\tL.Call(1, 3)\n\t\treturn 3\n\t}\n\n\tL.CheckType(1, lua.LUA_TTABLE)\n\tL.PushGoFunction(pairsAux)\n\tL.PushValue(1)\n\tL.PushNil()\n\treturn 3\n}",
"func (this *ObjectInnerPairs) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerPairs(operands[0])\n\t}\n}",
"func NewObjectValues(operand Expression) Function {\n\trv := &ObjectValues{\n\t\t*NewUnaryFunctionBase(\"object_values\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func NewPairs(local, remote Candidates) Pairs {\n\tp := make(Pairs, 0, 100)\n\tfor l := range local {\n\t\tfor r := range remote {\n\t\t\t// Same data stream.\n\t\t\tif local[l].ComponentID != remote[r].ComponentID {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tipL, ipR := local[l].Addr.IP, remote[r].Addr.IP\n\t\t\t// Same IP address family.\n\t\t\tif !sameFamily(ipL, ipR) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ipL.To4() == nil && ipL.IsLinkLocalUnicast() {\n\t\t\t\t// IPv6 link-local addresses MUST NOT be paired with other\n\t\t\t\t// than link-local addresses.\n\t\t\t\tif !ipR.IsLinkLocalUnicast() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tpair := Pair{\n\t\t\t\tLocal: local[l],\n\t\t\t\tRemote: remote[r],\n\t\t\t\tComponentID: local[l].ComponentID,\n\t\t\t}\n\t\t\tpair.SetFoundation()\n\t\t\tp = append(p, pair)\n\t\t}\n\t}\n\treturn p\n}",
"func (o *Object) AddPair(key, value string) {\n\to.Pairs = append(o.Pairs, Pair{Key: key, Value: value})\n}",
"func NewPair(key string, value interface{}) *Pair {\n\treturn &Pair{\n\t\tkey: key,\n\t\tvalue: value,\n\t}\n}",
"func NewPairs(local, remote Candidates) Pairs {\n\tp := make(Pairs, 0, 100)\n\tfor l := range local {\n\t\tfor r := range remote {\n\t\t\t// Same data stream.\n\t\t\tif local[l].ComponentID != remote[r].ComponentID {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar (\n\t\t\t\tipL, ipR = local[l].Addr.IP, remote[r].Addr.IP\n\t\t\t)\n\t\t\t// Same IP address family.\n\t\t\tif !sameFamily(ipL, ipR) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ipL.To4() == nil && ipL.IsLinkLocalUnicast() {\n\t\t\t\t// IPv6 link-local addresses MUST NOT be paired with other\n\t\t\t\t// than link-local addresses.\n\t\t\t\tif !ipR.IsLinkLocalUnicast() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tp = append(p, Pair{\n\t\t\t\tLocal: local[l], Remote: local[r],\n\t\t\t})\n\t\t}\n\t}\n\treturn p\n}",
"func NewPair(base, counter Asset) Pair {\n\treturn Pair{\n\t\tBase: base,\n\t\tCounter: counter,\n\t}\n}",
"func (s StringPairFactory) CreatePair(first, second interface{}) Pair {\n\tcreated := NewStringPair(first.(string), second.(string))\n\treturn created\n}",
"func NewObjectNames(operand Expression) Function {\n\trv := &ObjectNames{\n\t\t*NewUnaryFunctionBase(\"object_names\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func Make(e1, e2 interface{}) (p Pair) {\n\tp[0] = e1\n\tp[1] = e2\n\n\treturn p\n}",
"func NewPair(baseCurrency, quoteCurrency Code) Pair {\n\treturn Pair{\n\t\tBase: baseCurrency,\n\t\tQuote: quoteCurrency,\n\t}\n}",
"func NewObjectInnerValues(operand Expression) Function {\n\trv := &ObjectInnerValues{\n\t\t*NewUnaryFunctionBase(\"object_innervalues\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The constructor returns a NewObjectPairs with the an operand cast to a Function as the FunctionConstructor.
|
func (this *ObjectPairs) Constructor() FunctionConstructor {
return func(operands ...Expression) Function {
return NewObjectPairs(operands[0])
}
}
|
[
"func (this *ObjectInnerPairs) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerPairs(operands[0])\n\t}\n}",
"func (this *ObjectValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectValues(operands[0])\n\t}\n}",
"func (this *ObjectAdd) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectAdd(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectPut) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPut(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectUnwrap) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectUnwrap(operands[0])\n\t}\n}",
"func (this *ObjectNames) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectNames(operands[0])\n\t}\n}",
"func (this *ObjectInnerValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerValues(operands[0])\n\t}\n}",
"func (this *DateAddStr) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddStr(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectRemove) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectRemove(operands[0], operands[1])\n\t}\n}",
"func (this *DateAddMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func Function(args ...Type) *Operator {\n\treturn &Operator{functionName, args}\n}",
"func NewConstructor(x interface{}) (*Constructor, error) {\n\tif x == nil {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tft := reflect.TypeOf(x)\n\tfv := reflect.ValueOf(x)\n\tif ft.Kind() != reflect.Func {\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function expected, %s given\", ft)\n\t}\n\tif fv.IsNil() {\n\t\treturn nil, kerror.New(kerror.EViolation, \"function expected, nil given\")\n\t}\n\tc := &Constructor{\n\t\tfunction: fv,\n\t}\n\tnumIn := ft.NumIn()\n\tif ft.IsVariadic() {\n\t\tnumIn--\n\t}\n\tc.inTypes = make([]reflect.Type, numIn)\n\tfor i := 0; i < numIn; i++ {\n\t\tc.inTypes[i] = ft.In(i)\n\t}\n\tswitch ft.NumOut() {\n\tdefault:\n\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\tcase 1:\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = -1\n\tcase 2:\n\t\tif ft.Out(1) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = -1\n\t\tc.errorOutIndex = 1\n\tcase 3:\n\t\tif ft.Out(1) != destructorType || ft.Out(2) != errorType {\n\t\t\treturn nil, kerror.Newf(kerror.EViolation, \"function %s is not a constructor\", ft)\n\t\t}\n\t\tc.t = ft.Out(0)\n\t\tc.objectOutIndex = 0\n\t\tc.destructorOutIndex = 1\n\t\tc.errorOutIndex = 2\n\t}\n\treturn c, nil\n}",
"func (this *RTrim) Constructor() FunctionConstructor { return NewRTrim }",
"func (this *DatePartStr) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDatePartStr(operands[0], operands[1])\n\t}\n}",
"func (this *Mod) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewMod(operands[0], operands[1])\n\t}\n}",
"func (this *ObjectLength) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectLength(operands[0])\n\t}\n}",
"func (this *DateDiffMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateDiffMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *Element) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewElement(operands[0], operands[1])\n\t}\n}",
"func (this *StrToZoneName) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewStrToZoneName(operands[0], operands[1])\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The function NewObjectValues calls NewUnaryFunctionBase to create a function named OBJECT_VALUES with an expression as input.
|
func NewObjectValues(operand Expression) Function {
rv := &ObjectValues{
*NewUnaryFunctionBase("object_values", operand),
}
rv.expr = rv
return rv
}
|
[
"func NewObjectInnerValues(operand Expression) Function {\n\trv := &ObjectInnerValues{\n\t\t*NewUnaryFunctionBase(\"object_innervalues\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func NewValuesFunc(ctx sessionctx.Context, offset int, retTp *types.FieldType) *ScalarFunction {\n\tfc := &valuesFunctionClass{baseFunctionClass{ast.Values, 0, 0}, offset, retTp}\n\tbt, err := fc.getFunction(ctx, nil)\n\tterror.Log(err)\n\treturn &ScalarFunction{\n\t\tFuncName: model.NewCIStr(ast.Values),\n\t\tRetType: retTp,\n\t\tFunction: bt,\n\t}\n}",
"func (this *ObjectValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectValues(operands[0])\n\t}\n}",
"func NewValues() *Values {\n\tv := &Values{\n\t\tm: map[string]interface{}{\n\t\t\tInputKey: map[string]interface{}{},\n\t\t\tResolverInputKey: map[string]interface{}{},\n\t\t\tStepKey: map[string]interface{}{},\n\t\t\tTaskKey: map[string]interface{}{},\n\t\t\tConfigKey: map[string]interface{}{},\n\t\t\tVarKey: map[string]*Variable{},\n\t\t\tIteratorKey: nil,\n\t\t},\n\t}\n\tv.funcMap = sprig.FuncMap()\n\tv.funcMap[\"field\"] = v.fieldTmpl\n\tv.funcMap[\"jsonfield\"] = v.jsonFieldTmpl\n\tv.funcMap[\"jsonmarshal\"] = v.jsonMarshal\n\tv.funcMap[\"eval\"] = v.varEval\n\treturn v\n}",
"func (s Stream) NewValues(n int32) (capnp.Float64List, error) {\n\tl, err := capnp.NewFloat64List(s.Struct.Segment(), n)\n\tif err != nil {\n\t\treturn capnp.Float64List{}, err\n\t}\n\terr = s.Struct.SetPtr(1, l.List.ToPtr())\n\treturn l, err\n}",
"func NewValues() Values {\n\treturn Values{}\n}",
"func NewObject() *Value {\n\treturn &Value{kind: kindObject, objectContent: make(map[string]Value)}\n}",
"func (this *ObjectInnerValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerValues(operands[0])\n\t}\n}",
"func NewObjectNames(operand Expression) Function {\n\trv := &ObjectNames{\n\t\t*NewUnaryFunctionBase(\"object_names\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func NewObjectPairs(operand Expression) Function {\n\trv := &ObjectPairs{\n\t\t*NewUnaryFunctionBase(\"object_pairs\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func NewValues() Values {\n\treturn make(values)\n}",
"func NewValues(ps ...VarValue) Values {\r\n\t_, capacity := minMax(1<<bits.Len(uint(len(ps))), 4)\r\n\tvs := &valueList{\r\n\t\tkeys: make([]Var, len(ps), capacity),\r\n\t\tvals: make([]interface{}, len(ps), capacity),\r\n\t}\r\n\tfor i, p := range ps {\r\n\t\tvs.keys[i] = p.Var\r\n\t\tvs.vals[i] = p.Value\r\n\t}\r\n\treturn vs\r\n}",
"func NewValues() *Values {\n\treturn NewValuesScope(nil)\n}",
"func NewFromValues(values map[string]interface{}) *Values {\n\treturn &Values{values}\n}",
"func NewObjectInnerPairs(operand Expression) Function {\n\trv := &ObjectInnerPairs{\n\t\t*NewUnaryFunctionBase(\"object_innerpairs\", operand),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}",
"func NewObject(t ...[2]*Term) Object {\n\tobj := newobject(len(t))\n\tfor i := range t {\n\t\tobj.Insert(t[i][0], t[i][1])\n\t}\n\treturn obj\n}",
"func NewLiteral(value Object) Expression {\n\treturn &literal{value: value}\n}",
"func NewObjectExpression() *ObjectExpression {\n\treturn &ObjectExpression{\n\t\tprops: make(map[string]Expression),\n\t}\n}",
"func NewObjectAdd(first, second, third Expression) Function {\n\trv := &ObjectAdd{\n\t\t*NewTernaryFunctionBase(\"object_add\", first, second, third),\n\t}\n\n\trv.expr = rv\n\treturn rv\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
/ The constructor returns a NewObjectValues with the an operand cast to a Function as the FunctionConstructor.
|
func (this *ObjectValues) Constructor() FunctionConstructor {
return func(operands ...Expression) Function {
return NewObjectValues(operands[0])
}
}
|
[
"func (this *ObjectInnerValues) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerValues(operands[0])\n\t}\n}",
"func (this *ObjectAdd) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectAdd(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *DateAddMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectPairs) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPairs(operands[0])\n\t}\n}",
"func (this *ObjectUnwrap) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectUnwrap(operands[0])\n\t}\n}",
"func (this *ObjectPut) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectPut(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *DateAddStr) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateAddStr(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectNames) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectNames(operands[0])\n\t}\n}",
"func (this *DateDiffMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDateDiffMillis(operands[0], operands[1], operands[2])\n\t}\n}",
"func (this *ObjectRemove) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectRemove(operands[0], operands[1])\n\t}\n}",
"func (this *ObjectInnerPairs) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectInnerPairs(operands[0])\n\t}\n}",
"func (this *ObjectLength) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewObjectLength(operands[0])\n\t}\n}",
"func Function(args ...Type) *Operator {\n\treturn &Operator{functionName, args}\n}",
"func (this *DatePartMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDatePartMillis(operands[0], operands[1])\n\t}\n}",
"func (this *RTrim) Constructor() FunctionConstructor { return NewRTrim }",
"func (this *DatePartStr) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewDatePartStr(operands[0], operands[1])\n\t}\n}",
"func (this *Mod) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewMod(operands[0], operands[1])\n\t}\n}",
"func (this *IfNaN) Constructor() FunctionConstructor { return NewIfNaN }",
"func (this *StrToMillis) Constructor() FunctionConstructor {\n\treturn func(operands ...Expression) Function {\n\t\treturn NewStrToMillis(operands[0])\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewTransform creates a new default transform, corresponding to identity.
|
func NewTransform() *Transform {
return &Transform{}
}
|
[
"func NewTransform() Transform {\n\tt := transform{\n\t\tmodelView: mgl32.Ident4(),\n\t\trotation: mgl32.Vec3{0, 0, 0},\n\t\ttranslation: mgl32.Vec3{0, 0, 0},\n\t}\n\treturn &t\n}",
"func (t *Transform) New() *Transform {\n\tret := NewTransform()\n\tret.SetParent(t)\n\treturn ret\n}",
"func IdentityTransform() Transform {\n\treturn Transform{C.sfTransform_create()}\n}",
"func NewTransform() api.IAffineTransform {\n\to := new(affineTransform)\n\to.ToIdentity()\n\treturn o\n}",
"func NewTransformation() *Scale {\n\treturn &Scale{\n\t\tLine: newLine(),\n\t\tLength: 0,\n\t}\n}",
"func NewTransform2DIdentity() *Transform2D {\n\tvar dest C.godot_transform2d\n\tC.go_godot_transform2d_new_identity(GDNative.api, &dest)\n\treturn &Transform2D{base: &dest}\n}",
"func New() *Transformer {\n\treturn &Transformer{}\n}",
"func NewTransform(cfg *viper.Viper) (interface{}, error) {\n\tgremlinClient, err := client.NewGremlinQueryHelperFromConfig(core.CfgAuthOpts(cfg))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &vpclogsFlowTransformer{\n\t\tinterfaceIpCache: cache.New(10*time.Minute, 10*time.Minute),\n\t\tgremlinClient: gremlinClient,\n\t}, nil\n}",
"func NewTransformation(ctx *pulumi.Context,\n\tname string, args *TransformationArgs, opts ...pulumi.ResourceOption) (*Transformation, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Path == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Path'\")\n\t}\n\tvar resource Transformation\n\terr := ctx.RegisterResource(\"vault:transform/transformation:Transformation\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (eval *evaluator) LinearTransformNew(ctIn *Ciphertext, linearTransform interface{}) (ctOut []*Ciphertext) {\n\n\tswitch element := linearTransform.(type) {\n\tcase []PtDiagMatrix:\n\t\tctOut = make([]*Ciphertext, len(element))\n\n\t\tvar maxLevel int\n\t\tfor _, matrix := range element {\n\t\t\tmaxLevel = utils.MaxInt(maxLevel, matrix.Level)\n\t\t}\n\n\t\tminLevel := utils.MinInt(maxLevel, ctIn.Level())\n\n\t\teval.DecomposeNTT(minLevel, eval.params.PCount()-1, eval.params.PCount(), ctIn.Value[1], eval.PoolDecompQP)\n\n\t\tfor i, matrix := range element {\n\t\t\tctOut[i] = NewCiphertext(eval.params, 1, minLevel, ctIn.Scale)\n\n\t\t\tif matrix.Naive {\n\t\t\t\teval.MultiplyByDiagMatrix(ctIn, matrix, eval.PoolDecompQP, ctOut[i])\n\t\t\t} else {\n\t\t\t\teval.MultiplyByDiagMatrixBSGS(ctIn, matrix, eval.PoolDecompQP, ctOut[i])\n\t\t\t}\n\t\t}\n\n\tcase PtDiagMatrix:\n\n\t\tminLevel := utils.MinInt(element.Level, ctIn.Level())\n\t\teval.DecomposeNTT(minLevel, eval.params.PCount()-1, eval.params.PCount(), ctIn.Value[1], eval.PoolDecompQP)\n\n\t\tctOut = []*Ciphertext{NewCiphertext(eval.params, 1, minLevel, ctIn.Scale)}\n\n\t\tif element.Naive {\n\t\t\teval.MultiplyByDiagMatrix(ctIn, element, eval.PoolDecompQP, ctOut[0])\n\t\t} else {\n\t\t\teval.MultiplyByDiagMatrixBSGS(ctIn, element, eval.PoolDecompQP, ctOut[0])\n\t\t}\n\t}\n\treturn\n}",
"func NewTransformer(schema *jsonschema.Schema, tranformIdentifier string) (*Transformer, error) {\n\treturn newTransformer(schema, tranformIdentifier, jsonInput)\n}",
"func NewTransformComponent(parent *Entity, position Vector3, size Vector3, origin Vector3, rotation float64) *TransformComponent {\n\ttransformComponent := &TransformComponent{\n\t\tID: \"transform\",\n\t\tParent: parent,\n\t\tPosition: position,\n\t\tSize: size,\n\t\tOrigin: origin,\n\t\tRotation: rotation,\n\t}\n\treturn transformComponent\n}",
"func (t *Transform) Transform() *Transform {\n\treturn t\n}",
"func CreateTransform(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tfail(err, w)\n\t\treturn\n\t}\n\n\ttransform, err := persistence.Transform{}.FromJSON(body)\n\tif err != nil {\n\t\tfail(err, w)\n\t\treturn\n\t}\n\tif err = transform.Create(db); err != nil {\n\t\tfail(err, w)\n\t\treturn\n\t}\n\t// Reload it from the database to ensure the frontend always gets datastore-casted values.\n\ttransform, err = persistence.Transform{}.Get(db, transform.ID)\n\tif err != nil {\n\t\tfail(err, w)\n\t\treturn\n\t}\n\n\tw.Write(transform.AsJSON())\n}",
"func NewTranslate(v Vector) Matrix {\n\treturn Matrix{\n\t\t1, 0, 0, v.X,\n\t\t0, 1, 0, v.Y,\n\t\t0, 0, 1, v.Z,\n\t\t0, 0, 0, 1}\n}",
"func CreateTransform(lowerLeft, upperRight *Point, width, height int,\n\tgd *GridDef) *PointTransform {\n\tworldNx := math.Abs(lowerLeft.X() - upperRight.X())\n\tworldNy := math.Abs(lowerLeft.Y() - upperRight.Y())\n\tdx := worldNx / float64(width)\n\tdy := worldNy / float64(height)\n\tmaxx := math.Max(lowerLeft.X(), upperRight.X())\n\tmaxy := math.Max(lowerLeft.Y(), upperRight.Y())\n\tmax := NewPoint2D(maxx, maxy)\n\treturn &PointTransform{dx, dy, max, width, height, gd}\n}",
"func (t *Transform) SetIdentity() {\n\tt.P.SetZero()\n\tt.Q.SetIdentity()\n}",
"func NewIdentity() Matrix {\n\treturn Matrix{\n\t\t1, 0, 0, 0,\n\t\t0, 1, 0, 0,\n\t\t0, 0, 1, 0,\n\t\t0, 0, 0, 1}\n}",
"func NewTransformSource() *TransformSource {\n\tr := &TransformSource{}\n\n\treturn r\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetScale sets transform scale.
|
func (t *Transform) SetScale(sx, sy float64) *Transform {
t.Scale1.X = sx - 1
t.Scale1.Y = sy - 1
return t
}
|
[
"func (this *Transformable) SetScale(scale Vector2f) {\n\tC.sfTransformable_setScale(this.cptr, scale.toC())\n}",
"func (g *GameObject) SetScale(scale float64) {\r\n\tg.Hitbox.maxX *= scale / g.Scale\r\n\tg.Hitbox.maxY *= scale / g.Scale\r\n\tg.Scale = scale\r\n}",
"func (polygon *ConvexPolygon) SetScale(w, h float64) {\n\tpolygon.ScaleW = w\n\tpolygon.ScaleH = h\n}",
"func (circle *Circle) SetScale(w, h float64) {\n\tcircle.scale = math.Max(w, h)\n\tcircle.radius = circle.originalRadius * circle.scale\n}",
"func (self ConvexShape) SetScale(x, y float32) {\n\tC.sfConvexShape_setScale(self.Cref, C.sfVector2f{\n\t\tC.float(x), C.float(y),\n\t})\n}",
"func (self Transform) Scale(scaleX, scaleY float32) {\n\tC.sfTransform_scale(self.Cref, C.float(scaleX), C.float(scaleY))\n}",
"func (t *Transform) Scale(sx, sy float64) {\n\tout := fmt.Sprintf(\"scale(%g,%g)\", sx, sy)\n\n\tt.transforms = append(t.transforms, out)\n}",
"func (s *UpdateTaskSetInput) SetScale(v *Scale) *UpdateTaskSetInput {\n\ts.Scale = v\n\treturn s\n}",
"func (d *L3GD20HDriver) SetScale(s L3GD20HScale) {\n\td.scale = s\n}",
"func (tr *trooper) setScale(scale float64) { tr.part.SetScale(scale, scale, scale) }",
"func (this *RectangleShape) SetScale(scale Vector2f) {\n\tC.sfRectangleShape_setScale(this.cptr, scale.toC())\n}",
"func (self Text) SetScale(x, y float32) {\n\tv := C.sfVector2f{C.float(x), C.float(y)}\n\tC.sfText_setScale(self.Cref, v)\n}",
"func (fpu *FloorPlanUpdate) SetScale(f *FloorPlanScale) *FloorPlanUpdate {\n\treturn fpu.SetScaleID(f.ID)\n}",
"func (gs *GLMSummary) SetScale(xf func(float64) float64, msg string) *GLMSummary {\n\tgs.paramXform = xf\n\tgs.messages = append(gs.messages, msg)\n\treturn gs\n}",
"func (z *Big) SetScale(scale int) *Big {\n\tz.exp = -scale\n\treturn z\n}",
"func (fpuo *FloorPlanUpdateOne) SetScale(f *FloorPlanScale) *FloorPlanUpdateOne {\n\treturn fpuo.SetScaleID(f.ID)\n}",
"func (f *Fpdf) TransformScale(scaleWd, scaleHt, x, y float64) {\n\tif scaleWd == 0 || scaleHt == 0 {\n\t\tf.err = fmt.Errorf(\"scale factor cannot be zero\")\n\t\treturn\n\t}\n\ty = (f.h - y) * f.k\n\tx *= f.k\n\tscaleWd /= 100\n\tscaleHt /= 100\n\tf.Transform(TransformMatrix{scaleWd, 0, 0,\n\t\tscaleHt, x * (1 - scaleWd), y * (1 - scaleHt)})\n}",
"func (self *Affine2) Scale(scaleX, scaleY float32) *Affine2 {\n\tself.m00 *= scaleX\n\tself.m01 *= scaleY\n\tself.m10 *= scaleX\n\tself.m11 *= scaleY\n\treturn self\n}",
"func (sprite *Sprite) SetScaling(xScale, yScale float64) {\n\n\tsprite.shape.SetScaling(xScale, yScale)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetRotation sets transform rotation.
|
func (t *Transform) SetRotation(angle float64) *Transform {
t.Rotation = angle
return t
}
|
[
"func (t *Transform) SetRotation(r float64) ITransform {\n\tt.Rotation = r\n\treturn t\n}",
"func (this *Transformable) SetRotation(rot float32) {\n\tC.sfTransformable_setRotation(this.cptr, C.float(rot))\n}",
"func (p *Plane) SetRotation(rot mgl32.Mat4) {\n\tp.model.Rotation = rot\n}",
"func (m *ModelObject) SetRotation(rot mgl32.Mat4) {\n\tm.Model.Rotation = rot\n}",
"func (d *Device) SetRotation(rotation Rotation) error {\n\td.rotation = rotation\n\td.startWrite()\n\terr := d.setRotation(rotation)\n\td.endWrite()\n\treturn err\n}",
"func (polygon *ConvexPolygon) SetRotation(radians float64) {\n\tpolygon.rotation = radians\n\tif polygon.rotation > math.Pi {\n\t\tpolygon.rotation -= math.Pi * 2\n\t} else if polygon.rotation < -math.Pi {\n\t\tpolygon.rotation += math.Pi * 2\n\t}\n}",
"func (s *AppServerV3) SetRotation(r Rotation) {\n\ts.Spec.Rotation = r\n}",
"func (s *DatabaseServerV3) SetRotation(r Rotation) {\n\ts.Spec.Rotation = r\n}",
"func (t *Transform) SetRot(r lmath.Vec3) {\n\tt.access.Lock()\n\tif t.rot != r {\n\t\tt.built = nil\n\t\tt.quat = nil\n\t\tt.rot = r\n\t}\n\tt.access.Unlock()\n}",
"func (t Transform) Rotation(angle float64) Transform {\n\tt.rot = angle\n\treturn t\n}",
"func (self Text) SetRotation(angle float32) {\n\tC.sfText_setRotation(self.Cref, C.float(angle))\n}",
"func (this *RectangleShape) SetRotation(rot float32) {\n\tC.sfRectangleShape_setRotation(this.cptr, C.float(rot))\n}",
"func (p *Part) SetRotation(rotation vector.Vector) {\n\tif rotation.Len() != 3 && rotation.Kind() != reflect.Float32 {\n\t\tlog.Fatalf(\"Part.SetRotation: expects 3D-Float32 vector, got %dD-%v\", rotation.Len(), rotation.Kind())\n\t}\n\t// Make float64 values in Radians for math\n\txAngle := float64(rotation.Get(0).(float32)) * math.Pi / 180.0\n\tyAngle := float64(rotation.Get(1).(float32)) * math.Pi / 180.0\n\tzAngle := float64(rotation.Get(2).(float32)) * math.Pi / 180.0\n\t// Create a rotation matrix for each axis\n\txRotation := matrix.NewMatrix([][]float32{\n\t\t{1.0, 0.0, 0.0},\n\t\t{0.0, float32(math.Cos(xAngle)), float32(-math.Sin(xAngle))},\n\t\t{0.0, float32(math.Sin(xAngle)), float32(math.Cos(xAngle))},\n\t})\n\tyRotation := matrix.NewMatrix([][]float32{\n\t\t{float32(math.Cos(yAngle)), 0.0, float32(math.Sin(yAngle))},\n\t\t{0.0, 1.0, 0.0},\n\t\t{float32(-math.Sin(yAngle)), 0.0, float32(math.Cos(yAngle))},\n\t})\n\tzRotation := matrix.NewMatrix([][]float32{\n\t\t{float32(math.Cos(zAngle)), float32(-math.Sin(zAngle)), 0.0},\n\t\t{float32(math.Sin(zAngle)), float32(math.Cos(zAngle)), 0.0},\n\t\t{0.0, 0.0, 1.0},\n\t})\n\t// Combine the rotations\n\tp.rotation = zRotation.Mulm(yRotation.Mulm(xRotation))\n}",
"func SetRotation(left, right, up, down bool) {\n\tif left {\n\t\tif up {\n\t\t\t// Going up left\n\t\t\tp.direction = math.Pi / 4\n\t\t\treturn\n\t\t}\n\n\t\tif down {\n\t\t\t// Going down left\n\t\t\tp.direction = (math.Pi * 3) / 4\n\t\t\treturn\n\t\t}\n\n\t\t// Going left\n\t\tp.direction = math.Pi / 2\n\t\treturn\n\t}\n\n\tif right {\n\t\tif up {\n\t\t\t// Going up right\n\t\t\tp.direction = (math.Pi * 7) / 4\n\t\t\treturn\n\t\t}\n\n\t\tif down {\n\t\t\t// Going down right\n\t\t\tp.direction = (math.Pi * 5) / 4\n\t\t\treturn\n\t\t}\n\n\t\t// Going right\n\t\tp.direction = (math.Pi * 3) / 2\n\t\treturn\n\t}\n\n\tif up {\n\t\tp.direction = 0\n\t\treturn\n\t}\n\n\tif down {\n\t\tp.direction = math.Pi\n\t}\n}",
"func (x *X11) SetRotate(rotate bool) {\n\tx.mut.Lock()\n\tx.rotate = rotate\n\tx.mut.Unlock()\n}",
"func Rotation(angle float64) Transform {\n\treturn ZT.Rotation(angle)\n}",
"func (f *Fpdf) TransformRotate(angle, x, y float64) {\n\ty = (f.h - y) * f.k\n\tx *= f.k\n\tangle = angle * math.Pi / 180\n\tvar tm TransformMatrix\n\ttm.A = math.Cos(angle)\n\ttm.B = math.Sin(angle)\n\ttm.C = -tm.B\n\ttm.D = tm.A\n\ttm.E = x + tm.B*y - tm.A*x\n\ttm.F = y - tm.A*y - tm.B*x\n\tf.Transform(tm)\n}",
"func (self ConvexShape) Setrotation(angle float32) { \n C.sfConvexShape_setRotation(self.Cref, C.float(angle))\n}",
"func (w *FileLogWriter) SetRotate(rotate bool) *FileLogWriter {\n\t//fmt.Fprintf(os.Stderr, \"FileLogWriter.SetRotate: %v\\n\", rotate)\n\tw.rotate = rotate\n\treturn w\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetTranslation sets transform translation.
|
func (t *Transform) SetTranslation(tx, ty float64) *Transform {
t.Translation.X = tx
t.Translation.Y = ty
return t
}
|
[
"func (mat *T) SetTranslation(v *vec3.T) *T {\n\tmat[3][0] = v[0]\n\tmat[3][1] = v[1]\n\tmat[3][2] = v[2]\n\treturn mat\n}",
"func (p *Plane) SetTransform(translation Mat4x4) {\n\tp.Transform = Multiply(p.Transform, translation)\n\tp.Inverse = Inverse(p.Transform)\n\tp.InverseTranspose = Transpose(p.Inverse)\n}",
"func (m *Mat4) SetToTranslation(t Vec3) {\n\tm[0][0] = 1\n\tm[0][1] = 0\n\tm[0][2] = 0\n\tm[0][3] = 0\n\n\tm[1][0] = 0\n\tm[1][1] = 1\n\tm[1][2] = 0\n\tm[1][3] = 0\n\n\tm[2][0] = 0\n\tm[2][1] = 0\n\tm[2][2] = 1\n\tm[2][3] = 0\n\n\tm[3][0] = t.X\n\tm[3][1] = t.Y\n\tm[3][2] = t.Z\n\tm[3][3] = 1\n}",
"func (n *Node) SetTransform(translate, rotate, scale *vmath.Vector3) {\n\tn.H3DNode.SetTransform(translate[0], translate[1], translate[2],\n\t\trotate[0], rotate[1], rotate[2],\n\t\tscale[0], scale[1], scale[2])\n}",
"func (p *Gradient) SetTransform(m *matrix.Matrix) {\n\tp.Transform = m\n}",
"func (p *Stripe) SetTransform(m *matrix.Matrix) {\n\tp.Transform = m\n}",
"func (o *Object) SetTransform(transform *matrix.Matrix) {\n\to.Transform = transform\n}",
"func (s *Sphere) SetTransform(m *matrix.Matrix) {\n\ts.Transform = m\n}",
"func (o *object) SetTransform(matrix ...*base.Matrix) {\n\tt := base.Identity\n\tfor _, m := range matrix {\n\t\tt = *t.Multiply(m)\n\t}\n\to.transform = t\n}",
"func (f *Fpdf) TransformTranslate(tx, ty float64) {\n\tf.Transform(TransformMatrix{1, 0, 0, 1, tx * f.k, -ty * f.k})\n}",
"func (g *Group) SetTransform(m *algebra.Matrix) {\n\tif len(m.Get()) != 4 || len(m.Get()[0]) != 4 {\n\t\tpanic(algebra.ExpectedDimension(4))\n\t}\n\tg.transform = m\n}",
"func (c *canvasRenderer) SetTransform(transform sprec.Mat4) {\n\tif c.currentLayer == c.topLayer {\n\t\tc.currentLayer.Transform = transform\n\t} else {\n\t\tc.currentLayer.Transform = sprec.Mat4Prod(\n\t\t\tc.currentLayer.previous.Transform,\n\t\t\ttransform,\n\t\t)\n\t}\n}",
"func (c *Context2D) SetTransform(m Matrix) { c.Call(\"setTransform\", m.A, m.B, m.C, m.D, m.E, m.F) }",
"func (p *RadialGradient) SetTransform(m *matrix.Matrix) {\n\tp.Transform = m\n}",
"func (p *Checkers) SetTransform(m *matrix.Matrix) {\n\tp.Transform = m\n}",
"func (t *Transform) Translate(tx, ty float64) {\n\tout := fmt.Sprintf(\"translate(%g,%g)\", tx, ty)\n\n\tt.transforms = append(t.transforms, out)\n}",
"func (ts *TextState) Translate(tx, ty float64) {\n\tts.Tm = transform.TranslationMatrix(tx, ty).Mult(ts.Tm)\n}",
"func (obj *Device) SetTransform(state TRANSFORMSTATETYPE, matrix MATRIX) Error {\n\tret, _, _ := syscall.Syscall(\n\t\tobj.vtbl.SetTransform,\n\t\t3,\n\t\tuintptr(unsafe.Pointer(obj)),\n\t\tuintptr(state),\n\t\tuintptr(unsafe.Pointer(&matrix[0])),\n\t)\n\treturn toErr(ret)\n}",
"func SetLanguage(language string) {\n\tif _, ok := supportedTranslations[language]; ok {\n\t\tgotext.SetLanguage(language)\n\t\treturn\n\t}\n\tgotext.SetLanguage(defaultLanguage)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetDepth sets transform depth.
|
func (t *Transform) SetDepth(depth float64) *Transform {
t.Depth = depth
return t
}
|
[
"func (o *Dig) SetDepth(v int32) {\n\to.Depth = v\n}",
"func SetDepth(newdepth string) string {\n\tDepth = newdepth\n\treturn Depth\n}",
"func (o *GetHardwaresParams) SetDepth(depth *string) {\n\to.Depth = depth\n}",
"func (a *GetFlattenedDocumentArgs) SetDepth(depth int) *GetFlattenedDocumentArgs {\n\ta.Depth = &depth\n\treturn a\n}",
"func (dev *Device) SetDepthMode(mode FrameMode) int {\n\treturn int(C.freenect_set_depth_mode(dev.ptr(), *mode.ptr()))\n}",
"func SetCallDepth(callDepth int) {\n\tstd.mu.Lock()\n\tdefer std.mu.Unlock()\n\tstd.callDepth = callDepth\n}",
"func (buf *CommandBuffer) SetDepthBias(constantFactor, clamp, slopeFactor float32) {\n\tC.domVkCmdSetDepthBias(buf.fps[vkCmdSetDepthBias], buf.hnd, C.float(constantFactor), C.float(clamp), C.float(slopeFactor))\n}",
"func SetDepthFunc(_func Enum) {\n\tc_func, _ := (C.GLenum)(_func), cgoAllocsUnknown\n\tC.glDepthFunc(c_func)\n}",
"func (i *Image) SetZDepth(z int) {\n\ti.z = z\n}",
"func (t *T) SetMaxDepth(d int) {\n\tt.maxDepth = d\n}",
"func (l *Logger) SetCallDepth(d int) {\n\tl.calldepth = d\n}",
"func (im *Image) SetChannelDepth(ch Channel, depth uint) error {\n\tif C.SetImageChannelDepth(im.image, C.ChannelType(ch), magickUint(depth)) == 0 {\n\t\treturn errors.New(\"error setting channel\")\n\t}\n\treturn nil\n}",
"func (c *canvasRenderer) SetTransform(transform sprec.Mat4) {\n\tif c.currentLayer == c.topLayer {\n\t\tc.currentLayer.Transform = transform\n\t} else {\n\t\tc.currentLayer.Transform = sprec.Mat4Prod(\n\t\t\tc.currentLayer.previous.Transform,\n\t\t\ttransform,\n\t\t)\n\t}\n}",
"func SetCallStackDepth(stackDepth int) {\n\tcallStackDepth = stackDepth\n}",
"func (o *CatalogCategoryManagementV1GetTreeGetParams) SetDepth(depth *int64) {\n\to.Depth = depth\n}",
"func ClearDepth(depth float64) {\n\tsyscall.Syscall(gpClearDepth, 1, uintptr(math.Float64bits(depth)), 0, 0)\n}",
"func (r *Registry) SetRecursiveDepth(count int) {\n\tr.recursiveDepth = count\n}",
"func MakeDepth(width, height int) Texture {\n\ttex := Make(width, height, gl.DEPTH_COMPONENT, gl.DEPTH_COMPONENT,\n\t\tgl.UNSIGNED_BYTE, nil, gl.LINEAR, gl.LINEAR, gl.CLAMP_TO_BORDER,\n\t\tgl.CLAMP_TO_BORDER)\n\treturn tex\n}",
"func WithDepth(depth int) Option {\n\treturn func(o *outputOpts) (*outputOpts, error) {\n\t\tif depth < 0 {\n\t\t\treturn nil, fmt.Errorf(\"invalid depth value %d, value must be >= 0\", depth)\n\t\t}\n\t\tc := o.copy()\n\t\tc.depth = depth\n\t\treturn c, nil\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetOrigin sets transform origin.
|
func (t *Transform) SetOrigin(origin string) *Transform {
t.Origin = origin
return t
}
|
[
"func (this *Transformable) SetOrigin(orig Vector2f) {\n\tC.sfTransformable_setOrigin(this.cptr, orig.toC())\n}",
"func SetOrigin(val string) {\n\torigin = val\n}",
"func SetOrigin(\n\tctx context.Context,\n\tcallOrigin string,\n) context.Context {\n\treturn setIncomingMD(ctx, map[string]string{callOriginHeaderName: callOrigin})\n}",
"func (m *RequestMetaHeader) SetOrigin(v *RequestMetaHeader) {\n\tif m != nil {\n\t\tm.Origin = v\n\t}\n}",
"func (a *SetPermissionArgs) SetOrigin(origin string) *SetPermissionArgs {\n\ta.Origin = &origin\n\treturn a\n}",
"func (a *AStar) SetOrigin(x, y int) {\n\ta.origin = &a.grid[y][x]\n}",
"func (p *ProvisionTokenV2) SetOrigin(origin string) {\n\tp.Metadata.SetOrigin(origin)\n}",
"func (m *ResponseMetaHeader) SetOrigin(v *ResponseMetaHeader) {\n\tif m != nil {\n\t\tm.Origin = v\n\t}\n}",
"func (s *AppServerV3) SetOrigin(origin string) {\n\ts.Metadata.SetOrigin(origin)\n}",
"func (t *Transaction) SetOrigin(origin Address) {\n\tt.origin = &origin\n}",
"func (m *RequestVerificationHeader) SetOrigin(v *RequestVerificationHeader) {\n\tif m != nil {\n\t\tm.Origin = v\n\t}\n}",
"func (h *ResourceHeader) SetOrigin(origin string) {\n\th.Metadata.SetOrigin(origin)\n}",
"func (m *ResponseVerificationHeader) SetOrigin(v *ResponseVerificationHeader) {\n\tif m != nil {\n\t\tm.Origin = v\n\t}\n}",
"func (a *GrantPermissionsArgs) SetOrigin(origin string) *GrantPermissionsArgs {\n\ta.Origin = &origin\n\treturn a\n}",
"func (mr *MockProcessOptionsMockRecorder) SetOrigin(value interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"SetOrigin\", reflect.TypeOf((*MockProcessOptions)(nil).SetOrigin), value)\n}",
"func (self ConvexShape) Setorigin(x, y float32) { \n C.sfConvexShape_setOrigin(self.Cref, C.sfVector2f {\n\t\tC.float(x), C.float(y),\n\t})\n}",
"func (client *Client) SetLayerOrigin(layer int, origin image.Point) error {\n\treturn client.stream.Send(&pb.DrawRequest{\n\t\tType: &pb.DrawRequest_SetLayerOrigin{\n\t\t\tSetLayerOrigin: &pb.SetLayerOrigin{\n\t\t\t\tLayer: int32(layer),\n\t\t\t\tPosition: &pb.Point{\n\t\t\t\t\tX: int32(origin.X),\n\t\t\t\t\tY: int32(origin.Y),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}",
"func (self Text) SetOrigin(x, y float32) {\n\tv := C.sfVector2f{C.float(x), C.float(y)}\n\tC.sfText_setOrigin(self.Cref, v)\n}",
"func (o *object) SetTransform(matrix ...*base.Matrix) {\n\tt := base.Identity\n\tfor _, m := range matrix {\n\t\tt = *t.Multiply(m)\n\t}\n\to.transform = t\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ComputeOriginOffset returns the transform origin offset.
|
func (t *Transform) ComputeOriginOffset(screenWidth, screenHeight float64) (offsetX, offsetY float64) {
switch t.Origin {
case TransformOriginTopLeft:
offsetX, offsetY = 0, screenHeight
case TransformOriginTopMiddle:
offsetX, offsetY = screenWidth/2, screenHeight
case TransformOriginTopRight:
offsetX, offsetY = screenWidth, screenHeight
case TransformOriginMiddleLeft:
offsetX, offsetY = 0, screenHeight/2
case TransformOriginMiddle:
offsetX, offsetY = screenWidth/2, screenHeight/2
case TransformOriginMiddleRight:
offsetX, offsetY = screenWidth, screenHeight/2
case TransformOriginBottomLeft:
offsetX, offsetY = 0, 0
case TransformOriginBottomMiddle:
offsetX, offsetY = screenWidth/2, 0
case TransformOriginBottomRight:
offsetX, offsetY = screenWidth, 0
case "": // TransformOriginBottomLeft
offsetX, offsetY = 0, 0
default:
utils.LogError(fmt.Errorf("unknown transform origin value: %s", t.Origin))
}
return
}
|
[
"func (this *Transformable) GetOrigin() (origin Vector2f) {\n\torigin.fromC(C.sfTransformable_getOrigin(this.cptr))\n\treturn\n}",
"func (self ConvexShape) GetOrigin() (x, y float32) { \n p := C.sfConvexShape_getOrigin(self.Cref)\n\treturn float32(p.x), float32(p.y)\t\n}",
"func (dla *DLASystem) DeterminePositionDistanceFromOrigin(position [2]int) float64 {\n\n\tradiusSquared := float64(position[0]*position[0] + position[1]*position[1])\n\treturn math.Sqrt(radiusSquared)\n}",
"func (p point) distanceToOrigin() int {\n\treturn p.distanceToPoint(point{x: 0, y: 0})\n}",
"func (tlc *TLCMessage) GetOrigin() string {\n\treturn tlc.Origin\n}",
"func (o *OenormPropertiesDto) GetOriginCode() OenormOriginCodeDto {\n\tif o == nil {\n\t\tvar ret OenormOriginCodeDto\n\t\treturn ret\n\t}\n\n\treturn o.OriginCode\n}",
"func calcCoordinateStartingPoint(wm *WatermarkImage, oh OverheadImage, position int, offsetX int, offsetY int) image.Point {\n\n\treturn image.Point{0, 0}\n}",
"func (r *RumorMessage) GetOrigin() string {\n\treturn r.Origin\n}",
"func (z *Input) Offset() int {\n\treturn z.pos\n}",
"func (b *BaseDevice) Origin() Point {\n\treturn b.origin\n}",
"func (m *Metric) Origin() *Origin {\n\treturn m.source.origin\n}",
"func (self Text) Origin() (x, y float32) {\n\tv := C.sfText_getOrigin(self.Cref)\n\treturn float32(v.x), float32(v.y)\n}",
"func (this *RectangleShape) GetOrigin() (origin Vector2f) {\n\torigin.fromC(C.sfRectangleShape_getOrigin(this.cptr))\n\treturn\n}",
"func (s UserSet) ComputeTZOffset() m.UserData {\n\tres := s.Collection().Call(\"ComputeTZOffset\")\n\tresTyped, _ := res.(m.UserData)\n\treturn resTyped\n}",
"func (r Rect) CenterOrigin(v Vec, z float64) Vec3 {\n\treturn r.Center().ScaledXY(v).Vec3(z)\n}",
"func (s *Structured) GetOffset() int {\n\treturn s.cloudEvent.Offset\n}",
"func (g *GetSupergroupMembersRequest) GetOffset() (value int32) {\n\tif g == nil {\n\t\treturn\n\t}\n\treturn g.Offset\n}",
"func (_OpCodes *OpCodesTransactorSession) GetOrigin() (*types.Transaction, error) {\n\treturn _OpCodes.Contract.GetOrigin(&_OpCodes.TransactOpts)\n}",
"func (c MethodsCollection) ComputeTZOffset() pComputeTZOffset {\n\treturn pComputeTZOffset{\n\t\tMethod: c.MustGet(\"ComputeTZOffset\"),\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
RunExampleResolver shows how you would setup one resolver to use this pattern The resolver would typically be a mutation, as queries have their own implementation The GraphQL server acts as a thin proxy that maps and passes requests onto the handler Initialise request structs Pass in the writer and reader Add in the middleware Execute with context
|
func (r *RootResolver) RunExampleResolver(ctx context.Context) {
// Prepare args
req := handlers.MustNewReader(&OnboardStartRequest{})
resp := &bytes.Buffer{}
// Prepare func
fn := handlers.WithLogging(handlers.WithMetrics("start", r.Calls, r.Errors, r.Saturation, r.Duration, OnboardStart(r.userStore, r.accountStore)))
// Execute func and handle errors
err := fn(ctx, resp, req)
var userErr *handlers.UserError
var sysErr *handlers.SystemError
if errors.As(err, &userErr) {
fmt.Println("Input Error:", userErr.Message)
return
}
if errors.As(err, &sysErr) {
fmt.Println("Internal Error")
return
}
}
|
[
"func NewMockResolver() *MockResolver {\n\treturn &MockResolver{\n\t\tCommitGraphFunc: &ResolverCommitGraphFunc{\n\t\t\tdefaultHook: func(context.Context, int) (graphqlbackend.CodeIntelligenceCommitGraphResolver, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\tDeleteIndexByIDFunc: &ResolverDeleteIndexByIDFunc{\n\t\t\tdefaultHook: func(context.Context, int) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\tDeleteUploadByIDFunc: &ResolverDeleteUploadByIDFunc{\n\t\t\tdefaultHook: func(context.Context, int) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\tGetIndexByIDFunc: &ResolverGetIndexByIDFunc{\n\t\t\tdefaultHook: func(context.Context, int) (dbstore.Index, bool, error) {\n\t\t\t\treturn dbstore.Index{}, false, nil\n\t\t\t},\n\t\t},\n\t\tGetIndexesByIDsFunc: &ResolverGetIndexesByIDsFunc{\n\t\t\tdefaultHook: func(context.Context, ...int) ([]dbstore.Index, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\tGetUploadByIDFunc: &ResolverGetUploadByIDFunc{\n\t\t\tdefaultHook: func(context.Context, int) (dbstore.Upload, bool, error) {\n\t\t\t\treturn dbstore.Upload{}, false, nil\n\t\t\t},\n\t\t},\n\t\tGetUploadsByIDsFunc: &ResolverGetUploadsByIDsFunc{\n\t\t\tdefaultHook: func(context.Context, ...int) ([]dbstore.Upload, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\tIndexConfigurationFunc: &ResolverIndexConfigurationFunc{\n\t\t\tdefaultHook: func(context.Context, int) ([]byte, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\tIndexConnectionResolverFunc: &ResolverIndexConnectionResolverFunc{\n\t\t\tdefaultHook: func(dbstore.GetIndexesOptions) *resolvers.IndexesResolver {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\tQueryResolverFunc: &ResolverQueryResolverFunc{\n\t\t\tdefaultHook: func(context.Context, *graphqlbackend.GitBlobLSIFDataArgs) (resolvers.QueryResolver, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t},\n\t\tQueueAutoIndexJobForRepoFunc: &ResolverQueueAutoIndexJobForRepoFunc{\n\t\t\tdefaultHook: func(context.Context, int) error 
{\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\tUpdateIndexConfigurationByRepositoryIDFunc: &ResolverUpdateIndexConfigurationByRepositoryIDFunc{\n\t\t\tdefaultHook: func(context.Context, int, string) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\tUploadConnectionResolverFunc: &ResolverUploadConnectionResolverFunc{\n\t\t\tdefaultHook: func(dbstore.GetUploadsOptions) *resolvers.UploadsResolver {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n}",
"func (m *MockResolver) QueryResolver(v0 context.Context, v1 *graphqlbackend.GitBlobLSIFDataArgs) (resolvers.QueryResolver, error) {\n\tr0, r1 := m.QueryResolverFunc.nextHook()(v0, v1)\n\tm.QueryResolverFunc.appendCall(ResolverQueryResolverFuncCall{v0, v1, r0, r1})\n\treturn r0, r1\n}",
"func init() {\n\tresolver.Register(&Resolver{})\n}",
"func ToResolver[P any, A any, R any](f func(*Context, P, A) (R, error)) graphql.FieldResolveFn {\n\treturn func(p graphql.ResolveParams) (any, error) {\n\t\trecorder := progrock.RecorderFromContext(p.Context)\n\n\t\tvar args A\n\t\targBytes, err := json.Marshal(p.Args)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal args: %w\", err)\n\t\t}\n\t\tif err := json.Unmarshal(argBytes, &args); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to unmarshal args: %w\", err)\n\t\t}\n\n\t\tparent, ok := p.Source.(P)\n\t\tif !ok {\n\t\t\tparentBytes, err := json.Marshal(p.Source)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to marshal parent: %w\", err)\n\t\t\t}\n\t\t\tif err := json.Unmarshal(parentBytes, &parent); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to unmarshal parent: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tif pipelineable, ok := p.Source.(Pipelineable); ok {\n\t\t\trecorder = pipelineable.PipelinePath().RecorderGroup(recorder)\n\t\t\tp.Context = progrock.RecorderToContext(p.Context, recorder)\n\t\t}\n\n\t\tvtx, err := queryVertex(recorder, p.Info.FieldName, p.Source, args)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tctx := Context{\n\t\t\tContext: p.Context,\n\t\t\tResolveParams: p,\n\t\t\tVertex: vtx,\n\t\t}\n\n\t\tres, err := f(&ctx, parent, args)\n\t\tif err != nil {\n\t\t\tvtx.Done(err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif edible, ok := any(res).(Digestible); ok {\n\t\t\tdg, err := edible.Digest()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to compute digest: %w\", err)\n\t\t\t}\n\n\t\t\tvtx.Output(dg)\n\t\t}\n\n\t\tvtx.Done(nil)\n\n\t\treturn res, nil\n\t}\n}",
"func NewResolver(args map[string]interface{}) Resolver {\n\treturn &concreteResolver{args: args}\n}",
"func NewResolver(redisAddr, redisPwd, serviceName string) naming.Registry {\n\tregistry := New(redisAddr, redisPwd, serviceName)\n\tresolver.Register(registry)\n\treturn registry\n}",
"func ExampleQueryHandler(db *scribble.Driver) internal.HandlerFuncType {\n\t/*\n\t Create User object type with fields \"id\" and \"name\" by using GraphQLObjectTypeConfig:\n\t - Name: name of object type\n\t - Fields: a map of fields by using GraphQLFields\n\t Setup type of field use GraphQLFieldConfig\n\t*/\n\tvar user userExample\n\tvar userType = graphql.NewObject(\n\t\tgraphql.ObjectConfig{\n\t\t\tName: \"User\",\n\t\t\tFields: graphql.Fields{\n\t\t\t\t\"id\": &graphql.Field{\n\t\t\t\t\tType: graphql.String,\n\t\t\t\t},\n\t\t\t\t\"name\": &graphql.Field{\n\t\t\t\t\tType: graphql.String,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t)\n\n\t/*\n\t Create Query object type with fields \"user\" has type [userType] by using GraphQLObjectTypeConfig:\n\t - Name: name of object type\n\t - Fields: a map of fields by using GraphQLFields\n\t Setup type of field use GraphQLFieldConfig to define:\n\t - Type: type of field\n\t - Args: arguments to query with current field\n\t - Resolve: function to query data using params from [Args] and return value with current type\n\t*/\n\tvar queryType = graphql.NewObject(\n\t\tgraphql.ObjectConfig{\n\t\t\tName: \"Query\",\n\t\t\tFields: graphql.Fields{\n\t\t\t\t\"user\": &graphql.Field{\n\t\t\t\t\tType: userType,\n\t\t\t\t\tArgs: graphql.FieldConfigArgument{\n\t\t\t\t\t\t\"id\": &graphql.ArgumentConfig{\n\t\t\t\t\t\t\tType: graphql.String,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tResolve: func(p graphql.ResolveParams) (interface{}, error) {\n\t\t\t\t\t\tidQuery, isOK := p.Args[\"id\"].(string)\n\t\t\t\t\t\tif isOK {\n\t\t\t\t\t\t\terr := db.Read(\"users\", idQuery, user)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\treturn user, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn nil, nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\n\tvar schema, _ = graphql.NewSchema(\n\t\tgraphql.SchemaConfig{\n\t\t\tQuery: queryType,\n\t\t},\n\t)\n\treturn baseQueryFunction(schema)\n}",
"func NewResolver() exec.Resolver {\n\treturn &resolver{}\n}",
"func (r *queryCustomResolverType) Resolver(ctx context.Context) (*customresolver.Resolver, error) {\n\t// CustomerResolverType.Resolver implementation\n\treturn nil, nil\n}",
"func (v *Venom) RegisterResolver(level ConfigLevel, r Resolver) {\n\tv.Store.RegisterResolver(level, r)\n}",
"func (mf MiddlewareFunc) Run(req *Request, handler Handler) (*Response, error) {\n\treturn mf(req, handler)\n}",
"func NewResolver(config *config.Config) Resolver {\n\tif config.RemoteTaggerEnabled {\n\t\toptions, err := remote.NodeAgentOptions()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"unable to configure the remote tagger: %s\", err)\n\t\t} else {\n\t\t\treturn &DefaultResolver{\n\t\t\t\ttagger: remote.NewTagger(options),\n\t\t\t}\n\t\t}\n\t}\n\treturn &DefaultResolver{\n\t\ttagger: &nullTagger{},\n\t}\n}",
"func GetResolver(ctx gocontext.Context, clicontext *cli.Context) (remotes.Resolver, error) {\n\tusername := clicontext.String(\"user\")\n\tvar secret string\n\tif i := strings.IndexByte(username, ':'); i > 0 {\n\t\tsecret = username[i+1:]\n\t\tusername = username[0:i]\n\t}\n\toptions := docker.ResolverOptions{\n\t\tTracker: PushTracker,\n\t}\n\tif username != \"\" {\n\t\tif secret == \"\" {\n\t\t\tfmt.Printf(\"Password: \")\n\n\t\t\tvar err error\n\t\t\tsecret, err = passwordPrompt()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfmt.Print(\"\\n\")\n\t\t}\n\t} else if rt := clicontext.String(\"refresh\"); rt != \"\" {\n\t\tsecret = rt\n\t}\n\n\thostOptions := config.HostOptions{}\n\thostOptions.Credentials = func(host string) (string, string, error) {\n\t\t// If host doesn't match...\n\t\t// Only one host\n\t\treturn username, secret, nil\n\t}\n\tif clicontext.Bool(\"plain-http\") {\n\t\thostOptions.DefaultScheme = \"http\"\n\t}\n\tdefaultTLS, err := resolverDefaultTLS(clicontext)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thostOptions.DefaultTLS = defaultTLS\n\tif hostDir := clicontext.String(\"hosts-dir\"); hostDir != \"\" {\n\t\thostOptions.HostDir = config.HostDirFromRoot(hostDir)\n\t}\n\n\tif clicontext.Bool(\"http-dump\") {\n\t\thostOptions.UpdateClient = func(client *http.Client) error {\n\t\t\tclient.Transport = &DebugTransport{\n\t\t\t\ttransport: client.Transport,\n\t\t\t\twriter: log.G(ctx).Writer(),\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\toptions.Hosts = config.ConfigureHosts(ctx, hostOptions)\n\n\treturn docker.NewResolver(options), nil\n}",
"func (s serverImpl) RegisterResolver(ctx context.Context, msg *data.MsgRegisterResolver) (*data.MsgRegisterResolverResponse, error) {\n\tresolver, err := s.stateStore.ResolverTable().Get(ctx, msg.ResolverId)\n\tif err != nil {\n\t\treturn nil, sdkerrors.ErrNotFound.Wrapf(\"resolver with id %d does not exist\", msg.ResolverId)\n\t}\n\n\tmanager, err := sdk.AccAddressFromBech32(msg.Manager)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !bytes.Equal(resolver.Manager, manager) {\n\t\treturn nil, data.ErrUnauthorizedResolverManager\n\t}\n\n\tsdkCtx := sdk.UnwrapSDKContext(ctx)\n\n\tfor _, ch := range msg.ContentHashes {\n\t\tiri, id, _, err := s.anchorAndGetIRI(ctx, ch)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = s.stateStore.DataResolverTable().Save(\n\t\t\tctx,\n\t\t\t&api.DataResolver{\n\t\t\t\tResolverId: msg.ResolverId,\n\t\t\t\tId: id,\n\t\t\t},\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = sdkCtx.EventManager().EmitTypedEvent(&data.EventRegisterResolver{\n\t\t\tId: msg.ResolverId,\n\t\t\tIri: iri,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsdkCtx.GasMeter().ConsumeGas(data.GasCostPerIteration, \"data/RegisterResolver content hash iteration\")\n\t}\n\n\treturn &data.MsgRegisterResolverResponse{}, nil\n}",
"func NewResolver() peerresolver.Provider {\n\treturn func(ed service.Dispatcher, context context.Client, channelID string, opts ...options.Opt) peerresolver.Resolver {\n\t\treturn New(ed, context, channelID, opts...)\n\t}\n}",
"func InitializeResolver() generated.Config {\n\tresolver := Resolver{}\n\n\tresolver.dbClient = postgres.Connect()\n\tresolver.redisClient = redis.Connect()\n\n\treturn generated.Config{\n\t\tResolvers: &resolver,\n\t}\n}",
"func GraphqlHandler() gin.HandlerFunc {\n // NewExecutableSchema and Config are in the generated.go file\n // Resolver is in the resolver.go file\n h := handler.NewDefaultServer(generated.NewExecutableSchema(generated.Config{Resolvers: &resolvers.Resolver{}}))\n\n return func(c *gin.Context) {\n h.ServeHTTP(c.Writer, c.Request)\n }\n}",
"func NewResolver(cln *client.Client, mw *progress.MultiWriter) (Resolver, error) {\n\troot, exist, err := modulesPathExist()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !exist {\n\t\treturn &remoteResolver{cln, mw, root}, nil\n\t}\n\n\treturn &vendorResolver{root}, nil\n}",
"func Handler(service e2e.Service, hooks *twirp.ServerHooks) *handler.Server {\n\tes := NewExecutableSchema(Config{Resolvers: &Resolver{service}})\n\tsrv := handler.New(es)\n\tsrv.AddTransport(transport.POST{})\n\tsrv.Use(extension.Introspection{})\n\tif hooks == nil {\n\t\treturn srv\n\t}\n\tsrv.AroundFields(func(ctx context.Context, next graphql.Resolver) (res interface{}, err error) {\n\t\tf := graphql.GetFieldContext(ctx)\n\t\tparent := f.Parent.Path().String()\n\t\tif parent != \"\" {\n\t\t\treturn next(ctx)\n\t\t}\n\t\tctx = ctxsetters.WithMethodName(ctx, f.Field.Name)\n\t\tif hooks.RequestRouted != nil {\n\t\t\tctx, err = hooks.RequestRouted(ctx)\n\t\t\tif err != nil {\n\t\t\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\t\t\tctx = hooks.Error(ctx, terr)\n\t\t\t\t}\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tres, err = next(ctx)\n\t\tif terr, ok := err.(twirp.Error); ok && hooks.Error != nil {\n\t\t\tctx = hooks.Error(ctx, terr)\n\t\t}\n\t\treturn res, err\n\t})\n\treturn srv\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
OnboardStart holds the business logic in a signature agnostic way, allowing for middlewares to be applied It returns a func, allowing for additional data to be passed via closures
|
func OnboardStart(userStore UserStore, accountStore AccountStore) handlers.Handler {
fn := func(ctx context.Context, w handlers.Writer, r handlers.Reader) error {
req := &OnboardStartRequest{}
handlers.MustDecode(r, req)
// Handle validations
if true {
return &handlers.UserError{"3c157e31-0a63-4f96-9c9c-19353024ce34", "wrong username", errors.New("example error")}
}
// Handle logic
if true {
return &handlers.SystemError{"8c24c633-1bba-4990-813c-b21d24d6e7f5", "database connection failure", errors.New("example error")}
}
// Handle response
resp := &OnboardStartResponse{}
handlers.MustEncode(w, resp)
// Return user errors and system errors
return nil
}
return fn
}
|
[
"func (b *Bot) addStartFunc(fn server.Component) {\n\tb.server.RunComponent(fn)\n}",
"func (c *controller) Startup(fn func() error) {\n\tc.parser.cfg.Startup = append(c.parser.cfg.Startup, fn)\n}",
"func (s *BasejossListener) EnterFunction_(ctx *Function_Context) {}",
"func Start(h http.Handler) {\n\tlambda.Start(apiGatewayHandler(h))\n}",
"func (o *Onboarder) DoOnboard(ce cloudevents.Event, loggingDone chan bool) error {\n\n\tdefer func() { loggingDone <- true }()\n\n\tevent := &keptnevents.ServiceCreateEventData{}\n\tif err := ce.DataAs(event); err != nil {\n\t\to.logger.Error(fmt.Sprintf(\"Got Data Error: %s\", err.Error()))\n\t\treturn err\n\t}\n\n\tif _, ok := event.DeploymentStrategies[\"*\"]; ok {\n\t\tdeplStrategies, err := FixDeploymentStrategies(event.Project, event.DeploymentStrategies[\"*\"])\n\t\tif err != nil {\n\t\t\to.logger.Error(fmt.Sprintf(\"Error when getting deployment strategies: %s\" + err.Error()))\n\t\t\treturn err\n\t\t}\n\t\tevent.DeploymentStrategies = deplStrategies\n\t} else if os.Getenv(\"PRE_WORKFLOW_ENGINE\") == \"true\" && (event.DeploymentStrategies == nil || len(event.DeploymentStrategies) == 0) {\n\t\tdeplStrategies, err := GetDeploymentStrategies(event.Project)\n\t\tif err != nil {\n\t\t\to.logger.Error(fmt.Sprintf(\"Error when getting deployment strategies: %s\" + err.Error()))\n\t\t\treturn err\n\t\t}\n\t\tevent.DeploymentStrategies = deplStrategies\n\t}\n\n\to.logger.Info(fmt.Sprintf(\"Start creating service %s in project %s\", event.Service, event.Project))\n\n\turl, err := serviceutils.GetConfigServiceURL()\n\tif err != nil {\n\t\to.logger.Error(fmt.Sprintf(\"Error when getting config service url: %s\", err.Error()))\n\t\treturn err\n\t}\n\n\tstageHandler := keptnutils.NewStageHandler(url.String())\n\tstages, err := stageHandler.GetAllStages(event.Project)\n\tif err != nil {\n\t\to.logger.Error(\"Error when getting all stages: \" + err.Error())\n\t\treturn err\n\t}\n\n\tfirstService, err := o.isFirstServiceOfProject(event, stages)\n\tif err != nil {\n\t\to.logger.Error(\"Error when checking whether any service was created before: \" + err.Error())\n\t\treturn err\n\t}\n\tif firstService {\n\t\to.logger.Info(\"Create Helm umbrella charts\")\n\t\tumbrellaChartHandler := helm.NewUmbrellaChartHandler(o.mesh)\n\t\tif err := o.initAndApplyUmbrellaChart(event, 
umbrellaChartHandler, stages); err != nil {\n\t\t\to.logger.Error(fmt.Sprintf(\"Error when initalizing and applying umbrella charts for project %s: %s\", event.Project, err.Error()))\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, stage := range stages {\n\t\tif err := o.onboardService(stage.StageName, event, url.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\to.logger.Info(fmt.Sprintf(\"Finished creating service %s in project %s\", event.Service, event.Project))\n\treturn nil\n}",
"func (o *Onboarder) DoOnboard(ce cloudevents.Event, loggingDone chan bool) error {\n\n\tdefer func() { loggingDone <- true }()\n\n\tkeptnHandler, err := keptnevents.NewKeptn(&ce, keptnevents.KeptnOpts{})\n\tif err != nil {\n\t\to.keptnHandler.Logger.Error(\"Could not initialize Keptn Handler: \" + err.Error())\n\t\treturn err\n\t}\n\tevent := &keptnevents.ServiceCreateEventData{}\n\tif err := ce.DataAs(event); err != nil {\n\t\to.keptnHandler.Logger.Error(fmt.Sprintf(\"Got Data Error: %s\", err.Error()))\n\t\treturn err\n\t}\n\n\tif err := o.checkAndSetServiceName(event); err != nil {\n\t\to.keptnHandler.Logger.Error(fmt.Sprintf(\"Invalid service name: %s\", err.Error()))\n\t\treturn err\n\t}\n\n\tif _, ok := event.DeploymentStrategies[\"*\"]; ok {\n\t\t// Uses the provided deployment strategy for ALL stages\n\t\tdeplStrategies, err := fixDeploymentStrategies(keptnHandler, event.DeploymentStrategies[\"*\"])\n\t\tif err != nil {\n\t\t\to.keptnHandler.Logger.Error(fmt.Sprintf(\"Error when getting deployment strategies: %s\", err.Error()))\n\t\t\treturn err\n\t\t}\n\t\tevent.DeploymentStrategies = deplStrategies\n\t} else if os.Getenv(\"PRE_WORKFLOW_ENGINE\") == \"true\" && len(event.DeploymentStrategies) == 0 {\n\t\tdeplStrategies, err := getDeploymentStrategies(keptnHandler)\n\t\tif err != nil {\n\t\t\to.keptnHandler.Logger.Error(fmt.Sprintf(\"Error when getting deployment strategies: %s\", err.Error()))\n\t\t\treturn err\n\t\t}\n\t\tevent.DeploymentStrategies = deplStrategies\n\t}\n\n\to.keptnHandler.Logger.Debug(fmt.Sprintf(\"Start creating service %s in project %s\", event.Service, event.Project))\n\n\tstageHandler := configutils.NewStageHandler(o.configServiceURL)\n\tstages, err := stageHandler.GetAllStages(event.Project)\n\tif err != nil {\n\t\to.keptnHandler.Logger.Error(\"Error when getting all stages: \" + err.Error())\n\t\treturn err\n\t}\n\n\tif len(stages) == 0 {\n\t\to.keptnHandler.Logger.Error(\"Cannot onboard service because no stage is 
available\")\n\t\treturn errors.New(\"Cannot onboard service because no stage is available\")\n\t}\n\n\tnamespaceMng := NewNamespaceManager(o.keptnHandler.Logger)\n\n\tif event.HelmChart != \"\" {\n\n\t\tif err := namespaceMng.InitNamespaces(event.Project, stages); err != nil {\n\t\t\to.keptnHandler.Logger.Error(err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tumbrellaChartHandler := helm.NewUmbrellaChartHandler(o.configServiceURL)\n\t\tisUmbrellaChartAvailable, err := umbrellaChartHandler.IsUmbrellaChartAvailableInAllStages(event.Project, stages)\n\t\tif err != nil {\n\t\t\to.keptnHandler.Logger.Error(\"Error when getting Helm chart for stages. \" + err.Error())\n\t\t\treturn err\n\t\t}\n\t\tif !isUmbrellaChartAvailable {\n\t\t\to.keptnHandler.Logger.Info(\"Create Helm umbrella charts\")\n\t\t\t// Initialize the umbrella chart\n\t\t\tif err := umbrellaChartHandler.InitUmbrellaChart(event, stages); err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error when initializing the umbrella chart for project %s: %s\", event.Project, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, stage := range stages {\n\t\tif err := o.onboardService(stage.StageName, event); err != nil {\n\t\t\to.keptnHandler.Logger.Error(err.Error())\n\t\t\treturn err\n\t\t}\n\t\tif event.DeploymentStrategies[stage.StageName] == keptnevents.Duplicate && event.HelmChart != \"\" {\n\t\t\t// inject Istio to the namespace for blue-green deployments\n\t\t\tif err := namespaceMng.InjectIstio(event.Project, stage.StageName); err != nil {\n\t\t\t\to.keptnHandler.Logger.Error(err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\to.keptnHandler.Logger.Info(fmt.Sprintf(\"Finished creating service %s in project %s\", event.Service, event.Project))\n\treturn nil\n}",
"func (s *BaseTinyLangListener) EnterFuncInvocArgs(ctx *FuncInvocArgsContext) {}",
"func (r *Runtime) OnStart(f func(context.Context, *Executor) error) {\n\tr.onStartHooks = append(r.onStartHooks, f)\n}",
"func OnStarted(deciders ...Decider) Decider {\n\treturn func(ctx *FSMContext, h *swf.HistoryEvent, data interface{}) Outcome {\n\t\tswitch *h.EventType {\n\t\tcase enums.EventTypeWorkflowExecutionStarted:\n\t\t\tlogf(ctx, \"at=on-started\")\n\t\t\treturn NewComposedDecider(deciders...)(ctx, h, data)\n\t\t}\n\t\treturn ctx.Pass()\n\t}\n}",
"func OnStart() {\n}",
"func main() {\n\tlambda.Start(handleRequest)\n}",
"func (s *BaseConcertoListener) EnterFuncSpec(ctx *FuncSpecContext) {}",
"func (s *BasemdxListener) EnterFunction_(ctx *Function_Context) {}",
"func (s *BasejossListener) EnterFuncLog(ctx *FuncLogContext) {}",
"func (s *BasejossListener) EnterFuncSin(ctx *FuncSinContext) {}",
"func (b *backend) createAndStartDelegate(sink *auditregv1alpha1.AuditSink) (*delegate, error) {\n\tf := factory{\n\t\tconfig: b.config,\n\t\twebhookClientManager: b.webhookClientManager,\n\t\tsink: sink,\n\t}\n\tdelegate, err := f.BuildDelegate()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = delegate.Run(delegate.stopChan)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn delegate, nil\n}",
"func (c *Controller) onWorkloadConnect(entryName string, proxy *model.Proxy, conTime time.Time, autoCreate bool) error {\n\tif autoCreate {\n\t\treturn c.registerWorkload(entryName, proxy, conTime)\n\t}\n\treturn c.becomeControllerOf(entryName, proxy, conTime)\n}",
"func (s *BasecalculatorListener) EnterFunc_(ctx *Func_Context) {}",
"func (s *BasemumpsListener) EnterFunction_(ctx *Function_Context) {}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deprecated: Use TopicSubmissionMessage.ProtoReflect.Descriptor instead.
|
func (*TopicSubmissionMessage) Descriptor() ([]byte, []int) {
return file_topic_submission_message_proto_rawDescGZIP(), []int{0}
}
|
[
"func (*Topic) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_r5_core_resources_topic_proto_rawDescGZIP(), []int{0}\n}",
"func (*TopicMessage) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_routing_v2_service_proto_rawDescGZIP(), []int{9}\n}",
"func (*Topic) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_pubsub_topic_proto_rawDescGZIP(), []int{0}\n}",
"func (*Topic) Descriptor() ([]byte, []int) {\n\treturn file_topic_proto_rawDescGZIP(), []int{0}\n}",
"func (*TopicGroup) Descriptor() ([]byte, []int) {\n\treturn file_topic_proto_rawDescGZIP(), []int{1}\n}",
"func (*Topic_Details) Descriptor() ([]byte, []int) {\n\treturn file_topic_proto_rawDescGZIP(), []int{0, 1}\n}",
"func (*Topic_Parameters) Descriptor() ([]byte, []int) {\n\treturn file_toit_model_pubsub_topic_proto_rawDescGZIP(), []int{0, 0}\n}",
"func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {\n\tswitch d := d.(type) {\n\tcase protoreflect.FileDescriptor:\n\t\treturn ProtoFromFileDescriptor(d)\n\tcase protoreflect.MessageDescriptor:\n\t\treturn ProtoFromMessageDescriptor(d)\n\tcase protoreflect.FieldDescriptor:\n\t\treturn ProtoFromFieldDescriptor(d)\n\tcase protoreflect.OneofDescriptor:\n\t\treturn ProtoFromOneofDescriptor(d)\n\tcase protoreflect.EnumDescriptor:\n\t\treturn ProtoFromEnumDescriptor(d)\n\tcase protoreflect.EnumValueDescriptor:\n\t\treturn ProtoFromEnumValueDescriptor(d)\n\tcase protoreflect.ServiceDescriptor:\n\t\treturn ProtoFromServiceDescriptor(d)\n\tcase protoreflect.MethodDescriptor:\n\t\treturn ProtoFromMethodDescriptor(d)\n\tdefault:\n\t\t// WTF??\n\t\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\t\treturn res.AsProto()\n\t\t}\n\t\treturn nil\n\t}\n}",
"func ProtoFromMessageDescriptor(d protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {\n\ttype canProto interface {\n\t\tMessageDescriptorProto() *descriptorpb.DescriptorProto\n\t}\n\tif res, ok := d.(canProto); ok {\n\t\treturn res.MessageDescriptorProto()\n\t}\n\tif res, ok := d.(DescriptorProtoWrapper); ok {\n\t\tif md, ok := res.AsProto().(*descriptorpb.DescriptorProto); ok {\n\t\t\treturn md\n\t\t}\n\t}\n\treturn protodesc.ToDescriptorProto(d)\n}",
"func (*GenerateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ai_generativelanguage_v1beta2_discuss_service_proto_rawDescGZIP(), []int{0}\n}",
"func (*RecentMessagesRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{16}\n}",
"func (*MessagePublishRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_pubsublite_v1_publisher_proto_rawDescGZIP(), []int{2}\n}",
"func (*CMsgGCPlayerInfoSubmit) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{118}\n}",
"func (*TopicSubscription) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_routing_v2_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*Topic_ResourceTrigger) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_r5_core_resources_topic_proto_rawDescGZIP(), []int{0, 1}\n}",
"func (*CreateMessageRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_threads_proto_rawDescGZIP(), []int{12}\n}",
"func (*Contract_TopicX) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_r4_core_resources_contract_proto_rawDescGZIP(), []int{0, 1}\n}",
"func (*AnalysisMessageWeakSchema) Descriptor() ([]byte, []int) {\n\treturn file_analysis_v1alpha1_message_proto_rawDescGZIP(), []int{1}\n}",
"func (*Contract_Term_TopicX) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_r4_core_resources_contract_proto_rawDescGZIP(), []int{0, 3, 0}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewVvolBindingDeleteParams creates a new VvolBindingDeleteParams object, with the default timeout for this client. Default values are not hydrated, since defaults are normally applied by the API server side. To enforce default values in parameter, use SetDefaults or WithDefaults.
|
func NewVvolBindingDeleteParams() *VvolBindingDeleteParams {
return &VvolBindingDeleteParams{
timeout: cr.DefaultTimeout,
}
}
|
[
"func NewVvolBindingDeleteParamsWithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewVvolBindingDeleteParamsWithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithDefaults() *VvolBindingDeleteParams {\n\to.SetDefaults()\n\treturn o\n}",
"func (o *VvolBindingDeleteParams) WithContext(ctx context.Context) *VvolBindingDeleteParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *VvolBindingDeleteParams) WithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *VvolBindingDeleteParams) WithDeleteAllReferences(deleteAllReferences *bool) *VvolBindingDeleteParams {\n\to.SetDeleteAllReferences(deleteAllReferences)\n\treturn o\n}",
"func NewVolumeDeleteParams() *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (s *VPCService) NewDeleteVPCOfferingParams(id string) *DeleteVPCOfferingParams {\n\tp := &DeleteVPCOfferingParams{}\n\tp.p = make(map[string]interface{})\n\tp.p[\"id\"] = id\n\treturn p\n}",
"func NewVvolBindingDeleteParamsWithContext(ctx context.Context) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewDeleteSubnetParams() *DeleteSubnetParams {\n\tvar ()\n\treturn &DeleteSubnetParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewDeletePackageVersionParams() *DeletePackageVersionParams {\n\tvar ()\n\treturn &DeletePackageVersionParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewDeleteTriggerParams() *DeleteTriggerParams {\n\tvar ()\n\treturn &DeleteTriggerParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewVolumeDeleteParamsWithTimeout(timeout time.Duration) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteLoadBalancerPoolParams() *DeleteLoadBalancerPoolParams {\n\tvar ()\n\treturn &DeleteLoadBalancerPoolParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewDeleteTagParams() *DeleteTagParams {\n\tvar ()\n\treturn &DeleteTagParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewDeleteProtocolUsingDELETEParamsWithTimeout(timeout time.Duration) *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithProtocolEndpointUUID(protocolEndpointUUID string) *VvolBindingDeleteParams {\n\to.SetProtocolEndpointUUID(protocolEndpointUUID)\n\treturn o\n}",
"func NewDeleteProtocolUsingDELETEParams() *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewVvolBindingDeleteParamsWithTimeout creates a new VvolBindingDeleteParams object with the ability to set a timeout on a request.
|
func NewVvolBindingDeleteParamsWithTimeout(timeout time.Duration) *VvolBindingDeleteParams {
return &VvolBindingDeleteParams{
timeout: timeout,
}
}
|
[
"func (o *VvolBindingDeleteParams) WithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewVvolBindingDeleteParams() *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *VvolBindingDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewVolumeDeleteParamsWithTimeout(timeout time.Duration) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteSubnetParamsWithTimeout(timeout time.Duration) *DeleteSubnetParams {\n\tvar ()\n\treturn &DeleteSubnetParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeletePackageVersionParamsWithTimeout(timeout time.Duration) *DeletePackageVersionParams {\n\tvar ()\n\treturn &DeletePackageVersionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewComponentDeleteParamsWithTimeout(timeout time.Duration) *ComponentDeleteParams {\n\treturn &ComponentDeleteParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteTagParamsWithTimeout(timeout time.Duration) *DeleteTagParams {\n\tvar ()\n\treturn &DeleteTagParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteProtocolUsingDELETEParamsWithTimeout(timeout time.Duration) *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewLedgerVoucherAttachmentDeleteAttachmentParamsWithTimeout(timeout time.Duration) *LedgerVoucherAttachmentDeleteAttachmentParams {\n\tvar (\n\t\tsendToInboxDefault = bool(false)\n\t\tsplitDefault = bool(false)\n\t)\n\treturn &LedgerVoucherAttachmentDeleteAttachmentParams{\n\t\tSendToInbox: &sendToInboxDefault,\n\t\tSplit: &splitDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteResourceParamsWithTimeout(timeout time.Duration) *DeleteResourceParams {\n\tvar ()\n\treturn &DeleteResourceParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeletePackageParamsWithTimeout(timeout time.Duration) *DeletePackageParams {\n\tvar ()\n\treturn &DeletePackageParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteRuntimesParamsWithTimeout(timeout time.Duration) *DeleteRuntimesParams {\n\tvar ()\n\treturn &DeleteRuntimesParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteVersionControlRequestParamsWithTimeout(timeout time.Duration) *DeleteVersionControlRequestParams {\n\treturn &DeleteVersionControlRequestParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteTriggerParamsWithTimeout(timeout time.Duration) *DeleteTriggerParams {\n\tvar ()\n\treturn &DeleteTriggerParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteConnectionParamsWithTimeout(timeout time.Duration) *DeleteConnectionParams {\n\tvar ()\n\treturn &DeleteConnectionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteConditionParamsWithTimeout(timeout time.Duration) *DeleteConditionParams {\n\tvar ()\n\treturn &DeleteConditionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteLoadBalancerPoolParamsWithTimeout(timeout time.Duration) *DeleteLoadBalancerPoolParams {\n\tvar ()\n\treturn &DeleteLoadBalancerPoolParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *VolumeDeleteParams) WithTimeout(timeout time.Duration) *VolumeDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewVvolBindingDeleteParamsWithContext creates a new VvolBindingDeleteParams object with the ability to set a context for a request.
|
func NewVvolBindingDeleteParamsWithContext(ctx context.Context) *VvolBindingDeleteParams {
return &VvolBindingDeleteParams{
Context: ctx,
}
}
|
[
"func NewVvolBindingDeleteParams() *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewVvolBindingDeleteParamsWithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithContext(ctx context.Context) *VvolBindingDeleteParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewVvolBindingDeleteParamsWithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithDefaults() *VvolBindingDeleteParams {\n\to.SetDefaults()\n\treturn o\n}",
"func (o *VvolBindingDeleteParams) WithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (obj *Sys) BindingVipDeleteWithContext(ctx context.Context, input CoSysBindingVip, _opt ...map[string]string) (output Result, err error) {\n\tvar inputMarshal []byte\n\tinputMarshal, err = proto.Marshal(&input)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\n\tresp := new(requestf.ResponsePacket)\n\n\terr = obj.s.Tars_invoke(ctx, 0, \"BindingVipDelete\", inputMarshal, _status, _context, resp)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\tif err = proto.Unmarshal(tools.Int8ToByte(resp.SBuffer), &output); err != nil {\n\t\treturn output, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\t}\n\n\treturn output, nil\n}",
"func NewVolumeDeleteParamsWithContext(ctx context.Context) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\n\t\tContext: ctx,\n\t}\n}",
"func NewLedgerVoucherAttachmentDeleteAttachmentParamsWithContext(ctx context.Context) *LedgerVoucherAttachmentDeleteAttachmentParams {\n\tvar (\n\t\tsendToInboxDefault = bool(false)\n\t\tsplitDefault = bool(false)\n\t)\n\treturn &LedgerVoucherAttachmentDeleteAttachmentParams{\n\t\tSendToInbox: &sendToInboxDefault,\n\t\tSplit: &splitDefault,\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewDeleteLoadBalancerPoolParamsWithContext(ctx context.Context) *DeleteLoadBalancerPoolParams {\n\tvar ()\n\treturn &DeleteLoadBalancerPoolParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewDeleteSubnetParamsWithContext(ctx context.Context) *DeleteSubnetParams {\n\tvar ()\n\treturn &DeleteSubnetParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithVvolUUID(vvolUUID string) *VvolBindingDeleteParams {\n\to.SetVvolUUID(vvolUUID)\n\treturn o\n}",
"func NewDeleteProtocolUsingDELETEParamsWithContext(ctx context.Context) *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithDeleteAllReferences(deleteAllReferences *bool) *VvolBindingDeleteParams {\n\to.SetDeleteAllReferences(deleteAllReferences)\n\treturn o\n}",
"func (o *VvolBindingDeleteParams) WithProtocolEndpointUUID(protocolEndpointUUID string) *VvolBindingDeleteParams {\n\to.SetProtocolEndpointUUID(protocolEndpointUUID)\n\treturn o\n}",
"func NewDeletePackageVersionParamsWithContext(ctx context.Context) *DeletePackageVersionParams {\n\tvar ()\n\treturn &DeletePackageVersionParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewDeleteTriggerParamsWithContext(ctx context.Context) *DeleteTriggerParams {\n\tvar ()\n\treturn &DeleteTriggerParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewDeleteTagParamsWithContext(ctx context.Context) *DeleteTagParams {\n\tvar ()\n\treturn &DeleteTagParams{\n\n\t\tContext: ctx,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewVvolBindingDeleteParamsWithHTTPClient creates a new VvolBindingDeleteParams object with the ability to set a custom HTTPClient for a request.
|
func NewVvolBindingDeleteParamsWithHTTPClient(client *http.Client) *VvolBindingDeleteParams {
return &VvolBindingDeleteParams{
HTTPClient: client,
}
}
|
[
"func (o *VvolBindingDeleteParams) WithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewVvolBindingDeleteParams() *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewVolumeDeleteParamsWithHTTPClient(client *http.Client) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *VvolBindingDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewDeleteTagParamsWithHTTPClient(client *http.Client) *DeleteTagParams {\n\tvar ()\n\treturn &DeleteTagParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeletePackageVersionParamsWithHTTPClient(client *http.Client) *DeletePackageVersionParams {\n\tvar ()\n\treturn &DeletePackageVersionParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *VolumeDeleteParams) WithHTTPClient(client *http.Client) *VolumeDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewComponentDeleteParamsWithHTTPClient(client *http.Client) *ComponentDeleteParams {\n\treturn &ComponentDeleteParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeleteLoadBalancerPoolParamsWithHTTPClient(client *http.Client) *DeleteLoadBalancerPoolParams {\n\tvar ()\n\treturn &DeleteLoadBalancerPoolParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewVvolBindingDeleteParamsWithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteProtocolUsingDELETEParamsWithHTTPClient(client *http.Client) *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeleteTriggerParamsWithHTTPClient(client *http.Client) *DeleteTriggerParams {\n\tvar ()\n\treturn &DeleteTriggerParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeleteDataSourceParamsWithHTTPClient(client *http.Client) *DeleteDataSourceParams {\n\treturn &DeleteDataSourceParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithContext(ctx context.Context) *VvolBindingDeleteParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewDeleteConditionParamsWithHTTPClient(client *http.Client) *DeleteConditionParams {\n\tvar ()\n\treturn &DeleteConditionParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeleteRuntimesParamsWithHTTPClient(client *http.Client) *DeleteRuntimesParams {\n\tvar ()\n\treturn &DeleteRuntimesParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeleteVMInterfaceParamsWithHTTPClient(client *http.Client) *DeleteVMInterfaceParams {\n\tvar ()\n\treturn &DeleteVMInterfaceParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeleteSubnetParamsWithHTTPClient(client *http.Client) *DeleteSubnetParams {\n\tvar ()\n\treturn &DeleteSubnetParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewDeleteVersionControlRequestParamsWithHTTPClient(client *http.Client) *DeleteVersionControlRequestParams {\n\treturn &DeleteVersionControlRequestParams{\n\t\tHTTPClient: client,\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithDefaults hydrates default values in the vvol binding delete params (not the query body). All values with no default are reset to their zero value.
|
func (o *VvolBindingDeleteParams) WithDefaults() *VvolBindingDeleteParams {
o.SetDefaults()
return o
}
|
[
"func (o *VvolBindingDeleteParams) SetDefaults() {\n\tvar (\n\t\tdeleteAllReferencesDefault = bool(false)\n\t)\n\n\tval := VvolBindingDeleteParams{\n\t\tDeleteAllReferences: &deleteAllReferencesDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *DeleteUsingDELETE1Params) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteBlueprintRequestUsingDELETE1Params) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *ComponentDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *OrderDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *NvmeNamespaceDeleteParams) SetDefaults() {\n\tvar (\n\t\tallowDeleteWhileMappedDefault = bool(false)\n\t)\n\n\tval := NvmeNamespaceDeleteParams{\n\t\tAllowDeleteWhileMapped: &allowDeleteWhileMappedDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *PcloudV2VolumescloneDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteVersionControlRequestParams) SetDefaults() {\n\tvar (\n\t\tdisconnectedNodeAcknowledgedDefault = bool(false)\n\t)\n\n\tval := DeleteVersionControlRequestParams{\n\t\tDisconnectedNodeAcknowledged: &disconnectedNodeAcknowledgedDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *WwpnAliasDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteRepositoryParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteRelationTupleParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteRepositoryCredentialParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteV1WebhooksWebhookIDParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteDataSourceParams) SetDefaults() {\n\tvar (\n\t\tencryptionModeDefault = string(\"CLEAR\")\n\n\t\tstorageTypeDefault = string(\"LOCAL\")\n\t)\n\n\tval := DeleteDataSourceParams{\n\t\tEncryptionMode: &encryptionModeDefault,\n\t\tStorageType: &storageTypeDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *DeleteProtectedEntityParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeletePacketCapturesParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *ServiceInstanceDeprovisionParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteJobParams) SetDefaults() {\n\t// no default values defined for this parameter\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetDefaults hydrates default values in the vvol binding delete params (not the query body). All values with no default are reset to their zero value.
|
func (o *VvolBindingDeleteParams) SetDefaults() {
var (
deleteAllReferencesDefault = bool(false)
)
val := VvolBindingDeleteParams{
DeleteAllReferences: &deleteAllReferencesDefault,
}
val.timeout = o.timeout
val.Context = o.Context
val.HTTPClient = o.HTTPClient
*o = val
}
|
[
"func (o *DeleteBlueprintRequestUsingDELETE1Params) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteUsingDELETE1Params) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *ComponentDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *OrderDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *NvmeNamespaceDeleteParams) SetDefaults() {\n\tvar (\n\t\tallowDeleteWhileMappedDefault = bool(false)\n\t)\n\n\tval := NvmeNamespaceDeleteParams{\n\t\tAllowDeleteWhileMapped: &allowDeleteWhileMappedDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *DeleteRepositoryParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteVersionControlRequestParams) SetDefaults() {\n\tvar (\n\t\tdisconnectedNodeAcknowledgedDefault = bool(false)\n\t)\n\n\tval := DeleteVersionControlRequestParams{\n\t\tDisconnectedNodeAcknowledged: &disconnectedNodeAcknowledgedDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *PcloudV2VolumescloneDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *WwpnAliasDeleteParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteDataSourceParams) SetDefaults() {\n\tvar (\n\t\tencryptionModeDefault = string(\"CLEAR\")\n\n\t\tstorageTypeDefault = string(\"LOCAL\")\n\t)\n\n\tval := DeleteDataSourceParams{\n\t\tEncryptionMode: &encryptionModeDefault,\n\t\tStorageType: &storageTypeDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *DeleteProtectedEntityParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteV1WebhooksWebhookIDParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteRepositoryCredentialParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteRelationTupleParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeletePacketCapturesParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *DeleteJobParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *ServiceInstanceDeprovisionParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func (o *PostHostStorageSectorsDeleteMerklerootParams) SetDefaults() {\n\t// no default values defined for this parameter\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithTimeout adds the timeout to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) WithTimeout(timeout time.Duration) *VvolBindingDeleteParams {
o.SetTimeout(timeout)
return o
}
|
[
"func (o *VvolBindingDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *VolumeDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewVvolBindingDeleteParamsWithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *VolumeDeleteParams) WithTimeout(timeout time.Duration) *VolumeDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *PcloudV2VolumescloneDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteProtocolUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeletePackageVersionParams) WithTimeout(timeout time.Duration) *DeletePackageVersionParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewVolumeDeleteParamsWithTimeout(timeout time.Duration) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *DeleteBlueprintRequestUsingDELETE1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteProtocolUsingDELETEParams) WithTimeout(timeout time.Duration) *DeleteProtocolUsingDELETEParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *DeleteDeviceUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RevokeDeviceCertificateUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeletePackageVersionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SMSTemplatesByTemplateIDDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ComponentDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewDeletePackageVersionParamsWithTimeout(timeout time.Duration) *DeletePackageVersionParams {\n\tvar ()\n\treturn &DeletePackageVersionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *DeleteUsingDELETE1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteTriggerParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetTimeout adds the timeout to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
|
[
"func (o *VolumeDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PcloudV2VolumescloneDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteProtocolUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteRuntimesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeletePackageVersionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteDebugRequestParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SMSTemplatesByTemplateIDDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteDeviceUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteBlueprintRequestUsingDELETE1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteTriggerParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteVersionControlRequestParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ComponentDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RevokeDeviceCertificateUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *VvolBindingDeleteParams) WithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *DeleteUsingDELETE1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteLoadBalancerPoolParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeletePoolProjectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteSubnetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithContext adds the context to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) WithContext(ctx context.Context) *VvolBindingDeleteParams {
o.SetContext(ctx)
return o
}
|
[
"func (obj *Sys) BindingVipDeleteWithContext(ctx context.Context, input CoSysBindingVip, _opt ...map[string]string) (output Result, err error) {\n\tvar inputMarshal []byte\n\tinputMarshal, err = proto.Marshal(&input)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\n\tresp := new(requestf.ResponsePacket)\n\n\terr = obj.s.Tars_invoke(ctx, 0, \"BindingVipDelete\", inputMarshal, _status, _context, resp)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\tif err = proto.Unmarshal(tools.Int8ToByte(resp.SBuffer), &output); err != nil {\n\t\treturn output, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\t}\n\n\treturn output, nil\n}",
"func (_obj *DataService) DeleteApplyWithContext(tarsCtx context.Context, wx_id string, club_id string, affectRows *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(club_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*affectRows), 3)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"deleteApply\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_int32(&(*affectRows), 3, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (_obj *Apichannels) Channels_deleteChannelWithContext(tarsCtx context.Context, params *TLchannels_deleteChannel, _opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_deleteChannel\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (_obj *DataService) DeleteActivityRecordWithContext(tarsCtx context.Context, activity_id string, wx_id string, affectRows *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(activity_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(wx_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*affectRows), 3)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"deleteActivityRecord\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_int32(&(*affectRows), 3, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (_obj *DataService) DeleteApplyOneWayWithContext(tarsCtx context.Context, wx_id string, club_id string, affectRows *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(wx_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(club_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*affectRows), 3)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"deleteApply\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (_obj *WebApiAuth) SysConfig_DeleteWithContext(tarsCtx context.Context, req *SysConfig, res *bool, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_bool((*res), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"SysConfig_Delete\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_bool(&(*res), 2, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}",
"func (t *TrudyPipe) DeleteContext(key string) {\n\tt.pipeMutex.Lock()\n\tdelete(t.KV, key)\n\tt.pipeMutex.Unlock()\n}",
"func (_obj *DataService) DeleteActivityWithContext(tarsCtx context.Context, activity_id string, affectRows *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(activity_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*affectRows), 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"deleteActivity\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_int32(&(*affectRows), 2, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (o *VolumeDeleteParams) WithContext(ctx context.Context) *VolumeDeleteParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (_obj *Apichannels) Channels_deleteChannelOneWayWithContext(tarsCtx context.Context, params *TLchannels_deleteChannel, _opt ...map[string]string) (ret Updates, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"channels_deleteChannel\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (o *VvolBindingDeleteParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (obj *Sys) VipDeleteWithContext(ctx context.Context, input CoSysVip, _opt ...map[string]string) (output Result, err error) {\n\tvar inputMarshal []byte\n\tinputMarshal, err = proto.Marshal(&input)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\n\tresp := new(requestf.ResponsePacket)\n\n\terr = obj.s.Tars_invoke(ctx, 0, \"VipDelete\", inputMarshal, _status, _context, resp)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\tif err = proto.Unmarshal(tools.Int8ToByte(resp.SBuffer), &output); err != nil {\n\t\treturn output, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\t}\n\n\treturn output, nil\n}",
"func (_obj *WebApiAuth) SysConfig_DeleteOneWayWithContext(tarsCtx context.Context, req *SysConfig, res *bool, _opt ...map[string]string) (err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = req.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = _os.Write_bool((*res), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"SysConfig_Delete\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn nil\n}",
"func (C *SimpleClient) DeleteContext(ctx context.Context, URL string, Headers map[string][]string) (*http.Response, error) {\n\n\t// Create the request\n\treq, err := NewRequestWithContext(ctx, http.MethodDelete, URL, Headers, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Perform the request\n\treturn C.Do(req)\n}",
"func (_obj *Apichannels) Channels_deleteHistoryWithContext(tarsCtx context.Context, params *TLchannels_deleteHistory, _opt ...map[string]string) (ret Bool, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = params.WriteBlock(_os, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"channels_deleteHistory\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = ret.ReadBlock(_is, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (obj *User) UserBenefitDiyDeleteWithContext(ctx context.Context, input CoUserBenefitDiy, _opt ...map[string]string) (output Result, err error) {\n\tvar inputMarshal []byte\n\tinputMarshal, err = proto.Marshal(&input)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\n\tresp := new(requestf.ResponsePacket)\n\n\terr = obj.s.Tars_invoke(ctx, 0, \"UserBenefitDiyDelete\", inputMarshal, _status, _context, resp)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\tif err = proto.Unmarshal(tools.Int8ToByte(resp.SBuffer), &output); err != nil {\n\t\treturn output, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\t}\n\n\treturn output, nil\n}",
"func (h *Creator) DeleteWith(model interface{}) *EndpointHandler {\n\treturn &EndpointHandler{\n\t\tmodel: h.c.MustGetModelStruct(model),\n\t\thandler: h.handleDelete,\n\t}\n}",
"func (mr *MockShieldMockRecorder) DeleteProtectionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{arg0, arg1}, arg2...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteProtectionWithContext\", reflect.TypeOf((*MockShield)(nil).DeleteProtectionWithContext), varargs...)\n}",
"func (_obj *DataService) DeleteActivityRecordOneWayWithContext(tarsCtx context.Context, activity_id string, wx_id string, affectRows *int32, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(activity_id, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_string(wx_id, 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_int32((*affectRows), 3)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"deleteActivityRecord\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetContext adds the context to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) SetContext(ctx context.Context) {
o.Context = ctx
}
|
[
"func (o *VvolBindingDeleteParams) WithContext(ctx context.Context) *VvolBindingDeleteParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (t *TrudyPipe) DeleteContext(key string) {\n\tt.pipeMutex.Lock()\n\tdelete(t.KV, key)\n\tt.pipeMutex.Unlock()\n}",
"func (o *VolumeDeleteParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PcloudV2VolumescloneDeleteParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *DeleteProtocolUsingDELETEParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (k *KMIP) DeleteContext() map[string]string {\n\treturn nil\n}",
"func (o *ComponentDeleteParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *SMSTemplatesByTemplateIDDeleteParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (obj *Sys) BindingVipDeleteWithContext(ctx context.Context, input CoSysBindingVip, _opt ...map[string]string) (output Result, err error) {\n\tvar inputMarshal []byte\n\tinputMarshal, err = proto.Marshal(&input)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\n\tresp := new(requestf.ResponsePacket)\n\n\terr = obj.s.Tars_invoke(ctx, 0, \"BindingVipDelete\", inputMarshal, _status, _context, resp)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\tif err = proto.Unmarshal(tools.Int8ToByte(resp.SBuffer), &output); err != nil {\n\t\treturn output, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\t}\n\n\treturn output, nil\n}",
"func (o *NegotiableQuoteCouponManagementV1RemoveDeleteParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *RevokeDeviceCertificateUsingDELETEParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (C *SimpleClient) DeleteContext(ctx context.Context, URL string, Headers map[string][]string) (*http.Response, error) {\n\n\t// Create the request\n\treq, err := NewRequestWithContext(ctx, http.MethodDelete, URL, Headers, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Perform the request\n\treturn C.Do(req)\n}",
"func (o *DeleteLolChatV1SettingsByKeyParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func DeleteContext(oldContext unsafe.Pointer) unsafe.Pointer {\n\tret, _, _ := syscall.Syscall(gpDeleteContext, 1, uintptr(oldContext), 0, 0)\n\treturn (unsafe.Pointer)(ret)\n}",
"func (o *LedgerVoucherAttachmentDeleteAttachmentParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *DeleteTriggerParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ReposDeleteParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *VolumeDeleteParams) WithContext(ctx context.Context) *VolumeDeleteParams {\n\to.SetContext(ctx)\n\treturn o\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithHTTPClient adds the HTTPClient to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) WithHTTPClient(client *http.Client) *VvolBindingDeleteParams {
o.SetHTTPClient(client)
return o
}
|
[
"func (o *VvolBindingDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *VolumeDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *VolumeDeleteParams) WithHTTPClient(client *http.Client) *VolumeDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteProtocolUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudV2VolumescloneDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RevokeDeviceCertificateUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteDebugRequestParams) WithHTTPClient(client *http.Client) *DeleteDebugRequestParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewVolumeDeleteParamsWithHTTPClient(client *http.Client) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *DeleteKeyPairsParams) WithHTTPClient(client *http.Client) *DeleteKeyPairsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteTagParams) WithHTTPClient(client *http.Client) *DeleteTagParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteDeviceUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteVersionControlRequestParams) WithHTTPClient(client *http.Client) *DeleteVersionControlRequestParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *LedgerVoucherAttachmentDeleteAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SMSTemplatesByTemplateIDDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewVvolBindingDeleteParamsWithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func DeleteClient(nbmaster string, httpClient *http.Client, jwt string) {\r\n fmt.Printf(\"\\nSending a DELETE request to delete client %s from policy %s...\\n\", testClientName, testPolicyName)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/\" + policiesUri + testPolicyName + \"/clients/\" + testClientName\r\n\r\n request, _ := http.NewRequest(http.MethodDelete, uri, nil)\r\n request.Header.Add(\"Authorization\", jwt);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to delete client.\\n\")\r\n } else {\r\n if response.StatusCode != 204 {\r\n printErrorResponse(response)\r\n } else {\r\n fmt.Printf(\"%s deleted successfully.\\n\", testClientName);\r\n }\r\n }\r\n}",
"func (o *BundleProductOptionRepositoryV1DeleteByIDDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *HTTPDeletePersistenceServiceItemParams) WithHTTPClient(client *http.Client) *HTTPDeletePersistenceServiceItemParams {\n\to.SetHTTPClient(client)\n\treturn o\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetHTTPClient adds the HTTPClient to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
|
[
"func (o *VolumeDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteProtocolUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PcloudV2VolumescloneDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RevokeDeviceCertificateUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteDeviceUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ComponentDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SMSTemplatesByTemplateIDDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteDebugRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteVersionControlRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *VvolBindingDeleteParams) WithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteTagParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BundleProductOptionRepositoryV1DeleteByIDDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteDataSourceParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteBlueprintRequestUsingDELETE1Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteKeyPairsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteVMInterfaceParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *LedgerVoucherAttachmentDeleteAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *NodesDelTagByIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithDeleteAllReferences adds the deleteAllReferences to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) WithDeleteAllReferences(deleteAllReferences *bool) *VvolBindingDeleteParams {
o.SetDeleteAllReferences(deleteAllReferences)
return o
}
|
[
"func (o *VvolBindingDeleteParams) SetDeleteAllReferences(deleteAllReferences *bool) {\n\to.DeleteAllReferences = deleteAllReferences\n}",
"func (o *VvolBindingDeleteParams) SetDefaults() {\n\tvar (\n\t\tdeleteAllReferencesDefault = bool(false)\n\t)\n\n\tval := VvolBindingDeleteParams{\n\t\tDeleteAllReferences: &deleteAllReferencesDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (o *VvolBindingDeleteParams) WithDefaults() *VvolBindingDeleteParams {\n\to.SetDefaults()\n\treturn o\n}",
"func (c *WrapperClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {\n\tif u, ok := obj.(Wrapper); ok {\n\t\treturn c.kube.DeleteAllOf(ctx, u.GetUnstructured(), opts...)\n\t}\n\treturn c.kube.DeleteAllOf(ctx, obj, opts...)\n}",
"func (fkw *FakeClientWrapper) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...k8sCl.DeleteAllOfOption) error {\n\treturn fkw.client.DeleteAllOf(ctx, obj, opts...)\n}",
"func (o *VvolBindingDeleteParams) WithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (d *Demo) DeleteAll(g *gom.Gom) {\n\ttoolkit.Println(\"===== Delete All =====\")\n\n\tvar err error\n\tif d.useParams {\n\t\t_, err = g.Set(&gom.SetParams{\n\t\t\tTableName: \"hero\",\n\t\t\tFilter: gom.EndWith(\"Name\", \"man\"),\n\t\t\tTimeout: 10,\n\t\t}).Cmd().DeleteAll()\n\t} else {\n\t\t_, err = g.Set(nil).Table(\"hero\").Timeout(10).Filter(gom.EndWith(\"Name\", \"man\")).Cmd().DeleteAll()\n\t}\n\n\tif err != nil {\n\t\ttoolkit.Println(err.Error())\n\t\treturn\n\t}\n}",
"func (o BlockSlice) DeleteAll(exec boil.Executor) error {\n\tif len(o) == 0 {\n\t\treturn nil\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), blockPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM `block` WHERE \" +\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, blockPrimaryKeyColumns, len(o))\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"model: unable to delete all from block slice\")\n\t}\n\n\treturn nil\n}",
"func NewVvolBindingDeleteParams() *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (m *MockVirtualMeshCertificateSigningRequestClient) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (r *FakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {\n\t// TODO (covariance) implement me!\n\tpanic(\"not implemented\")\n}",
"func (m *MockVirtualMeshCertificateSigningRequestWriter) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (g *Group) DeleteAll() {\n\tg.Equivalents = list.New()\n\tg.Fingerprints = make(map[string]*list.Element)\n\tg.FirstExpr = make(map[Operand]*list.Element)\n\tg.SelfFingerprint = \"\"\n}",
"func DeleteAll(kube common.Kube, name string, kind string, listKind string, gv schema.GroupVersion, namespaces []string) error {\n\n\tiface, err := kube.DynamicInterface(gv, kind, listKind)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tapiResource := &metav1.APIResource{\n\t\tName: name,\n\t\tGroup: gv.Group,\n\t\tVersion: gv.Version,\n\t\tNamespaced: true,\n\t\tKind: kind,\n\t}\n\n\tfor _, ns := range namespaces {\n\t\tlog.Infof(\"Deleting all resources: name:%s (%s/%s), kind:%s, ns:%s\",\n\t\t\tname, gv.Group, gv.Version, kind, ns)\n\t\tif e := iface.Resource(apiResource, ns).\n\t\t\tDeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{}); e != nil && err == nil {\n\t\t\terr = e\n\t\t}\n\t}\n\n\treturn err\n}",
"func (o BlockSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) {\n\tif len(o) == 0 {\n\t\treturn 0, nil\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), blockPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM \\\"block\\\" WHERE \" +\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, blockPrimaryKeyColumns, len(o))\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, args)\n\t}\n\tresult, err := exec.ExecContext(ctx, sql, args...)\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: unable to delete all from block slice\")\n\t}\n\n\trowsAff, err := result.RowsAffected()\n\tif err != nil {\n\t\treturn 0, errors.Wrap(err, \"models: failed to get rows affected by deleteall for block\")\n\t}\n\n\treturn rowsAff, nil\n}",
"func (mr *MockVirtualMeshCertificateSigningRequestClientMockRecorder) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx interface{}, opts ...interface{}) *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\tvarargs := append([]interface{}{ctx}, opts...)\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", reflect.TypeOf((*MockVirtualMeshCertificateSigningRequestClient)(nil).DeleteAllOfVirtualMeshCertificateSigningRequest), varargs...)\n}",
"func NewVvolBindingDeleteParamsWithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (mg *MongoDAO) DeleteAll(selector map[string]interface{}) error {\n\tsession, sessionError := GetMongoConnection(mg.hostName)\n\tif sessionError != nil {\n\t\treturn sessionError\n\t}\n\n\tif mg.hostName == \"\" {\n\t\tmg.hostName = defaultHost\n\t}\n\tdb, ok := config[mg.hostName]\n\tif !ok {\n\t\treturn errors.New(\"No_Configuration_Found_For_Host: \" + mg.hostName)\n\t}\n\tcollection := session.Database(db.Database).Collection(mg.collectionName)\n\t_, deleteError := collection.DeleteMany(context.Background(), selector)\n\tif deleteError != nil {\n\t\treturn deleteError\n\t}\n\treturn deleteError\n}",
"func (o ModeratorSlice) DeleteAllGP() {\n\terr := o.DeleteAll(boil.GetDB())\n\tif err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetDeleteAllReferences adds the deleteAllReferences to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) SetDeleteAllReferences(deleteAllReferences *bool) {
o.DeleteAllReferences = deleteAllReferences
}
|
[
"func (o *VvolBindingDeleteParams) WithDeleteAllReferences(deleteAllReferences *bool) *VvolBindingDeleteParams {\n\to.SetDeleteAllReferences(deleteAllReferences)\n\treturn o\n}",
"func (o *VvolBindingDeleteParams) SetDefaults() {\n\tvar (\n\t\tdeleteAllReferencesDefault = bool(false)\n\t)\n\n\tval := VvolBindingDeleteParams{\n\t\tDeleteAllReferences: &deleteAllReferencesDefault,\n\t}\n\n\tval.timeout = o.timeout\n\tval.Context = o.Context\n\tval.HTTPClient = o.HTTPClient\n\t*o = val\n}",
"func (c *WrapperClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {\n\tif u, ok := obj.(Wrapper); ok {\n\t\treturn c.kube.DeleteAllOf(ctx, u.GetUnstructured(), opts...)\n\t}\n\treturn c.kube.DeleteAllOf(ctx, obj, opts...)\n}",
"func (o *VvolBindingDeleteParams) WithDefaults() *VvolBindingDeleteParams {\n\to.SetDefaults()\n\treturn o\n}",
"func (g *Group) DeleteAll() {\n\tg.Equivalents = list.New()\n\tg.Fingerprints = make(map[string]*list.Element)\n\tg.FirstExpr = make(map[Operand]*list.Element)\n\tg.SelfFingerprint = \"\"\n}",
"func (d *Demo) DeleteAll(g *gom.Gom) {\n\ttoolkit.Println(\"===== Delete All =====\")\n\n\tvar err error\n\tif d.useParams {\n\t\t_, err = g.Set(&gom.SetParams{\n\t\t\tTableName: \"hero\",\n\t\t\tFilter: gom.EndWith(\"Name\", \"man\"),\n\t\t\tTimeout: 10,\n\t\t}).Cmd().DeleteAll()\n\t} else {\n\t\t_, err = g.Set(nil).Table(\"hero\").Timeout(10).Filter(gom.EndWith(\"Name\", \"man\")).Cmd().DeleteAll()\n\t}\n\n\tif err != nil {\n\t\ttoolkit.Println(err.Error())\n\t\treturn\n\t}\n}",
"func (fkw *FakeClientWrapper) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...k8sCl.DeleteAllOfOption) error {\n\treturn fkw.client.DeleteAllOf(ctx, obj, opts...)\n}",
"func (mg *MongoDAO) DeleteAll(selector map[string]interface{}) error {\n\tsession, sessionError := GetMongoConnection(mg.hostName)\n\tif sessionError != nil {\n\t\treturn sessionError\n\t}\n\n\tif mg.hostName == \"\" {\n\t\tmg.hostName = defaultHost\n\t}\n\tdb, ok := config[mg.hostName]\n\tif !ok {\n\t\treturn errors.New(\"No_Configuration_Found_For_Host: \" + mg.hostName)\n\t}\n\tcollection := session.Database(db.Database).Collection(mg.collectionName)\n\t_, deleteError := collection.DeleteMany(context.Background(), selector)\n\tif deleteError != nil {\n\t\treturn deleteError\n\t}\n\treturn deleteError\n}",
"func (o BlockSlice) DeleteAll(exec boil.Executor) error {\n\tif len(o) == 0 {\n\t\treturn nil\n\t}\n\n\tvar args []interface{}\n\tfor _, obj := range o {\n\t\tpkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), blockPrimaryKeyMapping)\n\t\targs = append(args, pkeyArgs...)\n\t}\n\n\tsql := \"DELETE FROM `block` WHERE \" +\n\t\tstrmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 0, blockPrimaryKeyColumns, len(o))\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, args)\n\t}\n\n\t_, err := exec.Exec(sql, args...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"model: unable to delete all from block slice\")\n\t}\n\n\treturn nil\n}",
"func (d *Dal) ClearCheckReferences(memberID string) error {\n\t// A recursive delete removes all child entries AND the dir itself; this is\n\t// a no-go for us, so we first perform a recursive fetch and then remove\n\t// each individual entry. See: https://github.com/coreos/etcd/issues/2385\n\tdata, err := d.Get(\"/cluster/members/\"+memberID+\"/config/\", &GetOptions{\n\t\tRecurse: true,\n\t})\n\tif err != nil {\n\t\tif !client.IsKeyNotFound(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor key, _ := range data {\n\t\t_, err := d.KeysAPI.Delete(\n\t\t\tcontext.Background(),\n\t\t\tkey,\n\t\t\t&client.DeleteOptions{\n\t\t\t\tRecursive: false,\n\t\t\t},\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to delete '%v' during ClearCheckReferences: %v\", key, err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o ModeratorSlice) DeleteAllGP() {\n\terr := o.DeleteAll(boil.GetDB())\n\tif err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}",
"func (m *MemBook) deleteAll() {\n\n\t// Reassign the Book data so it's take by the garbage collector.\n\tm.books = make(map[string]*models.Book)\n}",
"func TestDeleteRefs(t *testing.T) {\n\tdb := NewInMemDatabase()\n\n\tvmi1 := new(types.VirtualMachineInterface)\n\tvmi1.SetUuid(uuid.New())\n\tvmi1.SetName(\"port1\")\n\tassert.NoError(t, db.Put(vmi1, nil, getReferenceList(vmi1)))\n\n\tvmi2 := new(types.VirtualMachineInterface)\n\tvmi2.SetUuid(uuid.New())\n\tvmi2.SetName(\"port2\")\n\tassert.NoError(t, db.Put(vmi2, nil, getReferenceList(vmi2)))\n\n\tfip := new(types.FloatingIp)\n\tfip.SetUuid(uuid.New())\n\tfip.SetName(\"fip\")\n\tfip.AddVirtualMachineInterface(vmi1)\n\tfip.AddVirtualMachineInterface(vmi2)\n\tassert.NoError(t, db.Put(fip, nil, getReferenceList(fip)))\n\n\tassert.Error(t, db.Delete(vmi1))\n\n\tresult, err := db.GetBackReferences(parseUID(vmi2.GetUuid()), \"floating_ip\")\n\tassert.NoError(t, err)\n\tassert.Len(t, result, 1)\n\n\tassert.NoError(t, db.Delete(fip))\n\n\tresult, err = db.GetBackReferences(parseUID(vmi2.GetUuid()), \"floating_ip\")\n\tassert.NoError(t, err)\n\tassert.Len(t, result, 0)\n\n\tassert.NoError(t, db.Delete(vmi1))\n\tassert.NoError(t, db.Delete(vmi2))\n}",
"func DeleteAll(kube common.Kube, name string, kind string, listKind string, gv schema.GroupVersion, namespaces []string) error {\n\n\tiface, err := kube.DynamicInterface(gv, kind, listKind)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tapiResource := &metav1.APIResource{\n\t\tName: name,\n\t\tGroup: gv.Group,\n\t\tVersion: gv.Version,\n\t\tNamespaced: true,\n\t\tKind: kind,\n\t}\n\n\tfor _, ns := range namespaces {\n\t\tlog.Infof(\"Deleting all resources: name:%s (%s/%s), kind:%s, ns:%s\",\n\t\t\tname, gv.Group, gv.Version, kind, ns)\n\t\tif e := iface.Resource(apiResource, ns).\n\t\t\tDeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{}); e != nil && err == nil {\n\t\t\terr = e\n\t\t}\n\t}\n\n\treturn err\n}",
"func (r *FakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {\n\t// TODO (covariance) implement me!\n\tpanic(\"not implemented\")\n}",
"func (q supportQuery) DeleteAllP(exec boil.Executor) {\n\terr := q.DeleteAll(exec)\n\tif err != nil {\n\t\tpanic(boil.WrapErr(err))\n\t}\n}",
"func (m *MockVirtualMeshCertificateSigningRequestClient) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func (m *MockVirtualMeshCertificateSigningRequestWriter) DeleteAllOfVirtualMeshCertificateSigningRequest(ctx context.Context, opts ...client.DeleteAllOfOption) error {\n\tm.ctrl.T.Helper()\n\tvarargs := []interface{}{ctx}\n\tfor _, a := range opts {\n\t\tvarargs = append(varargs, a)\n\t}\n\tret := m.ctrl.Call(m, \"DeleteAllOfVirtualMeshCertificateSigningRequest\", varargs...)\n\tret0, _ := ret[0].(error)\n\treturn ret0\n}",
"func DeleteAll(db *sqlx.DB) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := tx.Exec(deleteDoc); err != nil {\n\t\tif err := tx.Rollback(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn err\n\t}\n\n\treturn tx.Commit()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithProtocolEndpointUUID adds the protocolEndpointUUID to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) WithProtocolEndpointUUID(protocolEndpointUUID string) *VvolBindingDeleteParams {
o.SetProtocolEndpointUUID(protocolEndpointUUID)
return o
}
|
[
"func (o *VvolBindingDeleteParams) SetProtocolEndpointUUID(protocolEndpointUUID string) {\n\to.ProtocolEndpointUUID = protocolEndpointUUID\n}",
"func (a *DeprecatedBusManagementRouterApiService) DeleteBindingUsingDELETE(ctx _context.Context, routingKeyFilter string, fifoName string) ApiDeleteBindingUsingDELETERequest {\n\treturn ApiDeleteBindingUsingDELETERequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\troutingKeyFilter: routingKeyFilter,\n\t\tfifoName: fifoName,\n\t}\n}",
"func (o *DeleteProtocolUsingDELETEParams) WithProtocolID(protocolID string) *DeleteProtocolUsingDELETEParams {\n\to.SetProtocolID(protocolID)\n\treturn o\n}",
"func NewDeleteProtocolUsingDELETEParams() *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *VvolBindingDeleteParams) WithVvolUUID(vvolUUID string) *VvolBindingDeleteParams {\n\to.SetVvolUUID(vvolUUID)\n\treturn o\n}",
"func (ec *ExtensionClient) DeleteEndpoint(extensionID, serviceID, URL string) error {\n\n\turl := url.QueryEscape(URL)\n\trequest, err := extensionc.BuildDeleteEndpointPayload(extensionID, serviceID, url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = ec.c.DeleteEndpoint()(context.Background(), request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func NewDeleteProtocolUsingDELETEParamsWithHTTPClient(client *http.Client) *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (a *DefaultApiService) DeleteEndpoint(ctx _context.Context, id string) ApiDeleteEndpointRequest {\n\treturn ApiDeleteEndpointRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}",
"func DeleteEndpoint(svc Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(api.DeleteRequest)\n\t\treturn svc.Delete(ctx, &req, false)\n\t}\n}",
"func (o *DeleteProtocolUsingDELETEParams) WithTimeout(timeout time.Duration) *DeleteProtocolUsingDELETEParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewDeleteProtocolUsingDELETEParamsWithTimeout(timeout time.Duration) *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func DeleteEndpoint(serviceAccountProvider provider.ServiceAccountProvider, projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq, ok := request.(deleteReq)\n\t\tif !ok {\n\t\t\treturn nil, errors.NewBadRequest(\"invalid request\")\n\t\t}\n\t\terr := req.Validate()\n\t\tif err != nil {\n\t\t\treturn nil, errors.NewBadRequest(err.Error())\n\t\t}\n\t\tuserInfo, err := userInfoGetter(ctx, req.ProjectID)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\t\t// check if project exist\n\t\tif _, err := projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{}); err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\t// check if service account exist before deleting it\n\t\tif _, err := serviceAccountProvider.Get(userInfo, req.ServiceAccountID, nil); err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\tif err := serviceAccountProvider.Delete(userInfo, req.ServiceAccountID); err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\treturn nil, nil\n\t}\n}",
"func DeleteEndpoint(route string, handlers ...httpx.Handler) Endpoint {\n\treturn Endpoint{\n\t\tRoute: route,\n\t\tDelete: &Pipeline{\n\t\t\tHandlers: handlers,\n\t\t},\n\t}\n}",
"func DeleteEndpoint(w http.ResponseWriter, r *http.Request) {\r\n\tr.Header.Set(\"Content-Type\", \"application/json, charset=UTF-8\")\r\n\tvar dc DeleteConfig\r\n\r\n\tswitch r.Method {\r\n\tcase \"GET\":\r\n\t\tparams := mux.Vars(r)\r\n\t\tdc.Default()\r\n\t\tdc.Endpoint = params[\"endpoint_name\"]\r\n\t\tdc.EndpointList = append(dc.EndpointList, dc.Endpoint)\r\n\tcase \"POST\":\r\n\t\tbody, err := ioutil.ReadAll(r.Body)\r\n\t\tif err != nil {\r\n\t\t\tapi.LogDebug(api.DEBUG, \"[+] POST /delete/endpoint, failed to read request\")\r\n\t\t\tfmt.Fprintln(w, api.HttpFailureMessage(\"Failed to read HTTP request\"))\r\n\t\t\treturn\r\n\t\t}\r\n\t\tdc.LoadParams(body)\r\n\t}\r\n\r\n\t// Verify Endpoint is provided in request body\r\n\tif len(dc.EndpointList) == 0 {\r\n\t\tapi.LogDebug(api.DEBUG, \"[+] POST /delete/endpoint - endpoint is required\")\r\n\t\tfmt.Fprintln(w, api.HttpFailureMessage(\"Endpoint required\"))\r\n\t\treturn\r\n\t}\r\n\r\n\tvar query elastic.Query\r\n\r\n\t// Convert EndpointList to interface{} slice\r\n\tendpoints := make([]interface{}, len(dc.EndpointList))\r\n\tfor i, v := range dc.EndpointList {\r\n\t\tendpoints[i] = v\r\n\t}\r\n\r\n\tquery = elastic.NewBoolQuery().\r\n\t\tMust(elastic.NewWildcardQuery(\"CaseInfo.CaseName\", dc.CaseName),\r\n\t\t\telastic.NewTermsQuery(\"ComputerName.keyword\", endpoints...))\r\n\r\n\tdeleteEndpointByQuery(w, r, query, \"DeleteEndpoint\")\r\n\r\n}",
"func MakeDeleteVideoEndpoint(s VideoService) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\tid, ok := request.(string)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Illegal video ID parameter\")\n\t\t}\n\t\tif err := s.Delete(ctx, id); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn basicResponse{true, nil}, nil\n\t}\n}",
"func (client *Client) DeleteBasicEndpointWithOptions(request *DeleteBasicEndpointRequest, runtime *util.RuntimeOptions) (_result *DeleteBasicEndpointResponse, _err error) {\n\t_err = util.ValidateModel(request)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\tquery := map[string]interface{}{}\n\tif !tea.BoolValue(util.IsUnset(request.ClientToken)) {\n\t\tquery[\"ClientToken\"] = request.ClientToken\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.EndpointGroupId)) {\n\t\tquery[\"EndpointGroupId\"] = request.EndpointGroupId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.EndpointId)) {\n\t\tquery[\"EndpointId\"] = request.EndpointId\n\t}\n\n\tif !tea.BoolValue(util.IsUnset(request.RegionId)) {\n\t\tquery[\"RegionId\"] = request.RegionId\n\t}\n\n\treq := &openapi.OpenApiRequest{\n\t\tQuery: openapiutil.Query(query),\n\t}\n\tparams := &openapi.Params{\n\t\tAction: tea.String(\"DeleteBasicEndpoint\"),\n\t\tVersion: tea.String(\"2019-11-20\"),\n\t\tProtocol: tea.String(\"HTTPS\"),\n\t\tPathname: tea.String(\"/\"),\n\t\tMethod: tea.String(\"POST\"),\n\t\tAuthType: tea.String(\"AK\"),\n\t\tStyle: tea.String(\"RPC\"),\n\t\tReqBodyType: tea.String(\"formData\"),\n\t\tBodyType: tea.String(\"json\"),\n\t}\n\t_result = &DeleteBasicEndpointResponse{}\n\t_body, _err := client.CallApi(params, req, runtime)\n\tif _err != nil {\n\t\treturn _result, _err\n\t}\n\t_err = tea.Convert(_body, &_result)\n\treturn _result, _err\n}",
"func (a AppblueprintApi) AppBlueprintsUuidDelete(uuid string) (*APIResponse, error) {\n\n\tvar httpMethod = \"Delete\"\n\t// create path and map variables\n\tpath := a.Configuration.BasePath + \"/app_blueprints/{uuid}\"\n\tpath = strings.Replace(path, \"{\"+\"uuid\"+\"}\", fmt.Sprintf(\"%v\", uuid), -1)\n\n\theaderParams := make(map[string]string)\n\tqueryParams := url.Values{}\n\tformParams := make(map[string]string)\n\tvar postBody interface{}\n\tvar fileName string\n\tvar fileBytes []byte\n\t// authentication (basicAuth) required\n\n\t// http basic authentication required\n\tif a.Configuration.Username != \"\" || a.Configuration.Password != \"\" {\n\t\theaderParams[\"Authorization\"] = \"Basic \" + a.Configuration.GetBasicAuthEncodedString()\n\t}\n\t// add default headers if any\n\tfor key := range a.Configuration.DefaultHeader {\n\t\theaderParams[key] = a.Configuration.DefaultHeader[key]\n\t}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := a.Configuration.APIClient.SelectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\theaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\n\t\t\"application/json\",\n\t}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := a.Configuration.APIClient.SelectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\theaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\n\thttpResponse, err := a.Configuration.APIClient.CallAPI(path, httpMethod, postBody, headerParams, queryParams, formParams, fileName, fileBytes)\n\tif err != nil {\n\t\treturn NewAPIResponse(httpResponse.RawResponse), err\n\t}\n\n\treturn NewAPIResponse(httpResponse.RawResponse), err\n}",
"func DeleteCommunicationEndPoint(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tvar communication Communication\n\tif err := json.NewDecoder(r.Body).Decode(&communication); err != nil {\n\t\trespondWithError(w, http.StatusBadRequest, \"Invalid request payload\")\n\t\treturn\n\t}\n\tif err := dao.Delete(communication); err != nil {\n\t\trespondWithError(w, http.StatusInternalServerError, err.Error())\n\t\treturn\n\t}\n\trespondWithJson(w, http.StatusOK, map[string]string{\"result\": \"success\"})\n}",
"func (o *VvolBindingDeleteParams) WithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetProtocolEndpointUUID adds the protocolEndpointUuid to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) SetProtocolEndpointUUID(protocolEndpointUUID string) {
o.ProtocolEndpointUUID = protocolEndpointUUID
}
|
[
"func (o *VvolBindingDeleteParams) WithProtocolEndpointUUID(protocolEndpointUUID string) *VvolBindingDeleteParams {\n\to.SetProtocolEndpointUUID(protocolEndpointUUID)\n\treturn o\n}",
"func (ec *ExtensionClient) DeleteEndpoint(extensionID, serviceID, URL string) error {\n\n\turl := url.QueryEscape(URL)\n\trequest, err := extensionc.BuildDeleteEndpointPayload(extensionID, serviceID, url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = ec.c.DeleteEndpoint()(context.Background(), request)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o *DeleteProtocolUsingDELETEParams) WithProtocolID(protocolID string) *DeleteProtocolUsingDELETEParams {\n\to.SetProtocolID(protocolID)\n\treturn o\n}",
"func (a *DefaultApiService) DeleteEndpoint(ctx _context.Context, id string) ApiDeleteEndpointRequest {\n\treturn ApiDeleteEndpointRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tid: id,\n\t}\n}",
"func (m *peerMap) deleteEndpoint(ep *endpoint) {\n\tif ep == nil {\n\t\treturn\n\t}\n\tep.stopAndReset()\n\n\tepDisco := ep.disco.Load()\n\n\tpi := m.byNodeKey[ep.publicKey]\n\tif epDisco != nil {\n\t\tdelete(m.nodesOfDisco[epDisco.key], ep.publicKey)\n\t}\n\tdelete(m.byNodeKey, ep.publicKey)\n\tif pi == nil {\n\t\t// Kneejerk paranoia from earlier issue 2801.\n\t\t// Unexpected. But no logger plumbed here to log so.\n\t\treturn\n\t}\n\tfor ip := range pi.ipPorts {\n\t\tdelete(m.byIPPort, ip)\n\t}\n}",
"func DeleteEndpoint(route string, handlers ...httpx.Handler) Endpoint {\n\treturn Endpoint{\n\t\tRoute: route,\n\t\tDelete: &Pipeline{\n\t\t\tHandlers: handlers,\n\t\t},\n\t}\n}",
"func DeleteEndpoint(serviceAccountProvider provider.ServiceAccountProvider, projectProvider provider.ProjectProvider, userInfoGetter provider.UserInfoGetter) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq, ok := request.(deleteReq)\n\t\tif !ok {\n\t\t\treturn nil, errors.NewBadRequest(\"invalid request\")\n\t\t}\n\t\terr := req.Validate()\n\t\tif err != nil {\n\t\t\treturn nil, errors.NewBadRequest(err.Error())\n\t\t}\n\t\tuserInfo, err := userInfoGetter(ctx, req.ProjectID)\n\t\tif err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\t\t// check if project exist\n\t\tif _, err := projectProvider.Get(userInfo, req.ProjectID, &provider.ProjectGetOptions{}); err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\t// check if service account exist before deleting it\n\t\tif _, err := serviceAccountProvider.Get(userInfo, req.ServiceAccountID, nil); err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\tif err := serviceAccountProvider.Delete(userInfo, req.ServiceAccountID); err != nil {\n\t\t\treturn nil, common.KubernetesErrorToHTTPError(err)\n\t\t}\n\n\t\treturn nil, nil\n\t}\n}",
"func DeleteEndpoint(svc Service) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\treq := request.(api.DeleteRequest)\n\t\treturn svc.Delete(ctx, &req, false)\n\t}\n}",
"func DeleteEndpoint() error {\n\tvar err error\n\n\tclient := &http.Client{}\n\tdeleteEndpoint, err := http.NewRequest(http.MethodDelete, urlEndpoint+\"/\"+fmt.Sprintf(\"%s\", id), nil)\n\thelper.LogPanicln(err)\n\tdeleteResponse, err = client.Do(deleteEndpoint)\n\thelper.LogPanicln(err)\n\n\treturn nil\n}",
"func DeleteEndpoint(w http.ResponseWriter, r *http.Request) {\r\n\tr.Header.Set(\"Content-Type\", \"application/json, charset=UTF-8\")\r\n\tvar dc DeleteConfig\r\n\r\n\tswitch r.Method {\r\n\tcase \"GET\":\r\n\t\tparams := mux.Vars(r)\r\n\t\tdc.Default()\r\n\t\tdc.Endpoint = params[\"endpoint_name\"]\r\n\t\tdc.EndpointList = append(dc.EndpointList, dc.Endpoint)\r\n\tcase \"POST\":\r\n\t\tbody, err := ioutil.ReadAll(r.Body)\r\n\t\tif err != nil {\r\n\t\t\tapi.LogDebug(api.DEBUG, \"[+] POST /delete/endpoint, failed to read request\")\r\n\t\t\tfmt.Fprintln(w, api.HttpFailureMessage(\"Failed to read HTTP request\"))\r\n\t\t\treturn\r\n\t\t}\r\n\t\tdc.LoadParams(body)\r\n\t}\r\n\r\n\t// Verify Endpoint is provided in request body\r\n\tif len(dc.EndpointList) == 0 {\r\n\t\tapi.LogDebug(api.DEBUG, \"[+] POST /delete/endpoint - endpoint is required\")\r\n\t\tfmt.Fprintln(w, api.HttpFailureMessage(\"Endpoint required\"))\r\n\t\treturn\r\n\t}\r\n\r\n\tvar query elastic.Query\r\n\r\n\t// Convert EndpointList to interface{} slice\r\n\tendpoints := make([]interface{}, len(dc.EndpointList))\r\n\tfor i, v := range dc.EndpointList {\r\n\t\tendpoints[i] = v\r\n\t}\r\n\r\n\tquery = elastic.NewBoolQuery().\r\n\t\tMust(elastic.NewWildcardQuery(\"CaseInfo.CaseName\", dc.CaseName),\r\n\t\t\telastic.NewTermsQuery(\"ComputerName.keyword\", endpoints...))\r\n\r\n\tdeleteEndpointByQuery(w, r, query, \"DeleteEndpoint\")\r\n\r\n}",
"func (a *DeprecatedBusManagementRouterApiService) DeleteBindingUsingDELETE(ctx _context.Context, routingKeyFilter string, fifoName string) ApiDeleteBindingUsingDELETERequest {\n\treturn ApiDeleteBindingUsingDELETERequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\troutingKeyFilter: routingKeyFilter,\n\t\tfifoName: fifoName,\n\t}\n}",
"func NewDeleteProtocolUsingDELETEParams() *DeleteProtocolUsingDELETEParams {\n\tvar ()\n\treturn &DeleteProtocolUsingDELETEParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *DeleteProtocolUsingDELETEParams) WithTimeout(timeout time.Duration) *DeleteProtocolUsingDELETEParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (self *VlanBridge) RemoveEndpoint(endpoint *OfnetEndpoint) error {\n\tlog.Infof(\"Received DELETE endpoint: %+v\", endpoint)\n\n\t// Nothing to do. Let OVS handle forwarding..\n\n\treturn nil\n}",
"func (e *EntityCRUDDefinition) DeleteEndpoint(entity Entity) (string, error) {\n\treturn e.renderWithPK(entity)\n}",
"func MakeDeleteVideoEndpoint(s VideoService) endpoint.Endpoint {\n\treturn func(ctx context.Context, request interface{}) (interface{}, error) {\n\t\tid, ok := request.(string)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Illegal video ID parameter\")\n\t\t}\n\t\tif err := s.Delete(ctx, id); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn basicResponse{true, nil}, nil\n\t}\n}",
"func (o *DeleteProtocolUsingDELETEParams) SetProtocolID(protocolID string) {\n\to.ProtocolID = protocolID\n}",
"func NewDeleteEndpoint(s Service) goa.Endpoint {\n\treturn func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\tp := req.(*DeletePayload)\n\t\treturn nil, s.Delete(ctx, p)\n\t}\n}",
"func (nb *NetBuilder) DeleteEndpoint(nw *Network, ep *Endpoint) error {\n\t// Generate network name here as endpoint name is dependent upon network name.\n\tnw.Name = nb.generateHNSNetworkName(nw)\n\t// Query the namespace identifier.\n\tnsType, namespaceIdentifier := nb.getNamespaceIdentifier(ep)\n\n\t// Find the HNS endpoint ID.\n\tendpointName := nb.generateHNSEndpointName(nw.Name, namespaceIdentifier)\n\thnsEndpoint, err := hcsshim.GetHNSEndpointByName(endpointName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Detach the HNS endpoint from the container's network namespace.\n\tlog.Infof(\"Detaching HNS endpoint %s from container %s netns.\", hnsEndpoint.Id, ep.ContainerID)\n\tif nsType == hcsNamespace {\n\t\t// Detach the HNS endpoint from the namespace, if we can.\n\t\t// HCN Namespace and HNS Endpoint have a 1-1 relationship, therefore,\n\t\t// even if detachment of endpoint from namespace fails, we can still proceed to delete it.\n\t\terr = hcn.RemoveNamespaceEndpoint(namespaceIdentifier, hnsEndpoint.Id)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to detach endpoint, ignoring: %v\", err)\n\t\t}\n\t} else {\n\t\terr = hcsshim.HotDetachEndpoint(ep.ContainerID, hnsEndpoint.Id)\n\t\tif err != nil && err != hcsshim.ErrComputeSystemDoesNotExist {\n\t\t\treturn err\n\t\t}\n\n\t\t// The rest of the delete logic applies to infrastructure container only.\n\t\tif nsType == nonInfraContainerNS {\n\t\t\t// For non-infra containers, the network must not be deleted.\n\t\t\tnw.UseExisting = true\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t// Delete the HNS endpoint.\n\tlog.Infof(\"Deleting HNS endpoint name: %s ID: %s\", endpointName, hnsEndpoint.Id)\n\t_, err = hnsEndpoint.Delete()\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to delete HNS endpoint: %v.\", err)\n\t}\n\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
WithVvolUUID adds the vvolUUID to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) WithVvolUUID(vvolUUID string) *VvolBindingDeleteParams {
o.SetVvolUUID(vvolUUID)
return o
}
|
[
"func (o *VvolBindingDeleteParams) SetVvolUUID(vvolUUID string) {\n\to.VvolUUID = vvolUUID\n}",
"func (o *VvolBindingDeleteParams) WithProtocolEndpointUUID(protocolEndpointUUID string) *VvolBindingDeleteParams {\n\to.SetProtocolEndpointUUID(protocolEndpointUUID)\n\treturn o\n}",
"func DeleteVol(uuid string) int32 {\n\n\t_, conn, err := utils.DialVolMgr(VolMgrHosts)\n\tif err != nil {\n\t\tlogger.Error(\"DeleteVol failed,Dial to VolMgrHosts fail :%v\", err)\n\t\treturn -1\n\t}\n\tdefer conn.Close()\n\tvc := vp.NewVolMgrClient(conn)\n\n\tpDeleteVolReq := &vp.DeleteVolReq{\n\t\tUUID: uuid,\n\t}\n\tctx, _ := context.WithTimeout(context.Background(), VOLUME_TIMEOUT_SECONDS*time.Second)\n\tpDeleteVolAck, err := vc.DeleteVol(ctx, pDeleteVolReq)\n\tif err != nil {\n\t\treturn -1\n\t}\n\n\tif pDeleteVolAck.Ret != 0 {\n\t\tlogger.Error(\"DeleteVol failed :%v\", pDeleteVolAck.Ret)\n\t\treturn -1\n\t}\n\n\treturn 0\n}",
"func (l *Libvirt) StorageVolDelete(Vol StorageVol, Flags StorageVolDeleteFlags) (err error) {\n\tvar buf []byte\n\n\targs := StorageVolDeleteArgs {\n\t\tVol: Vol,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(94, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func deleteVolume(vol string) {\n\tclient.RemoveVolume(vol)\n}",
"func (o *VvolBindingDeleteParams) WithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (p *FCProvisioner) Delete(volume *v1.PersistentVolume, config map[string]string, nodeList []*v1.Node) (err error) {\n\tdefer func() {\n\t\tif res := recover(); res != nil && err == nil {\n\t\t\terr = errors.New(\"error while deleting volume \" + fmt.Sprint(res))\n\t\t}\n\t}()\n\tprovisioned, err := p.provisioned(volume)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error determining if this provisioner was the one to provision volume %q: %v\", volume.Name, err)\n\t}\n\tif !provisioned {\n\t\tstrerr := fmt.Sprintf(\"this provisioner id %s didn't provision volume %q and so can't delete it; id %s did & can\", createdBy, volume.Name, volume.Annotations[annCreatedBy])\n\t\treturn &controller.IgnoredError{Reason: strerr}\n\t}\n\n\tglog.Info(\"volume deletion request received: \", volume.GetName())\n\n\tif volume.Annotations[\"volumeId\"] == \"\" {\n\t\terr = errors.New(\"volumeid is empty\")\n\t\treturn err\n\t}\n\tvolId, err := strconv.ParseInt(volume.Annotations[\"volumeId\"], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = p.volDestroy(volId, volume.Annotations[\"volume_name\"], nodeList)\n\tif err != nil {\n\t\tglog.Error(err)\n\t\treturn err\n\n\t}\n\tglog.Info(\"Volume deleted: \", volume.GetName())\n\treturn nil\n}",
"func NewVvolBindingDeleteParams() *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func cephRBDVolumeDelete(clusterName string, poolName string, volumeName string,\n\tvolumeType string, userName string) error {\n\t_, err := shared.RunCommand(\n\t\t\"rbd\",\n\t\t\"--id\", userName,\n\t\t\"--cluster\", clusterName,\n\t\t\"--pool\", poolName,\n\t\t\"rm\",\n\t\tfmt.Sprintf(\"%s_%s\", volumeType, volumeName))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func DelVol(fcVolName string) (string, error) {\n\tvar (\n\t\tcmdOut []byte\n\t\terr error\n\t)\n\n\tstrs := strings.SplitAfterN(fcVolName, \"/dev/\", 2)\n\tif len(strs) == 2 && strs[0] == \"/dev/\" {\n\t\tfcVolName = strs[1]\n\t}\n\tlog.Debug(\"fcVolName=\", fcVolName)\n\n\tcmd := \"fcagent\"\n\targs := []string{\n\t\t\"delvol\",\n\t\tfcVolName,\n\t\t\"-donotdelebs\"}\n\n\tlog.Debug(cmd, args)\n\n\tif cmdOut, err = exec.Command(cmd, args...).Output(); err != nil {\n\t\tlog.Debug(err)\n\t}\n\n\treturn string(cmdOut), err\n}",
"func (o *VvolBindingDeleteParams) WithContext(ctx context.Context) *VvolBindingDeleteParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (s *Stack) DeleteVolumeAttachment(serverID, vaID string) error {\n\tif s == nil {\n\t\treturn scerr.InvalidInstanceError()\n\t}\n\tif serverID == \"\" {\n\t\treturn scerr.InvalidParameterError(\"serverID\", \"cannot be empty string\")\n\t}\n\tif vaID == \"\" {\n\t\treturn scerr.InvalidParameterError(\"vaID\", \"cannot be empty string\")\n\t}\n\n\tdefer concurrency.NewTracer(nil, \"('\"+serverID+\"', '\"+vaID+\"')\", true).WithStopwatch().GoingIn().OnExitTrace()()\n\n\tr := volumeattach.Delete(s.ComputeClient, serverID, vaID)\n\terr := r.ExtractErr()\n\tif err != nil {\n\t\treturn scerr.Wrap(err, fmt.Sprintf(\"error deleting volume attachment '%s': %s\", vaID, ProviderErrorToString(err)))\n\t}\n\treturn nil\n}",
"func (o *VvolBindingDeleteParams) WithHTTPClient(client *http.Client) *VvolBindingDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewVvolBindingDeleteParamsWithTimeout(timeout time.Duration) *VvolBindingDeleteParams {\n\treturn &VvolBindingDeleteParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (a *DeprecatedBusManagementRouterApiService) DeleteBindingUsingDELETE(ctx _context.Context, routingKeyFilter string, fifoName string) ApiDeleteBindingUsingDELETERequest {\n\treturn ApiDeleteBindingUsingDELETERequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\troutingKeyFilter: routingKeyFilter,\n\t\tfifoName: fifoName,\n\t}\n}",
"func BdevLvolDelete(ctx context.Context, client *Client, args BdevLvolDeleteArgs) (bool, error) {\n\tvar response bool\n\terr := client.Invoke(ctx, \"bdev_lvol_delete\", args, &response)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn response, err\n}",
"func (obj *Sys) BindingVipDelete(input CoSysBindingVip, _opt ...map[string]string) (output Result, err error) {\n\tctx := context.Background()\n\treturn obj.BindingVipDeleteWithContext(ctx, input, _opt...)\n}",
"func (_VoterManager *VoterManagerTransactor) DeleteVoter(opts *bind.TransactOpts, _orgId string, _vAccount common.Address) (*types.Transaction, error) {\n\treturn _VoterManager.contract.Transact(opts, \"deleteVoter\", _orgId, _vAccount)\n}",
"func (rc *regClient) TagDelete(ctx context.Context, ref types.Ref) error {\n\tvar tempManifest manifest.Manifest\n\tif ref.Tag == \"\" {\n\t\treturn ErrMissingTag\n\t}\n\n\t// attempt to delete the tag directly, available in OCI distribution-spec, and Hub API\n\treq := httpReq{\n\t\thost: ref.Registry,\n\t\tnoMirrors: true,\n\t\tapis: map[string]httpReqAPI{\n\t\t\t\"\": {\n\t\t\t\tmethod: \"DELETE\",\n\t\t\t\trepository: ref.Repository,\n\t\t\t\tpath: \"manifests/\" + ref.Tag,\n\t\t\t\tignoreErr: true, // do not trigger backoffs if this fails\n\t\t\t},\n\t\t\t\"hub\": {\n\t\t\t\tmethod: \"DELETE\",\n\t\t\t\tpath: \"repositories/\" + ref.Repository + \"/tags/\" + ref.Tag + \"/\",\n\t\t\t},\n\t\t},\n\t}\n\n\tresp, err := rc.httpDo(ctx, req)\n\tif resp != nil {\n\t\tdefer resp.Close()\n\t}\n\t// TODO: Hub may return a different status\n\tif err == nil && resp != nil && resp.HTTPResponse().StatusCode == 202 {\n\t\treturn nil\n\t}\n\t// ignore errors, fallback to creating a temporary manifest to replace the tag and deleting that manifest\n\n\t// lookup the current manifest media type\n\tcurManifest, err := rc.ManifestHead(ctx, ref)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// create empty image config with single label\n\t// Note, this should be MediaType specific, but it appears that docker uses OCI for the config\n\tnow := time.Now()\n\tconf := ociv1.Image{\n\t\tCreated: &now,\n\t\tConfig: ociv1.ImageConfig{\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"delete-tag\": ref.Tag,\n\t\t\t\t\"delete-date\": now.String(),\n\t\t\t},\n\t\t},\n\t\tOS: \"linux\",\n\t\tArchitecture: \"amd64\",\n\t\tRootFS: ociv1.RootFS{\n\t\t\tType: \"layers\",\n\t\t\tDiffIDs: []digest.Digest{},\n\t\t},\n\t}\n\tconfB, err := json.Marshal(conf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdigester := digest.Canonical.Digester()\n\tconfBuf := bytes.NewBuffer(confB)\n\t_, err = confBuf.WriteTo(digester.Hash())\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfDigest := digester.Digest()\n\n\t// create 
manifest with config, matching the original tag manifest type\n\tswitch curManifest.GetMediaType() {\n\tcase MediaTypeOCI1Manifest, MediaTypeOCI1ManifestList:\n\t\ttempManifest, err = manifest.FromOrig(ociv1.Manifest{\n\t\t\tVersioned: ociv1Specs.Versioned{\n\t\t\t\tSchemaVersion: 1,\n\t\t\t},\n\t\t\tConfig: ociv1.Descriptor{\n\t\t\t\tMediaType: MediaTypeOCI1ImageConfig,\n\t\t\t\tDigest: confDigest,\n\t\t\t\tSize: int64(len(confB)),\n\t\t\t},\n\t\t\tLayers: []ociv1.Descriptor{},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault: // default to the docker v2 schema\n\t\ttempManifest, err = manifest.FromOrig(dockerSchema2.Manifest{\n\t\t\tVersioned: dockerManifest.Versioned{\n\t\t\t\tSchemaVersion: 2,\n\t\t\t\tMediaType: MediaTypeDocker2Manifest,\n\t\t\t},\n\t\t\tConfig: dockerDistribution.Descriptor{\n\t\t\t\tMediaType: MediaTypeDocker2ImageConfig,\n\t\t\t\tDigest: confDigest,\n\t\t\t\tSize: int64(len(confB)),\n\t\t\t},\n\t\t\tLayers: []dockerDistribution.Descriptor{},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\trc.log.WithFields(logrus.Fields{\n\t\t\"ref\": ref.Reference,\n\t}).Debug(\"Sending dummy manifest to replace tag\")\n\n\t// push config\n\t_, _, err = rc.BlobPut(ctx, ref, confDigest, ioutil.NopCloser(bytes.NewReader(confB)), MediaTypeDocker2ImageConfig, int64(len(confB)))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed sending dummy config to delete %s: %w\", ref.CommonName(), err)\n\t}\n\n\t// push manifest to tag\n\terr = rc.ManifestPut(ctx, ref, tempManifest)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed sending dummy manifest to delete %s: %w\", ref.CommonName(), err)\n\t}\n\n\tref.Digest = tempManifest.GetDigest().String()\n\n\t// delete manifest by digest\n\trc.log.WithFields(logrus.Fields{\n\t\t\"ref\": ref.Reference,\n\t\t\"digest\": ref.Digest,\n\t}).Debug(\"Deleting dummy manifest\")\n\terr = rc.ManifestDelete(ctx, ref)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed deleting dummy manifest for %s: 
%w\", ref.CommonName(), err)\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetVvolUUID adds the vvolUuid to the vvol binding delete params
|
func (o *VvolBindingDeleteParams) SetVvolUUID(vvolUUID string) {
o.VvolUUID = vvolUUID
}
|
[
"func (o *VvolBindingDeleteParams) WithVvolUUID(vvolUUID string) *VvolBindingDeleteParams {\n\to.SetVvolUUID(vvolUUID)\n\treturn o\n}",
"func DeleteVol(uuid string) int32 {\n\n\t_, conn, err := utils.DialVolMgr(VolMgrHosts)\n\tif err != nil {\n\t\tlogger.Error(\"DeleteVol failed,Dial to VolMgrHosts fail :%v\", err)\n\t\treturn -1\n\t}\n\tdefer conn.Close()\n\tvc := vp.NewVolMgrClient(conn)\n\n\tpDeleteVolReq := &vp.DeleteVolReq{\n\t\tUUID: uuid,\n\t}\n\tctx, _ := context.WithTimeout(context.Background(), VOLUME_TIMEOUT_SECONDS*time.Second)\n\tpDeleteVolAck, err := vc.DeleteVol(ctx, pDeleteVolReq)\n\tif err != nil {\n\t\treturn -1\n\t}\n\n\tif pDeleteVolAck.Ret != 0 {\n\t\tlogger.Error(\"DeleteVol failed :%v\", pDeleteVolAck.Ret)\n\t\treturn -1\n\t}\n\n\treturn 0\n}",
"func (o *VvolBindingDeleteParams) WithProtocolEndpointUUID(protocolEndpointUUID string) *VvolBindingDeleteParams {\n\to.SetProtocolEndpointUUID(protocolEndpointUUID)\n\treturn o\n}",
"func deleteVolume(vol string) {\n\tclient.RemoveVolume(vol)\n}",
"func (o *NvmeNamespaceDeleteParams) SetUUID(uuid string) {\n\to.UUID = uuid\n}",
"func (o *BucketsCollectionGetParams) SetVolumeUUID(volumeUUID *string) {\n\to.VolumeUUID = volumeUUID\n}",
"func (l *Libvirt) StorageVolDelete(Vol StorageVol, Flags StorageVolDeleteFlags) (err error) {\n\tvar buf []byte\n\n\targs := StorageVolDeleteArgs {\n\t\tVol: Vol,\n\t\tFlags: Flags,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(94, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func (o *WwpnAliasDeleteParams) SetSvmUUID(svmUUID string) {\n\to.SvmUUID = svmUUID\n}",
"func (o *QtreeCollectionGetParams) SetVolumeUUID(volumeUUID *string) {\n\to.VolumeUUID = volumeUUID\n}",
"func (s *VcenterClient) SetVcenterUUID(v string) *VcenterClient {\n\ts.VcenterUUID = &v\n\treturn s\n}",
"func (o *GetVersioningPolicyParams) SetUUID(uuid string) {\n\to.UUID = uuid\n}",
"func (o *FileInfoCreateParams) SetVolumeUUID(volumeUUID string) {\n\to.VolumeUUID = volumeUUID\n}",
"func DelVol(fcVolName string) (string, error) {\n\tvar (\n\t\tcmdOut []byte\n\t\terr error\n\t)\n\n\tstrs := strings.SplitAfterN(fcVolName, \"/dev/\", 2)\n\tif len(strs) == 2 && strs[0] == \"/dev/\" {\n\t\tfcVolName = strs[1]\n\t}\n\tlog.Debug(\"fcVolName=\", fcVolName)\n\n\tcmd := \"fcagent\"\n\targs := []string{\n\t\t\"delvol\",\n\t\tfcVolName,\n\t\t\"-donotdelebs\"}\n\n\tlog.Debug(cmd, args)\n\n\tif cmdOut, err = exec.Command(cmd, args...).Output(); err != nil {\n\t\tlog.Debug(err)\n\t}\n\n\treturn string(cmdOut), err\n}",
"func (p *FCProvisioner) Delete(volume *v1.PersistentVolume, config map[string]string, nodeList []*v1.Node) (err error) {\n\tdefer func() {\n\t\tif res := recover(); res != nil && err == nil {\n\t\t\terr = errors.New(\"error while deleting volume \" + fmt.Sprint(res))\n\t\t}\n\t}()\n\tprovisioned, err := p.provisioned(volume)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error determining if this provisioner was the one to provision volume %q: %v\", volume.Name, err)\n\t}\n\tif !provisioned {\n\t\tstrerr := fmt.Sprintf(\"this provisioner id %s didn't provision volume %q and so can't delete it; id %s did & can\", createdBy, volume.Name, volume.Annotations[annCreatedBy])\n\t\treturn &controller.IgnoredError{Reason: strerr}\n\t}\n\n\tglog.Info(\"volume deletion request received: \", volume.GetName())\n\n\tif volume.Annotations[\"volumeId\"] == \"\" {\n\t\terr = errors.New(\"volumeid is empty\")\n\t\treturn err\n\t}\n\tvolId, err := strconv.ParseInt(volume.Annotations[\"volumeId\"], 10, 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = p.volDestroy(volId, volume.Annotations[\"volume_name\"], nodeList)\n\tif err != nil {\n\t\tglog.Error(err)\n\t\treturn err\n\n\t}\n\tglog.Info(\"Volume deleted: \", volume.GetName())\n\treturn nil\n}",
"func (user *User) SetUUID(uuid string) {\n\tuser.GctlUUID = uuid\n}",
"func (o *FileInfoCollectionGetParams) SetVolumeUUID(volumeUUID string) {\n\to.VolumeUUID = volumeUUID\n}",
"func (o *GetContainersUUIDVolumesVolumeUUIDParams) SetUUID(uuid string) {\n\to.UUID = uuid\n}",
"func cephRBDVolumeDelete(clusterName string, poolName string, volumeName string,\n\tvolumeType string, userName string) error {\n\t_, err := shared.RunCommand(\n\t\t\"rbd\",\n\t\t\"--id\", userName,\n\t\t\"--cluster\", clusterName,\n\t\t\"--pool\", poolName,\n\t\t\"rm\",\n\t\tfmt.Sprintf(\"%s_%s\", volumeType, volumeName))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o *VvolBindingDeleteParams) SetProtocolEndpointUUID(protocolEndpointUUID string) {\n\to.ProtocolEndpointUUID = protocolEndpointUUID\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SessionKey returns the most recent sessionKey in MemStore.
|
func (ms *MemStore) SessionKey() string {
return ms.sessionKey
}
|
[
"func (mySession *Session) SessionKey() (param string) {\n\treturn mySession.SessionKeyvar\n}",
"func (a *Account) SessionKey() *big.Int {\n\tif a.sessionKey == nil {\n\t\ta.sessionKey, _ = new(big.Int).SetString(a.SessionKeyStr, 16)\n\t}\n\n\treturn a.sessionKey\n}",
"func (session *Session) GetSessionKey() string {\n\treturn session.id.String()\n}",
"func SessionStoreKey() string {\n\treturn app.SessionStoreKey\n}",
"func (st *MemSessionStore) SessionID() string {\n\treturn (*session.MemSessionStore)(st).SessionID(context.Background())\n}",
"func (s *session) getKey() string {\n\treturn s.uuid\n}",
"func (client *Client) SessionKey() []byte {\n\treturn client.sessionKey\n}",
"func (st *MemSessionStore) SessionID() string {\n\treturn st.sid\n}",
"func (dh *DiffieHelman) SessionKey() ([sha256.Size]byte, error) {\n\tif dh.B == big.NewInt(0) {\n\t\tvar k [sha256.Size]byte\n\t\treturn k, errors.New(\"no second public key available\")\n\t}\n\n\tsessionKey := big.NewInt(0)\n\t// Having your secret set to -1 means your public key is just 0.\n\t// (This is not a mathematical fact; we've just used -1 as a beacon here.)\n\tif dh.a.Cmp(big.NewInt(-1)) != 0 {\n\t\tsessionKey = big.NewInt(0).Exp(dh.B, dh.a, dh.p)\n\t}\n\n\tsessionKeyBytes := sessionKey.Bytes()\n\n\treturn sha256.Sum256(sessionKeyBytes), nil\n}",
"func (c *chat) key() int64 {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\t// generates new possible key value\n\tvar key = time.Now().UnixNano()\n\n\t// generated key become actual if the previous key is absent\n\tif c.prev == 0 {\n\t\tc.prev = key\n\t\t// returns actual key\n\t\treturn c.prev\n\t}\n\n\t// calculates minimum next possible key value\n\tc.prev = c.prev + c.latency\n\n\t// generated key become actual if generated key greater than the minimum possible key\n\tif key > c.prev {\n\t\tc.prev = key\n\t}\n\n\t// returns actual key\n\treturn c.prev\n}",
"func (stateID StateID) Key() string {\n\treturn string(stateID.LastAppHash)\n}",
"func (ms *MemStore) GetMessageKey(\n\tsessionKey string,\n\tsender bool,\n\tmsgIndex uint64,\n) (*[64]byte, error) {\n\ts, ok := ms.sessions[sessionKey]\n\tif !ok {\n\t\treturn nil, log.Errorf(\"memstore: no session found for %s\", sessionKey)\n\t}\n\tif msgIndex >= uint64(len(s.send)) {\n\t\treturn nil, log.Error(\"memstore: message index out of bounds\")\n\t}\n\tvar key string\n\tvar party string\n\tif sender {\n\t\tkey = s.send[msgIndex]\n\t\tparty = \"sender\"\n\t} else {\n\t\tkey = s.recv[msgIndex]\n\t\tparty = \"recipient\"\n\t}\n\t// make sure key wasn't used yet\n\tif key == \"\" {\n\t\treturn nil, log.Error(session.ErrMessageKeyUsed)\n\t}\n\t// decode key\n\tvar messageKey [64]byte\n\tk, err := base64.Decode(key)\n\tif err != nil {\n\t\treturn nil,\n\t\t\tlog.Errorf(\"memstore: cannot decode %s key for %s\", party,\n\t\t\t\tsessionKey)\n\t}\n\tif copy(messageKey[:], k) != 64 {\n\t\treturn nil,\n\t\t\tlog.Errorf(\"memstore: %s key for %s has wrong length\", party,\n\t\t\t\tsessionKey)\n\t}\n\treturn &messageKey, nil\n}",
"func (ds *SessionManager) GetSessionHash() uint64 {\n\treturn ds.sessions.getHash()\n}",
"func (st *SessionStoreMySQL) SessionID() string {\n\treturn st.sid\n}",
"func (sid SessionID) getRedisKey() string {\n\treturn redisKeyPrefix + sid.String()\n}",
"func (m *Manager) sessionID() string {\n\treturn hex.EncodeToString(generateRandomKey(m.opt.IDLength / 2))\n}",
"func getSessionTicketKey() [32]byte {\n\tvar key [32]byte\n\tkeySlice, err := ioutil.ReadFile(\"session_ticket_key\")\n\tif err != nil {\n\t\tkeySlice = make([]byte, 32)\n\t\tn, err := rand.Read(keySlice)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to generate session ticket key: %v\", err)\n\t\t\treturn key\n\t\t}\n\t\tif n != 32 {\n\t\t\tlog.Errorf(\"Generated unexpected length of random data %d\", n)\n\t\t\treturn key\n\t\t}\n\t\terr = ioutil.WriteFile(\"session_ticket_key\", keySlice, 0600)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to save session_ticket_key: %v\", err)\n\t\t} else {\n\t\t\tlog.Debug(\"Saved new session_ticket_key\")\n\t\t}\n\t}\n\tcopy(key[:], keySlice)\n\treturn key\n}",
"func (st *MemSessionStore) Get(key interface{}) interface{} {\n\tst.lock.RLock()\n\tdefer st.lock.RUnlock()\n\tif v, ok := st.value[key]; ok {\n\t\treturn v\n\t}\n\treturn nil\n}",
"func (s *streamKey) lastID() string {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\treturn s.lastIDUnlocked()\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddPrivateKeyEntry adds private KeyEntry to memory store.
|
func (ms *MemStore) AddPrivateKeyEntry(ke *uid.KeyEntry) {
ms.privateKeyEntryMap[ke.HASH] = ke
}
|
[
"func (tg *TokenManager) AddPrivateKey(kid string) (*rsa.PrivateKey, error) {\n\treader := rand.Reader\n\tkey, err := rsa.GenerateKey(reader, bitSize)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttg.keyMap[kid] = key\n\treturn key, nil\n}",
"func (s *Service) AddPrivateKey(keyID string, privateKey crypto.PrivateKey) error {\n\tif _, ok := s.keys[keyID]; ok {\n\t\ts.log.Error(\"The specified key ID is already in use\", \"keyID\", keyID)\n\t\treturn signingkeys.ErrSigningKeyAlreadyExists.Errorf(\"The specified key ID is already in use: %s\", keyID)\n\t}\n\ts.keys[keyID] = privateKey.(crypto.Signer)\n\treturn nil\n}",
"func (ks *KeyStore) Add(privateKey *rsa.PrivateKey, kid string) {\n\tks.mu.Lock()\n\tdefer ks.mu.Unlock()\n\n\tks.store[kid] = privateKey\n}",
"func (keyDB *KeyDB) AddPrivateKeyInit(\n\tki *uid.KeyInit,\n\tpubKeyHash, sigPubKey, privateKey, serverSignature string,\n) error {\n\t_, err := keyDB.addPrivateKeyInitQuery.Exec(\n\t\tki.SigKeyHash(),\n\t\tpubKeyHash,\n\t\tki.JSON(),\n\t\tsigPubKey,\n\t\tprivateKey,\n\t\tserverSignature,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (kb *dsKeyBook) AddPrivKey(t thread.ID, p peer.ID, sk crypto.PrivKey) error {\n\tif sk == nil {\n\t\treturn fmt.Errorf(\"private key is nil\")\n\t}\n\tif !p.MatchesPrivateKey(sk) {\n\t\treturn fmt.Errorf(\"peer ID doesn't match with private key\")\n\t}\n\tskb, err := sk.Bytes()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error when getting private key bytes: %w\", err)\n\t}\n\tkey := dsLogKey(t, p, kbBase).Child(privSuffix)\n\tif err = kb.ds.Put(key, skb); err != nil {\n\t\treturn fmt.Errorf(\"error when putting key %v in datastore: %w\", key, err)\n\t}\n\treturn nil\n}",
"func (r *MemRepo) SetPrivateKey(pk crypto.PrivKey) error {\n\tr.pk = pk\n\treturn nil\n}",
"func (tg *TokenManager) addE2ETestPrivateKey() {\n\tkey := getE2ETestPrivateKey()\n\ttg.keyMap[e2ePrivateKID] = key\n}",
"func (s *onionStore) StorePrivateKey(privateKey []byte) error {\n\t_, err := s.Client.Put(context.Background(), onionPath, string(privateKey))\n\treturn err\n}",
"func (ms *MemStore) AddPublicKeyEntry(identity string, ke *uid.KeyEntry) {\n\tms.publicKeyEntryMap[identity] = ke\n}",
"func (ms *MemStore) GetPrivateKeyEntry(pubKeyHash string) (*uid.KeyEntry, error) {\n\tke, ok := ms.privateKeyEntryMap[pubKeyHash]\n\tif !ok {\n\t\treturn nil, log.Error(session.ErrNoKeyEntry)\n\t}\n\treturn ke, nil\n}",
"func (keyDB *KeyDB) AddPrivateUIDReply(\n\tmsg *uid.Message,\n\tmsgReply *uid.MessageReply,\n) error {\n\t_, err := keyDB.addPrivateUIDReplyQuery.Exec(msgReply.JSON(), msg.JSON())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (cache *Cache) AddEntry (path string) *FilePrint {\n ent, ok := cache.FilePrints[path]\n if ! ok {\n ent = new(FilePrint)\n ent.Local.Changed = true\n ent.Remote.Changed = true\n cache.FilePrints[path] = ent\n }\n return ent\n}",
"func (w *Wallet) AddEntry(entry Entry) error {\n\terr := entry.Verify()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif w.SearchEntryByID(entry.ID) != (Entry{}) {\n\t\treturn fmt.Errorf(\"the id already exists in wallet, can't add the entry\")\n\t}\n\n\tentry.Create = time.Now().Unix()\n\tentry.LastUpdate = entry.Create\n\tw.Entries = append(w.Entries, entry)\n\n\treturn nil\n}",
"func (wallet *Wallet) DumpPrivateKey(paymentAddrSerialized string) KeySerializedData {\n\tfor _, account := range wallet.MasterAccount.Child {\n\t\taddress := account.Key.Base58CheckSerialize(PaymentAddressType)\n\t\tif address == paymentAddrSerialized {\n\t\t\tkey := KeySerializedData{\n\t\t\t\tPrivateKey: account.Key.Base58CheckSerialize(PriKeyType),\n\t\t\t}\n\t\t\treturn key\n\t\t}\n\t}\n\treturn KeySerializedData{}\n}",
"func (tg *TokenManager) RemovePrivateKey(kid string) {\n\tdelete(tg.keyMap, kid)\n}",
"func (a *Account) GetPrivateKey() crypto.PrivateKey { return a.key }",
"func (ks keystore) ExportPrivateKeyObject(uid string) (types.PrivKey, error) {\n\tk, err := ks.Key(uid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpriv, err := extractPrivKeyFromRecord(k)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn priv, err\n}",
"func (ms *MemStore) AddSessionKey(\n\thash, json, privKey string,\n\tcleanupTime uint64,\n) error {\n\tms.sessionKeys[hash] = &sessionKey{\n\t\tjson: json,\n\t\tprivKey: privKey,\n\t\tcleanupTime: cleanupTime,\n\t}\n\treturn nil\n}",
"func (manager *UsersManager) AddKey(authyID string, publicKey string) error {\n\tif _, err := os.Stat(publicKey); err == nil { // publicKey is a file\n\t\tpublicKeyData, err := ioutil.ReadFile(publicKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpublicKey = string(publicKeyData)\n\t}\n\n\t// clean up public key\n\tpublicKey = strings.Replace(publicKey, \"\\n\", \"\", -1)\n\n\tfile, err := os.OpenFile(usersDbPath(), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\t_ = file.Close()\n\t}()\n\n\t_, err = file.WriteString(fmt.Sprintf(\"%s %s\\n\", authyID, publicKey))\n\treturn err\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddPublicKeyEntry adds public KeyEntry from identity to memory store.
|
func (ms *MemStore) AddPublicKeyEntry(identity string, ke *uid.KeyEntry) {
ms.publicKeyEntryMap[identity] = ke
}
|
[
"func (rd *RootDomain) AddNewMemberToPublicKeyMap(publicKey string, memberRef insolar.Reference) error {\n\ttrimmedPublicKey := foundation.TrimPublicKey(publicKey)\n\tshardIndex := foundation.GetShardIndex(trimmedPublicKey, insolar.GenesisAmountPublicKeyShards)\n\tif shardIndex >= len(rd.PublicKeyShards) {\n\t\treturn fmt.Errorf(\"incorrect public key shard index\")\n\t}\n\tpks := pkshard.GetObject(rd.PublicKeyShards[shardIndex])\n\terr := pks.SetRef(trimmedPublicKey, memberRef.String())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to set reference in public key shard\")\n\t}\n\treturn nil\n}",
"func (a *App) AddPublicKey(name string, key io.Reader) *model.AppError {\n\tif isSamlFile(&a.Config().SamlSettings, name) {\n\t\treturn model.NewAppError(\"AddPublicKey\", \"app.plugin.modify_saml.app_error\", nil, \"\", http.StatusInternalServerError)\n\t}\n\tdata, err := io.ReadAll(key)\n\tif err != nil {\n\t\treturn model.NewAppError(\"AddPublicKey\", \"app.plugin.write_file.read.app_error\", nil, \"\", http.StatusInternalServerError).Wrap(err)\n\t}\n\terr = a.Srv().platform.SetConfigFile(name, data)\n\tif err != nil {\n\t\treturn model.NewAppError(\"AddPublicKey\", \"app.plugin.write_file.saving.app_error\", nil, \"\", http.StatusInternalServerError).Wrap(err)\n\t}\n\n\ta.UpdateConfig(func(cfg *model.Config) {\n\t\tif !utils.StringInSlice(name, cfg.PluginSettings.SignaturePublicKeyFiles) {\n\t\t\tcfg.PluginSettings.SignaturePublicKeyFiles = append(cfg.PluginSettings.SignaturePublicKeyFiles, name)\n\t\t}\n\t})\n\n\treturn nil\n}",
"func (_IRMAScheme *IRMASchemeTransactor) AddIssuerPublicKey(opts *bind.TransactOpts, _issuerId string, _key []byte) (*types.Transaction, error) {\n\treturn _IRMAScheme.contract.Transact(opts, \"addIssuerPublicKey\", _issuerId, _key)\n}",
"func (kb *dsKeyBook) AddPubKey(t thread.ID, p peer.ID, pk crypto.PubKey) error {\n\tif pk == nil {\n\t\treturn fmt.Errorf(\"public key is nil\")\n\t}\n\n\tif !p.MatchesPublicKey(pk) {\n\t\treturn fmt.Errorf(\"log ID doesn't provided match public key\")\n\t}\n\tval, err := pk.Bytes()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error when getting bytes from public key: %w\", err)\n\t}\n\tkey := dsLogKey(t, p, kbBase).Child(pubSuffix)\n\tif kb.ds.Put(key, val) != nil {\n\t\treturn fmt.Errorf(\"error when putting public key in store: %w\", err)\n\t}\n\treturn nil\n}",
"func (d *Document) AddPublicKey(pk *DocPublicKey, addRefToAuth bool, addFragment bool) error {\n\t// If ID is not given on public key, then add the doc owner ID by default and\n\t// add the next available key fragment value.\n\t// Overrides addFragment bool. if specific ID is needed, set ID/fragment before adding.\n\tif pk.ID == nil {\n\t\tpk.ID = CopyDID(&d.ID)\n\t\tpk.SetIDFragment(d.NextKeyFragment())\n\n\t} else {\n\t\tif !addFragment && pk.ID.Fragment == \"\" {\n\t\t\treturn errors.Errorf(\"no key id fragment found: %v\", pk.ID.String())\n\n\t\t} else if addFragment {\n\t\t\t// Increment the standard \"keys-\"\n\t\t\tpk.SetIDFragment(d.NextKeyFragment())\n\t\t}\n\t}\n\n\t// If controller is not set, set it to the doc owner ID by default. If you want\n\t// something else, needs to be passed in.\n\tif pk.Controller == nil {\n\t\tpk.Controller = CopyDID(&d.ID)\n\t}\n\n\t// If pk already exists, return\n\tif PublicKeyInSlice(*pk, d.PublicKeys) {\n\t\tlog.Infof(\"Public key is already in document: %+v\", *pk)\n\t\treturn nil\n\t}\n\n\t// Add new key to end of the list of keys\n\td.PublicKeys = append(d.PublicKeys, *pk)\n\n\tif addRefToAuth {\n\t\tauth := DocAuthenicationWrapper{\n\t\t\tDocPublicKey: *pk,\n\t\t\tIDOnly: true,\n\t\t}\n\t\td.Authentications = append(d.Authentications, auth)\n\t}\n\n\tupdated := time.Now().UTC()\n\td.Updated = &updated\n\n\treturn nil\n}",
"func (manager *UsersManager) AddKey(authyID string, publicKey string) error {\n\tif _, err := os.Stat(publicKey); err == nil { // publicKey is a file\n\t\tpublicKeyData, err := ioutil.ReadFile(publicKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpublicKey = string(publicKeyData)\n\t}\n\n\t// clean up public key\n\tpublicKey = strings.Replace(publicKey, \"\\n\", \"\", -1)\n\n\tfile, err := os.OpenFile(usersDbPath(), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\t_ = file.Close()\n\t}()\n\n\t_, err = file.WriteString(fmt.Sprintf(\"%s %s\\n\", authyID, publicKey))\n\treturn err\n}",
"func (ms *MemStore) GetPublicKeyEntry(uidMsg *uid.Message) (*uid.KeyEntry, string, error) {\n\tke, ok := ms.publicKeyEntryMap[uidMsg.Identity()]\n\tif !ok {\n\t\treturn nil, \"\", log.Error(session.ErrNoKeyEntry)\n\t}\n\treturn ke, \"undefined\", nil\n}",
"func (c *computeInstance) addPublicKey(ctx context.Context, user, key string) (bool, error) {\n\tuploadInstanceMetadata := func(metadata *compute.Metadata) error {\n\t\t_, err := c.service.Instances.SetMetadata(c.service.projectID, getZone(c.instance), c.Name(), metadata).Context(ctx).Do()\n\t\treturn err\n\t}\n\n\tif legacySSHKeys := findKey(c.instance.Metadata, legacySSHKey); legacySSHKeys != nil {\n\t\treturn ensureKey(c.instance.Metadata, newSSHKey, user, key, uploadInstanceMetadata)\n\t}\n\n\tif blockProjectSSH := findKey(c.instance.Metadata, blockProjectSSHKey); blockProjectSSH != nil && *blockProjectSSH == \"true\" {\n\t\treturn ensureKey(c.instance.Metadata, newSSHKey, user, key, uploadInstanceMetadata)\n\t}\n\n\t// Try adding the key to project metadata.\n\tproj, err := c.service.Projects.Get(c.service.projectID).Context(ctx).Do()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tuploadProjectMetadata := func(metadata *compute.Metadata) error {\n\t\t_, err := c.service.Projects.SetCommonInstanceMetadata(c.service.projectID, metadata).Context(ctx).Do()\n\t\treturn err\n\t}\n\n\tkeyAdded, err := ensureKey(proj.CommonInstanceMetadata, newSSHKey, user, key, uploadProjectMetadata)\n\tif err == nil {\n\t\t// The key is now present, so return.\n\t\treturn keyAdded, err\n\t}\n\n\t// Unable to update project metadata.\n\tactivity.Record(ctx, \"Unable to add SSH key to metadata for project %v: %v\", c.service.projectID, err)\n\n\t// Try adding the key to instance metadata.\n\tkeyAdded, err = ensureKey(c.instance.Metadata, newSSHKey, user, key, uploadInstanceMetadata)\n\tif err == nil {\n\t\t// The key is now present, so return.\n\t\treturn keyAdded, err\n\t}\n\n\treturn false, fmt.Errorf(\"unable to add SSH key to instance metadata for %v: %v\", c, err)\n}",
"func (m *NodeInfo) SetPublicKey(v []byte) {\n\tif m != nil {\n\t\tm.PublicKey = v\n\t}\n}",
"func (keyDB *KeyDB) AddPublicKeyInit(ki *uid.KeyInit) error {\n\t_, err := keyDB.addPublicKeyInitQuery.Exec(ki.SigKeyHash(), ki.JSON())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (ms *MemStore) AddPrivateKeyEntry(ke *uid.KeyEntry) {\n\tms.privateKeyEntryMap[ke.HASH] = ke\n}",
"func (s *service) AddPublicKeyTag(ctx context.Context, tenant, fingerprint, tag string) error {\n\t// Checks if the namespace exists.\n\tnamespace, err := s.store.NamespaceGet(ctx, tenant)\n\tif err != nil || namespace == nil {\n\t\treturn NewErrNamespaceNotFound(tenant, err)\n\t}\n\n\t// Checks if the public key exists.\n\tkey, err := s.store.PublicKeyGet(ctx, fingerprint, tenant)\n\tif err != nil || key == nil {\n\t\treturn NewErrPublicKeyNotFound(fingerprint, err)\n\t}\n\n\tif key.Filter.Hostname != \"\" {\n\t\treturn NewErrPublicKeyFilter(nil)\n\t}\n\n\tif len(key.Filter.Tags) == DeviceMaxTags {\n\t\treturn NewErrTagLimit(DeviceMaxTags, nil)\n\t}\n\n\ttags, _, err := s.store.TagsGet(ctx, tenant)\n\tif err != nil {\n\t\treturn NewErrTagEmpty(tenant, err)\n\t}\n\n\tif !contains(tags, tag) {\n\t\treturn NewErrTagNotFound(tag, nil)\n\t}\n\n\t// Trys to add a public key.\n\terr = s.store.PublicKeyAddTag(ctx, tenant, fingerprint, tag)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase store.ErrNoDocuments:\n\t\t\treturn ErrDuplicateTagName\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (_IRMAScheme *IRMASchemeTransactorSession) AddIssuerPublicKey(_issuerId string, _key []byte) (*types.Transaction, error) {\n\treturn _IRMAScheme.Contract.AddIssuerPublicKey(&_IRMAScheme.TransactOpts, _issuerId, _key)\n}",
"func (k kuKEM) updatePublicKey(pk, ad []byte) ([]byte, error) {\n\tvar p kuKEMPublicKey\n\tif err := binary.Unmarshal(pk, &p); err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.A = append(p.A, ad)\n\treturn binary.Marshal(&p)\n}",
"func (w *Wallet) AddEntry(entry Entry) error {\n\terr := entry.Verify()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif w.SearchEntryByID(entry.ID) != (Entry{}) {\n\t\treturn fmt.Errorf(\"the id already exists in wallet, can't add the entry\")\n\t}\n\n\tentry.Create = time.Now().Unix()\n\tentry.LastUpdate = entry.Create\n\tw.Entries = append(w.Entries, entry)\n\n\treturn nil\n}",
"func NewPublicKey(pk map[string]interface{}) PublicKey {\n\treturn pk\n}",
"func (o *DKSharesInfo) SetPublicKey(v string) {\n\to.PublicKey = v\n}",
"func (c *Included) MustPublicKeyEntry(key Key) *PublicKeyEntry {\n\tvar publicKeyEntry PublicKeyEntry\n\tif c.tryFindEntry(key, &publicKeyEntry) {\n\t\treturn &publicKeyEntry\n\t}\n\treturn nil\n}",
"func AddSSHPublicKeyToConfig(name string, file string) (*SSHPublicKey, error) {\n\tsshPublicKey, err := parseSSHPublicKey(name, file)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error while parsing ssh key from file '%s' config: %s\", file, err)\n\t}\n\n\tsshPublicKey.WriteToConfig()\n\n\treturn sshPublicKey, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
SetSessionState implemented in memory.
|
func (ms *MemStore) SetSessionState(
sessionStateKey string,
sessionState *session.State,
) error {
ms.sessionStates[sessionStateKey] = sessionState
return nil
}
|
[
"func (mc *mgmtClient) SetSessionState(ctx context.Context, sessionID string, state []byte) error {\n\tuuid, err := uuid.New()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tamqpMsg := &amqp.Message{\n\t\tValue: map[string]interface{}{\n\t\t\t\"session-id\": sessionID,\n\t\t\t\"session-state\": state,\n\t\t},\n\t\tApplicationProperties: map[string]interface{}{\n\t\t\t\"operation\": \"com.microsoft:set-session-state\",\n\t\t\t\"com.microsoft:tracking-id\": uuid.String(),\n\t\t},\n\t}\n\n\tresp, err := mc.doRPCWithRetry(ctx, amqpMsg, 5, 5*time.Second)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Code != 200 {\n\t\treturn ErrAMQP(*resp)\n\t}\n\n\treturn nil\n}",
"func (b *BaseHandler) SetSession(key interface{}, value interface{}) {\n\tb.sessionStore.Set(b, key, value)\n}",
"func (s *StorageBase) SetSession(ctx context.Context, sessionId string, sessionData *gmap.StrAnyMap, ttl time.Duration) error {\n\treturn ErrorDisabled\n}",
"func NewSessionState(sessionStart time.Time, i interface{}) *State {\n\treturn &State{sessionStart, i}\n}",
"func SetSession(w http.ResponseWriter, r *http.Request, key string, value interface{}) bool {\n\tsession, err := store.Get(r, sessionName)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn false\n\t}\n\tsession.Values[key] = value\n\tsession.Save(r, w)\n\treturn true\n}",
"func (w *Worker) SetSession(s *Session) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tw.s = s\n}",
"func (b *BaseHandler) setSessionStore(store SessionStore) {\n\tb.sessionStore = store\n}",
"func SetSession(client *client.Client, db *db.Database, log *log.Logger) {\n\ts = &Session{client, db, log}\n}",
"func setSession(c echo.Context, r *http.Response) {\r\n\r\n\tfor _, cookie := range r.Cookies() {\r\n\r\n\t\tif cookie.Name == \"PHPSESSID\" {\r\n\r\n\t\t\tsess, _ := session.Get(\"Session\", c)\r\n\t\t\tsess.Options = &sessions.Options{\r\n\t\t\t\tPath: \"*\",\r\n\t\t\t\tMaxAge: 60 * 3,\r\n\t\t\t\tHttpOnly: true,\r\n\t\t\t}\r\n\t\t\tsess.Values[\"PHPSESSID\"] = cookie.Value\r\n\r\n\t\t\tsess.Save(c.Request(), c.Response())\r\n\t\t}\r\n\t}\r\n}",
"func (s *StorageFile) SetSession(ctx context.Context, sessionId string, sessionData *gmap.StrAnyMap, ttl time.Duration) error {\n\tintlog.Printf(ctx, \"StorageFile.SetSession: %s, %v, %v\", sessionId, sessionData, ttl)\n\tpath := s.sessionFilePath(sessionId)\n\tcontent, err := json.Marshal(sessionData)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Encrypt with AES.\n\tif s.cryptoEnabled {\n\t\tcontent, err = gaes.Encrypt(content, DefaultStorageFileCryptoKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfile, err := gfile.OpenWithFlagPerm(\n\t\tpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm,\n\t)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\tif _, err = file.Write(gbinary.EncodeInt64(gtime.TimestampMilli())); err != nil {\n\t\terr = gerror.Wrapf(err, `write data failed to file \"%s\"`, path)\n\t\treturn err\n\t}\n\tif _, err = file.Write(content); err != nil {\n\t\terr = gerror.Wrapf(err, `write data failed to file \"%s\"`, path)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *Client) SetSession(ctx context.Context, req *SetSessionReq) error {\n\tuc := c.getUnitConversion()\n\n\tvar setSessionJSON = struct {\n\t\t*SetSessionReq\n\n\t\tTurtleDownloadRateLimit *int64 `json:\"alt-speed-down,omitempty\"`\n\t\tTurtleUploadRateLimit *int64 `json:\"alt-speed-up,omitempty\"`\n\t\tDownloadRateLimit *int64 `json:\"speed-limit-down,omitempty\"`\n\t\tUploadRateLimit *int64 `json:\"speed-limit-up,omitempty\"`\n\t\tCacheSize *int64 `json:\"cache-size-mb,omitempty\"`\n\t\tQueueStalled *time.Duration `json:\"queue-stalled-minutes,omitempty\"`\n\t\tIdleSeedingLimit *time.Duration `json:\"idle-seeding-limit,omitempty\"`\n\t}{\n\t\tSetSessionReq: req,\n\t}\n\tif req.TurtleDownloadRateLimit != nil {\n\t\tsetSessionJSON.TurtleDownloadRateLimit = OptInt64(*req.TurtleDownloadRateLimit / uc.speed)\n\t}\n\tif req.TurtleUploadRateLimit != nil {\n\t\tsetSessionJSON.TurtleUploadRateLimit = OptInt64(*req.TurtleUploadRateLimit / uc.speed)\n\t}\n\tif req.DownloadRateLimit != nil {\n\t\tsetSessionJSON.DownloadRateLimit = OptInt64(*req.DownloadRateLimit / uc.speed)\n\t}\n\tif req.UploadRateLimit != nil {\n\t\tsetSessionJSON.UploadRateLimit = OptInt64(*req.UploadRateLimit / uc.speed)\n\t}\n\tif req.CacheSize != nil {\n\t\tsetSessionJSON.CacheSize = OptInt64(*req.CacheSize / uc.size / uc.size)\n\t}\n\tif req.QueueStalled != nil {\n\t\tsetSessionJSON.QueueStalled = OptDuration(*req.QueueStalled / time.Minute)\n\t}\n\tif req.IdleSeedingLimit != nil {\n\t\tsetSessionJSON.IdleSeedingLimit = OptDuration(*req.IdleSeedingLimit / time.Minute)\n\t}\n\n\treturn c.callRPC(ctx, \"session-set\", &setSessionJSON, nil)\n}",
"func (c *Controller) SetSession(name interface{}, value interface{}) error {\n\tif c.CruSession == nil {\n\t\tc.StartSession()\n\t}\n\treturn c.CruSession.Set(context2.Background(), name, value)\n}",
"func (s *SessionTrackerV1) SetState(state SessionState) error {\n\tswitch state {\n\tcase SessionState_SessionStateRunning, SessionState_SessionStatePending, SessionState_SessionStateTerminated:\n\t\ts.Spec.State = state\n\t\treturn nil\n\tdefault:\n\t\treturn trace.BadParameter(\"invalid session state: %v\", state)\n\t}\n}",
"func (w ResponseWriter) SetState(k string, v interface{}) {\n\tst := w.State()\n\tst[k] = v\n}",
"func (p *PersistentStorage) UpdateSimSessionState(simID string, state SimulatorState) error {\n\tdefer func(now time.Time) {\n\t\trequestTime.With(\"method\", updateSimSessionState, \"data_store\", redisStore).Observe(time.Since(now).Seconds() * 1e3)\n\t}(time.Now())\n\n\tsession, err := p.FindSimSession(simID)\n\tif err != nil {\n\t\tif err == redis.ErrNil {\n\t\t\treturn ErrNoSimSession\n\t\t}\n\n\t\treturn err\n\t}\n\tsession.State = state\n\n\treturn p.SetSimSession(session)\n}",
"func NewServerSessionState(pool *redis.Pool, keyPairs [][]byte, options ...Option) (*session.ServerSessionState, error) {\n\trs := &storage{\n\t\tpool: pool,\n\t\tserializer: driver.GobSerializer,\n\t\tdefaultExpire: 604800, // 7 days\n\t\tidleTimeout: 604800, // 7 days\n\t\tabsoluteTimeout: 5184000, // 60 days\n\t}\n\tfor _, option := range options {\n\t\toption(rs)\n\t}\n\t_, err := rs.ping()\n\treturn session.NewServerSessionState(rs, keyPairs...), err\n}",
"func (c *minecraftConn) setSessionHandler(handler sessionHandler) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.setSessionHandler0(handler)\n}",
"func (s *SessionStore) Set(key, value interface{}) {\n\ts.session.Values[key] = value\n}",
"func (edit Editor) SetSession(session EditSession) {\n\tedit.Call(\"setSession\", session)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
StoreSession implemented in memory.
|
func (ms *MemStore) StoreSession(
sessionKey, rootKeyHash, chainKey string,
send, recv []string,
) error {
if len(send) != len(recv) {
return log.Error("memstore: len(send) != len(recv)")
}
log.Debugf("memstore.StoreSession(): %s", sessionKey)
s, ok := ms.sessions[sessionKey]
if !ok {
ms.sessions[sessionKey] = &memSession{
rootKeyHash: rootKeyHash,
chainKey: chainKey,
send: send,
recv: recv,
}
ms.sessionKey = sessionKey
} else {
// session already exists -> update
// rootKeyHash stays the same!
s.chainKey = chainKey
s.send = append(s.send, send...)
s.recv = append(s.recv, recv...)
}
return nil
}
|
[
"func Store() gorillaSessions.Store {\n\n}",
"func (session *Session) Store() error {\n\treturn session.cache.Store(session.id, session)\n}",
"func (s *inMemorySessionStore) Save(session USSDSession) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.deleteOldestIfFull()\n\tif s.store[session.ID] == nil {\n\t\ts.currentSize++\n\t}\n\ts.gcList.PushFront(&session)\n\ts.store[session.ID] = s.gcList.Front()\n}",
"func (s session) Save() {\n\tsmutex.Lock()\n\tdefer smutex.Unlock()\n\tsessions[s.Id()] = s.clone()\n}",
"func (ss *SessionServiceImpl) StoreSession(session *entities.Session) (*entities.Session, []error) {\n\treturn ss.sessionRepo.StoreSession(session)\n}",
"func StoreInSession(key string, value string, req *http.Request, res http.ResponseWriter) error {\n\tsession, _ := store.New(req, sessionName)\n\n\tif err := updateSessionValue(session, key, value); err != nil {\n\t\treturn err\n\t}\n\n\treturn session.Save(req, res)\n}",
"func StoreInSession(key string, value string, req *http.Request, res http.ResponseWriter) error {\n\tsession, _ := Store.New(req, SessionName)\n\n\tif err := updateSessionValue(session, key, value); err != nil {\n\t\treturn err\n\t}\n\n\treturn session.Save(req, res)\n}",
"func (s session) Store() Store {\n\treturn s.store\n}",
"func (s *Store) Set(session *entities.Session) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.sess[session.ClientID] = session\n\treturn nil\n}",
"func (m *MemoryCache) Save(s *Session) error {\n\t// Basically, this provides to option to replace an old session\n\t// with a new one (change the pointer), provided that the old\n\t// one existed and is still valid.\n\tm.mx.Lock()\n\tdefer m.mx.Unlock()\n\n\tif old, ok := m.store[s.Id]; !ok || !old.Valid() {\n\t\treturn ErrUnrecognized\n\t}\n\n\tm.store[s.Id] = s\n\treturn nil\n}",
"func (s *SessionStore) Save(r *http.Request, w http.ResponseWriter) {\n\ts.session.Save(r, w)\n}",
"func (xse *XormSessionManager) session(id Identifier, obj interface{}, db Engine) (Session, error) {\n\tif db == nil {\n\t\treturn nil, errors.NewErrNilDB(NormalizeValue(obj))\n\t}\n\t// use old session\n\n\ts := xse.getSessionFromList(id, db)\n\tif s != nil {\n\t\treturn s, nil\n\t}\n\n\t// create new session\n\ts = db.NewSession()\n\txse.addSessionIntoList(id, db, s)\n\treturn s, nil\n}",
"func Test_Session_Save(t *testing.T) {\n\tt.Parallel()\n\n\t// session store\n\tstore := New()\n\n\t// fiber instance\n\tapp := fiber.New()\n\n\t// fiber context\n\tctx := app.AcquireCtx(&fasthttp.RequestCtx{})\n\tdefer app.ReleaseCtx(ctx)\n\n\t// get store\n\tsess, _ := store.Get(ctx)\n\n\t// set value\n\tsess.Set(\"name\", \"john\")\n\n\t// save session\n\terr := sess.Save()\n\tutils.AssertEqual(t, nil, err)\n\n}",
"func (b *BaseHandler) SetSession(key interface{}, value interface{}) {\n\tb.sessionStore.Set(b, key, value)\n}",
"func (sess *Session) Store() Store {\n\treturn sess.store\n}",
"func (m *MonitorStore) PutSession(info SessionInfo) {\n\tm.sessions[info.ClientID] = info\n}",
"func NewSessionStore(c dal.Connection, database string, collection string, maxAge int, ensureTTL bool, keyPairs ...[]byte) Store {\n\treturn dalstore.New(c, database, collection, maxAge, ensureTTL, keyPairs...)\n}",
"func (s *SessionStore) Add(session *Session) {\n\tfmt.Println(session.AccessToken)\n\n\ts.atM.Lock()\n\ts.accessTokens[session.AccessToken] = session\n\ts.atM.Unlock()\n\n\ts.rtM.Lock()\n\ts.refreshTokens[session.RefreshToken] = session\n\ts.rtM.Unlock()\n}",
"func (s *SessionStore) Set(key, value interface{}) {\n\ts.session.Values[key] = value\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HasSession implemented in memory.
|
func (ms *MemStore) HasSession(sessionKey string) bool {
_, ok := ms.sessions[sessionKey]
return ok
}
|
[
"func HasSessionsViaSession(iSession []byte) bool {\n\tif has, err := Engine.Where(\"session = ?\", iSession).Get(new(Sessions)); err != nil {\n\t\treturn false\n\t} else {\n\t\tif has {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n}",
"func (o *AcceptOAuth2ConsentRequest) HasSession() bool {\n\tif o != nil && o.Session != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func HasSession() predicate.Event {\n\treturn predicate.Event(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(SessionTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, false, SessionTable, SessionColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}",
"func (f *Features) canSession(ctx context.Context, adminDB *mongo.Database) {\n\tcmd := bson.D{\n\t\t{\n\t\t\tKey: \"replSetGetStatus\",\n\t\t\tValue: 1,\n\t\t},\n\t}\n\tvar result replInfo\n\terr := adminDB.RunCommand(ctx, cmd).Decode(&result)\n\tif err != nil {\n\t\t// assume we don't have session support on error..\n\t\t// error code 76 will be thrown if replSet is not enabled.\n\t\treturn\n\t}\n\n\tf.HasSessions = result.Ok\n}",
"func (this *SessionObject) Has(k string) bool {\n\t_, ok := this.Get(k)\n\treturn ok\n}",
"func HasSession(r *http.Request) bool {\n\t_, err := r.Cookie(CookieName)\n\treturn err == nil\n}",
"func (pder *MemProvider) SessionExist(sid string) bool {\n\tres, _ := (*session.MemProvider)(pder).SessionExist(context.Background(), sid)\n\treturn res\n}",
"func (o *StatusAzureServiceBus) HasSessions() bool {\n\tif o != nil && !IsNil(o.Sessions) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (this *ConnackMessage) SessionPresent() bool {\n\treturn this.sessionPresent\n}",
"func (ca *NullClientAdapter) IsSessionPresent() bool {\n\treturn ca.SessionPresent\n}",
"func (s *SideTwistHandler) hasImplantSession(guid string) bool {\n\t_, ok := s.commandNumbers[guid]\n\treturn ok\n}",
"func SessionExists(SID string) bool {\n\t_, exists := SessionMap[SID]\n\treturn exists\n}",
"func (a *App) ExistsSession(tok string) bool {\n\t_, err := a.getSessionFromTok(tok)\n\treturn err == nil\n}",
"func (session Session) Exists() bool {\n\treturn session.Key != \"\"\n}",
"func (asr *sessionRegistry) Exists(sessionKey string) bool {\n\tasr.lock.RLock()\n\t_, exists := asr.registry[sessionKey]\n\tasr.lock.RUnlock()\n\treturn exists\n}",
"func hasUserSession(userID uuid.UUID) bool {\n\t_, ok := userCache[userID]\n\treturn ok\n}",
"func HasActiveSessions() bool {\n\treturn len(activeSessions) > 0\n}",
"func IsSession(r *http.Request) bool {\n\tval := r.Context().Value(authSessionActiveKey)\n\tswitch val.(type) {\n\tcase bool:\n\t\treturn val.(bool)\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (pder *CookieProvider) SessionExist(ctx context.Context, sid string) (bool, error) {\n\treturn true, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetPrivateKeyEntry implemented in memory.
|
func (ms *MemStore) GetPrivateKeyEntry(pubKeyHash string) (*uid.KeyEntry, error) {
ke, ok := ms.privateKeyEntryMap[pubKeyHash]
if !ok {
return nil, log.Error(session.ErrNoKeyEntry)
}
return ke, nil
}
|
[
"func (a *Account) GetPrivateKey() crypto.PrivateKey { return a.key }",
"func (okp OctetKeyPairBase) PrivateKey() []byte { return okp.privateKey }",
"func (a *Account) GetPrivateKey() crypto.PrivateKey {\n\treturn a.key\n}",
"func (w *Whisper) GetPrivateKey(id string) (*ecdsa.PrivateKey, error) {\n\tw.keyMu.RLock()\n\tdefer w.keyMu.RUnlock()\n\tkey := w.privateKeys[id]\n\tif key == nil {\n\t\treturn nil, fmt.Errorf(\"invalid id\")\n\t}\n\treturn key, nil\n}",
"func (u user) GetPrivateKey() crypto.PrivateKey {\n\treturn u.key\n}",
"func (c Certificate) GetPrivateKey() []byte {\n\treturn c.privateKey\n}",
"func (d *identityManager) PrivateKey() []byte {\n\treturn d.key.PrivateKey\n}",
"func (e *EppPrivateKey) Get(client sophos.ClientInterface, options ...sophos.Option) (err error) {\n\treturn get(client, \"/api/nodes/epp.private_key\", &e.Value, options...)\n}",
"func (k *key) getPrivateKey() (*ecdsa.PrivateKey, error) {\n\tby, err := base64.StdEncoding.DecodeString(k.PrivateKeyB64)\n\tif err != nil {\n\t\treturn (*ecdsa.PrivateKey)(nil), err\n\t}\n\n\tblock, _ := pem.Decode([]byte(by))\n\ttempKey, err := x509.ParseECPrivateKey(block.Bytes)\n\tif err != nil {\n\t\treturn (*ecdsa.PrivateKey)(nil), err\n\t}\n\n\treturn tempKey, nil\n}",
"func GetPrivateKey(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *PrivateKeyState, opts ...pulumi.ResourceOption) (*PrivateKey, error) {\n\tvar resource PrivateKey\n\terr := ctx.ReadResource(\"tls:index/privateKey:PrivateKey\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func GetPrivateKey(privateKeyPath string) (key *rsa.PrivateKey, e error) {\n\n\tvar buff []byte\n\tif buff, e = ioutil.ReadFile(privateKeyPath); e != nil {\n\t\treturn\n\t}\n\n\tblock, _ := pem.Decode(buff)\n\n\tif key, e = x509.ParsePKCS1PrivateKey(block.Bytes); e != nil {\n\t\treturn\n\t}\n\n\treturn\n\n}",
"func (decryptor *PgDecryptor) GetPrivateKey() (*keys.PrivateKey, error) {\n\tif decryptor.IsWithZone() {\n\t\treturn decryptor.keyStore.GetZonePrivateKey(decryptor.GetMatchedZoneID())\n\t}\n\treturn decryptor.keyStore.GetServerDecryptionPrivateKey(decryptor.clientID)\n}",
"func ExampleGetAddressFromPrivateKey() {\r\n\taddress, err := GetAddressFromPrivateKeyString(\"54035dd4c7dda99ac473905a3d82f7864322b49bab1ff441cc457183b9bd8abd\", true)\r\n\tif err != nil {\r\n\t\tfmt.Printf(\"error occurred: %s\", err.Error())\r\n\t\treturn\r\n\t}\r\n\tfmt.Printf(\"address found: %s\", address)\r\n\t// Output:address found: 1DfGxKmgL3ETwUdNnXLBueEvNpjcDGcKgK\r\n}",
"func (e *Domain) Private() *PrivateKey {\n\tif e.ClearPrivateKey == nil {\n\t\te.decrypt_privatekey()\n\t}\n\treturn e.ClearPrivateKey\n}",
"func (p *PrivateKey) PrivateKey() *ecdsa.PrivateKey {\n\treturn p.privateKey\n}",
"func (rm *ReconstructingMember) individualPrivateKey() *big.Int {\n\treturn rm.secretCoefficients[0]\n}",
"func (ms *MemStore) AddPrivateKeyEntry(ke *uid.KeyEntry) {\n\tms.privateKeyEntryMap[ke.HASH] = ke\n}",
"func (c create) getKey(output *logger.Logger, in, out string) (*rsa.PrivateKey, error) {\n\tif in != \"\" {\n\t\tif key, err := x509.OpenPrivateKey(in); err != nil {\n\t\t\tswitch e := err.(type) {\n\t\t\tcase *os.PathError:\n\t\t\t\treturn nil, e\n\t\t\tcase asn1.StructuralError:\n\t\t\t\treturn nil, fmt.Errorf(\"failed to decode private key (not a PKCS#8 format?), %s\", err.Error())\n\t\t\tdefault:\n\t\t\t\treturn nil, e\n\t\t\t}\n\t\t} else {\n\t\t\toutput.Debug(fmt.Sprintf(\"using private key '%s'\", in))\n\t\t\treturn key, nil\n\t\t}\n\t} else {\n\t\tif key, err := c.newKey(out); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\toutput.Debug(fmt.Sprintf(\"created private key '%s'\", out))\n\t\t\treturn key, nil\n\t\t}\n\t}\n}",
"func (c *AuthConfigCommand) GetPrivateKey() string {\n\treturn ExpandEnv(c.cmd.args[\"private-key\"])\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetPublicKeyEntry implemented in memory.
|
func (ms *MemStore) GetPublicKeyEntry(uidMsg *uid.Message) (*uid.KeyEntry, string, error) {
ke, ok := ms.publicKeyEntryMap[uidMsg.Identity()]
if !ok {
return nil, "", log.Error(session.ErrNoKeyEntry)
}
return ke, "undefined", nil
}
|
[
"func (a *App) GetPublicKey(name string) ([]byte, *model.AppError) {\n\treturn a.Srv().getPublicKey(name)\n}",
"func (store *keyStore) GetPublicKey(address string) (*openpgp.Entity, error) {\n\tel := store.lookupPublicKey(address)\n\tif len(el) > 0 {\n\t\treturn el[0], nil\n\t}\n\treturn nil, nil\n}",
"func (r Tiered) GetPublicKey(ctx context.Context, p peer.ID) (ci.PubKey, error) {\n\tvInt, err := r.get(ctx, func(ri routing.Routing) (interface{}, error) {\n\t\treturn routing.GetPublicKey(ri, ctx, p)\n\t})\n\tval, _ := vInt.(ci.PubKey)\n\treturn val, err\n}",
"func GetPublicKey(pub ssh.PublicKey) []byte {\n\tmarshaled := ssh.MarshalAuthorizedKey(pub)\n\t// Strip trailing newline\n\treturn marshaled[:len(marshaled)-1]\n}",
"func (r *Member) GetPublicKey() string {\n\tvar args [0]interface{}\n\n\tvar argsSerialized []byte\n\n\terr := proxyctx.Current.Serialize(args, &argsSerialized)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tres, err := proxyctx.Current.RouteCall(r.Reference, true, \"GetPublicKey\", argsSerialized)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tret := [1]interface{}{}\n\tvar ret0 string\n\tret[0] = &ret0\n\n\terr = proxyctx.Current.Deserialize(res, &ret)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn ret0\n}",
"func GetPublicKey(fingerprint string) (result string, err string) {\n\tr, e := chevronlib.GetPublicKey(fingerprint)\n\tresult = r\n\tif e != nil {\n\t\terr = e.Error()\n\t}\n\n\treturn\n}",
"func (_UsersData *UsersDataCaller) GetPublicKey(opts *bind.CallOpts, uuid [16]byte) ([2][32]byte, error) {\n\tvar (\n\t\tret0 = new([2][32]byte)\n\t)\n\tout := ret0\n\terr := _UsersData.contract.Call(opts, out, \"getPublicKey\", uuid)\n\treturn *ret0, err\n}",
"func (o *DKSharesInfo) GetPublicKey() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.PublicKey\n}",
"func (_UsersData *UsersDataSession) GetPublicKey(uuid [16]byte) ([2][32]byte, error) {\n\treturn _UsersData.Contract.GetPublicKey(&_UsersData.CallOpts, uuid)\n}",
"func GetPublicKeyService(e ld.Entity) interface{} { return e.Get(Prop_PublicKeyService.ID) }",
"func (m *ActorRetriever) GetPublicKey(keyIRI *url.URL) (*vocab.PublicKeyType, error) {\n\tkey, ok := m.keys[keyIRI.String()]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not found\")\n\t}\n\n\treturn key, nil\n}",
"func (keyRing *KeyRing) GetPublicKey() (b []byte, err error) {\n\tvar outBuf bytes.Buffer\n\tif err = keyRing.WritePublicKey(&outBuf); err != nil {\n\t\treturn\n\t}\n\n\tb = outBuf.Bytes()\n\treturn\n}",
"func (kr *KeyRing) GetPublicKey() (b []byte, err error) {\n\tvar outBuf bytes.Buffer\n\tif err = kr.WritePublicKey(&outBuf); err != nil {\n\t\treturn\n\t}\n\n\tb = outBuf.Bytes()\n\treturn\n}",
"func (rm *ReconstructingMember) individualPublicKey() *bn256.G2 {\n\treturn rm.publicKeySharePoints[0]\n}",
"func (okp OctetKeyPairBase) PublicKey() []byte { return okp.publicKey }",
"func (_BondedECDSAKeep *BondedECDSAKeepCaller) GetPublicKey(opts *bind.CallOpts) ([]byte, error) {\n\tvar (\n\t\tret0 = new([]byte)\n\t)\n\tout := ret0\n\terr := _BondedECDSAKeep.contract.Call(opts, out, \"getPublicKey\")\n\treturn *ret0, err\n}",
"func (e *SlashEvent) GetPublicKey() string {\n\treturn e.ValidatorPubKey\n}",
"func (c *client) GetPublicKey(adminToken string) (string, error) {\n\tapi := fmt.Sprintf(\"%sadmin/realms/%s/keys\", c.keycloakURL, c.realmID)\n\treq, err := http.NewRequest(\"GET\", api, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %s\", adminToken))\n\n\tresponse, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer func() {\n\t\terr = response.Body.Close()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}()\n\n\tbody, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkeys := Keys{}\n\terr = json.Unmarshal(body, &keys)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, v := range keys.KeyData {\n\t\tif v.Algorithm == \"RS256\" && v.Type == \"RSA\" {\n\t\t\treturn v.PublicKey, nil\n\t\t}\n\t}\n\n\treturn \"\", nil\n}",
"func (rd *RootDomain) GetMemberByPublicKey(publicKey string) (*insolar.Reference, error) {\n\ttrimmedPublicKey := foundation.TrimPublicKey(publicKey)\n\ti := foundation.GetShardIndex(trimmedPublicKey, insolar.GenesisAmountPublicKeyShards)\n\tif i >= len(rd.PublicKeyShards) {\n\t\treturn nil, fmt.Errorf(\"incorrect shard index\")\n\t}\n\ts := pkshard.GetObject(rd.PublicKeyShards[i])\n\trefStr, err := s.GetRef(trimmedPublicKey)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get reference in shard\")\n\t}\n\tref, err := insolar.NewReferenceFromBase58(refStr)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"bad member reference for this public key\")\n\t}\n\n\treturn ref, nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NumMessageKeys implemented in memory.
|
func (ms *MemStore) NumMessageKeys(sessionKey string) (uint64, error) {
s, ok := ms.sessions[sessionKey]
if !ok {
return 0, log.Errorf("memstore: no session found for %s", sessionKey)
}
return uint64(len(s.send)), nil
}
|
[
"func (p BnetAuthCheckPacket) NumKeys() int {\n\tpb := PacketBuffer(p)\n\treturn pb.Uint32(16)\n}",
"func (d *Data) NKeys() int { return 1 }",
"func (s *TestingStructure) NKeys() int { return 9 }",
"func (d *data) NKeys() int { return 2 }",
"func (m *Metadata) NKeys() int { return 1 }",
"func (k InternalKey) Size() int {\n\treturn len(k.UserKey) + 8\n}",
"func (k *Keychain) KeyCount() int {\n\tk.m.RLock()\n\tc := len(k.keys)\n\tk.m.RUnlock()\n\treturn c\n}",
"func (*Object) NKeys() int { return 2 }",
"func Bpf_map__key_size(map_ *Struct_bpf_map) uint32 {\n\treturn C.bpf_map__key_size()\n}",
"func (m *MemStore) GetKeyCount(user string) (int, error) {\n\treturn len(m.Data[user]), nil\n}",
"func (s *Store) numEntries() int {\r\n\treturn len(s.GetKeys())\r\n}",
"func (r *Message) Size() uint64 {\n\tresult := uint64(16)\n\tresult += uint64(len(r.Key))\n\tresult += uint64(len(r.Value))\n\n\tfor _, h := range r.Headers {\n\t\tresult += uint64(len(h.Key))\n\t\tresult += uint64(len(h.Value))\n\t}\n\n\treturn result\n}",
"func (s *session) getMessageCount() uint64 {\n\treturn uint64(len(s.msgSizes) - len(s.markedDeleted))\n}",
"func (t *DbTable) KeyCount() int {\n\tvar count int\n\n\t// accumulate the number of key fields\n\tfor _, v := range t.Fields {\n\t\tif v.KeyNum > 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}",
"func (m *HashMap) Len() int64 { return m.n }",
"func (c *Cache) Len() int {\n\treturn len(c.keyMap)\n}",
"func (p *Properties) NumRangeKeys() uint64 {\n\treturn p.NumRangeKeyDels + p.NumRangeKeySets + p.NumRangeKeyUnsets\n}",
"func (d DBase) CountKeys(table string) int {\n\tcount, err := d.Store.CountKeys(table)\n\tCritical(err)\n\treturn count\n}",
"func InternalNodeNumKeys(node []byte) *uint32 {\n\treturn (*uint32)(unsafe.Pointer(&node[InternalNodeNumKeysOffset]))\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetMessageKey implemented in memory.
|
func (ms *MemStore) GetMessageKey(
sessionKey string,
sender bool,
msgIndex uint64,
) (*[64]byte, error) {
s, ok := ms.sessions[sessionKey]
if !ok {
return nil, log.Errorf("memstore: no session found for %s", sessionKey)
}
if msgIndex >= uint64(len(s.send)) {
return nil, log.Error("memstore: message index out of bounds")
}
var key string
var party string
if sender {
key = s.send[msgIndex]
party = "sender"
} else {
key = s.recv[msgIndex]
party = "recipient"
}
// make sure key wasn't used yet
if key == "" {
return nil, log.Error(session.ErrMessageKeyUsed)
}
// decode key
var messageKey [64]byte
k, err := base64.Decode(key)
if err != nil {
return nil,
log.Errorf("memstore: cannot decode %s key for %s", party,
sessionKey)
}
if copy(messageKey[:], k) != 64 {
return nil,
log.Errorf("memstore: %s key for %s has wrong length", party,
sessionKey)
}
return &messageKey, nil
}
|
[
"func GetKey(x proto.Message) (string, error) {\n\treturn GetKeyUsingModelRegistry(x, DefaultRegistry)\n}",
"func (m Message) Key() []byte {\n\tstart, end, size := m.keyOffsets()\n\tif size == -1 {\n\t\treturn nil\n\t}\n\treturn m[start+4 : end]\n}",
"func MessageKey(authKey Key, plaintextPadded []byte, mode Side) bin.Int128 {\n\tr := make([]byte, 0, 256)\n\t// `msg_key_large = SHA256 (substr (auth_key, 88+x, 32) + plaintext + random_padding);`\n\tmsgKeyLarge := msgKeyLarge(r, authKey, plaintextPadded, mode)\n\t// `msg_key = substr (msg_key_large, 8, 16);`\n\treturn messageKey(msgKeyLarge)\n}",
"func MessageKey(authKey Key, plaintextPadded []byte, mode Side) bin.Int128 {\n\t// `msg_key_large = SHA256 (substr (auth_key, 88+x, 32) + plaintext + random_padding);`\n\tmsgKeyLarge := msgKeyLarge(authKey, plaintextPadded, mode)\n\t// `msg_key = substr (msg_key_large, 8, 16);`\n\treturn messageKey(msgKeyLarge)\n}",
"func messageKey(messageKeyLarge []byte) (v bin.Int128) {\n\tb := messageKeyLarge[8 : 16+8]\n\tcopy(v[:len(b)], b)\n\treturn v\n}",
"func messageKey(messageKeyLarge []byte) bin.Int128 {\n\tvar v bin.Int128\n\tb := messageKeyLarge[8 : 16+8]\n\tcopy(v[:len(b)], b)\n\treturn v\n}",
"func GetKey() [32]byte {\n\tif !keyset {\n\t\terr := setKey()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn key\n}",
"func (st *MemStorage) GetKey(gun, role string) (algorithm string, public []byte, err error) {\n\t// no need for lock. It's ok to return nil if an update\n\t// wasn't observed\n\tg, ok := st.keys[gun]\n\tif !ok {\n\t\treturn \"\", nil, &ErrNoKey{gun: gun}\n\t}\n\tk, ok := g[role]\n\tif !ok {\n\t\treturn \"\", nil, &ErrNoKey{gun: gun}\n\t}\n\n\treturn k.algorithm, k.public, nil\n}",
"func (symmetricKey *SymmetricKey) GetKey() []byte {\n\tsymmetricKey.mutex.RLock()\n\tdefer symmetricKey.mutex.RUnlock()\n\treturn symmetricKey.SymmetricKey\n}",
"func MsgKey(s string) {\n\tmsgKey = s\n\tgenLevelSlices()\n}",
"func (m *RegistryKeyState) GetKey()(*string) {\n return m.key\n}",
"func (p *Provider) GetKey() interface{} {\n\treturn p.key\n}",
"func (c *Command) GetKey() string {\n\tif c.duplicateKey {\n\t\treturn fmt.Sprintf(\"%s.%d\", c.ProcessKey, c.LineNo)\n\t}\n\treturn c.ProcessKey\n}",
"func (m *KeyValue) GetKey()(*string) {\n return m.key\n}",
"func (header SseKmsHeader) GetKey() string {\n\treturn header.Key\n}",
"func (w *General) GetKey() string {\n\treturn w.key\n}",
"func (c *Counter) GetKey() string {\n\treturn c.key\n}",
"func (sr *Smokering) GetKey(id string) *Key {\n\treturn sr.getKey(id, false)\n}",
"func MsgKey(R, P *btcec.PublicKey, m []byte) *btcec.PublicKey {\n\th := Hash(R.SerializeCompressed(), m)\n\th = new(big.Int).Mod(new(big.Int).Neg(h), btcec.S256().N)\n\thP := new(btcec.PublicKey)\n\thP.X, hP.Y = btcec.S256().ScalarMult(P.X, P.Y, h.Bytes())\n\treturn SumPubs(R, hP)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetRootKeyHash implemented in memory.
|
func (ms *MemStore) GetRootKeyHash(sessionKey string) (*[64]byte, error) {
s, ok := ms.sessions[sessionKey]
if !ok {
return nil, log.Errorf("memstore: no session found for %s", sessionKey)
}
// decode root key hash
var hash [64]byte
k, err := base64.Decode(s.rootKeyHash)
if err != nil {
return nil, log.Error("memstore: cannot decode root key hash")
}
if copy(hash[:], k) != 64 {
return nil, log.Errorf("memstore: root key hash has wrong length")
}
return &hash, nil
}
|
[
"func (t *Trie) RootHash() []byte {\n\treturn t.rootHash\n}",
"func (eln *EmptyLeafNode) GetKey() []byte {\n\treturn nil\n}",
"func (ws *workingSet) RootHash() hash.Hash32B {\n\treturn ws.accountTrie.RootHash()\n}",
"func (t *Trie) hashRoot(db *Database) (node.Node, node.Node, error) {\n\tif t.node == nil {\n\t\treturn node.NewHashNode(nilNode.Bytes()), nil, nil\n\t}\n\th := NewEncryptor()\n\tdefer hPool.Put(h)\n\treturn h.hash(t.node, db, true)\n}",
"func (d *dataUsageCache) rootHash() dataUsageHash {\n\treturn hashPath(d.Info.Name)\n}",
"func (s *StateDB) RootHash() *corecrypto.HashType {\n\treturn s.trie.RootHash()\n}",
"func (s *stack) root() crypto.Hash {\n\ti := uint64(bits.TrailingZeros64(s.used))\n\tif i == 64 {\n\t\treturn crypto.Hash{}\n\t}\n\troot := s.stack[i]\n\tfor i++; i < 64; i++ {\n\t\tif s.used&(1<<i) != 0 {\n\t\t\troot = s.nodeHash(s.stack[i], root)\n\t\t}\n\t}\n\treturn root\n}",
"func GetKey() [32]byte {\n\tif !keyset {\n\t\terr := setKey()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn key\n}",
"func (r *RootKeyStorage) Get(_ context.Context, id []byte) ([]byte, error) {\n\tr.encKeyMtx.RLock()\n\tdefer r.encKeyMtx.RUnlock()\n\n\tif r.encKey == nil {\n\t\treturn nil, ErrStoreLocked\n\t}\n\tvar rootKey []byte\n\terr := kvdb.View(r, func(tx kvdb.RTx) error {\n\t\tdbKey := tx.ReadBucket(rootKeyBucketName).Get(id)\n\t\tif len(dbKey) == 0 {\n\t\t\treturn fmt.Errorf(\"root key with id %s doesn't exist\",\n\t\t\t\tstring(id))\n\t\t}\n\n\t\tdecKey, err := r.encKey.Decrypt(dbKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trootKey = make([]byte, len(decKey))\n\t\tcopy(rootKey, decKey)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rootKey, nil\n}",
"func (hasher *SHA256) HashKey() string {\n\treturn \"\"\n}",
"func (r *RootKeyStorage) RootKey(ctx context.Context) ([]byte, []byte, error) {\n\tr.encKeyMtx.RLock()\n\tdefer r.encKeyMtx.RUnlock()\n\n\tif r.encKey == nil {\n\t\treturn nil, nil, ErrStoreLocked\n\t}\n\tvar rootKey []byte\n\n\t// Read the root key ID from the context. If no key is specified in the\n\t// context, an error will be returned.\n\tid, err := RootKeyIDFromContext(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif bytes.Equal(id, encryptedKeyID) {\n\t\treturn nil, nil, ErrKeyValueForbidden\n\t}\n\n\terr = kvdb.Update(r, func(tx kvdb.RwTx) error {\n\t\tns := tx.ReadWriteBucket(rootKeyBucketName)\n\t\tdbKey := ns.Get(id)\n\n\t\t// If there's a root key stored in the bucket, decrypt it and\n\t\t// return it.\n\t\tif len(dbKey) != 0 {\n\t\t\tdecKey, err := r.encKey.Decrypt(dbKey)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\trootKey = make([]byte, len(decKey))\n\t\t\tcopy(rootKey, decKey)\n\t\t\treturn nil\n\t\t}\n\n\t\t// Otherwise, create a RootKeyLen-byte root key, encrypt it,\n\t\t// and store it in the bucket.\n\t\trootKey = make([]byte, RootKeyLen)\n\t\tif _, err := io.ReadFull(rand.Reader, rootKey); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tencKey, err := r.encKey.Encrypt(rootKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ns.Put(id, encKey)\n\t})\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn rootKey, id, nil\n}",
"func (h *Hasher) GetZeroHash() []byte {\n\treturn h.pool.zerohashes[h.pool.Depth]\n}",
"func (t *KeyedHashTree) Key(offset uint64) []byte {\n\tif offset > t.maxSize {\n\t\tpanic(\"offset greater than maximum size\")\n\t}\n\n\tbuf := make([]byte, 16)\n\tk := make([]byte, len(t.root))\n\tcopy(k, t.root)\n\tfor i := uint64(0); i < t.depth; i++ {\n\t\tlevel := t.depth - i\n\t\tblockSize := uint64(math.Pow(t.factor, float64(level-1))) * t.blockSize\n\t\ty := offset / blockSize\n\n\t\tbinary.LittleEndian.PutUint64(buf, uint64(i))\n\t\tbinary.LittleEndian.PutUint64(buf[8:], uint64(y))\n\n\t\th := t.alg(k)\n\t\t_, _ = h.Write(buf)\n\t\tk = h.Sum(k[:0])\n\t}\n\treturn k\n}",
"func Hash(s string, maxKey uint64) Key {\n\th := fnv.New64a()\n\th.Write([]byte(s))\n\treturn NewKey(h.Sum64() % maxKey)\n}",
"func GetBaseKey(addr sdk.ValAddress) []byte {\n\treturn append([]byte{0x00}, addr...)\n}",
"func RootHash(start uint64, end uint64) {\n\tRPCClient, err := rpc.Dial(\"ws://127.0.0.1:8585\") // websocket port of a node started from bor-devnet directory\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tclient := ethclient.NewClient(RPCClient)\n\tctx, _ := context.WithCancel(context.Background())\n\n\troot, err := client.GetRootHash(ctx, start, end)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(root)\n}",
"func (f *Fork) HashTreeRoot() ([32]byte, error) {\n\treturn ssz.HashWithDefaultHasher(f)\n}",
"func (k KeyStore) Get(address bitcoin.RawAddress) (*Key, error) {\r\n\thash, err := address.Hash()\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\tkey, ok := k.Keys[*hash]\r\n\tif !ok {\r\n\t\treturn nil, ErrKeyNotFound\r\n\t}\r\n\treturn key, nil\r\n}",
"func (m *MsgProofs) HashTreeRoot() ([32]byte, error) {\n\treturn ssz.HashWithDefaultHasher(m)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
GetChainKey implemented in memory.
|
func (ms *MemStore) GetChainKey(sessionKey string) (*[32]byte, error) {
s, ok := ms.sessions[sessionKey]
if !ok {
return nil, log.Errorf("memstore: no session found for %s", sessionKey)
}
// decode chain key
var key [32]byte
k, err := base64.Decode(s.chainKey)
if err != nil {
return nil, log.Error("memstore: cannot decode chain key")
}
if copy(key[:], k) != 32 {
return nil, log.Errorf("memstore: chain key has wrong length")
}
return &key, nil
}
|
[
"func GetChainKey(chainID string) []byte {\n\treturn append(KeyPrefix(ChainKey), []byte(chainID)...)\n}",
"func (hs *HandshakeState) GetChainingKey() []byte {\n\treturn hs.ss.chainingKey[:]\n}",
"func (k *Keychain) Key() (string, error) {\n\tkey, err := k.BinKey()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hex.EncodeToString(key), nil\n}",
"func GetTheBlockKey(chain, index uint64) []byte {\n\tvar key Hash\n\tif chain == 0 {\n\t\treturn nil\n\t}\n\tif index == 0 {\n\t\tvar pStat BaseInfo\n\t\tgetDataFormDB(chain, dbStat{}, []byte{StatBaseInfo}, &pStat)\n\t\treturn pStat.Key[:]\n\t}\n\tgetDataFormLog(chain, logBlockInfo{}, runtime.Encode(index), &key)\n\tif key.Empty() {\n\t\treturn nil\n\t}\n\treturn key[:]\n}",
"func GetKey(t *testing.T) []byte {\n\tt.Helper()\n\n\tk, _ := GeneratePrivateKey()\n\treturn k.Data[:]\n}",
"func GetKey() [32]byte {\n\tif !keyset {\n\t\terr := setKey()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn key\n}",
"func (ctx *Context) APIKeychain() *Keychain {\n\treturn ctx.apiKeychain\n}",
"func (s *State) SenderChainKey() session.ChainKeyable {\n\tchainKey := s.senderChain.chainKey\n\treturn chain.NewKey(kdf.DeriveSecrets, chainKey.Key(), chainKey.Index())\n}",
"func GetNameAccountKey(hashName []byte, nameClass, nameParent common.PublicKey) common.PublicKey {\n\tseed := [][]byte{\n\t\thashName,\n\t\tnameClass.Bytes(),\n\t\tnameParent.Bytes(),\n\t}\n\tpubkey, _, _ := common.FindProgramAddress(seed, common.SPLNameServiceProgramID)\n\treturn pubkey\n}",
"func (rk *caIdemixRevocationKey) GetKey() *ecdsa.PrivateKey {\n\treturn rk.key\n}",
"func GetKeychain(u User, addresses set.Set[ids.ShortID]) (*secp256k1fx.Keychain, error) {\n\taddrsList := addresses.List()\n\tif len(addrsList) == 0 {\n\t\tvar err error\n\t\taddrsList, err = u.GetAddresses()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tkc := secp256k1fx.NewKeychain()\n\tfor _, addr := range addrsList {\n\t\tsk, err := u.GetKey(addr)\n\t\tif err == database.ErrNotFound {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"problem retrieving private key for address %s: %w\", addr, err)\n\t\t}\n\t\tkc.Add(sk)\n\t}\n\treturn kc, nil\n}",
"func (w *Wallet) GetKey(address types.UnlockHash) (pk types.PublicKey, sk types.ByteSlice, err error) {\n\tw.mu.RLock()\n\tpk, sk, err = w.getKey(address)\n\tw.mu.RUnlock()\n\treturn\n}",
"func (bvt *BlockVerificationTicket) GetKey() datastore.Key {\n\treturn datastore.ToKey(bvt.BlockID)\n}",
"func (p *bitsharesAPI) GetChainID() objects.ChainID {\n\treturn p.chainID // return cached value\n}",
"func (symmetricKey *SymmetricKey) GetKey() []byte {\n\tsymmetricKey.mutex.RLock()\n\tdefer symmetricKey.mutex.RUnlock()\n\treturn symmetricKey.SymmetricKey\n}",
"func (la *LinkAccount) GetTxKey(hash *common.Hash) (*lkctypes.Key, error) {\n\t// if !la.walletOpen {\n\t// \treturn nil, types.ErrWalletNotOpen\n\t// }\n\t// txKey, ok := la.txKeys[*hash]\n\t// if ok {\n\t// \treturn &txKey, nil\n\t// }\n\t// return nil, types.ErrNotFoundTxKey\n\n\titr := la.walletDB.NewIteratorWithPrefix(hash[:])\n\tdefer itr.Close()\n\n\tfor ; itr.Valid(); itr.Next() {\n\t\tv := itr.Value()\n\t\tvar key lkctypes.Key\n\t\tcopy(key[:], v)\n\t\treturn &key, nil\n\t}\n\treturn nil, types.ErrNotFoundTxKey\n}",
"func NewKeychain() *Keychain {\n\treturn &Keychain{}\n}",
"func (s *State) ReceiverChainKey(senderEphemeral ecc.ECPublicKeyable) *chain.Key {\n\treceiverChainAndIndex := s.receiverChain(senderEphemeral)\n\treceiverChain := receiverChainAndIndex.ReceiverChain\n\n\tif receiverChainAndIndex == nil || receiverChain == nil {\n\t\treturn nil\n\t}\n\n\treturn chain.NewKey(\n\t\tkdf.DeriveSecrets,\n\t\treceiverChain.chainKey.Key(),\n\t\treceiverChain.chainKey.Index(),\n\t)\n}",
"func (bull *Bullion) GetKey(segment string) string {\n\treturn bull.GetKeyPrefix() + segment\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DelMessageKey implemented in memory.
|
func (ms *MemStore) DelMessageKey(
sessionKey string,
sender bool,
msgIndex uint64,
) error {
s, ok := ms.sessions[sessionKey]
if !ok {
return log.Errorf("memstore: no session found for %s", sessionKey)
}
if msgIndex >= uint64(len(s.send)) {
return log.Error("memstore: message index out of bounds")
}
// delete key
if sender {
s.send[msgIndex] = ""
} else {
s.recv[msgIndex] = ""
}
return nil
}
|
[
"func (k Keeper) RemoveMessage(ctx sdk.Context, id uint64) {\n\tstore := prefix.NewStore(ctx.KVStore(k.storeKey), types.KeyPrefix(types.MessageKey))\n\tstore.Delete(GetMessageIDBytes(id))\n}",
"func DeleteMessage(id int) error {\n\t// check if id exist in map\n\tmessagemap.RLock()\n\t_, found := messagemap.m[id]\n\tmessagemap.RUnlock()\n\n\tif !found {\n\t\treturn ErrorNoSuchKey\n\t}\n\n\tmessagemap.Lock()\n\tdelete(messagemap.m, id)\n\tmessagemap.Unlock()\n\treturn nil\n}",
"func (c Context) Del(key string) {\n\tdelete(c.data, key)\n}",
"func (db *MemoryStorage) Del(key []byte) error {\n\tdb.data.Delete(common.BytesToHex(key))\n\treturn nil\n}",
"func (c *memoryCache) Del(key string) {\n\tdelete(c.data, key)\n}",
"func (s *State) RemoveMessageKeys(senderEphemeral ecc.ECPublicKeyable, counter uint32) *message.Keys {\n\t// Get our chain state that has our chain key.\n\tchainAndIndex := s.receiverChain(senderEphemeral)\n\tchainKey := chainAndIndex.ReceiverChain\n\n\t// If the chain is empty, we don't have any message keys.\n\tif chainKey == nil {\n\t\treturn nil\n\t}\n\n\t// Get our message keys from our receiver chain.\n\tmessageKeyList := chainKey.MessageKeys()\n\n\t// Loop through our message keys and compare its index with the\n\t// given counter. When we find a match, remove it from our list.\n\tvar rmIndex int\n\tfor i, messageKey := range messageKeyList {\n\t\tif messageKey.Index() == counter {\n\t\t\trmIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Retrive the message key\n\tmessageKey := chainKey.messageKeys[rmIndex]\n\n\t// Delete the message key from the given position.\n\tchainKey.messageKeys = append(chainKey.messageKeys[:rmIndex], chainKey.messageKeys[rmIndex+1:]...)\n\n\treturn message.NewKeys(\n\t\tmessageKey.CipherKey(),\n\t\tmessageKey.MacKey(),\n\t\tmessageKey.Iv(),\n\t\tmessageKey.Index(),\n\t)\n}",
"func (m *Messages) RemoveMessage(key string) {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tdelete(m.messages, key)\n}",
"func clearKeyData(logger log.Logger, deleter types.Deleter) {\n\tlevel.Info(logger).Log(\"msg\", \"Clearing keys\")\n\t_ = deleter.Delete([]byte(privateEccData), []byte(publicEccData))\n}",
"func (d Data) Del(key uint32) {\n\td.mutex.Lock()\n\tcount := d.counts[key]\n\tcount -= 1\n\tif count < 1 {\n\t\tdelete(d.data, key)\n\t\tdelete(d.counts, key)\n\t} else {\n\t\td.counts[key] = count\n\t}\n\td.mutex.Unlock()\n}",
"func MessageKey(authKey Key, plaintextPadded []byte, mode Side) bin.Int128 {\n\t// `msg_key_large = SHA256 (substr (auth_key, 88+x, 32) + plaintext + random_padding);`\n\tmsgKeyLarge := msgKeyLarge(authKey, plaintextPadded, mode)\n\t// `msg_key = substr (msg_key_large, 8, 16);`\n\treturn messageKey(msgKeyLarge)\n}",
"func MessageKey(authKey Key, plaintextPadded []byte, mode Side) bin.Int128 {\n\tr := make([]byte, 0, 256)\n\t// `msg_key_large = SHA256 (substr (auth_key, 88+x, 32) + plaintext + random_padding);`\n\tmsgKeyLarge := msgKeyLarge(r, authKey, plaintextPadded, mode)\n\t// `msg_key = substr (msg_key_large, 8, 16);`\n\treturn messageKey(msgKeyLarge)\n}",
"func (m *Uint64) Delete(key interface{}) {\n\tm.m.Delete(key)\n}",
"func (n *Notification) DelKey(key string) error {\n\tfor i, value := range n.apikeys {\n\t\tif strings.EqualFold(key, value) {\n\t\t\tcopy(n.apikeys[i:], n.apikeys[i+1:])\n\t\t\tn.apikeys[len(n.apikeys)-1] = \"\"\n\t\t\tn.apikeys = n.apikeys[:len(n.apikeys)-1]\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(\"Error, key not found\")\n}",
"func (dao *GkvDB) DeleteEnvelopMessager(echoHash common.Hash) {\n\terr := dao.removeKeyValueFromBucket(models.BucketEnvelopMessager, echoHash[:])\n\tif err != nil {\n\t\t//可能这个消息完全不存在\n\t\t// this messsage might not exist.\n\t\tlog.Warn(fmt.Sprintf(\"try to remove envelop message %s,but err= %s\", utils.HPex(echoHash), err))\n\t}\n}",
"func (md Metadata) Del(key string) {\n\t// fast path\n\tif _, ok := md[key]; ok {\n\t\tdelete(md, key)\n\t} else {\n\t\t// slow path\n\t\tdelete(md, textproto.CanonicalMIMEHeaderKey(key))\n\t}\n}",
"func (r *Redis) Delete(key string) {\n}",
"func MsgKey(s string) {\n\tmsgKey = s\n\tgenLevelSlices()\n}",
"func (pdb *PebbleDB) MDel(keys []string) error {\n\treturn nil\n}",
"func (t *Tkeyid) DelFromKey(key string) (id uint, ok bool) {\n\tid, ok = t.keytoid[key]\n\tif ok {\n\t\tdelete(t.idtokey, id)\n\t\tdelete(t.keytoid, key)\n\t}\n\treturn\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
AddSessionKey implemented in memory.
|
func (ms *MemStore) AddSessionKey(
hash, json, privKey string,
cleanupTime uint64,
) error {
ms.sessionKeys[hash] = &sessionKey{
json: json,
privKey: privKey,
cleanupTime: cleanupTime,
}
return nil
}
|
[
"func (s *Session) Add(key string, value interface{}) error {\n\tbts, err := json.Marshal(value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.values[key] = string(bts)\n\n\treturn nil\n}",
"func addSession(user *User) error {\n\trandBytes, err := scrypt.GenerateRandomBytes(32)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsessionId := string(randBytes)\n\t// TODO: store more than the username\n\terr = rd.Set(\"session:\"+sessionId, user.Username, sessionTimeout).Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\tuser.sessionId = url.QueryEscape(sessionId)\n\treturn nil\n}",
"func (a *LocalKeyAgent) addKey(key *Key) error {\n\tif key == nil {\n\t\treturn trace.BadParameter(\"key is nil\")\n\t}\n\tif key.ProxyHost == \"\" {\n\t\tkey.ProxyHost = a.proxyHost\n\t}\n\tif key.Username == \"\" {\n\t\tkey.Username = a.username\n\t}\n\n\t// In order to prevent unrelated key data to be left over after the new\n\t// key is added, delete any already stored key with the same index if their\n\t// RSA private keys do not match.\n\tstoredKey, err := a.keyStore.GetKey(key.KeyIndex)\n\tif err != nil {\n\t\tif !trace.IsNotFound(err) {\n\t\t\treturn trace.Wrap(err)\n\t\t}\n\t} else {\n\t\tif subtle.ConstantTimeCompare(storedKey.Priv, key.Priv) == 0 {\n\t\t\ta.log.Debugf(\"Deleting obsolete stored key with index %+v.\", storedKey.KeyIndex)\n\t\t\tif err := a.keyStore.DeleteKey(storedKey.KeyIndex); err != nil {\n\t\t\t\treturn trace.Wrap(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Save the new key to the keystore (usually into ~/.tsh).\n\tif err := a.keyStore.AddKey(key); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}",
"func (s *Server) addSession(sessionIdentifier string, listener pb.Iris_ListenServer) (*Session, error) {\n\ts.initialize()\n\n\ts.sessionsMutex.Lock()\n\tdefer s.sessionsMutex.Unlock()\n\n\tif s.sessions == nil {\n\t\ts.sessions = make(map[string]*Session)\n\t}\n\n\tsession := &Session{ID: sessionIdentifier, Listener: listener}\n\ts.sessions[sessionIdentifier] = session\n\n\treturn session, nil\n}",
"func (l *LNCSessionsStore) AddSession(ctx context.Context,\n\tsession *lnc.Session) error {\n\n\tif session.LocalStaticPrivKey == nil {\n\t\treturn fmt.Errorf(\"local static private key is required\")\n\t}\n\n\tlocalPrivKey := session.LocalStaticPrivKey.Serialize()\n\tcreatedAt := l.clock.Now().UTC().Truncate(time.Microsecond)\n\n\tvar writeTxOpts LNCSessionsDBTxOptions\n\terr := l.db.ExecTx(ctx, &writeTxOpts, func(tx LNCSessionsDB) error {\n\t\tparams := sqlc.InsertSessionParams{\n\t\t\tPassphraseWords: session.PassphraseWords,\n\t\t\tPassphraseEntropy: session.PassphraseEntropy,\n\t\t\tLocalStaticPrivKey: localPrivKey,\n\t\t\tMailboxAddr: session.MailboxAddr,\n\t\t\tCreatedAt: createdAt,\n\t\t\tDevServer: session.DevServer,\n\t\t}\n\n\t\treturn tx.InsertSession(ctx, params)\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to insert new session: %v\", err)\n\t}\n\n\tsession.CreatedAt = createdAt\n\n\treturn nil\n}",
"func (gorilla Gorilla) Add(w http.ResponseWriter, req *http.Request, key string, value interface{}) error {\n\tdefer gorilla.saveSession(w, req)\n\n\tsession, err := gorilla.getSession(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif str, ok := value.(string); ok {\n\t\tsession.Values[key] = str\n\t} else {\n\t\tresult, _ := json.Marshal(value)\n\t\tsession.Values[key] = string(result)\n\t}\n\n\treturn nil\n}",
"func (i service) AddKey(ctx context.Context, key id.KeyDID) error {\n\tDID, err := NewDIDFromContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontract, opts, err := i.prepareTransaction(ctx, DID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Info(\"Add key to identity contract %s\", DID.ToAddress().String())\n\ttxID, done, err := i.txManager.ExecuteWithinTX(context.Background(), DID, transactions.NilTxID(), \"Check TX for add key\",\n\t\ti.ethereumTX(opts, contract.AddKey, key.GetKey(), key.GetPurpose(), key.GetType()))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tisDone := <-done\n\t// non async task\n\tif !isDone {\n\t\treturn errors.New(\"add key TX failed: txID:%s\", txID.String())\n\n\t}\n\treturn nil\n\n}",
"func (s *SessionStore) Add(session *Session) {\n\tfmt.Println(session.AccessToken)\n\n\ts.atM.Lock()\n\ts.accessTokens[session.AccessToken] = session\n\ts.atM.Unlock()\n\n\ts.rtM.Lock()\n\ts.refreshTokens[session.RefreshToken] = session\n\ts.rtM.Unlock()\n}",
"func (s *SharemeService) Add(c *gae.Context, session *Session, key string) (stat Share) {\n\tstat = s.Stat(c, key)\n\tif stat.IsError() {\n\t\treturn\n\t}\n\tsession.Set(fmt.Sprintf(\"%s%s\", KeySessionPrefix, key), stat.Name)\n\treturn\n}",
"func (ctx *MqttSrvContext) AddSession(fd int, s interface{}) {\n\tctx.Clock.Lock()\n\tctx.Connections[fd] = s\n\tctx.Clock.Unlock()\n}",
"func (m *SessionManager) Add(s *Session) {\n\tif s == nil {\n\t\treturn\n\t}\n\tm.Sessions.Store(s.ID(), s)\n}",
"func (mySession *Session) SessionKey() (param string) {\n\treturn mySession.SessionKeyvar\n}",
"func (ks *KeyStore) Add(privateKey *rsa.PrivateKey, kid string) {\n\tks.mu.Lock()\n\tdefer ks.mu.Unlock()\n\n\tks.store[kid] = privateKey\n}",
"func (s ServiceClientWrapper) AddKey(name string, password string) (addr string, mnemonic string, err error) {\n\treturn s.ServiceClient.Insert(name, password)\n}",
"func (ms *MemStore) SessionKey() string {\n\treturn ms.sessionKey\n}",
"func (a *Account) SessionKey() *big.Int {\n\tif a.sessionKey == nil {\n\t\ta.sessionKey, _ = new(big.Int).SetString(a.SessionKeyStr, 16)\n\t}\n\n\treturn a.sessionKey\n}",
"func (sm *SessionManager) AddSession(s Session) {\n\tif sm == nil {\n\t\treturn\n\t}\n\tsm.mu.Lock()\n\tdefer sm.mu.Unlock()\n\tsm.mu.sessionsByID[s.GetUUIDString()] = s\n\t_, ok := sm.mu.sessionsByTenant[s.GetTenantName()]\n\tif !ok {\n\t\tsm.mu.sessionsByTenant[s.GetTenantName()] = make(map[Session]struct{})\n\t}\n\tsm.mu.sessionsByTenant[s.GetTenantName()][s] = struct{}{}\n}",
"func AddKey(key * Key) {\n\tKeys = append(Keys, *key)\n\tSaveDatabase(Keys, \"keys\")\n}",
"func (p *Player) AddKey(key KeySet) {\n\tp.Keys |= key\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
DelPrivSessionKey implemented in memory.
|
func (ms *MemStore) DelPrivSessionKey(hash string) error {
sk, ok := ms.sessionKeys[hash]
if !ok {
return nil
}
sk.privKey = ""
return nil
}
|
[
"func (privKey *YubiHsmPrivateKey) UnloadYubiHsmPrivKey() {\n\tif privKey.sessionMgr == nil {\n\t\treturn\n\t}\n\tprivKey.sessionMgr.Destroy()\n}",
"func (m *MemoryStorer) Del(key string) error {\n\tm.mut.Lock()\n\tdelete(m.sessions, key)\n\tm.mut.Unlock()\n\n\treturn nil\n}",
"func (y *Yubikey) Remove(profile string, val credentials.Value) error {\n\tvar err error\n\n\tsource, _ := y.Config.SourceProfile(profile)\n\n\tsess := session.New(&aws.Config{Region: aws.String(source.Region),\n\t\tCredentials: credentials.NewCredentials(&credentials.StaticProvider{Value: val}),\n\t})\n\n\tcurrentUserName, err := GetUsernameFromSession(sess)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Printf(\"Found access key ****************%s for user %s\",\n\t\tval.AccessKeyID[len(val.AccessKeyID)-4:],\n\t\tcurrentUserName)\n\n\tdevice, err := yubikey.New()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm, err := mfa.New(sess, device)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.Delete(y.Username); err != nil {\n\t\treturn err\n\t}\n\n\t// now delete the session we just used that was created using TOTP from the deleted yubikey\n\t// other sessions that used a TOTP from the yubikey may still be cached but there's not much\n\t// we can do about that\n\tkrs, err := NewKeyringSessions(y.Keyring, y.Config)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"unable to create keyring sessions\")\n\t}\n\n\tn, err := krs.Delete(profile)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"unable to delete keyring session for %s\", profile)\n\t}\n\n\tif n == 1 {\n\t\tlog.Printf(\"deleted session for '%s'\", profile)\n\t}\n\tif n > 1 {\n\t\t// this shouldn't be possible\n\t\tlog.Printf(\"deleted %d sessions for '%s' \", n, profile)\n\t}\n\n\treturn nil\n}",
"func (ms *MemStore) DelMessageKey(\n\tsessionKey string,\n\tsender bool,\n\tmsgIndex uint64,\n) error {\n\ts, ok := ms.sessions[sessionKey]\n\tif !ok {\n\t\treturn log.Errorf(\"memstore: no session found for %s\", sessionKey)\n\t}\n\tif msgIndex >= uint64(len(s.send)) {\n\t\treturn log.Error(\"memstore: message index out of bounds\")\n\t}\n\t// delete key\n\tif sender {\n\t\ts.send[msgIndex] = \"\"\n\t} else {\n\t\ts.recv[msgIndex] = \"\"\n\t}\n\treturn nil\n}",
"func (tg *TokenManager) RemovePrivateKey(kid string) {\n\tdelete(tg.keyMap, kid)\n}",
"func (s *onionStore) DeletePrivateKey() error {\n\t_, err := s.Client.Delete(context.Background(), onionPath)\n\treturn err\n}",
"func Remove(sessionKey string) {\n\tmu.Lock()\n\t// TODO: clean up internals?\n\tdelete(gameStore, sessionKey)\n\tmu.Unlock()\n}",
"func (keyDB *KeyDB) DelPrivateUID(msg *uid.Message) error {\n\tif _, err := keyDB.delPrivateUIDQuery.Exec(msg.JSON()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func DecryptPrivKey(entList openpgp.EntityList, pass string) {\n\tent := entList[0]\n\tpassphrase := []byte(pass)\n\n\t// Decrypt priv key\n\tif ent.PrivateKey != nil && ent.PrivateKey.Encrypted {\n\t\terr := ent.PrivateKey.Decrypt(passphrase)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Private key decryption failed.\")\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t// Decrypt all subkeys\n\tfor _, subkey := range ent.Subkeys {\n\t\tif subkey.PrivateKey != nil && subkey.PrivateKey.Encrypted {\n\t\t\terr := subkey.PrivateKey.Decrypt(passphrase)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"Subkey decryption failed.\")\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}",
"func DecryptPrivKey(data []byte, password string) (crypto.PrivateKey, error) {\n\tvar plain []byte\n\tif password != \"\" {\n\t\t// Set up the crypto stuff\n\t\thash := crypto.SHA256.New()\n\t\tif _, err := hash.Write([]byte(password)); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"unable to hash: %w\", err)\n\t\t}\n\t\thashPW := hash.Sum(nil)\n\t\taes, err := aes.NewCipher(hashPW)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\taesGCM, err := cipher.NewGCM(aes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnonceSize := aesGCM.NonceSize()\n\n\t\tnonce, ciphertext := data[:nonceSize], data[nonceSize:]\n\t\tplain, err = aesGCM.Open(nil, nonce, ciphertext, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tplain = data\n\t}\n\n\tkey, err := parsePrivateKey(plain)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn key, nil\n}",
"func (privCode *PrivChaincode) Del(stub shim.ChaincodeStubInterface, params []string) peer.Response {\n\t// Check for args count MUST be done - not being done here for clarity\n\tCollectionName := params[0]\n\n\terr := stub.DelPrivateData(CollectionName, \"token\")\n\tif err != nil {\n\t\treturn shim.Error(\"Error=\" + err.Error())\n\t}\n\n\treturn shim.Success([]byte(\"true\"))\n}",
"func (o *Gojwt) SetPrivKeyPath(path string)(){\n o.privKeyPath = path\n}",
"func (d *Dao) DelPKGCache(c context.Context, mid int64) (err error) {\n\tkey := _pendantPKG + strconv.FormatInt(mid, 10)\n\tconn := d.redis.Get(c)\n\tdefer conn.Close()\n\tif err = conn.Send(\"DEL\", key); err != nil {\n\t\tlog.Error(\"conn.Send(DEL, %s) error(%v)\", key, err)\n\t\treturn\n\t}\n\treturn\n}",
"func (key Key) ToPrivKey() cryptotypes.PrivKey {\n\treturn key.k\n}",
"func (r *RPCKeyRing) DerivePrivKey(_ keychain.KeyDescriptor) (*btcec.PrivateKey,\n\terror) {\n\n\t// This operation is not supported with remote signing. There should be\n\t// no need for invoking this method unless a channel backup (SCB) file\n\t// for pre-0.13.0 channels are attempted to be restored. In that case\n\t// it is recommended to restore the channels using a node with the full\n\t// seed available.\n\treturn nil, ErrRemoteSigningPrivateKeyNotAvailable\n}",
"func (system *System) RevokeSystemKeyPair() error {\n\tdb := database.GetGORMDbConnection()\n\tdefer database.Close(db)\n\n\tvar encryptionKey EncryptionKey\n\n\terr := db.Where(\"system_id = ?\", system.ID).Find(&encryptionKey).Error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = db.Delete(&encryptionKey).Error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func GenPrivKey() PrivKey {\n\treturn genPrivKey(crypto.CReader())\n}",
"func (system *System) RevokeSystemKeyPair() error {\n\tdb := GetGORMDbConnection()\n\tdefer Close(db)\n\n\tvar encryptionKey EncryptionKey\n\n\terr := db.Where(\"system_id = ?\", system.ID).Find(&encryptionKey).Error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = db.Delete(&encryptionKey).Error\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *memoryCache) Del(key string) {\n\tdelete(c.data, key)\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
CleanupSessionKeys implemented in memory.
|
func (ms *MemStore) CleanupSessionKeys(t uint64) error {
var oldKeys []string
for hash, sk := range ms.sessionKeys {
if sk.cleanupTime < t {
oldKeys = append(oldKeys, hash)
}
}
for _, hash := range oldKeys {
delete(ms.sessionKeys, hash)
}
return nil
}
|
[
"func SessionCleanup() {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(SessionManager.SessionCleanupTime * time.Minute):\n\t\t\tSessionManager.ReqSessionMem <- 1 // ask to access the shared mem, blocks until granted\n\t\t\t<-SessionManager.ReqSessionMemAck // make sure we got it\n\t\t\tss := make(map[string]*Session, 0) // here's the new Session list\n\t\t\tn := 0 // total number removed\n\t\t\tfor k, v := range Sessions { // look at every Session\n\t\t\t\tif time.Now().After(v.Expire) { // if it's still active...\n\t\t\t\t\tn++ // removed another\n\t\t\t\t} else {\n\t\t\t\t\tss[k] = v // ...copy it to the new list\n\t\t\t\t}\n\t\t\t}\n\t\t\tSessions = ss // set the new list\n\t\t\tSessionManager.ReqSessionMemAck <- 1 // tell SessionDispatcher we're done with the data\n\t\t\t//fmt.Printf(\"SessionCleanup completed. %d removed. Current Session list size = %d\\n\", n, len(Sessions))\n\t\t}\n\t}\n}",
"func Clean() {\n\tfor i := 0; i < len(sessions); i++ {\n\t\tif time.Now().After(sessions[i].Expires) {\n\t\t\tRemove(sessions[i].ID)\n\t\t\ti--\n\t\t}\n\t}\n}",
"func (conn StorageConn) CleanupSessions(wsf WebSessionFactory) error {\n\tnow := time.Now()\n\treturn conn.MultiExec([]SQLQuery{{\n\t\tSQL: \"UPDATE secrets.sessions SET csrf = NULL, csrf_date = NULL WHERE csrf_date < ?\",\n\t\tArgs: []Any{\n\t\t\tnow.Add(-wsf.MaxCSRFAge).Unix()},\n\t}, {\n\t\tSQL: \"DELETE FROM secrets.sessions WHERE updated < ? OR created < ?\",\n\t\tArgs: []Any{\n\t\t\tnow.Add(-wsf.MaxAge).Unix(),\n\t\t\tnow.Add(-wsf.MaxUpdateAge).Unix()},\n\t}})\n}",
"func (b *BaseHandler) CleanSession() {\n\tb.sessionStore.Clean(b)\n}",
"func (u *CryptohomeClient) CleanupSession(ctx context.Context, authSessionID string) error {\n\t// Kill the AuthSession.\n\tif _, err := u.binary.invalidateAuthSession(ctx, authSessionID); err != nil {\n\t\treturn errors.Wrap(err, \"failed to invaldiate AuthSession\")\n\t}\n\t// Clean up obsolete state, in case there's any.\n\tif err := u.UnmountAll(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmount vaults for cleanup\")\n\t}\n\treturn nil\n}",
"func (sess Session) Clear() {\n\tfor k, _ := range sess {\n\t\tdelete(sess, k)\n\t}\n}",
"func (c *Client) cleanup() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tfor key, rt := range c.regTokens {\n\t\tif rt.GetExpiresAt().Before(time.Now()) {\n\t\t\tdelete(c.regTokens, key)\n\t\t}\n\t}\n}",
"func (m *MemoryStorer) Clean() {\n\tt := time.Now().UTC()\n\tm.mut.Lock()\n\tfor id, session := range m.sessions {\n\t\tif t.After(session.expires) {\n\t\t\tdelete(m.sessions, id)\n\t\t}\n\t}\n\tm.mut.Unlock()\n}",
"func (h CGHandler) Cleanup(sess sarama.ConsumerGroupSession) error {\n\t/* For Future TroubleShooting or Debugging options:\n\tfor t, parts := range sess.Claims() {\n\t\tfor _, p := range parts {\n\t\t\tfmt.Println(h.cg.Metadata.Members[t][p], \"> Topic:\", t, \">\", p)\n\t\t}\n\t}\n\t*/\n\treturn nil\n}",
"func PurgeSessions() {\n\tsessions.Lock()\n\tdefer sessions.Unlock()\n\n\t// Update all sessions in the database.\n\tfor id, session := range sessions.sessions {\n\t\tPersistence.SaveSession(id, session)\n\t\t// We only do this to update the last access time. Errors are not that\n\t\t// bad.\n\t}\n\n\tsessions.sessions = make(map[string]*Session, MaxSessionCacheSize)\n}",
"func (s *Session) Cleanup() error {\n\tfor _, rule := range s.inputRules {\n\t\terr := s.ipt.Delete(s.Table, s.InputChain, rule...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, rule := range s.outputRules {\n\t\terr := s.ipt.Delete(s.Table, s.OutputChain, rule...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (this *SessionStorage) StopClearingSessions() {\n this.done<-true\n}",
"func (pair *certificateKeyPair) cleanup() {\n\tif pair.certificatePath != \"\" {\n\t\t// Delete the old certificate file.\n\t\tif err := os.Remove(pair.certificatePath); err != nil && !os.IsNotExist(err) {\n\t\t\tklog.ErrorS(err, \"Failed to delete old certificate\", \"file\", pair.certificatePath)\n\t\t}\n\t}\n\tif pair.privateKeyPath != \"\" {\n\t\t// Delete the old private key file.\n\t\tif err := os.Remove(pair.privateKeyPath); err != nil && !os.IsNotExist(err) {\n\t\t\tklog.ErrorS(err, \"Failed to delete old private key\", \"file\", pair.privateKeyPath)\n\t\t}\n\t}\n}",
"func (pm *pageManager) cleanup(){\n\tsession := pm.Session.Copy()\n\tdefer session.Close()\n\tc := session.DB(config.Values.MgoDBName).C(config.Values.MgoCollectionName)\n\n\texpiredPages := bson.M{\"expiresAt\": bson.M{\"$lt\": time.Now()}}\n\tvar expiredPageIDs []struct{ Id string `bson:\"pageId\"` }\n\tif err := c.Find(expiredPages).Select(bson.M{\"pageId\": 1}).All(&expiredPageIDs); err != nil {\n\t\tlog.Print(\"err getting expired pages: \\n\", err)\n\t\treturn\n\t}\n\n\tif len(expiredPageIDs) == 0 {\n\t\treturn\n\t}\n\n\tc.RemoveAll(expiredPages)\n\tfor idx := range expiredPageIDs {\n\t\tos.RemoveAll(filepath.Join(config.Values.UploadDir, expiredPageIDs[idx].Id))\n\t}\n}",
"func Remove(sessionKey string) {\n\tmu.Lock()\n\t// TODO: clean up internals?\n\tdelete(gameStore, sessionKey)\n\tmu.Unlock()\n}",
"func CleanupSessions(path string, age time.Duration) error {\n\tfsMutex.Lock()\n\tdefer fsMutex.Unlock()\n\n\tfiles, err := ioutil.ReadDir(path)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fi := range files {\n\t\tif age > 0 {\n\t\t\tmin := time.Now().Add(-age).UnixNano()\n\t\t\tif fi.ModTime().UnixNano() >= min {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif err := os.Remove(filepath.Join(path, fi.Name())); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (s *CacheLastPasswordSent) cleanup() {\n\ttickEvery := time.NewTicker(1 * time.Minute)\n\tdefer tickEvery.Stop()\n\n\tclearOutAfter := time.Duration(10 * time.Minute)\n\nLoop:\n\tfor {\n\t\tselect {\n\t\tcase <-s.stop:\n\t\t\tbreak Loop\n\t\tcase <-tickEvery.C:\n\t\t}\n\n\t\ts.mu.Lock()\n\t\tnow := time.Now().UTC()\n\t\tfor key, t := range s.data {\n\t\t\tif now.Sub(t) >= clearOutAfter {\n\t\t\t\tdelete(s.data, key)\n\t\t\t}\n\t\t}\n\t\ts.mu.Unlock()\n\t}\n}",
"func (k *Keys) Clean() {\n\tif k != nil {\n\t\tif k.Admin != nil {\n\t\t\tk.Admin.Clean()\n\t\t\tk.Admin = nil\n\t\t}\n\t\tif k.Server != nil {\n\t\t\tk.Server.Clean()\n\t\t\tk.Server = nil\n\t\t}\n\t\tk.Nonce = nil\n\t}\n}",
"func (s *Sessions) CleanExpired() {\n\tsessForKill := []string{}\n\n\ts.RLock()\n\tfor k, v := range s.sessions {\n\t\t// if time.Now().Unix()-v.LastActivity >= int64(cfg.SessionLifeTime) {\n\t\t// \tdelete(s.sessions, k)\n\t\t// }\n\t\tif time.Now().After(time.Unix(v.LastActivity, 0).Add(time.Duration(cfg.SessionLifeTime) * time.Second)) {\n\t\t\tsessForKill = append(sessForKill, k)\n\t\t}\n\t}\n\ts.RUnlock()\n\n\tfor _, sessID := range sessForKill {\n\t\ts.DelByID(sessID)\n\t}\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
NewDatapoint creates a new Datapoint, inserts it to the database and returns it
|
func NewDatapoint(datasetID int64, imageURL string) (dp *Datapoint, err error) {
var id int64
err = DB.QueryRow("INSERT INTO datapoint (dataset_id, image_url) VALUES ($1, $2) RETURNING id", datasetID, imageURL).Scan(&id)
if err != nil {
return // something went wrong! lets get out of here!
}
//blank space for readability
dp = &Datapoint{
ID: id,
DatasetID: datasetID,
ImageURL: imageURL,
}
return
}
|
[
"func (db *PostgresDatapointRepo) CreateDatapoint(dp earthworks.Datapoint) (earthworks.Datapoint, error) {\n\tquery := `INSERT INTO datapoint (location) VALUES ($1) RETURNING id`\n\tcreated := earthworks.Datapoint{}\n\terr := db.Get(&created, query, wkt.MarshalString(dp.Location))\n\tif err != nil {\n\t\treturn earthworks.Datapoint{}, err\n\t}\n\n\treturn created, nil\n}",
"func newDatapoint(n string, v float32, t time.Time) *datapoint {\n\treturn &datapoint{\n\t\tname: n,\n\t\tvalue: v,\n\t\ttime: t,\n\t}\n}",
"func newDataPoint() *dataPoint {\n\treturn &dataPoint{}\n}",
"func (ctrl *SensorController) CreateDatapoints(res http.ResponseWriter, req *http.Request) {\n\tsensorID := mux.Vars(req)[\"id\"]\n\tdatapoints := make([]model.Datapoint, 0)\n\tdec, _ := ioutil.ReadAll(req.Body)\n\terr := json.Unmarshal(dec, &datapoints)\n\n\t// save datapoints\n\tfor index := range datapoints {\n\t\tdatapoints[index].SensorID = bson.ObjectIdHex(sensorID)\n\t}\n\n\tdatapoints, err = model.BulkSaveDatapoints(ctrl.db, datapoints)\n\tif err != nil {\n\t\tctrl.r.JSON(res, http.StatusInternalServerError, err)\n\t\tlog.Fatal(err)\n\t}\n\n\tctrl.r.JSON(res, http.StatusCreated, datapoints)\n}",
"func (a *Adapter) AddNewDataPoint(ma MetricAdapter, name string, tags map[string]string, fields map[string]interface{}, t time.Time) error {\n\tif a.Verbose {\n\t\tdumpDataPoint(name, tags, fields, t)\n\t}\n\treturn ma.AddDataPoint(name, tags, fields, t)\n}",
"func NewDatapoint(data interface{}) *Datapoint {\n\treturn &Datapoint{\n\t\tTimestamp: float64(time.Now().UnixNano()) * 1e-9,\n\t\tData: data,\n\t}\n}",
"func NewDatapoint(data interface{}, points []float64) *Datapoint {\n\tif points == nil {\n\t\tpoints = []float64{}\n\t}\n\tf := make([]float64, len(points), len(points))\n\tcopy(f, points)\n\td := Datapoint{\n\t\tdata: data,\n\t\tset: f,\n\t}\n\treturn &d\n}",
"func (bp *BatchPoints) AddPoint(measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time) error {\n\tbp.mtx.Lock()\n\tdefer bp.mtx.Unlock()\n\tpt, err := influx_client.NewPoint(measurement, tags, fields, ts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbp.bp.AddPoint(pt)\n\treturn nil\n}",
"func (ts *Timeseries) AddNewPoint(v float64, x interface{}) error {\n\tts.Lock()\n\tdefer ts.Unlock() // unlocks at the end\n\n\tswitch T := x.(type) {\n\tcase int64:\n\t\tts.XY[T] = v\n\tcase time.Time:\n\t\tts.XY[T.UnixNano()] = v\n\tcase int:\n\t\tts.XY[int64(T)] = v\n\tdefault:\n\t\treturn fmt.Errorf(\"Adding point not possible\")\n\t}\n\n\treturn nil\n}",
"func (repo *PostgresRepo) CreateInstrument(instr earthworks.InstrumentCreateRequest, project int) (earthworks.Instrument, error) {\n\n\t// If a datapoint wasn't supplied, create one.\n\t// If a location also wasn't supplied, it will be created at the default location (0, 0?)\n\tif !instr.Datapoint.Valid {\n\t\tprojectsRepo := projectsRepo.NewDatapointRepo(repo.conn)\n\t\tnewDP := earthworks.Datapoint{Location: orb.Point{instr.Location[0], instr.Location[1]}}\n\t\tcreatedDP, err := projectsRepo.CreateDatapoint(newDP)\n\t\tif err != nil {\n\t\t\treturn earthworks.Instrument{}, err\n\t\t}\n\t\tinstr.Datapoint = createdDP.ID\n\t}\n\n\tquery := `\n\t\tINSERT INTO instrument (project, name, device_id, datapoint, field_eng, install_date)\n\t\tVALUES ($1, $2, $3, $4, $5, $6) RETURNING id, project, name, device_id, datapoint, field_eng, install_date\n\t`\n\n\tcreated := earthworks.Instrument{}\n\terr := repo.conn.Get(\n\t\t&created,\n\t\tquery,\n\t\tproject,\n\t\tinstr.Name,\n\t\tinstr.DeviceID,\n\t\tinstr.Datapoint,\n\t\tinstr.FieldEng,\n\t\tinstr.InstallDate,\n\t)\n\tif err != nil {\n\t\treturn earthworks.Instrument{}, err\n\t}\n\n\treturn created, nil\n}",
"func AddPoint(db *gorm.DB, item *model.Point, DeviceUUID string) (*model.Point, error) {\n\tdevice := new(model.Device)\n\tif err := db.Where(\"uuid = ? \", DeviceUUID).First(&device).Error; err != nil {\n\t\treturn nil, &NotFoundError{deviceName, DeviceUUID, nil, \"not found\"}\n\t}\n\tif err := db.Create(&item).Error; err != nil {\n\t\treturn item, &NotFoundError{deviceName, \"na\", err, \"not found\"}\n\t}\n\treturn item, nil\n}",
"func (s *MetricsService) AddPoint(id int, value int, timestamp string) (*Point, *Response, error) {\n\tu := fmt.Sprintf(\"api/v1/metrics/%d/points\", id)\n\tv := new(metricPointAPIResponse)\n\n\tp := struct {\n\t\tValue int `json:\"value\"`\n\t\tTimestamp string `json:\"timestamp\"`\n\t}{\n\t\tValue: value,\n\t\tTimestamp: timestamp,\n\t}\n\n\tresp, err := s.client.Call(\"POST\", u, p, v)\n\treturn v.Data, resp, err\n}",
"func NewDataPoint(name string, value float64, unit Unit, dimensions ...Dimension) Point {\n\tp := Point{Name: name, Value: value, Unit: unit, Timestamp: time.Now().UTC()}\n\tp.AddDimensions(dimensions...)\n\treturn p\n}",
"func NewPoint(latitude float64, longitude float64) *Point {\n return &Point{latitude: latitude, longitude: longitude}\n}",
"func NewPoint(x, y float64) *Point { return &Point{x, y} }",
"func create_point(x float64, y float64) Point{\n\tp1 := Point{ x:x, y:y,}\n\treturn p1\n}",
"func (dal *DataAccessLayer) CreateTimeSeriesDatumTable(dropExistingTable bool) {\n if dropExistingTable {\n drop_stmt := \"DROP TABLE data;\"\n results, err := dal.db.Exec(drop_stmt)\n if err != nil {\n fmt.Println(\"TimeSeriesDatum table dropped with error:\", results, err)\n } else {\n fmt.Println(\"TimeSeriesDatum table dropped and re-created\")\n }\n }\n\n // Special thanks:\n // * http://www.postgresqltutorial.com/postgresql-create-table/\n // * https://www.postgresql.org/docs/9.5/datatype.html\n\n stmt := `CREATE TABLE data (\n id bigserial PRIMARY KEY,\n tenant_id bigint NOT NULL,\n sensor_id BIGINT NOT NULL,\n value FLOAT NULL,\n timestamp BIGINT NOT NULL,\n unique (tenant_id, sensor_id, timestamp)\n );`\n results, err := dal.db.Exec(stmt)\n if err != nil {\n fmt.Println(\"TimeSeriesDatum Model\", results, err)\n }\n return\n}",
"func PointNew(x, y, z float64) Tuple {\n\tvar t Tuple\n\tt.X = x\n\tt.Y = y\n\tt.Z = z\n\tt.W = 1.0\n\treturn t\n}",
"func (d *Influxdb) AddData(tags map[string]string, fields map[string]interface{}) error {\n\tzap.L().Debug(\"Calling AddData\", zap.Any(\"tags\", tags), zap.Any(\"fields\", fields))\n\tbp, err := client.NewBatchPoints(client.BatchPointsConfig{\n\t\tDatabase: d.database,\n\t\tPrecision: \"us\",\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Couldn't add data, error creating batchpoint: %s\", err)\n\t}\n\n\tif tags[EventName] == EventTypeContainerStart || tags[EventName] == EventTypeContainerStop {\n\t\tpt, err := client.NewPoint(EventTypeContainer, tags, fields, time.Now())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Couldn't add ContainerEvent: %s\", err)\n\t\t}\n\t\tbp.AddPoint(pt)\n\t} else if tags[EventName] == EventTypeFlow {\n\t\tpt, err := client.NewPoint(EventTypeFlow, tags, fields, time.Now())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Couldn't add FlowEvent: %s\", err)\n\t\t}\n\t\tbp.AddPoint(pt)\n\t}\n\tif err := d.httpClient.Write(bp); err != nil {\n\t\treturn fmt.Errorf(\"Couldn't add data: %s\", err)\n\t}\n\n\treturn nil\n}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.