Search is not available for this dataset
SHA256
string | Repository
string | File Name
string | File path in Repository
string | Code
string | Code Commit hash
string | File Path for Unit Test
string | Unit Test - (Ground Truth)
string | Unit Test Commit hash
string |
|---|---|---|---|---|---|---|---|---|
40ee25c9bcd565bd2b85d741009bf358defd75f2bdbb26cd90c263fc44f13600
|
golang/go
|
aes.go
|
src/crypto/internal/fips/aes/aes.go
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package aes
import (
"crypto/internal/fips"
"crypto/internal/fips/alias"
"strconv"
)
// BlockSize is the AES block size in bytes.
// AES always operates on 128-bit (16-byte) blocks regardless of key size.
const BlockSize = 16
// A Block is an instance of AES using a particular key.
// It is safe for concurrent use.
type Block struct {
// block is the per-architecture implementation type (declared elsewhere
// in this package) that carries the expanded key material.
block
}
// blockExpanded is the block type used for all architectures except s390x,
// which feeds the raw key directly to its instructions.
type blockExpanded struct {
// rounds is the number of AES rounds: 10, 12, or 14 for AES-128/192/256.
rounds int
// Round keys, where only the first (rounds + 1) × (128 ÷ 32) words are used.
// enc holds the encryption schedule, dec the decryption (inverse) schedule.
enc [60]uint32
dec [60]uint32
}
// Key sizes in bytes and round counts for the three AES variants.
const (
// AES-128 has 128-bit keys, 10 rounds, and uses 11 128-bit round keys
// (11×128÷32 = 44 32-bit words).
// AES-192 has 192-bit keys, 12 rounds, and uses 13 128-bit round keys
// (13×128÷32 = 52 32-bit words).
// AES-256 has 256-bit keys, 14 rounds, and uses 15 128-bit round keys
// (15×128÷32 = 60 32-bit words).
aes128KeySize = 16
aes192KeySize = 24
aes256KeySize = 32
aes128Rounds = 10
aes192Rounds = 12
aes256Rounds = 14
)
// roundKeysSize returns the number of uint32 words of b.enc or b.dec that
// are actually used for the configured number of rounds.
func (b *blockExpanded) roundKeysSize() int {
return (b.rounds + 1) * (128 / 32)
}
// KeySizeError reports an invalid AES key length passed to [New].
// Its integer value is the rejected length in bytes.
type KeySizeError int

// Error implements the error interface.
func (k KeySizeError) Error() string {
	size := strconv.Itoa(int(k))
	return "crypto/aes: invalid key size " + size
}
// New creates and returns a new [cipher.Block] implementation.
// The key argument should be the AES key, either 16, 24, or 32 bytes to select
// AES-128, AES-192, or AES-256.
func New(key []byte) (*Block, error) {
// This call is outlined so the Block allocation can happen on the parent
// stack frame (see newOutlined's go:noinline comment).
return newOutlined(&Block{}, key)
}
// newOutlined is marked go:noinline to avoid it inlining into New, and making New
// too complex to inline itself.
//
//go:noinline
func newOutlined(b *Block, key []byte) (*Block, error) {
// Reject any key length other than the three AES variants.
switch len(key) {
case aes128KeySize, aes192KeySize, aes256KeySize:
default:
return nil, KeySizeError(len(key))
}
return newBlock(b, key), nil
}
// newBlockExpanded sets c.rounds from the key length and expands the key
// schedule using the generic (pure Go) implementation. The key length must
// already have been validated by the caller; an unexpected length would
// leave rounds at its zero value.
func newBlockExpanded(c *blockExpanded, key []byte) {
switch len(key) {
case aes128KeySize:
c.rounds = aes128Rounds
case aes192KeySize:
c.rounds = aes192Rounds
case aes256KeySize:
c.rounds = aes256Rounds
}
expandKeyGeneric(c, key)
}
func (c *Block) BlockSize() int { return BlockSize }
// Encrypt encrypts the first 16-byte block of src into dst.
// dst and src must each be at least BlockSize bytes, and the two block
// prefixes must either be identical or non-overlapping.
func (c *Block) Encrypt(dst, src []byte) {
if len(src) < BlockSize {
panic("crypto/aes: input not full block")
}
if len(dst) < BlockSize {
panic("crypto/aes: output not full block")
}
// Exact aliasing (dst == src) is allowed; partial overlap is not.
if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
panic("crypto/aes: invalid buffer overlap")
}
// Record this as a FIPS-approved operation before dispatching to the
// platform-specific block implementation.
fips.RecordApproved()
encryptBlock(c, dst, src)
}
// Decrypt decrypts the first 16-byte block of src into dst.
// dst and src must each be at least BlockSize bytes, and the two block
// prefixes must either be identical or non-overlapping.
func (c *Block) Decrypt(dst, src []byte) {
if len(src) < BlockSize {
panic("crypto/aes: input not full block")
}
if len(dst) < BlockSize {
panic("crypto/aes: output not full block")
}
// Exact aliasing (dst == src) is allowed; partial overlap is not.
if alias.InexactOverlap(dst[:BlockSize], src[:BlockSize]) {
panic("crypto/aes: invalid buffer overlap")
}
// Record this as a FIPS-approved operation before dispatching to the
// platform-specific block implementation.
fips.RecordApproved()
decryptBlock(c, dst, src)
}
|
06eff26d0c559bba5921d1550dd115d1b6f00534
|
src/crypto/internal/fips/aes/aes_test.go
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package aes
import "testing"
// See const.go for overview of math here.
// Test that powx is initialized correctly.
// (Can adapt this code to generate it too.)
func TestPowx(t *testing.T) {
// p tracks successive powers of x in GF(2⁸): x⁰, x¹, x², …
p := 1
for i := 0; i < len(powx); i++ {
if powx[i] != byte(p) {
t.Errorf("powx[%d] = %#x, want %#x", i, powx[i], p)
}
p <<= 1
// Reduce modulo the AES field polynomial once the degree reaches 8.
if p&0x100 != 0 {
p ^= poly
}
}
}
// Multiply b and c as GF(2) polynomials modulo poly.
// This is the reference "shift-and-add" (Russian peasant) multiplication
// used to cross-check the precomputed tables.
func mul(b, c uint32) uint32 {
i := b
j := c
s := uint32(0)
for k := uint32(1); k < 0x100 && j != 0; k <<= 1 {
// Invariant: k == 1<<n, i == b * xⁿ
if j&k != 0 {
// s += i in GF(2); xor in binary
s ^= i
j ^= k // turn off bit to end loop early
}
// i *= x in GF(2) modulo the polynomial
i <<= 1
if i&0x100 != 0 {
i ^= poly
}
}
return s
}
// Test all mul inputs against bit-by-bit n² algorithm.
func TestMul(t *testing.T) {
for i := uint32(0); i < 256; i++ {
for j := uint32(0); j < 256; j++ {
// Multiply i, j bit by bit: the product of x^k and x^l is
// x^(k+l), looked up in the precomputed powx table.
s := uint8(0)
for k := uint(0); k < 8; k++ {
for l := uint(0); l < 8; l++ {
if i&(1<<k) != 0 && j&(1<<l) != 0 {
s ^= powx[k+l]
}
}
}
if x := mul(i, j); x != uint32(s) {
t.Fatalf("mul(%#x, %#x) = %#x, want %#x", i, j, x, s)
}
}
}
}
// Check that S-boxes are inverses of each other.
// They have more structure that we could test,
// but if this sanity check passes, we'll assume
// the cut and paste from the FIPS PDF worked.
func TestSboxes(t *testing.T) {
for i := 0; i < 256; i++ {
// sbox0 (forward) and sbox1 (inverse) must round-trip both ways.
if j := sbox0[sbox1[i]]; j != byte(i) {
t.Errorf("sbox0[sbox1[%#x]] = %#x", i, j)
}
if j := sbox1[sbox0[i]]; j != byte(i) {
t.Errorf("sbox1[sbox0[%#x]] = %#x", i, j)
}
}
}
// Test that encryption tables are correct.
// (Can adapt this code to generate them too.)
func TestTe(t *testing.T) {
for i := 0; i < 256; i++ {
s := uint32(sbox0[i])
s2 := mul(s, 2)
s3 := mul(s, 3)
w := s2<<24 | s<<16 | s<<8 | s3
te := [][256]uint32{te0, te1, te2, te3}
for j := 0; j < 4; j++ {
if x := te[j][i]; x != w {
t.Fatalf("te[%d][%d] = %#x, want %#x", j, i, x, w)
}
w = w<<24 | w>>8
}
}
}
// Test that decryption tables are correct.
// (Can adapt this code to generate them too.)
func TestTd(t *testing.T) {
for i := 0; i < 256; i++ {
s := uint32(sbox1[i])
s9 := mul(s, 0x9)
sb := mul(s, 0xb)
sd := mul(s, 0xd)
se := mul(s, 0xe)
w := se<<24 | s9<<16 | sd<<8 | sb
td := [][256]uint32{td0, td1, td2, td3}
for j := 0; j < 4; j++ {
if x := td[j][i]; x != w {
t.Fatalf("td[%d][%d] = %#x, want %#x", j, i, x, w)
}
w = w<<24 | w>>8
}
}
}
|
35046389135c44e0ef65ae6a64ed2f2f030fc492
|
9cd365387e94129bde899d8896b170712ce7eb67e365ff2b57821cc770aaf152
|
moby/moby
|
v2_metadata_service.go
|
distribution/metadata/v2_metadata_service.go
|
package metadata // import "github.com/docker/docker/distribution/metadata"
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/layer"
"github.com/opencontainers/go-digest"
)
// V2MetadataService maps layer IDs to a set of known metadata for
// the layer.
type V2MetadataService interface {
// GetMetadata returns all metadata entries recorded for diffID.
GetMetadata(diffID layer.DiffID) ([]V2Metadata, error)
// GetDiffID resolves a registry blob digest back to a layer DiffID.
GetDiffID(dgst digest.Digest) (layer.DiffID, error)
// Add associates metadata with diffID, dropping the oldest entry when
// the per-layer limit is exceeded.
Add(diffID layer.DiffID, metadata V2Metadata) error
// TagAndAdd stamps metadata with an HMAC derived from hmacKey before
// associating it with diffID.
TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error
// Remove disassociates a metadata entry from its layer.
Remove(metadata V2Metadata) error
}
// v2MetadataService implements V2MetadataService
type v2MetadataService struct {
// store is the backing key/value store; a nil store turns writes into
// no-ops and reads into errors (see the method implementations).
store Store
}
var _ V2MetadataService = &v2MetadataService{}
// V2Metadata contains the digest and source repository information for a layer.
type V2Metadata struct {
// Digest is the registry blob digest of the layer.
Digest digest.Digest
// SourceRepository is the repository the layer was pulled from or pushed to.
SourceRepository string
// HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching
// metadata entries accompanied by the same credentials without actually exposing them.
HMAC string
}
// CheckV2MetadataHMAC reports whether meta carries a valid HMAC computed
// with key over its digest and source repository. An entry without an HMAC
// matches only an empty key (and vice versa).
func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool {
	noTag, noKey := len(meta.HMAC) == 0, len(key) == 0
	if noTag || noKey {
		// Both absent is a match; exactly one absent is a mismatch.
		return noTag && noKey
	}
	stored, err := hex.DecodeString(meta.HMAC)
	if err != nil {
		// A malformed stored tag can never verify.
		return false
	}
	h := hmac.New(sha256.New, key)
	h.Write([]byte(meta.Digest))
	h.Write([]byte(meta.SourceRepository))
	return hmac.Equal(stored, h.Sum(nil))
}
// ComputeV2MetadataHMAC returns the hex-encoded HMAC-SHA256 of meta's digest
// and source repository under key. It returns "" when key is empty or meta
// is nil.
func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string {
	if meta == nil || len(key) == 0 {
		return ""
	}
	h := hmac.New(sha256.New, key)
	h.Write([]byte(meta.Digest))
	h.Write([]byte(meta.SourceRepository))
	return hex.EncodeToString(h.Sum(nil))
}
// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata
// entries. A nil authConfig yields a nil key (and hence untagged entries).
func ComputeV2MetadataHMACKey(authConfig *registry.AuthConfig) ([]byte, error) {
if authConfig == nil {
return nil, nil
}
// Only credential-bearing fields feed the key, so unrelated AuthConfig
// fields cannot change it.
key := authConfigKeyInput{
Username: authConfig.Username,
Password: authConfig.Password,
Auth: authConfig.Auth,
IdentityToken: authConfig.IdentityToken,
RegistryToken: authConfig.RegistryToken,
}
buf, err := json.Marshal(&key)
if err != nil {
return nil, err
}
// Digest the canonical JSON form to get a fixed-size, non-reversible key.
return []byte(digest.FromBytes(buf)), nil
}
// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for
// hmac key creation. The json tags define the canonical serialization that
// feeds ComputeV2MetadataHMACKey; changing them would invalidate stored HMACs.
type authConfigKeyInput struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
IdentityToken string `json:"identitytoken,omitempty"`
RegistryToken string `json:"registrytoken,omitempty"`
}
// maxMetadata is the number of metadata entries to keep per layer DiffID.
// Older entries are evicted FIFO by Add when the limit is exceeded.
const maxMetadata = 50
// NewV2MetadataService creates a new diff ID to v2 metadata mapping service.
// A nil store is allowed; it makes writes no-ops and reads fail.
func NewV2MetadataService(store Store) V2MetadataService {
return &v2MetadataService{
store: store,
}
}
// diffIDNamespace is the store namespace for DiffID → []V2Metadata entries.
func (serv *v2MetadataService) diffIDNamespace() string {
return "v2metadata-by-diffid"
}
// digestNamespace is the store namespace for digest → DiffID entries.
func (serv *v2MetadataService) digestNamespace() string {
return "diffid-by-digest"
}
// diffIDKey renders a DiffID as an "algorithm/hex" store key.
func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string {
return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Encoded()
}
// digestKey renders a digest as an "algorithm/hex" store key.
func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
return string(dgst.Algorithm()) + "/" + dgst.Encoded()
}
// GetMetadata finds the metadata associated with a layer DiffID.
// It returns an error when no store is configured, the entry is missing,
// or the stored JSON cannot be decoded.
func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
if serv.store == nil {
return nil, errors.New("no metadata storage")
}
jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
if err != nil {
return nil, err
}
var metadata []V2Metadata
if err := json.Unmarshal(jsonBytes, &metadata); err != nil {
return nil, err
}
return metadata, nil
}
// GetDiffID finds a layer DiffID from a digest.
// It returns an error when no store is configured or the digest is unknown.
func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
if serv.store == nil {
return layer.DiffID(""), errors.New("no metadata storage")
}
diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
if err != nil {
return layer.DiffID(""), err
}
return layer.DiffID(diffIDBytes), nil
}
// Add associates metadata with a layer DiffID. If too many metadata entries are
// present, the oldest one is dropped. Re-adding an existing entry moves it to
// the most-recent position.
func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
if serv.store == nil {
// Support a service which has no backend storage, in this case
// an add becomes a no-op.
// TODO: implement in memory storage
return nil
}
// A read failure (e.g. first add for this diffID) is treated as "no
// existing entries", not as a fatal error.
oldMetadata, err := serv.GetMetadata(diffID)
if err != nil {
oldMetadata = nil
}
newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1)
// Copy all other metadata to new slice
for _, oldMeta := range oldMetadata {
if oldMeta != metadata {
newMetadata = append(newMetadata, oldMeta)
}
}
// Append last so the newest entry is at the tail; the eviction below
// always drops from the head (oldest first).
newMetadata = append(newMetadata, metadata)
if len(newMetadata) > maxMetadata {
newMetadata = newMetadata[len(newMetadata)-maxMetadata:]
}
jsonBytes, err := json.Marshal(newMetadata)
if err != nil {
return err
}
// NOTE(review): the two Set calls below are not atomic — a failure between
// them leaves the forward map updated without the reverse map. Presumably
// acceptable for this cache-like store; confirm with callers.
err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
if err != nil {
return err
}
return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
}
// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer
// DiffID. If too many metadata entries are present, the oldest one is dropped.
// meta is received by value, so the caller's copy is not mutated.
func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error {
meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta)
return serv.Add(diffID, meta)
}
// Remove disassociates a metadata entry from a layer DiffID.
// The layer key is deleted entirely when the last entry is removed.
func (serv *v2MetadataService) Remove(metadata V2Metadata) error {
if serv.store == nil {
// Support a service which has no backend storage, in this case
// a remove becomes a no-op.
// TODO: implement in memory storage
return nil
}
// Resolve the owning DiffID through the reverse (digest) index.
diffID, err := serv.GetDiffID(metadata.Digest)
if err != nil {
return err
}
// As in Add, a failed read is treated as "no entries".
oldMetadata, err := serv.GetMetadata(diffID)
if err != nil {
oldMetadata = nil
}
newMetadata := make([]V2Metadata, 0, len(oldMetadata))
// Copy all other metadata to new slice
for _, oldMeta := range oldMetadata {
if oldMeta != metadata {
newMetadata = append(newMetadata, oldMeta)
}
}
if len(newMetadata) == 0 {
return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID))
}
jsonBytes, err := json.Marshal(newMetadata)
if err != nil {
return err
}
return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
}
|
bdb72da15700bdd9bfcf3f21d4e116d3b70c9813
|
distribution/metadata/v2_metadata_service_test.go
|
package metadata // import "github.com/docker/docker/distribution/metadata"
import (
"encoding/hex"
"math/rand"
"os"
"reflect"
"testing"
"github.com/docker/docker/layer"
"github.com/opencontainers/go-digest"
)
// TestV2MetadataService exercises the FS-backed metadata service end to end:
// adding entries (including overflow past the 50-entry cap), reading them
// back, nonexistent lookups, and overwriting the reverse digest index.
func TestV2MetadataService(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "blobsum-storage-service-test")
if err != nil {
t.Fatalf("could not create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
metadataStore, err := NewFSMetadataStore(tmpDir)
if err != nil {
t.Fatalf("could not create metadata store: %v", err)
}
V2MetadataService := NewV2MetadataService(metadataStore)
// 100 entries — twice the per-layer cap — to exercise eviction.
tooManyBlobSums := make([]V2Metadata, 100)
for i := range tooManyBlobSums {
randDigest := randomDigest()
tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
}
testVectors := []struct {
diffID layer.DiffID
metadata []V2Metadata
}{
{
diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
metadata: []V2Metadata{
{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
},
},
{
diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
metadata: []V2Metadata{
{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
},
},
{
diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
metadata: tooManyBlobSums,
},
}
// Set some associations
for _, vec := range testVectors {
for _, blobsum := range vec.metadata {
err := V2MetadataService.Add(vec.diffID, blobsum)
if err != nil {
t.Fatalf("error calling Set: %v", err)
}
}
}
// Check the correct values are read back
for _, vec := range testVectors {
metadata, err := V2MetadataService.GetMetadata(vec.diffID)
if err != nil {
t.Fatalf("error calling Get: %v", err)
}
// Only the newest 50 entries survive eviction (see maxMetadata).
expectedMetadataEntries := len(vec.metadata)
if expectedMetadataEntries > 50 {
expectedMetadataEntries = 50
}
if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
t.Fatal("Get returned incorrect layer ID")
}
}
// Test GetMetadata on a nonexistent entry
_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
if err == nil {
t.Fatal("expected error looking up nonexistent entry")
}
// Test GetDiffID on a nonexistent entry
_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
if err == nil {
t.Fatal("expected error looking up nonexistent entry")
}
// Overwrite one of the entries and read it back
err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
if err != nil {
t.Fatalf("error calling Add: %v", err)
}
// The reverse index must now point at the new (last-written) diffID.
diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
if err != nil {
t.Fatalf("error calling GetDiffID: %v", err)
}
if diffID != testVectors[1].diffID {
t.Fatal("GetDiffID returned incorrect diffID")
}
}
// randomDigest returns a syntactically valid, randomly generated sha256
// digest for use as test fixture data.
func randomDigest() digest.Digest {
	var raw [32]byte
	for i := range raw {
		raw[i] = byte(rand.Intn(256))
	}
	return digest.Digest("sha256:" + hex.EncodeToString(raw[:]))
}
|
94d0f0b9c3ebdfd2258c658db3e5ed2f3efb4300
|
8cb3908ceb17c405009542acbe562a5066a0f1484f89aca84f12af47160a1197
|
gohugoio/hugo
|
finder.go
|
identity/finder.go
|
// Copyright 2024 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package identity
import (
"fmt"
"sync"
"github.com/gohugoio/hugo/compare"
)
// NewFinder creates a new Finder.
// This is a thread safe implementation with a cache.
func NewFinder(cfg FinderConfig) *Finder {
return &Finder{cfg: cfg, answers: make(map[ManagerIdentity]FinderResult), seenFindOnce: make(map[Identity]bool)}
}
// searchIDPool recycles searchID scratch state across Contains calls to
// avoid re-allocating the seen map on every search.
var searchIDPool = sync.Pool{
New: func() interface{} {
return &searchID{seen: make(map[Manager]bool)}
},
}
// getSearchID returns a searchID from the pool; release it with putSearchID.
func getSearchID() *searchID {
return searchIDPool.Get().(*searchID)
}
// putSearchID zeroes all fields of sid (keeping the seen map's allocated
// buckets) and returns it to the pool.
func putSearchID(sid *searchID) {
sid.id = nil
sid.isDp = false
sid.isPeq = false
sid.hasEqer = false
sid.maxDepth = 0
sid.dp = nil
sid.peq = nil
sid.eqer = nil
// Empty the map in place so the allocation is reused by the next search.
for k := range sid.seen {
delete(sid.seen, k)
}
searchIDPool.Put(sid)
}
// Finder finds identities inside another.
// It is safe for concurrent use; the caches below are guarded by their
// respective mutexes.
type Finder struct {
cfg FinderConfig
// answers caches the outcome of (manager, identity) searches.
answers map[ManagerIdentity]FinderResult
muAnswers sync.RWMutex
// seenFindOnce records identities already reported once for
// FindFirstManagerIdentityProvider values.
seenFindOnce map[Identity]bool
muSeenFindOnce sync.RWMutex
}
// FinderResult is the outcome of a Finder search. Values above
// FinderNotFound (zero) indicate a match; callers often test r > 0.
type FinderResult int
const (
FinderNotFound FinderResult = iota
FinderFoundOneOfManyRepetition
FinderFoundOneOfMany
FinderFound
)
// Contains returns whether in contains id.
// maxDepth limits recursion through dependency managers; a negative value
// means unlimited.
func (f *Finder) Contains(id, in Identity, maxDepth int) FinderResult {
if id == Anonymous || in == Anonymous {
return FinderNotFound
}
// GenghisKhan matches everything except itself (see also doCheckOne).
if id == GenghisKhan && in == GenghisKhan {
return FinderNotFound
}
if id == GenghisKhan {
return FinderFound
}
if id == in {
return FinderFound
}
if id == nil || in == nil {
return FinderNotFound
}
var (
isDp bool
isPeq bool
dp IsProbablyDependentProvider
peq compare.ProbablyEqer
)
// Probabilistic interfaces are only consulted in non-exact mode.
if !f.cfg.Exact {
dp, isDp = id.(IsProbablyDependentProvider)
peq, isPeq = id.(compare.ProbablyEqer)
}
eqer, hasEqer := id.(compare.Eqer)
// Capture the interface checks once in pooled scratch state so the
// recursive search doesn't repeat the type assertions per node.
sid := getSearchID()
sid.id = id
sid.isDp = isDp
sid.isPeq = isPeq
sid.hasEqer = hasEqer
sid.dp = dp
sid.peq = peq
sid.eqer = eqer
sid.maxDepth = maxDepth
defer putSearchID(sid)
// First check "in" itself, then recurse into its dependency manager.
if r := f.checkOne(sid, in, 0); r > 0 {
return r
}
m := GetDependencyManager(in)
if m != nil {
if r := f.checkManager(sid, m, 0); r > 0 {
return r
}
}
return FinderNotFound
}
// checkMaxDepth enforces the configured depth limit. It returns a
// non-negative FinderResult when the search must stop at this level, or -1
// (an out-of-range sentinel) when the caller should continue.
func (f *Finder) checkMaxDepth(sid *searchID, level int) FinderResult {
if sid.maxDepth >= 0 && level > sid.maxDepth {
return FinderNotFound
}
if level > 100 {
// This should never happen, but some false positives are probably better than a panic.
if !f.cfg.Exact {
return FinderFound
}
panic("too many levels")
}
return -1
}
// checkManager searches manager m (and, transitively, its identities'
// managers) for sid.id, consulting and updating the answers cache.
func (f *Finder) checkManager(sid *searchID, m Manager, level int) FinderResult {
if r := f.checkMaxDepth(sid, level); r >= 0 {
return r
}
if m == nil {
return FinderNotFound
}
// Cycle guard: each manager is visited at most once per search.
if sid.seen[m] {
return FinderNotFound
}
sid.seen[m] = true
f.muAnswers.RLock()
r, ok := f.answers[ManagerIdentity{Manager: m, Identity: sid.id}]
f.muAnswers.RUnlock()
if ok {
return r
}
r = f.search(sid, m, level)
if r == FinderFoundOneOfMany {
// Don't cache this one.
return r
}
f.muAnswers.Lock()
f.answers[ManagerIdentity{Manager: m, Identity: sid.id}] = r
f.muAnswers.Unlock()
return r
}
// checkOne checks whether sid.id matches v or anything reachable from it.
//
// Identities implementing FindFirstManagerIdentityProvider get find-once
// semantics: the first positive match is recorded in f.seenFindOnce and any
// later match for the same identity is reported as
// FinderFoundOneOfManyRepetition.
func (f *Finder) checkOne(sid *searchID, v Identity, depth int) (r FinderResult) {
	ff, ok := v.(FindFirstManagerIdentityProvider)
	if !ok {
		// Common case: no find-once semantics, check v directly.
		return f.doCheckOne(sid, v, depth)
	}

	f.muSeenFindOnce.RLock()
	mi := ff.FindFirstManagerIdentity()
	seen := f.seenFindOnce[mi.Identity]
	f.muSeenFindOnce.RUnlock()
	if seen {
		return FinderFoundOneOfManyRepetition
	}
	r = f.doCheckOne(sid, mi.Identity, depth)
	if r == 0 {
		r = f.checkManager(sid, mi.Manager, depth)
	}
	if r > FinderFoundOneOfManyRepetition {
		f.muSeenFindOnce.Lock()
		// Double check under the write lock: another goroutine may have
		// recorded this identity since the read above.
		if f.seenFindOnce[mi.Identity] {
			f.muSeenFindOnce.Unlock()
			return FinderFoundOneOfManyRepetition
		}
		f.seenFindOnce[mi.Identity] = true
		f.muSeenFindOnce.Unlock()
		r = FinderFoundOneOfMany
	}
	return r
}
// doCheckOne compares sid.id against a single identity v, trying exact
// equality first and then (unless cfg.Exact) the probabilistic interfaces
// in both directions.
func (f *Finder) doCheckOne(sid *searchID, v Identity, depth int) FinderResult {
id2 := Unwrap(v)
if id2 == Anonymous {
return FinderNotFound
}
id := sid.id
// Exact checks: custom Eqer if available, else pointer/value identity.
if sid.hasEqer {
if sid.eqer.Eq(id2) {
return FinderFound
}
} else if id == id2 {
return FinderFound
}
if f.cfg.Exact {
return FinderNotFound
}
if id2 == nil {
return FinderNotFound
}
// GenghisKhan on the candidate side matches anything in non-exact mode.
if id2 == GenghisKhan {
return FinderFound
}
if id.IdentifierBase() == id2.IdentifierBase() {
return FinderFound
}
// Probabilistic checks from id's side (precomputed in Contains)…
if sid.isDp && sid.dp.IsProbablyDependent(id2) {
return FinderFound
}
if sid.isPeq && sid.peq.ProbablyEq(id2) {
return FinderFound
}
// …and from the candidate's side.
if pdep, ok := id2.(IsProbablyDependencyProvider); ok && pdep.IsProbablyDependency(id) {
return FinderFound
}
if peq, ok := id2.(compare.ProbablyEqer); ok && peq.ProbablyEq(id) {
return FinderFound
}
return FinderNotFound
}
// search searches for id in ids.
func (f *Finder) search(sid *searchID, m Manager, depth int) FinderResult {
id := sid.id
if id == Anonymous {
return FinderNotFound
}
if !f.cfg.Exact && id == GenghisKhan {
return FinderNotFound
}
var r FinderResult
// NOTE(review): forEeachIdentity is the (misspelled) iterator name as
// declared on Manager elsewhere in this package; renaming it is out of
// scope for this function.
m.forEeachIdentity(
func(v Identity) bool {
// Returning true from this callback must terminate iteration, so a
// positive r on entry means the iterator violated that contract.
if r > 0 {
panic("should be terminated")
}
r = f.checkOne(sid, v, depth)
if r > 0 {
return true
}
// Recurse into v's own dependency manager, one level deeper.
m := GetDependencyManager(v)
if r = f.checkManager(sid, m, depth+1); r > 0 {
return true
}
return false
},
)
return r
}
// FinderConfig provides configuration for the Finder.
// Note that we by default will use a strategy where probable matches are
// good enough. The primary use case for this is to identify the change set
// for a given changed identity (e.g. a template), and we don't want to
// have any false negatives there, but some false positives are OK. Also, speed is important.
type FinderConfig struct {
// Match exact matches only.
Exact bool
}
// ManagerIdentity wraps a pair of Identity and Manager.
// It is used as the key of the Finder answers cache, so both embedded
// values must be comparable.
type ManagerIdentity struct {
Identity
Manager
}
// String returns "identityBase:managerBase" for debugging/logging.
func (p ManagerIdentity) String() string {
return fmt.Sprintf("%s:%s", p.Identity.IdentifierBase(), p.Manager.IdentifierBase())
}
// searchID holds per-search scratch state, pooled via searchIDPool.
// The interface-assertion results (isDp, isPeq, hasEqer and the matching
// values) are computed once in Contains and reused on every node visited.
type searchID struct {
id Identity
isDp bool
isPeq bool
hasEqer bool
// maxDepth caps recursion; negative means unlimited.
maxDepth int
// seen guards against manager cycles; cleared (not reallocated) on reuse.
seen map[Manager]bool
dp IsProbablyDependentProvider
peq compare.ProbablyEqer
eqer compare.Eqer
}
|
91fac72378bfc6843f136907f74454a73300b2ca
|
identity/finder_test.go
|
// Copyright 2024 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package provides ways to identify values in Hugo. Used for dependency tracking etc.
package identity_test
import (
"testing"
"github.com/gohugoio/hugo/identity"
)
// BenchmarkFinder measures Finder.Contains for both a positive lookup
// (identity reachable through a nested manager) and a negative one.
func BenchmarkFinder(b *testing.B) {
m1 := identity.NewManager("")
m2 := identity.NewManager("")
m3 := identity.NewManager("")
m1.AddIdentity(
testIdentity{"base", "id1", "", "pe1"},
testIdentity{"base2", "id2", "eq1", ""},
m2,
m3,
)
// b4 is reachable via m1 -> m2; b5 is registered nowhere.
b4 := testIdentity{"base4", "id4", "", ""}
b5 := testIdentity{"base5", "id5", "", ""}
m2.AddIdentity(b4)
f := identity.NewFinder(identity.FinderConfig{})
b.Run("Find one", func(b *testing.B) {
for i := 0; i < b.N; i++ {
r := f.Contains(b4, m1, -1)
if r == 0 {
b.Fatal("not found")
}
}
})
b.Run("Find none", func(b *testing.B) {
for i := 0; i < b.N; i++ {
r := f.Contains(b5, m1, -1)
if r > 0 {
b.Fatal("found")
}
}
})
}
|
abfab9d756a31d9c2a4151358809c1f1e8d8ed50
|
ce8e08543736a7ba30fa4c1a18e7af8129ec98301e813cfa78639c3fac445583
|
pingcap/tidb
|
runtime.go
|
pkg/timer/runtime/runtime.go
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package runtime
import (
"context"
"encoding/hex"
"fmt"
"maps"
"slices"
"sync"
"time"
"github.com/google/uuid"
"github.com/pingcap/tidb/pkg/timer/api"
"github.com/pingcap/tidb/pkg/timer/metrics"
"github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/intest"
"github.com/pingcap/tidb/pkg/util/logutil"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
)
// Loop timing knobs. These are vars (not consts) so tests and the init
// below can shorten them.
var (
fullRefreshTimersInterval = time.Minute
maxTriggerEventInterval = 60 * time.Second
minTriggerEventInterval = time.Second
reWatchInterval = 5 * time.Second
batchProcessWatchRespInterval = time.Second
retryBusyWorkerInterval = 5 * time.Second
checkWaitCloseTimerInterval = 10 * time.Second
)
func init() {
if intest.InTest {
// minTriggerEventInterval and batchProcessWatchRespInterval are used to
// forbid the event trigger too fast to exhaust the CPU.
// In the test environment, we can set them to a smaller value to speed up the test.
minTriggerEventInterval = time.Millisecond
batchProcessWatchRespInterval = time.Millisecond
}
}
var idleWatchChan = make(api.WatchTimerChan)
// TimerRuntimeBuilder builds a TimerGroupRuntime step by step.
type TimerRuntimeBuilder struct {
rt *TimerGroupRuntime
}
// NewTimerRuntimeBuilder creates a new TimerRuntimeBuilder with sensible
// defaults; customize via SetCond/RegisterHookFactory before Build.
func NewTimerRuntimeBuilder(groupID string, store *api.TimerStore) *TimerRuntimeBuilder {
return &TimerRuntimeBuilder{
rt: &TimerGroupRuntime{
logger: logutil.BgLogger().With(zap.String("groupID", groupID)),
cache: newTimersCache(),
groupID: groupID,
store: store,
cond: &api.TimerCond{},
cli: api.NewDefaultTimerClient(store),
factories: make(map[string]api.HookFactory),
workerRespCh: make(chan *triggerEventResponse, workerRespChanCap),
workers: make(map[string]*hookWorker),
nowFunc: time.Now,
// metrics
fullRefreshTimerCounter: metrics.TimerScopeCounter(fmt.Sprintf("runtime.%s", groupID), "full_refresh_timers"),
partialRefreshTimerCounter: metrics.TimerScopeCounter(fmt.Sprintf("runtime.%s", groupID), "partial_refresh_timers"),
retryLoopWait: 10 * time.Second,
},
}
}
// SetCond sets the timer condition for the TimerGroupRuntime to manage timers.
// It returns the builder for chaining.
func (b *TimerRuntimeBuilder) SetCond(cond api.Cond) *TimerRuntimeBuilder {
b.rt.cond = cond
return b
}
// RegisterHookFactory registers a hook factory for specified hookClass.
// It returns the builder for chaining.
func (b *TimerRuntimeBuilder) RegisterHookFactory(hookClass string, factory api.HookFactory) *TimerRuntimeBuilder {
b.rt.factories[hookClass] = factory
return b
}
// Build returns the configured TimerGroupRuntime. Call Start on the result
// to begin the background loop.
func (b *TimerRuntimeBuilder) Build() *TimerGroupRuntime {
return b.rt
}
// TimerGroupRuntime is the runtime to manage timers
// It will run a background loop to detect the timers which are up to time and trigger events for them.
type TimerGroupRuntime struct {
// mu guards the Start/Stop lifecycle state (ctx, cancel).
mu sync.Mutex
ctx context.Context
cancel func()
wg util.WaitGroupWrapper
logger *zap.Logger
// cache is the in-memory view of the timers matched by cond.
cache *timersCache
groupID string
store *api.TimerStore
cli api.TimerClient
cond api.Cond
// factories creates hook workers keyed by hook class.
factories map[string]api.HookFactory
// workerRespCh receives trigger results back from hook workers.
workerRespCh chan *triggerEventResponse
workers map[string]*hookWorker
// nowFunc is only used by test
nowFunc func() time.Time
// metrics
fullRefreshTimerCounter prometheus.Counter
partialRefreshTimerCounter prometheus.Counter
// retryLoopWait indicates the wait time before restarting the loop after panic.
retryLoopWait time.Duration
}
// Start starts the TimerGroupRuntime.
// Calling Start on an already-started runtime is a no-op.
func (rt *TimerGroupRuntime) Start() {
rt.mu.Lock()
defer rt.mu.Unlock()
// A non-nil ctx means the loop has already been launched.
if rt.ctx != nil {
return
}
rt.initCtx()
rt.wg.Run(func() {
// withRecoverUntil restarts loop after a panic until ctx is done.
withRecoverUntil(rt.ctx, rt.loop)
})
}
// Running returns whether the runtime is running.
// It is true after Start and before Stop clears the cancel func.
func (rt *TimerGroupRuntime) Running() bool {
rt.mu.Lock()
defer rt.mu.Unlock()
return rt.ctx != nil && rt.cancel != nil
}
// initCtx creates the loop's lifetime context. Caller must hold rt.mu.
func (rt *TimerGroupRuntime) initCtx() {
rt.ctx, rt.cancel = context.WithCancel(context.Background())
}
// Stop stops the runtime and blocks until the background loop has exited.
// It is safe to call multiple times.
func (rt *TimerGroupRuntime) Stop() {
rt.mu.Lock()
if rt.cancel != nil {
rt.cancel()
// Clearing cancel (but not ctx) marks the runtime as stopped for
// Running() while preventing a second Start.
rt.cancel = nil
}
rt.mu.Unlock()
// Wait outside the lock so the loop can finish any in-flight work.
rt.wg.Wait()
}
// loop is the runtime's main event loop. totalPanic counts prior panics of
// this loop (supplied by withRecoverUntil); a positive count delays the
// restart by retryLoopWait. The loop multiplexes periodic full refreshes,
// event triggering, worker responses, watch notifications, and re-watching
// after the watch channel closes.
func (rt *TimerGroupRuntime) loop(totalPanic uint64) {
if totalPanic > 0 {
// Back off before resuming after a panic to avoid a tight crash loop.
sleep(rt.ctx, rt.retryLoopWait)
rt.logger.Info("TimerGroupRuntime loop resumed from panic",
zap.Uint64("totalPanic", totalPanic),
zap.Duration("delay", rt.retryLoopWait))
} else {
rt.logger.Info("TimerGroupRuntime loop started")
}
defer rt.logger.Info("TimerGroupRuntime loop exit")
fullRefreshTimersTicker := time.NewTicker(fullRefreshTimersInterval)
defer fullRefreshTimersTicker.Stop()
checkWaitCloseTimerTicker := time.NewTicker(checkWaitCloseTimerInterval)
defer checkWaitCloseTimerTicker.Stop()
tryTriggerEventTimer := time.NewTimer(minTriggerEventInterval)
defer tryTriggerEventTimer.Stop()
reWatchTimer := time.NewTimer(time.Minute)
defer reWatchTimer.Stop()
batchHandleResponsesTimer := time.NewTimer(batchProcessWatchRespInterval)
defer batchHandleResponsesTimer.Stop()
watchCtx, cancelWatch := context.WithCancel(rt.ctx)
defer cancelWatch()
watchCh := rt.createWatchTimerChan(watchCtx)
// Watch responses are batched and processed on a timer rather than one
// at a time (see batchProcessWatchRespInterval).
batchResponses := make([]api.WatchTimerResponse, 0, 1)
var lastTryTriggerTime time.Time
rt.fullRefreshTimers()
for {
select {
case <-rt.ctx.Done():
return
case <-fullRefreshTimersTicker.C:
rt.fullRefreshTimers()
rt.setTryTriggerTimer(tryTriggerEventTimer, lastTryTriggerTime)
case <-tryTriggerEventTimer.C:
rt.tryTriggerTimerEvents()
lastTryTriggerTime = rt.nowFunc()
rt.setTryTriggerTimer(tryTriggerEventTimer, lastTryTriggerTime)
case resp := <-rt.workerRespCh:
rt.handleWorkerResponse(resp)
rt.setTryTriggerTimer(tryTriggerEventTimer, lastTryTriggerTime)
case <-checkWaitCloseTimerTicker.C:
if rt.tryCloseTriggeringTimers() {
rt.setTryTriggerTimer(tryTriggerEventTimer, lastTryTriggerTime)
}
case <-batchHandleResponsesTimer.C:
if rt.batchHandleWatchResponses(batchResponses) {
rt.setTryTriggerTimer(tryTriggerEventTimer, lastTryTriggerTime)
}
// Keep the slice's capacity for the next batch.
batchResponses = batchResponses[:0]
case resp, ok := <-watchCh:
if ok {
// Arm the batch timer when the first response of a batch arrives.
if len(batchResponses) == 0 {
resetTimer(batchHandleResponsesTimer, batchProcessWatchRespInterval)
}
batchResponses = append(batchResponses, resp)
} else {
rt.logger.Warn("WatchTimerChan closed, retry watch after a while",
zap.Bool("storeSupportWatch", rt.store.WatchSupported()),
zap.Duration("after", reWatchInterval),
)
// Swap in the blocking sentinel so this case stops firing, and
// schedule a re-watch attempt.
watchCh = idleWatchChan
resetTimer(reWatchTimer, reWatchInterval)
}
case <-reWatchTimer.C:
if watchCh == idleWatchChan {
watchCh = rt.createWatchTimerChan(watchCtx)
}
}
}
}
// fullRefreshTimers reloads every timer matching rt.cond from the store and
// replaces the whole cache content with the result. On a store error it only
// logs and leaves the cache untouched.
func (rt *TimerGroupRuntime) fullRefreshTimers() {
	rt.fullRefreshTimerCounter.Inc()
	records, err := rt.store.List(rt.ctx, rt.cond)
	if err != nil {
		rt.logger.Error("error occurs when fullRefreshTimers", zap.Error(err))
		return
	}
	rt.cache.fullUpdateTimers(records)
}
// tryTriggerTimerEvents scans the cache for timers whose try-trigger time has
// arrived and sends a trigger request for each one to its hook worker. Timers
// whose worker channel is full are rescheduled to retry after
// retryBusyWorkerInterval.
func (rt *TimerGroupRuntime) tryTriggerTimerEvents() {
	now := rt.nowFunc()
	var readyTimers []*timerCacheItem
	rt.cache.iterTryTriggerTimers(func(timer *api.TimerRecord, tryTriggerTime time.Time, nextEventTime *time.Time) bool {
		// returning false stops the iteration here; iteration presumably
		// visits timers ordered by tryTriggerTime — later ones are not due
		if tryTriggerTime.After(now) {
			return false
		}
		// skip idle timers that are disabled, or whose next event time is
		// missing or still in the future
		if timer.EventStatus == api.SchedEventIdle && (!timer.Enable || nextEventTime == nil || nextEventTime.After(now)) {
			return true
		}
		if readyTimers == nil {
			readyTimers = make([]*timerCacheItem, 0, 8)
		}
		readyTimers = append(readyTimers, &timerCacheItem{
			timer:         timer,
			nextEventTime: nextEventTime,
		})
		return true
	})
	if len(readyTimers) == 0 {
		return
	}
	// resort timers so that the timer with the smallest nextEventTime has a higher priority to trigger
	slices.SortFunc(readyTimers, func(a, b *timerCacheItem) int {
		// a nil nextEventTime sorts first, i.e. gets the highest priority
		if a.nextEventTime == nil || b.nextEventTime == nil {
			if a.nextEventTime != nil {
				return 1
			}
			if b.nextEventTime != nil {
				return -1
			}
			return 0
		}
		return a.nextEventTime.Compare(*b.nextEventTime)
	})
	var retryTimerIDs []string
	var retryTimerKeys []string
	var busyWorkers map[string]struct{}
	for i, item := range readyTimers {
		timer := item.timer
		worker, ok := rt.ensureWorker(timer.HookClass)
		if !ok {
			// no factory registered for this hook class: nothing can
			// process the timer, so skip it
			continue
		}
		// reuse the timer's existing event ID if one is already recorded;
		// otherwise mint a fresh random one
		eventID := timer.EventID
		if eventID == "" {
			uid := uuid.New()
			eventID = hex.EncodeToString(uid[:])
		}
		req := &triggerEventRequest{
			eventID: eventID,
			timer:   timer,
			store:   rt.store,
			resp:    rt.workerRespCh,
		}
		select {
		case <-rt.ctx.Done():
			return
		case worker.ch <- req:
			rt.cache.setTimerProcStatus(timer.ID, procTriggering, eventID)
		default:
			// worker queue is full: remember the timer (and its worker) so
			// it can be retried after a delay
			if busyWorkers == nil {
				busySize := len(readyTimers) - i
				retryTimerIDs = make([]string, 0, busySize)
				retryTimerKeys = make([]string, 0, busySize)
				busyWorkers = make(map[string]struct{}, busySize)
			}
			busyWorkers[timer.HookClass] = struct{}{}
			retryTimerIDs = append(retryTimerIDs, timer.ID)
			retryTimerKeys = append(retryTimerKeys, fmt.Sprintf("[%s] %s", timer.Namespace, timer.Key))
		}
	}
	if len(retryTimerIDs) > 0 {
		busyWorkerList := make([]string, 0, len(busyWorkers))
		for hookClass := range busyWorkers {
			busyWorkerList = append(busyWorkerList, hookClass)
		}
		rt.logger.Warn(
			"some hook workers are busy, retry triggering after a while",
			zap.Strings("retryTimerIDs", retryTimerIDs),
			zap.Strings("retryTimerKeys", retryTimerKeys),
			zap.Strings("busyWorkers", busyWorkerList),
			zap.Duration("retryAfter", retryBusyWorkerInterval),
		)
		for _, timerID := range retryTimerIDs {
			rt.cache.updateNextTryTriggerTime(timerID, now.Add(retryBusyWorkerInterval))
		}
	}
}
// tryCloseTriggeringTimers refreshes the timers that are waiting for their
// trigger events to close, so finished events are detected and the timers
// return to idle. It returns true if any cached timer changed.
func (rt *TimerGroupRuntime) tryCloseTriggeringTimers() bool {
	return rt.partialRefreshTimers(rt.cache.waitCloseTimerIDs)
}
// setTryTriggerTimer re-arms t to fire when the next trigger attempt is due,
// based on how long ago the last attempt happened.
func (rt *TimerGroupRuntime) setTryTriggerTimer(t *time.Timer, lastTryTriggerTime time.Time) {
	resetTimer(t, rt.getNextTryTriggerDuration(lastTryTriggerTime))
}
// getNextTryTriggerDuration computes how long to wait before the next attempt
// to trigger timer events. The wait is clamped so consecutive attempts are at
// least minTriggerEventInterval and at most maxTriggerEventInterval apart,
// and it shrinks further when a cached timer wants to be tried earlier.
func (rt *TimerGroupRuntime) getNextTryTriggerDuration(lastTryTriggerTime time.Time) time.Duration {
	now := rt.nowFunc()
	elapsed := now.Sub(lastTryTriggerTime)
	if elapsed < 0 {
		elapsed = 0
	}
	upperBound := maxTriggerEventInterval - elapsed
	if upperBound <= 0 {
		// the maximum allowed gap has already passed: trigger immediately
		return time.Duration(0)
	}
	lowerBound := minTriggerEventInterval - elapsed
	if lowerBound < 0 {
		lowerBound = 0
	}
	wait := upperBound
	rt.cache.iterTryTriggerTimers(func(_ *api.TimerRecord, tryTriggerTime time.Time, _ *time.Time) bool {
		// only the first visited timer is inspected (the callback returns
		// false to stop); presumably iteration is ordered by try-trigger time
		if untilDue := tryTriggerTime.Sub(now); untilDue < wait {
			wait = untilDue
		}
		return false
	})
	if wait < lowerBound {
		wait = lowerBound
	}
	return wait
}
// handleWorkerResponse applies the result of a trigger attempt to the cache.
// It first merges any new timer record carried by the response (a nil record
// means the timer was deleted), then moves the timer to the wait-close state
// on success, or back to idle (optionally with a retry delay) on failure.
func (rt *TimerGroupRuntime) handleWorkerResponse(resp *triggerEventResponse) {
	if !rt.cache.hasTimer(resp.timerID) {
		return
	}
	if record, ok := resp.newTimerRecord.Get(); ok {
		if record == nil {
			rt.cache.removeTimer(resp.timerID)
		} else {
			rt.cache.updateTimer(record)
		}
	}
	if !resp.success {
		rt.cache.setTimerProcStatus(resp.timerID, procIdle, "")
		if delay, ok := resp.retryAfter.Get(); ok {
			rt.cache.updateNextTryTriggerTime(resp.timerID, rt.nowFunc().Add(delay))
		}
		return
	}
	rt.cache.setTimerProcStatus(resp.timerID, procWaitTriggerClose, resp.eventID)
}
// partialRefreshTimers reloads only the timers in timerIDs from the store and
// merges them into the cache. Requested timers that the store no longer
// returns are evicted from the cache. It returns true if any cached timer
// changed.
func (rt *TimerGroupRuntime) partialRefreshTimers(timerIDs map[string]struct{}) bool {
	if len(timerIDs) == 0 {
		return false
	}
	rt.partialRefreshTimerCounter.Inc()
	records, err := rt.store.List(rt.ctx, rt.buildTimerIDsCond(timerIDs))
	if err != nil {
		rt.logger.Error("error occurs when get timers", zap.Error(err))
		return false
	}
	if len(records) != len(timerIDs) {
		// some requested timers are gone from the store: drop them locally too
		missing := maps.Clone(timerIDs)
		for _, record := range records {
			delete(missing, record.ID)
		}
		for id := range missing {
			rt.cache.removeTimer(id)
		}
	}
	return rt.cache.partialBatchUpdateTimers(records)
}
// createWatchTimerChan returns a channel of watch events from the store when
// the store supports watching; otherwise it returns idleWatchChan, a channel
// that never delivers anything.
func (rt *TimerGroupRuntime) createWatchTimerChan(ctx context.Context) api.WatchTimerChan {
	supported := rt.store.WatchSupported()
	rt.logger.Info("create watch chan if possible for timer runtime",
		zap.Bool("storeSupportWatch", supported),
	)
	if !supported {
		return idleWatchChan
	}
	return rt.store.Watch(ctx)
}
// batchHandleWatchResponses folds a batch of watch responses into the cache:
// created/updated timers are re-fetched from the store, deleted timers are
// evicted. It returns true if the cache changed.
func (rt *TimerGroupRuntime) batchHandleWatchResponses(responses []api.WatchTimerResponse) bool {
	if len(responses) == 0 {
		return false
	}
	toRefresh := make(map[string]struct{}, len(responses))
	toDelete := make(map[string]struct{}, len(responses))
	for _, resp := range responses {
		for _, event := range resp.Events {
			switch event.Tp {
			case api.WatchTimerEventCreate, api.WatchTimerEventUpdate:
				toRefresh[event.TimerID] = struct{}{}
			case api.WatchTimerEventDelete:
				toDelete[event.TimerID] = struct{}{}
			}
		}
	}
	changed := rt.partialRefreshTimers(toRefresh)
	for id := range toDelete {
		if rt.cache.removeTimer(id) {
			changed = true
		}
	}
	return changed
}
// ensureWorker returns the hook worker for hookClass, creating and caching it
// on first use. It returns false when no factory is registered for the class.
func (rt *TimerGroupRuntime) ensureWorker(hookClass string) (*hookWorker, bool) {
	if worker, ok := rt.workers[hookClass]; ok {
		return worker, true
	}
	factory, ok := rt.factories[hookClass]
	if !ok {
		return nil, false
	}
	var hookFn func() api.Hook
	if factory != nil {
		// capture the client by value so the closure does not read rt later
		cli := rt.cli
		hookFn = func() api.Hook {
			return factory(hookClass, cli)
		}
	}
	worker := newHookWorker(rt.ctx, &rt.wg, rt.groupID, hookClass, hookFn, rt.nowFunc)
	rt.workers[hookClass] = worker
	return worker, true
}
// buildTimerIDsCond builds a store condition that matches rt.cond AND any of
// the given timer IDs.
func (rt *TimerGroupRuntime) buildTimerIDsCond(ids map[string]struct{}) api.Cond {
	idConds := make([]api.Cond, 0, len(ids))
	for id := range ids {
		idConds = append(idConds, &api.TimerCond{ID: api.NewOptionalVal(id)})
	}
	return api.And(rt.cond, api.Or(idConds...))
}
// setNowFunc is only used by test.
// It replaces the time source of both the runtime and its cache so tests can
// control "now" deterministically.
func (rt *TimerGroupRuntime) setNowFunc(fn func() time.Time) {
	rt.nowFunc = fn
	rt.cache.nowFunc = fn
}
func resetTimer(t *time.Timer, interval time.Duration) {
if !t.Stop() {
select {
case <-t.C:
default:
}
}
t.Reset(interval)
}
// withRecoverUntil keeps invoking fn (guarded by util.WithRecovery) until it
// completes without panicking or ctx is cancelled. The attempt counter i is
// passed to fn and increases by one per call.
// NOTE(review): this relies on util.WithRecovery always invoking the second
// callback, with r == nil when fn returned normally — verify against util.
func withRecoverUntil(ctx context.Context, fn func(uint64)) {
	var i uint64
	success := false
	for ctx.Err() == nil && !success {
		util.WithRecovery(func() {
			fn(i)
		}, func(r any) {
			// r is the recover() result; nil means fn finished without panic
			if r == nil {
				success = true
			}
		})
		i++
	}
}
func sleep(ctx context.Context, d time.Duration) {
if ctx == nil {
ctx = context.Background()
}
select {
case <-ctx.Done():
case <-time.After(d):
}
}
|
4e3638fd96775d105258bea04a557422ad970178
|
pkg/timer/runtime/runtime_test.go
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package runtime
import (
"context"
"fmt"
"sync"
"sync/atomic"
"testing"
"time"
"unsafe"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/pkg/timer/api"
mockutil "github.com/pingcap/tidb/pkg/util/mock"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// TestRuntimeStartStop runs the full runtime lifecycle: it creates one timer
// in a memory store, starts the runtime, waits until the hook processed the
// timer's event, then stops the runtime and verifies all mock expectations.
func TestRuntimeStartStop(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	store := api.NewMemoryTimerStore()
	defer store.Close()
	cli := api.NewDefaultTimerClient(store)
	_, err := cli.CreateTimer(ctx, api.TimerSpec{
		Namespace:       "n1",
		Key:             "k1",
		SchedPolicyType: api.SchedEventInterval,
		SchedPolicyExpr: "1m",
		Enable:          true,
		HookClass:       "hook1",
	})
	require.NoError(t, err)
	// closed by the hook when the scheduled event has been handled
	timerProcessed := make(chan struct{})
	hook := newMockHook()
	hook.On("Start").Return().Once()
	hook.On("Stop").Return().Once()
	hook.On("OnPreSchedEvent", mock.Anything, mock.Anything).
		Return(api.PreSchedEventResult{}, nil).Once()
	hook.On("OnSchedEvent", mock.Anything, mock.Anything).
		Return(nil).Once().
		Run(func(args mock.Arguments) {
			close(timerProcessed)
		})
	var factoryMock mock.Mock
	factoryMock.On("factory", "hook1", cli).Return(hook).Once()
	hookFactory := func(hookClass string, cli api.TimerClient) api.Hook {
		return factoryMock.MethodCalled("factory", hookClass, cli).Get(0).(api.Hook)
	}
	runtime := NewTimerRuntimeBuilder("g1", store).
		RegisterHookFactory("hook1", hookFactory).
		Build()
	require.NotNil(t, runtime.fullRefreshTimerCounter)
	require.NotNil(t, runtime.partialRefreshTimerCounter)
	runtime.Start()
	require.True(t, runtime.Running())
	waitDone(timerProcessed, time.Minute)
	// stop in a goroutine so a hang in Stop is caught by the waitDone below
	// NOTE(review): require.False inside a non-test goroutine calls FailNow
	// off the test goroutine, which testify discourages — consider assert
	go func() {
		runtime.Stop()
		require.False(t, runtime.Running())
		cancel()
	}()
	waitDone(ctx.Done(), time.Minute)
	factoryMock.AssertExpectations(t)
	hook.AssertExpectations(t)
}
// TestEnsureWorker checks that ensureWorker creates a worker once per hook
// class (calling the factory a single time), returns the cached worker on
// subsequent calls, and fails for an unregistered class.
func TestEnsureWorker(t *testing.T) {
	store := api.NewMemoryTimerStore()
	defer store.Close()
	cli := api.NewDefaultTimerClient(store)
	var factoryMock mock.Mock
	hook := newMockHook()
	hook.On("Start").Return().Once()
	hook.On("Stop").Return().Once()
	factoryMock.On("factory", "hook1", cli).Return(hook).Once()
	hookFactory := func(hookClass string, cli api.TimerClient) api.Hook {
		return factoryMock.MethodCalled("factory", hookClass, cli).Get(0).(api.Hook)
	}
	runtime := NewTimerRuntimeBuilder("g1", store).
		RegisterHookFactory("hook1", hookFactory).
		Build()
	runtime.initCtx()
	worker1, ok := runtime.ensureWorker("hook1")
	require.True(t, ok)
	waitDone(hook.started, time.Minute)
	// second lookup must return the same cached worker, not a new one
	worker2, ok := runtime.ensureWorker("hook1")
	require.True(t, ok)
	require.Same(t, worker1, worker2)
	// "hook2" has no registered factory
	_, ok = runtime.ensureWorker("hook2")
	require.False(t, ok)
	runtime.Stop()
	factoryMock.AssertExpectations(t)
	hook.AssertExpectations(t)
}
// TestTryTriggerTimer exercises tryTriggerTimerEvents against a mix of timer
// states: due idle timers, a disabled-but-already-triggering timer, timers
// not yet due, a full worker channel, and timers already triggering or
// waiting to close. It then checks the manual-request paths for t3.
func TestTryTriggerTimer(t *testing.T) {
	now := time.Now()
	store := api.NewMemoryTimerStore()
	defer store.Close()
	runtime := NewTimerRuntimeBuilder("g1", store).Build()
	runtime.setNowFunc(func() time.Time {
		return now
	})
	runtime.initCtx()
	// t1: idle timer
	t1 := newTestTimer("t1", "1m", now.Add(-time.Hour))
	runtime.cache.updateTimer(t1)
	// t2: not idle timer, it will be triggered even if the timer is disabled
	t2 := newTestTimer("t2", "1h", now)
	t2.EventStatus = api.SchedEventTrigger
	t2.EventID = "event2"
	t2.EventStart = now.Add(-time.Hour)
	t2.Enable = false
	runtime.cache.updateTimer(t2)
	// t3: next event time after now
	t3 := newTestTimer("t3", "10m", now)
	runtime.cache.updateTimer(t3)
	runtime.cache.updateNextTryTriggerTime(t3.ID, now.Add(-10*time.Minute))
	// t4: next try trigger time after now
	t4 := newTestTimer("t4", "1m", now.Add(-time.Hour))
	runtime.cache.updateTimer(t4)
	runtime.cache.updateNextTryTriggerTime(t4.ID, now.Add(time.Second))
	// t5: due idle timer, fills the third worker channel slot
	t5 := newTestTimer("t5", "5m", now.Add(-10*time.Minute))
	runtime.cache.updateTimer(t5)
	// t6: worker chan will be full when emitting t6
	t6 := newTestTimer("t6", "6m", now.Add(-10*time.Minute))
	runtime.cache.updateTimer(t6)
	// t7: worker chan will be full when emitting t7
	t7 := newTestTimer("t7", "6m", now.Add(-10*time.Minute))
	runtime.cache.updateTimer(t7)
	// t8: triggering
	t8 := newTestTimer("t8", "1m", now.Add(-2*time.Hour))
	runtime.cache.updateTimer(t8)
	runtime.cache.setTimerProcStatus(t8.ID, procTriggering, "event8")
	// t9: wait close
	t9 := newTestTimer("t9", "1m", now.Add(-2*time.Hour))
	t9.EventStatus = api.SchedEventTrigger
	t9.EventID = "event9"
	t9.EventStart = now.Add(-2 * time.Hour)
	runtime.cache.updateTimer(t9)
	runtime.cache.setTimerProcStatus(t9.ID, procWaitTriggerClose, "event9")
	// worker channel capacity 3: only three requests fit in one round
	ch := make(chan *triggerEventRequest, 3)
	runtime.workers["hook1"] = &hookWorker{ch: ch}
	runtime.tryTriggerTimerEvents()
	require.Equal(t, procTriggering, runtime.cache.items[t1.ID].procStatus)
	require.NotEmpty(t, runtime.cache.items[t1.ID].triggerEventID)
	require.Equal(t, procTriggering, runtime.cache.items[t2.ID].procStatus)
	require.Equal(t, "event2", runtime.cache.items[t2.ID].triggerEventID)
	require.Equal(t, procIdle, runtime.cache.items[t3.ID].procStatus)
	require.Empty(t, runtime.cache.items[t3.ID].triggerEventID)
	require.Equal(t, procIdle, runtime.cache.items[t4.ID].procStatus)
	require.Empty(t, runtime.cache.items[t4.ID].triggerEventID)
	require.Equal(t, procTriggering, runtime.cache.items[t5.ID].procStatus)
	require.NotEmpty(t, runtime.cache.items[t5.ID].triggerEventID)
	// t6/t7 hit the full channel and are rescheduled for a busy-worker retry
	require.Equal(t, procIdle, runtime.cache.items[t6.ID].procStatus)
	require.Empty(t, runtime.cache.items[t6.ID].triggerEventID)
	require.Equal(t, runtime.cache.items[t6.ID].nextTryTriggerTime, now.Add(retryBusyWorkerInterval))
	require.Equal(t, procIdle, runtime.cache.items[t7.ID].procStatus)
	require.Empty(t, runtime.cache.items[t7.ID].triggerEventID)
	require.Equal(t, runtime.cache.items[t7.ID].nextTryTriggerTime, now.Add(retryBusyWorkerInterval))
	require.Equal(t, procTriggering, runtime.cache.items[t8.ID].procStatus)
	require.Equal(t, procWaitTriggerClose, runtime.cache.items[t9.ID].procStatus)
	// consumeAndVerify pops one request from the worker channel and checks it
	// matches tm; a nil tm asserts that the channel is empty instead
	consumeAndVerify := func(tm *api.TimerRecord) {
		select {
		case req := <-ch:
			if tm == nil {
				require.FailNow(t, "should not reach here")
				return
			}
			require.Equal(t, tm, req.timer)
			require.Same(t, runtime.store, req.store)
			require.NotNil(t, runtime.workerRespCh)
			require.Equal(t, runtime.cache.items[tm.ID].triggerEventID, req.eventID)
		default:
			if tm != nil {
				require.FailNow(t, "should not reach here")
			}
		}
	}
	// t2 first: its nil nextEventTime gives it the highest trigger priority
	consumeAndVerify(t2)
	consumeAndVerify(t1)
	consumeAndVerify(t5)
	consumeAndVerify(nil)
	// t3: has a processed manual request
	t3 = t3.Clone()
	t3.Version++
	t3.ManualRequest = api.ManualRequest{
		ManualRequestID:   "req1",
		ManualRequestTime: now,
		ManualTimeout:     time.Minute,
		ManualProcessed:   true,
		ManualEventID:     "event1",
	}
	runtime.cache.updateTimer(t3)
	runtime.tryTriggerTimerEvents()
	consumeAndVerify(nil)
	// t3: has a not processed manual request but timer is disabled
	t3 = t3.Clone()
	t3.Enable = false
	t3.ManualRequest = api.ManualRequest{
		ManualRequestID:   "req2",
		ManualRequestTime: now,
		ManualTimeout:     time.Minute,
	}
	t3.Version++
	runtime.cache.updateTimer(t3)
	runtime.tryTriggerTimerEvents()
	consumeAndVerify(nil)
	// t3: has a not processed manual request
	t3 = t3.Clone()
	t3.Enable = true
	t3.Version++
	runtime.cache.updateTimer(t3)
	runtime.tryTriggerTimerEvents()
	consumeAndVerify(t3)
}
// TestTryTriggerTimePriority verifies that when the worker channel cannot
// accept all due timers, the ones with the smallest (or nil) nextEventTime
// win the available slots.
func TestTryTriggerTimePriority(t *testing.T) {
	now := time.Now()
	store := api.NewMemoryTimerStore()
	defer store.Close()
	runtime := NewTimerRuntimeBuilder("g1", store).Build()
	runtime.setNowFunc(func() time.Time {
		return now
	})
	runtime.initCtx()
	// capacity 2: only two of the due timers can be dispatched
	ch := make(chan *triggerEventRequest, 2)
	runtime.workers["hook1"] = &hookWorker{ch: ch}
	t1 := newTestTimer("t1", "1m", now.Add(-time.Hour))
	runtime.cache.updateTimer(t1)
	runtime.cache.updateNextTryTriggerTime(t1.ID, now.Add(-3*time.Minute))
	t2 := newTestTimer("t2", "1m", now.Add(-2*time.Hour))
	runtime.cache.updateTimer(t2)
	runtime.cache.updateNextTryTriggerTime(t2.ID, now.Add(-2*time.Minute))
	t3 := newTestTimer("t3", "1h", now)
	t3.EventStatus = api.SchedEventTrigger
	t3.EventID = "event2"
	t3.EventStart = now.Add(-time.Minute)
	t3.Enable = false
	runtime.cache.updateTimer(t3)
	t4 := newTestTimer("t4", "1m", now.Add(-10*time.Hour))
	runtime.cache.updateTimer(t4)
	runtime.cache.updateNextTryTriggerTime(t4.ID, now.Add(time.Minute))
	// nextEventTime: t3 (nil) < t4 < t2 < t1
	// nextTryTriggerTime: t1 < t2 < t3 (eventStart) < t4
	// we should test the priority trigger is ordered by `nextEventTime` because to ensure the timer who has a max
	// delay time will be triggered first.
	// t4 should not be scheduled for the next trigger time is after now.
	// so, t3 and t2 will be triggered when the capacity of chan is 2
	runtime.tryTriggerTimerEvents()
	require.Equal(t, procTriggering, runtime.cache.items[t2.ID].procStatus)
	require.Equal(t, procTriggering, runtime.cache.items[t3.ID].procStatus)
	// t1, t4 should keep not triggered
	require.Equal(t, procIdle, runtime.cache.items[t1.ID].procStatus)
	require.Equal(t, procIdle, runtime.cache.items[t4.ID].procStatus)
}
// TestHandleHookWorkerResponse covers handleWorkerResponse for the four
// response shapes: success with an updated record, failure with a nil record
// (timer deleted), failure with a changed record, and failure with a
// retry-after delay.
func TestHandleHookWorkerResponse(t *testing.T) {
	now := time.Now()
	store := api.NewMemoryTimerStore()
	defer store.Close()
	runtime := NewTimerRuntimeBuilder("g1", store).Build()
	runtime.setNowFunc(func() time.Time {
		return now
	})
	runtime.initCtx()
	t1 := newTestTimer("t1", "1m", now.Add(-time.Hour))
	runtime.cache.updateTimer(t1)
	runtime.cache.setTimerProcStatus(t1.ID, procTriggering, "event1")
	// success response
	runtime.cache.removeTimer(t1.ID)
	runtime.cache.updateTimer(t1)
	triggerTimer1 := t1.Clone()
	triggerTimer1.EventID = "event1"
	triggerTimer1.EventStatus = api.SchedEventTrigger
	triggerTimer1.EventStart = now
	triggerTimer1.EventData = []byte("data1")
	triggerTimer1.Version++
	runtime.handleWorkerResponse(&triggerEventResponse{
		success:        true,
		timerID:        t1.ID,
		eventID:        "event1",
		newTimerRecord: api.NewOptionalVal(triggerTimer1),
	})
	item := runtime.cache.items[t1.ID]
	require.Equal(t, item.timer, triggerTimer1)
	require.Equal(t, procWaitTriggerClose, item.procStatus)
	require.Equal(t, "event1", item.triggerEventID)
	require.Equal(t, 1, len(runtime.cache.waitCloseTimerIDs))
	_, ok := runtime.cache.waitCloseTimerIDs[t1.ID]
	require.True(t, ok)
	// not success response with timer removed
	var newTimer *api.TimerRecord
	runtime.cache.removeTimer(t1.ID)
	runtime.cache.updateTimer(t1)
	runtime.handleWorkerResponse(&triggerEventResponse{
		success:        false,
		timerID:        t1.ID,
		eventID:        "event1",
		newTimerRecord: api.NewOptionalVal(newTimer),
	})
	require.False(t, runtime.cache.hasTimer(t1.ID))
	require.Equal(t, 0, len(runtime.cache.waitCloseTimerIDs))
	// not success response with timer changed
	runtime.cache.removeTimer(t1.ID)
	runtime.cache.updateTimer(t1)
	newTimer = t1.Clone()
	newTimer.Version++
	newTimer.Watermark = now.Add(time.Second)
	runtime.handleWorkerResponse(&triggerEventResponse{
		success:        false,
		timerID:        t1.ID,
		eventID:        "event1",
		newTimerRecord: api.NewOptionalVal(newTimer),
	})
	item = runtime.cache.items[t1.ID]
	require.Equal(t, newTimer, item.timer)
	require.Equal(t, procIdle, item.procStatus)
	require.Equal(t, "", item.triggerEventID)
	require.Equal(t, 0, len(runtime.cache.waitCloseTimerIDs))
	// not success response with retry after
	runtime.cache.removeTimer(t1.ID)
	runtime.cache.updateTimer(t1)
	runtime.handleWorkerResponse(&triggerEventResponse{
		success:    false,
		timerID:    t1.ID,
		eventID:    "event1",
		retryAfter: api.NewOptionalVal(12 * time.Second),
	})
	item = runtime.cache.items[t1.ID]
	require.Equal(t, t1, item.timer)
	require.Equal(t, procIdle, item.procStatus)
	require.Equal(t, "", item.triggerEventID)
	require.Equal(t, now.Add(12*time.Second), item.nextTryTriggerTime)
	require.Equal(t, 0, len(runtime.cache.waitCloseTimerIDs))
}
// TestNextTryTriggerDuration checks the wait computed by
// getNextTryTriggerDuration: clamped between the min/max trigger intervals,
// shortened by the earliest pending timer, and zero once the maximum
// interval has already elapsed.
func TestNextTryTriggerDuration(t *testing.T) {
	// shrink the minimum interval so the clamping is observable in the test
	origMinTriggerEventInterval := minTriggerEventInterval
	minTriggerEventInterval = time.Second
	defer func() {
		minTriggerEventInterval = origMinTriggerEventInterval
	}()
	now := time.Now()
	store := api.NewMemoryTimerStore()
	defer store.Close()
	runtime := NewTimerRuntimeBuilder("g1", store).Build()
	runtime.setNowFunc(func() time.Time {
		return now
	})
	runtime.initCtx()
	// t1 is already triggering, so it does not contribute a try-trigger time
	t1 := newTestTimer("t1", "0.1m", now)
	runtime.cache.updateTimer(t1)
	runtime.cache.setTimerProcStatus(t1.ID, procTriggering, "event1")
	t2 := newTestTimer("t2", "1.5m", now)
	runtime.cache.updateTimer(t2)
	t3 := newTestTimer("t3", "2m", now)
	runtime.cache.updateTimer(t3)
	interval := runtime.getNextTryTriggerDuration(now)
	require.Equal(t, 60*time.Second, interval)
	now = now.Add(70 * time.Second)
	interval = runtime.getNextTryTriggerDuration(now)
	require.Equal(t, 20*time.Second, interval)
	now = now.Add(19*time.Second + 500*time.Millisecond)
	interval = runtime.getNextTryTriggerDuration(now.Add(-time.Second))
	require.Equal(t, 500*time.Millisecond, interval)
	// a recent last-trigger time bumps the result up to the minimum interval
	interval = runtime.getNextTryTriggerDuration(now)
	require.Equal(t, time.Second, interval)
	interval = runtime.getNextTryTriggerDuration(now.Add(100 * time.Millisecond))
	require.Equal(t, time.Second, interval)
	// far past last-trigger time: the max interval has elapsed, so zero wait
	now = now.Add(time.Hour)
	interval = runtime.getNextTryTriggerDuration(time.UnixMilli(0))
	require.Equal(t, time.Duration(0), interval)
}
// TestFullRefreshTimers verifies fullRefreshTimers: a store error leaves the
// cache intact (but still bumps the counter), and a successful refresh
// replaces the cache content while preserving each timer's processing status.
func TestFullRefreshTimers(t *testing.T) {
	fullRefreshCounter := &mockutil.MetricsCounter{}
	mockCore, mockStore := newMockStore()
	runtime := NewTimerRuntimeBuilder("g1", mockStore).Build()
	require.NotNil(t, runtime.fullRefreshTimerCounter)
	runtime.fullRefreshTimerCounter = fullRefreshCounter
	runtime.cond = &api.TimerCond{Namespace: api.NewOptionalVal("n1")}
	runtime.initCtx()
	// seed 7 cached timers; t2/t4 are waiting to close, t6 is triggering
	timers := make([]*api.TimerRecord, 7)
	for i := 0; i < len(timers); i++ {
		timer := newTestTimer(fmt.Sprintf("t%d", i), "1m", time.Now())
		procStatus := procIdle
		if i == 2 || i == 4 {
			timer.EventStatus = api.SchedEventTrigger
			timer.EventStart = time.Now()
			timer.EventID = fmt.Sprintf("event%d", i+1)
			procStatus = procWaitTriggerClose
		}
		if i == 6 {
			procStatus = procTriggering
		}
		runtime.cache.updateTimer(timer)
		runtime.cache.setTimerProcStatus(timer.ID, procStatus, timer.EventID)
		timers[i] = timer
	}
	t0New := timers[0].Clone()
	t0New.Version++
	t2New := timers[2].Clone()
	t2New.Version++
	t4New := timers[4].Clone()
	t4New.EventStatus = api.SchedEventIdle
	t4New.EventID = ""
	t4New.Version++
	t6New := timers[6].Clone()
	t6New.Version++
	// first call: the store errors, so the cache keeps all 7 timers
	mockCore.On("List", mock.Anything, runtime.cond).Return(timers[0:], errors.New("mockErr")).Once()
	require.Equal(t, float64(0), fullRefreshCounter.Val())
	runtime.fullRefreshTimers()
	require.Equal(t, float64(1), fullRefreshCounter.Val())
	require.Equal(t, 7, len(runtime.cache.items))
	// second call: the store returns 5 timers; t3/t5 disappear from the cache
	mockCore.On("List", mock.Anything, runtime.cond).Return([]*api.TimerRecord{t0New, timers[1], t2New, t4New, t6New}, nil).Once()
	runtime.fullRefreshTimers()
	require.Equal(t, float64(2), fullRefreshCounter.Val())
	mockCore.AssertExpectations(t)
	require.Equal(t, 5, len(runtime.cache.items))
	require.Equal(t, t0New, runtime.cache.items["t0"].timer)
	require.Equal(t, timers[1], runtime.cache.items["t1"].timer)
	require.Equal(t, t2New, runtime.cache.items["t2"].timer)
	require.Equal(t, procWaitTriggerClose, runtime.cache.items["t2"].procStatus)
	require.Equal(t, t4New, runtime.cache.items["t4"].timer)
	require.Equal(t, procIdle, runtime.cache.items["t4"].procStatus)
	require.Equal(t, t6New, runtime.cache.items["t6"].timer)
	require.Equal(t, procTriggering, runtime.cache.items["t6"].procStatus)
}
// TestBatchHandlerWatchResponses verifies batchHandleWatchResponses: watch
// create/update events cause one partial refresh whose store condition ANDs
// rt.cond with an OR of the affected IDs, while delete events evict timers
// directly from the cache.
func TestBatchHandlerWatchResponses(t *testing.T) {
	partialRefreshCounter := &mockutil.MetricsCounter{}
	mockCore, mockStore := newMockStore()
	runtime := NewTimerRuntimeBuilder("g1", mockStore).Build()
	require.NotNil(t, runtime.partialRefreshTimerCounter)
	runtime.cond = &api.TimerCond{Namespace: api.NewOptionalVal("n1")}
	runtime.initCtx()
	runtime.partialRefreshTimerCounter = partialRefreshCounter
	// seed 7 cached timers; t2 is waiting to close, t6 is triggering
	timers := make([]*api.TimerRecord, 7)
	for i := 0; i < len(timers); i++ {
		timer := newTestTimer(fmt.Sprintf("t%d", i), "1m", time.Now())
		procStatus := procIdle
		if i == 2 {
			timer.EventStatus = api.SchedEventTrigger
			timer.EventStart = time.Now()
			timer.EventID = fmt.Sprintf("event%d", i+1)
			procStatus = procWaitTriggerClose
		}
		if i == 6 {
			procStatus = procTriggering
		}
		runtime.cache.updateTimer(timer)
		runtime.cache.setTimerProcStatus(timer.ID, procStatus, timer.EventID)
		timers[i] = timer
	}
	t10 := newTestTimer("t10", "1m", time.Now())
	t2New := timers[2].Clone()
	t2New.EventStatus = api.SchedEventIdle
	t2New.EventID = ""
	t2New.Version++
	t6New := timers[6].Clone()
	t6New.Version++
	// the Run callback inspects the generated condition:
	// And(rt.cond, Or(ID == "t10", ID == "t2"))
	mockCore.On("List", mock.Anything, mock.Anything).
		Return([]*api.TimerRecord{t2New, t6New, t10}, nil).Once().
		Run(func(args mock.Arguments) {
			and, ok := args[1].(*api.Operator)
			require.True(t, ok)
			require.Equal(t, api.OperatorAnd, and.Op)
			require.False(t, and.Not)
			require.Equal(t, 2, len(and.Children))
			require.Equal(t, runtime.cond, and.Children[0])
			or, ok := and.Children[1].(*api.Operator)
			require.True(t, ok)
			require.Equal(t, api.OperatorOr, or.Op)
			require.False(t, or.Not)
			require.Equal(t, 2, len(or.Children))
			condIDs := make(map[string]struct{})
			for i := range or.Children {
				idCond, ok := or.Children[i].(*api.TimerCond)
				require.True(t, ok)
				got, ok := idCond.ID.Get()
				require.True(t, ok)
				// only the ID field may be set on each child condition
				require.Empty(t, idCond.FieldsSet(unsafe.Pointer(&idCond.ID)))
				condIDs[got] = struct{}{}
			}
			require.Equal(t, len(condIDs), 2)
			require.Contains(t, condIDs, "t10")
			require.Contains(t, condIDs, "t2")
		})
	require.Equal(t, float64(0), partialRefreshCounter.Val())
	runtime.batchHandleWatchResponses([]api.WatchTimerResponse{
		{
			Events: []*api.WatchTimerEvent{
				{
					Tp:      api.WatchTimerEventDelete,
					TimerID: "t0",
				},
				{
					Tp:      api.WatchTimerEventCreate,
					TimerID: "t10",
				},
			},
		},
		{
			Events: []*api.WatchTimerEvent{
				{
					Tp:      api.WatchTimerEventUpdate,
					TimerID: "t2",
				},
				{
					Tp:      api.WatchTimerEventDelete,
					TimerID: "t5",
				},
			},
		},
	})
	require.Equal(t, float64(1), partialRefreshCounter.Val())
	mockCore.AssertExpectations(t)
	require.Equal(t, 6, len(runtime.cache.items))
	require.False(t, runtime.cache.hasTimer("t0"))
	require.False(t, runtime.cache.hasTimer("t5"))
	require.Equal(t, t10, runtime.cache.items["t10"].timer)
	require.Equal(t, procIdle, runtime.cache.items["t10"].procStatus)
	require.Equal(t, t2New, runtime.cache.items["t2"].timer)
	require.Equal(t, procIdle, runtime.cache.items["t2"].procStatus)
	require.Equal(t, t6New, runtime.cache.items["t6"].timer)
	require.Equal(t, procTriggering, runtime.cache.items["t6"].procStatus)
}
// TestCloseWaitingCloseTimers verifies tryCloseTriggeringTimers: it lists the
// wait-close timers with an And/Or ID condition, removes timers the store no
// longer returns, and flips timers whose events finished back to idle.
func TestCloseWaitingCloseTimers(t *testing.T) {
	mockCore, mockStore := newMockStore()
	runtime := NewTimerRuntimeBuilder("g1", mockStore).Build()
	runtime.cond = &api.TimerCond{Namespace: api.NewOptionalVal("n1")}
	runtime.initCtx()
	// no wait-close timers yet, so nothing happens and no store call is made
	require.False(t, runtime.tryCloseTriggeringTimers())
	// seed 5 timers, all triggering and waiting for their events to close
	timers := make([]*api.TimerRecord, 5)
	for i := 0; i < len(timers); i++ {
		timer := newTestTimer(fmt.Sprintf("t%d", i), "1m", time.Now())
		timer.EventStatus = api.SchedEventTrigger
		timer.EventStart = time.Now()
		timer.EventID = fmt.Sprintf("event%d", i)
		runtime.cache.updateTimer(timer)
		runtime.cache.setTimerProcStatus(timer.ID, procWaitTriggerClose, timer.EventID)
		timers[i] = timer
	}
	// the Run callback checks the condition: And(rt.cond, Or(ID == t0..t4))
	mockCore.On("List", mock.Anything, mock.Anything).
		Return(timers, nil).Once().
		Run(func(args mock.Arguments) {
			and, ok := args[1].(*api.Operator)
			require.True(t, ok)
			require.Equal(t, api.OperatorAnd, and.Op)
			require.False(t, and.Not)
			require.Equal(t, 2, len(and.Children))
			require.Equal(t, runtime.cond, and.Children[0])
			or, ok := and.Children[1].(*api.Operator)
			require.True(t, ok)
			require.Equal(t, api.OperatorOr, or.Op)
			require.False(t, or.Not)
			require.Equal(t, len(timers), len(or.Children))
			condIDs := make(map[string]struct{})
			for i := range or.Children {
				idCond, ok := or.Children[i].(*api.TimerCond)
				require.True(t, ok)
				got, ok := idCond.ID.Get()
				require.True(t, ok)
				// only the ID field may be set on each child condition
				require.Empty(t, idCond.FieldsSet(unsafe.Pointer(&idCond.ID)))
				condIDs[got] = struct{}{}
			}
			require.Equal(t, len(condIDs), len(or.Children))
			for i := range timers {
				require.Contains(t, condIDs, fmt.Sprintf("t%d", i))
			}
		})
	// store returns identical records: nothing changed
	require.False(t, runtime.tryCloseTriggeringTimers())
	mockCore.AssertExpectations(t)
	require.Equal(t, len(timers), len(runtime.cache.waitCloseTimerIDs))
	require.Equal(t, len(timers), len(runtime.cache.items))
	require.Equal(t, len(timers), runtime.cache.sorted.Len())
	// t1 closed its event, t4 moved to a new event, t3 was deleted
	t1New := timers[1].Clone()
	t1New.EventStatus = api.SchedEventIdle
	t1New.EventID = ""
	t1New.Version++
	t4New := timers[4].Clone()
	t4New.EventID = "event_next"
	t4New.Version++
	mockCore.On("List", mock.Anything, mock.Anything).
		Return([]*api.TimerRecord{timers[0], t1New, timers[2], t4New}, nil).Once()
	require.True(t, runtime.tryCloseTriggeringTimers())
	mockCore.AssertExpectations(t)
	require.Equal(t, 2, len(runtime.cache.waitCloseTimerIDs))
	require.Equal(t, 4, len(runtime.cache.items))
	require.Equal(t, 4, runtime.cache.sorted.Len())
	require.Contains(t, runtime.cache.waitCloseTimerIDs, "t0")
	require.Contains(t, runtime.cache.waitCloseTimerIDs, "t2")
	require.Equal(t, timers[0], runtime.cache.items["t0"].timer)
	require.Equal(t, procWaitTriggerClose, runtime.cache.items["t0"].procStatus)
	require.Equal(t, t1New, runtime.cache.items["t1"].timer)
	require.Equal(t, procIdle, runtime.cache.items["t1"].procStatus)
	require.Equal(t, timers[2], runtime.cache.items["t2"].timer)
	require.Equal(t, procWaitTriggerClose, runtime.cache.items["t2"].procStatus)
	require.Equal(t, t4New, runtime.cache.items["t4"].timer)
	require.Equal(t, procIdle, runtime.cache.items["t4"].procStatus)
}
// TestCreateWatchTimerChan checks createWatchTimerChan: when the store
// supports watching the store's channel is returned (and delivers events);
// otherwise the never-delivering idleWatchChan is returned.
func TestCreateWatchTimerChan(t *testing.T) {
	mockCore, mockStore := newMockStore()
	runtime := NewTimerRuntimeBuilder("g1", mockStore).Build()
	ch := make(chan api.WatchTimerResponse, 1)
	ch <- api.WatchTimerResponse{Events: []*api.WatchTimerEvent{{TimerID: "AAA"}}}
	retCh := api.WatchTimerChan(ch)
	mockCore.On("Watch", mock.Anything).Return(retCh).Once()
	mockCore.On("WatchSupported").Return(true).Once()
	got := runtime.createWatchTimerChan(context.Background())
	require.True(t, got != idleWatchChan)
	// the buffered event must be readable without blocking
	select {
	case resp, ok := <-got:
		require.True(t, ok)
		require.Equal(t, 1, len(resp.Events))
		require.Equal(t, "AAA", resp.Events[0].TimerID)
	default:
		require.FailNow(t, "should fail here")
	}
	mockCore.AssertExpectations(t)
	// unsupported watch: the idle channel is returned and never delivers
	mockCore.On("WatchSupported").Return(false).Once()
	got = runtime.createWatchTimerChan(context.Background())
	require.True(t, got == idleWatchChan)
	select {
	case <-got:
		require.FailNow(t, "should fail here")
	default:
	}
	mockCore.AssertExpectations(t)
}
// TestWatchTimerRetry checks that when the watch channel closes, the runtime
// re-establishes the watch, and that the retry happens no sooner than
// reWatchInterval after the first watch started.
func TestWatchTimerRetry(t *testing.T) {
	// shorten the retry interval so the test finishes quickly
	origReWatchInterval := reWatchInterval
	reWatchInterval = 100 * time.Millisecond
	defer func() {
		reWatchInterval = origReWatchInterval
	}()
	mockCore, mockStore := newMockStore()
	// the first watch returns an already-closed channel, forcing a retry
	ch := make(chan api.WatchTimerResponse)
	close(ch)
	closedCh := api.WatchTimerChan(ch)
	ch = make(chan api.WatchTimerResponse)
	normalCh := api.WatchTimerChan(ch)
	mockCore.On("WatchSupported").Return(true).Times(3)
	var watch1StartTime atomic.Pointer[time.Time]
	var watch2StartTime atomic.Pointer[time.Time]
	done := make(chan struct{})
	mockCore.On("List", mock.Anything, mock.Anything).Return([]*api.TimerRecord(nil), nil)
	mockCore.On("Watch", mock.Anything).Return(closedCh).Once().Run(func(args mock.Arguments) {
		now := time.Now()
		watch1StartTime.Store(&now)
	})
	mockCore.On("Watch", mock.Anything).Return(normalCh).Once().Run(func(args mock.Arguments) {
		now := time.Now()
		watch2StartTime.Store(&now)
		close(done)
	})
	runtime := NewTimerRuntimeBuilder("g1", mockStore).Build()
	runtime.Start()
	defer runtime.Stop()
	waitDone(done, time.Minute)
	require.NotNil(t, watch1StartTime.Load())
	require.NotNil(t, watch2StartTime.Load())
	// the rewatch must wait at least reWatchInterval between attempts
	require.GreaterOrEqual(t, watch2StartTime.Load().Sub(*watch1StartTime.Load()), reWatchInterval)
}
// TestTimerFullProcess exercises a complete timer life cycle end to end:
// create a timer, let the runtime trigger its hook (OnPreSchedEvent then
// OnSchedEvent), verify it does not re-trigger while the event is open,
// close the event, and verify the next interval fires again.
func TestTimerFullProcess(t *testing.T) {
	// Shrink runtime intervals so the loop reacts within milliseconds;
	// restore the package-level values on exit.
	origBatchProcessWatchRespInterval := batchProcessWatchRespInterval
	origMinTriggerEventInterval := minTriggerEventInterval
	origMaxTriggerEventInterval := maxTriggerEventInterval
	batchProcessWatchRespInterval = time.Millisecond
	minTriggerEventInterval = time.Millisecond
	maxTriggerEventInterval = 10 * time.Millisecond
	defer func() {
		batchProcessWatchRespInterval = origBatchProcessWatchRespInterval
		minTriggerEventInterval = origMinTriggerEventInterval
		maxTriggerEventInterval = origMaxTriggerEventInterval
	}()
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	// The runtime's clock is faked via an atomic pointer so the test can
	// jump time forward deterministically.
	var now atomic.Pointer[time.Time]
	setNow := func(n time.Time) { now.Store(&n) }
	setNow(time.UnixMilli(0))
	var zeroTime time.Time
	store := api.NewMemoryTimerStore()
	defer store.Close()
	cli := api.NewDefaultTimerClient(store)
	hook := newMockHook()
	runtime := NewTimerRuntimeBuilder("g1", store).
		RegisterHookFactory("h1", func(hookClass string, timerCli api.TimerClient) api.Hook {
			require.Equal(t, "h1", hookClass)
			require.Equal(t, cli, timerCli)
			return hook
		}).
		Build()
	runtime.setNowFunc(func() time.Time {
		return *now.Load()
	})
	var currentEvent atomic.Pointer[string]
	var preSchedTimer atomic.Pointer[api.TimerRecord]
	// checkPreSchedEventFunc validates the arguments OnPreSchedEvent receives
	// and records the event ID/timer for the later OnSchedEvent check.
	checkPreSchedEventFunc := func(checkTimer *api.TimerRecord) func(args mock.Arguments) {
		return func(args mock.Arguments) {
			argCtx, ok := args[0].(context.Context)
			require.True(t, ok)
			require.NotNil(t, argCtx)
			argEvent, ok := args[1].(api.TimerShedEvent)
			require.True(t, ok)
			require.NotNil(t, argEvent)
			eventID := argEvent.EventID()
			currentEvent.Store(&eventID)
			argTimer := argEvent.Timer()
			require.Equal(t, checkTimer, argTimer)
			preSchedTimer.Store(argTimer)
		}
	}
	onSchedDone := make(chan struct{})
	var onSchedTimer atomic.Pointer[api.TimerRecord]
	// checkOnSchedEventFunc validates that OnSchedEvent observes the same
	// event as OnPreSchedEvent, with the expected event data/start time, and
	// signals onSchedDone.
	checkOnSchedEventFunc := func(checkEventData []byte, checkEventStart time.Time) func(args mock.Arguments) {
		return func(args mock.Arguments) {
			argCtx, ok := args[0].(context.Context)
			require.True(t, ok)
			require.NotNil(t, argCtx)
			argEvent, ok := args[1].(api.TimerShedEvent)
			require.True(t, ok)
			require.NotNil(t, argEvent)
			eventID := argEvent.EventID()
			require.Equal(t, *currentEvent.Load(), eventID)
			argTimer := argEvent.Timer()
			preTimer := preSchedTimer.Load()
			require.Equal(t, preTimer.ID, argTimer.ID)
			require.Equal(t, preTimer.TimerSpec, argTimer.TimerSpec)
			require.Equal(t, preTimer.Watermark, argTimer.Watermark)
			require.Equal(t, api.SchedEventTrigger, argTimer.EventStatus)
			require.Equal(t, preTimer.SummaryData, argTimer.SummaryData)
			require.Equal(t, eventID, argTimer.EventID)
			require.Equal(t, checkEventData, argTimer.EventData)
			require.Equal(t, checkEventStart, argTimer.EventStart)
			currentEvent.Store(nil)
			preSchedTimer.Store(nil)
			onSchedTimer.Store(argTimer)
			close(onSchedDone)
		}
	}
	// Delay the hook's Start until after the timer is created, so the first
	// trigger sees the timer record.
	hookStartWait := make(chan time.Time)
	hook.On("Start").Return().Once().WaitUntil(hookStartWait)
	hook.On("Stop").Return().Maybe()
	runtime.Start()
	defer runtime.Stop()
	timer, err := cli.CreateTimer(ctx, api.TimerSpec{
		Key:             "key1",
		Data:            []byte("timer1data"),
		SchedPolicyType: api.SchedEventInterval,
		SchedPolicyExpr: "1m",
		HookClass:       "h1",
		Enable:          true,
	})
	require.NoError(t, err)
	timerID := timer.ID
	close(hookStartWait)
	// First trigger: expect one pre-sched/on-sched pair carrying eventdata1.
	hook.On("OnPreSchedEvent", mock.Anything, mock.Anything).
		Return(api.PreSchedEventResult{EventData: []byte("eventdata1")}, nil).
		Once().
		Run(checkPreSchedEventFunc(timer))
	hook.On("OnSchedEvent", mock.Anything, mock.Anything).
		Return(nil).
		Once().
		Run(checkOnSchedEventFunc([]byte("eventdata1"), *now.Load()))
	waitDone(onSchedDone, 5*time.Second)
	onSchedDone = make(chan struct{})
	hook.AssertExpectations(t)
	timer, err = cli.GetTimerByID(ctx, timerID)
	require.NoError(t, err)
	require.Equal(t, onSchedTimer.Load(), timer)
	onSchedTimer.Store(nil)
	// should not trigger again before close previous event
	setNow(now.Load().Add(2 * time.Minute))
	checkNotDone(onSchedDone, time.Second)
	tmpTimer, err := cli.GetTimerByID(ctx, timerID)
	require.NoError(t, err)
	require.Equal(t, timer, tmpTimer)
	// close event
	err = cli.CloseTimerEvent(ctx, timerID, timer.EventID,
		api.WithSetWatermark(*now.Load()),
		api.WithSetSummaryData([]byte("summary1")),
	)
	require.NoError(t, err)
	// Closing the event resets the event fields and advances the watermark.
	timer, err = cli.GetTimerByID(ctx, timerID)
	require.NoError(t, err)
	require.Equal(t, api.SchedEventIdle, timer.EventStatus)
	require.Empty(t, timer.EventID)
	require.Equal(t, zeroTime, timer.EventStart)
	require.Empty(t, timer.EventData)
	require.Equal(t, []byte("summary1"), timer.SummaryData)
	checkNotDone(onSchedDone, time.Second)
	// trigger again after 1 minute
	setNow(now.Load().Add(time.Minute))
	hook.On("OnPreSchedEvent", mock.Anything, mock.Anything).
		Return(api.PreSchedEventResult{EventData: []byte("eventdata2")}, nil).
		Once().
		Run(checkPreSchedEventFunc(timer))
	hook.On("OnSchedEvent", mock.Anything, mock.Anything).
		Return(nil).
		Once().
		Run(checkOnSchedEventFunc([]byte("eventdata2"), *now.Load()))
	waitDone(onSchedDone, 5*time.Second)
	onSchedDone = make(chan struct{})
	hook.AssertExpectations(t)
	timer, err = cli.GetTimerByID(ctx, timer.ID)
	require.Nil(t, err)
	require.Equal(t, onSchedTimer.Load(), timer)
	onSchedTimer.Store(nil)
}
// TestTimerRuntimeLoopPanicRecover verifies that the runtime's main loop
// recovers from panics raised by the store: it restarts after transient
// panics, keeps retrying under persistent panics, and can always be
// stopped promptly regardless of the retry wait.
func TestTimerRuntimeLoopPanicRecover(t *testing.T) {
	mockCore, mockStore := newMockStore()
	rt := NewTimerRuntimeBuilder("g1", mockStore).Build()
	// start and panic two times, then normal
	started := make(chan struct{})
	mockCore.On("WatchSupported").Return(false).Times(3)
	mockCore.On("List", mock.Anything, mock.Anything).Panic("store panic").Twice()
	mockCore.On("List", mock.Anything, mock.Anything).Return([]*api.TimerRecord(nil), nil).Once().Run(func(args mock.Arguments) {
		close(started)
	})
	rt.retryLoopWait = time.Millisecond
	rt.Start()
	waitDone(started, 5*time.Second)
	mockCore.AssertExpectations(t)
	// normal stop
	stopped := make(chan struct{})
	go func() {
		rt.Stop()
		close(stopped)
	}()
	waitDone(stopped, 5*time.Second)
	mockCore.AssertExpectations(t)
	// start and panic always
	rt = NewTimerRuntimeBuilder("g1", mockStore).Build()
	mockCore.On("WatchSupported").Return(false)
	mockCore.On("List", mock.Anything, mock.Anything).Panic("store panic")
	rt.retryLoopWait = time.Millisecond
	rt.Start()
	time.Sleep(10 * time.Millisecond)
	// can also stop
	stopped = make(chan struct{})
	go func() {
		rt.Stop()
		close(stopped)
	}()
	waitDone(stopped, 5*time.Second)
	mockCore.AssertExpectations(t)
	// stop should stop immediately
	mockCore, mockStore = newMockStore()
	rt = NewTimerRuntimeBuilder("g1", mockStore).Build()
	started = make(chan struct{})
	// once guards against the List expectation firing more than one close.
	var once sync.Once
	mockCore.On("WatchSupported").Return(false).Once()
	mockCore.On("List", mock.Anything, mock.Anything).Once().Run(func(args mock.Arguments) {
		once.Do(func() {
			close(started)
		})
		panic("store panic")
	})
	// Long retry wait: Stop must not block for it.
	rt.retryLoopWait = time.Minute
	rt.Start()
	waitDone(started, 5*time.Second)
	time.Sleep(time.Millisecond)
	stopped = make(chan struct{})
	go func() {
		rt.Stop()
		close(stopped)
	}()
	waitDone(stopped, 5*time.Second)
	mockCore.AssertExpectations(t)
}
|
63280502606254ddb3c34b07b7ea80ac2d2d2355
|
983d2eb284cbdaf95595f5bb63290b43f698a14e2c3da67d3cf1a05343cfc1de
|
kubernetes/kubernetes
|
election.go
|
pkg/controlplane/controller/leaderelection/election.go
|
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"fmt"
"time"
"github.com/blang/semver/v4"
v1 "k8s.io/api/coordination/v1"
v1alpha2 "k8s.io/api/coordination/v1alpha2"
"k8s.io/utils/clock"
)
// pickBestLeaderOldestEmulationVersion returns the candidate that should
// lead under the OldestEmulationVersion strategy: the valid candidate with
// the lowest emulation version (ties broken by compare). Candidates with
// unparsable versions are skipped; nil is returned when none qualify.
func pickBestLeaderOldestEmulationVersion(candidates []*v1alpha2.LeaseCandidate) *v1alpha2.LeaseCandidate {
	var best *v1alpha2.LeaseCandidate
	for _, candidate := range candidates {
		if !validLeaseCandidateForOldestEmulationVersion(candidate) {
			continue
		}
		// compare > 0 means candidate orders before the current best.
		if best == nil || compare(best, candidate) > 0 {
			best = candidate
		}
	}
	return best
}
// pickBestStrategy returns the strategy advertised by the candidate(s) with
// the highest binary version. It fails when no candidates exist, or when two
// candidates at the same (highest) binary version disagree on strategy.
func pickBestStrategy(candidates []*v1alpha2.LeaseCandidate) (v1.CoordinatedLeaseStrategy, error) {
	var none v1.CoordinatedLeaseStrategy
	if len(candidates) == 0 {
		return none, fmt.Errorf("no candidates")
	}
	leaderName := candidates[0].Name
	strategy := candidates[0].Spec.Strategy
	bestVersion := getBinaryVersionOrZero(candidates[0])
	for _, candidate := range candidates[1:] {
		candidateVersion := getBinaryVersionOrZero(candidate)
		switch cmp := bestVersion.Compare(candidateVersion); {
		case cmp < 0:
			// A newer binary wins the right to dictate the strategy.
			strategy = candidate.Spec.Strategy
			bestVersion = candidateVersion
			leaderName = candidate.Name
		case cmp == 0 && candidate.Spec.Strategy != strategy:
			return none, fmt.Errorf("candidates %q, %q at same binary version but received differing strategies %s, %s", leaderName, candidate.Name, strategy, candidate.Spec.Strategy)
		}
	}
	return strategy, nil
}
// validLeaseCandidateForOldestEmulationVersion reports whether both the
// candidate's emulation and binary versions parse as (tolerant) semver.
func validLeaseCandidateForOldestEmulationVersion(l *v1alpha2.LeaseCandidate) bool {
	if _, err := semver.ParseTolerant(l.Spec.EmulationVersion); err != nil {
		return false
	}
	if _, err := semver.ParseTolerant(l.Spec.BinaryVersion); err != nil {
		return false
	}
	return true
}
// getEmulationVersionOrZero parses the candidate's emulation version,
// returning the zero semver.Version when it cannot be parsed so that
// invalid versions sort lowest.
func getEmulationVersionOrZero(l *v1alpha2.LeaseCandidate) semver.Version {
	parsed, err := semver.ParseTolerant(l.Spec.EmulationVersion)
	if err != nil {
		return semver.Version{}
	}
	return parsed
}
// getBinaryVersionOrZero parses the candidate's binary version, returning
// the zero semver.Version when it cannot be parsed so that invalid versions
// sort lowest.
func getBinaryVersionOrZero(l *v1alpha2.LeaseCandidate) semver.Version {
	parsed, err := semver.ParseTolerant(l.Spec.BinaryVersion)
	if err != nil {
		return semver.Version{}
	}
	return parsed
}
// compare orders two candidates: a negative result means lhs is the better
// leader, positive means rhs. Lower emulation version wins first, then lower
// binary version, then the earlier CreationTimestamp (lhs wins exact ties).
func compare(lhs, rhs *v1alpha2.LeaseCandidate) int {
	if c := getEmulationVersionOrZero(lhs).Compare(getEmulationVersionOrZero(rhs)); c != 0 {
		return c
	}
	if c := getBinaryVersionOrZero(lhs).Compare(getBinaryVersionOrZero(rhs)); c != 0 {
		return c
	}
	// Versions tie: prefer the candidate that was created first.
	if lhs.CreationTimestamp.After(rhs.CreationTimestamp.Time) {
		return 1
	}
	return -1
}
// isLeaseExpired reports whether the lease has passed its renewal deadline.
// A lease missing RenewTime or LeaseDurationSeconds is treated as expired.
func isLeaseExpired(clock clock.Clock, lease *v1.Lease) bool {
	now := clock.Now()
	if lease.Spec.RenewTime == nil || lease.Spec.LeaseDurationSeconds == nil {
		return true
	}
	deadline := lease.Spec.RenewTime.Add(time.Duration(*lease.Spec.LeaseDurationSeconds) * time.Second)
	return deadline.Before(now)
}
// isLeaseCandidateExpired reports whether the candidate's last renewal is
// older than leaseCandidateValidDuration. A nil RenewTime counts as expired.
func isLeaseCandidateExpired(clock clock.Clock, lease *v1alpha2.LeaseCandidate) bool {
	now := clock.Now()
	if lease.Spec.RenewTime == nil {
		return true
	}
	return lease.Spec.RenewTime.Add(leaseCandidateValidDuration).Before(now)
}
|
9b77345f76c1d71b62f0bba48ccf078f80fc98b9
|
pkg/controlplane/controller/leaderelection/election_test.go
|
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"testing"
"time"
"github.com/blang/semver/v4"
v1 "k8s.io/api/coordination/v1"
v1alpha2 "k8s.io/api/coordination/v1alpha2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// TestPickBestLeaderOldestEmulationVersion covers the leader-picking rules:
// empty input, single candidate, and tie-breaking by emulation version,
// binary version, and creation timestamp. Only Name/Namespace of the winner
// are asserted below, so the `want` Spec fields are informational only.
func TestPickBestLeaderOldestEmulationVersion(t *testing.T) {
	tests := []struct {
		name       string
		candidates []*v1alpha2.LeaseCandidate
		want       *v1alpha2.LeaseCandidate
	}{
		{
			name:       "empty",
			candidates: []*v1alpha2.LeaseCandidate{},
			want:       nil,
		},
		{
			name: "single candidate",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
			},
			want: &v1alpha2.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
		},
		{
			name: "multiple candidates, different emulation versions",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)},
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate2",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "0.2.0",
						BinaryVersion:    "0.2.0",
					},
				},
			},
			// NOTE(review): the "v1" versions here do not match any candidate;
			// they are inert because only Name/Namespace are compared.
			want: &v1alpha2.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "v1",
					BinaryVersion:    "v1",
				},
			},
		},
		{
			name: "multiple candidates, same emulation versions, different binary versions",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)},
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate2",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.2.0",
					},
				},
			},
			want: &v1alpha2.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
		},
		{
			name: "multiple candidates, same emulation versions, same binary versions, different creation timestamps",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate1",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)},
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "candidate2",
						Namespace:         "default",
						CreationTimestamp: metav1.Time{Time: time.Now()},
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "0.1.0",
						BinaryVersion:    "0.1.0",
					},
				},
			},
			want: &v1alpha2.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "candidate1",
					Namespace: "default",
				},
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := pickBestLeaderOldestEmulationVersion(tt.candidates)
			// When both sides are non-nil, identity is checked by Name/Namespace;
			// otherwise both must be nil.
			if got != nil && tt.want != nil {
				if got.Name != tt.want.Name || got.Namespace != tt.want.Namespace {
					t.Errorf("pickBestLeaderOldestEmulationVersion() = %v, want %v", got, tt.want)
				}
			} else if got != tt.want {
				t.Errorf("pickBestLeaderOldestEmulationVersion() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestValidLeaseCandidateForOldestEmulationVersion checks that a candidate
// is considered valid only when both its emulation and binary versions parse.
func TestValidLeaseCandidateForOldestEmulationVersion(t *testing.T) {
	tests := []struct {
		name      string
		candidate *v1alpha2.LeaseCandidate
		want      bool
	}{
		{
			name: "valid emulation and binary versions",
			candidate: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "0.1.0",
				},
			},
			want: true,
		},
		{
			name: "invalid emulation version",
			candidate: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "invalid",
					BinaryVersion:    "0.1.0",
				},
			},
			want: false,
		},
		{
			name: "invalid binary version",
			candidate: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
					BinaryVersion:    "invalid",
				},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := validLeaseCandidateForOldestEmulationVersion(tt.candidate)
			if got != tt.want {
				t.Errorf("validLeaseCandidateForOldestEmulationVersion() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetEmulationVersion verifies that a parsable emulation version is
// returned as the equivalent semver.Version.
func TestGetEmulationVersion(t *testing.T) {
	cases := []struct {
		name      string
		candidate *v1alpha2.LeaseCandidate
		want      semver.Version
	}{
		{
			name: "valid emulation version",
			candidate: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "0.1.0",
				},
			},
			want: semver.MustParse("0.1.0"),
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Compare finalized forms so build/pre-release noise is ignored.
			if got := getEmulationVersionOrZero(tc.candidate); got.FinalizeVersion() != tc.want.FinalizeVersion() {
				t.Errorf("getEmulationVersion() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestGetBinaryVersion verifies that a parsable binary version is returned
// as the equivalent semver.Version.
func TestGetBinaryVersion(t *testing.T) {
	cases := []struct {
		name      string
		candidate *v1alpha2.LeaseCandidate
		want      semver.Version
	}{
		{
			name: "valid binary version",
			candidate: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					BinaryVersion: "0.3.0",
				},
			},
			want: semver.MustParse("0.3.0"),
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Compare finalized forms so build/pre-release noise is ignored.
			if got := getBinaryVersionOrZero(tc.candidate); got.FinalizeVersion() != tc.want.FinalizeVersion() {
				t.Errorf("getBinaryVersion() = %v, want %v", got, tc.want)
			}
		})
	}
}
// TestCompare covers the candidate ordering: timestamp tie-break, missing
// or unparsable versions (which sort as the zero version and thus "win"
// under oldest-emulation-version semantics), and plain version ordering.
func TestCompare(t *testing.T) {
	nowTime := time.Now()
	cases := []struct {
		name           string
		lhs            *v1alpha2.LeaseCandidate
		rhs            *v1alpha2.LeaseCandidate
		expectedResult int
	}{
		{
			name: "identical versions earlier timestamp",
			lhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
				ObjectMeta: metav1.ObjectMeta{
					// 1ns later than rhs, so rhs is preferred (result 1).
					CreationTimestamp: metav1.Time{Time: nowTime.Add(time.Duration(1))},
				},
			},
			rhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
				ObjectMeta: metav1.ObjectMeta{
					CreationTimestamp: metav1.Time{Time: nowTime},
				},
			},
			expectedResult: 1,
		},
		{
			name: "no lhs version",
			lhs:  &v1alpha2.LeaseCandidate{},
			rhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			expectedResult: -1,
		},
		{
			name: "no rhs version",
			lhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			rhs:            &v1alpha2.LeaseCandidate{},
			expectedResult: 1,
		},
		{
			name: "invalid lhs version",
			lhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "xyz",
					BinaryVersion:    "xyz",
				},
			},
			rhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			expectedResult: -1,
		},
		{
			name: "invalid rhs version",
			lhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.21.0",
				},
			},
			rhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "xyz",
					BinaryVersion:    "xyz",
				},
			},
			expectedResult: 1,
		},
		{
			name: "lhs less than rhs",
			lhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.20.0",
				},
			},
			rhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.20.0",
				},
			},
			expectedResult: -1,
		},
		{
			name: "rhs less than lhs",
			lhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.20.0",
					BinaryVersion:    "1.20.0",
				},
			},
			rhs: &v1alpha2.LeaseCandidate{
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.20.0",
				},
			},
			expectedResult: 1,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			result := compare(tc.lhs, tc.rhs)
			if result != tc.expectedResult {
				t.Errorf("Expected comparison result of %d but got %d", tc.expectedResult, result)
			}
		})
	}
}
// TestShouldReelect verifies that re-election is only requested when a
// strictly better candidate than the current leader exists: newer binaries
// alone do not force a re-election, and an empty candidate list never does.
func TestShouldReelect(t *testing.T) {
	cases := []struct {
		name          string
		candidates    []*v1alpha2.LeaseCandidate
		currentLeader *v1alpha2.LeaseCandidate
		expectResult  bool
	}{
		{
			name: "candidate with newer binary version",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-1",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.19.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-2",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.20.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
			},
			currentLeader: &v1alpha2.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name: "component-identity-1",
				},
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.19.0",
					Strategy:         v1.OldestEmulationVersion,
				},
			},
			expectResult: false,
		},
		{
			name: "no newer candidates",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-1",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.19.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "component-identity-2",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						EmulationVersion: "1.19.0",
						BinaryVersion:    "1.19.0",
						Strategy:         v1.OldestEmulationVersion,
					},
				},
			},
			currentLeader: &v1alpha2.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name: "component-identity-1",
				},
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.19.0",
					Strategy:         v1.OldestEmulationVersion,
				},
			},
			expectResult: false,
		},
		{
			name:       "no candidates",
			candidates: []*v1alpha2.LeaseCandidate{},
			currentLeader: &v1alpha2.LeaseCandidate{
				ObjectMeta: metav1.ObjectMeta{
					Name: "component-identity-1",
				},
				Spec: v1alpha2.LeaseCandidateSpec{
					EmulationVersion: "1.19.0",
					BinaryVersion:    "1.19.0",
					Strategy:         v1.OldestEmulationVersion,
				},
			},
			expectResult: false,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			result := shouldReelect(tc.candidates, tc.currentLeader)
			if tc.expectResult != result {
				t.Errorf("Expected %t but got %t", tc.expectResult, result)
			}
		})
	}
}
// TestPickBestStrategy covers strategy resolution: a single candidate's
// strategy is taken as-is; a strictly newer binary version dictates the
// strategy (in either input order, and with proper numeric — not string —
// version comparison); candidates at the same binary version must agree or
// the function errors.
func TestPickBestStrategy(t *testing.T) {
	tests := []struct {
		name         string
		candidates   []*v1alpha2.LeaseCandidate
		wantStrategy v1.CoordinatedLeaseStrategy
		wantError    bool
	}{
		{
			name: "single candidate, single preferred strategy",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName: "component-A",
						Strategy:  v1.OldestEmulationVersion,
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			// No binary versions set: both parse to the zero version, so the
			// differing strategies conflict.
			name: "multiple candidates, different preferred strategies should fail",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName: "component-A",
						Strategy:  v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName: "component-A",
						Strategy:  v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantError: true,
		},
		{
			name: "multiple candidates, different preferred strategy different binary version should resolve",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.32.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			name: "multiple candidates, different preferred strategy different binary version should resolve, order agnostic",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.32.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			// 1.1.10 > 1.1.2 numerically but not lexically; guards against
			// accidental string comparison of versions.
			name: "multiple candidates, different preferred strategy different binary version string comparison check",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.1.10",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.1.2",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			name: "multiple candidates, same preferred strategy",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
			},
			wantStrategy: v1.OldestEmulationVersion,
			wantError:    false,
		},
		{
			name: "multiple candidates, conflicting preferred strategy",
			candidates: []*v1alpha2.LeaseCandidate{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate1",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.OldestEmulationVersion,
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "candidate2",
						Namespace: "default",
					},
					Spec: v1alpha2.LeaseCandidateSpec{
						LeaseName:     "component-A",
						BinaryVersion: "1.31.0",
						Strategy:      v1.CoordinatedLeaseStrategy("foo.com/bar"),
					},
				},
			},
			wantStrategy: "",
			wantError:    true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			gotStrategy, err := pickBestStrategy(tc.candidates)
			gotError := err != nil
			if gotError != tc.wantError {
				t.Errorf("pickBestStrategy() error = %v,:%v want %v", gotError, err, tc.wantError)
			}
			if !gotError && gotStrategy != tc.wantStrategy {
				t.Errorf("pickBestStrategy() = %v, want %v", gotStrategy, tc.wantStrategy)
			}
		})
	}
}
// shouldReelect reports whether the set of candidates contains a strictly
// better leader than currentLeader (test helper).
func shouldReelect(candidates []*v1alpha2.LeaseCandidate, currentLeader *v1alpha2.LeaseCandidate) bool {
	best := pickBestLeaderOldestEmulationVersion(candidates)
	if best == nil {
		// Nothing eligible to take over.
		return false
	}
	return compare(currentLeader, best) > 0
}
|
c6b08d2c7f70a4338e825117b2855709e1c204ae
|
33de171dd038c1cbbc8d6e6af05f6181485b9a2353b362f91ed62c4e9aa91c26
|
prometheus/prometheus
|
zookeeper.go
|
discovery/zookeeper/zookeeper.go
|
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zookeeper
import (
"context"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net"
"strconv"
"strings"
"time"
"github.com/go-zookeeper/zk"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/targetgroup"
"github.com/prometheus/prometheus/util/strutil"
"github.com/prometheus/prometheus/util/treecache"
)
var (
	// DefaultServersetSDConfig is the default Serverset SD configuration.
	DefaultServersetSDConfig = ServersetSDConfig{
		// Zookeeper session timeout applied when none is configured.
		Timeout: model.Duration(10 * time.Second),
	}
	// DefaultNerveSDConfig is the default Nerve SD configuration.
	DefaultNerveSDConfig = NerveSDConfig{
		// Zookeeper session timeout applied when none is configured.
		Timeout: model.Duration(10 * time.Second),
	}
)
// init registers both Zookeeper-backed SD mechanisms with the discovery
// registry so they can be referenced from the Prometheus configuration.
func init() {
	discovery.RegisterConfig(&ServersetSDConfig{})
	discovery.RegisterConfig(&NerveSDConfig{})
}
// ServersetSDConfig is the configuration for Twitter serversets in Zookeeper based discovery.
type ServersetSDConfig struct {
	Servers []string       `yaml:"servers"`           // Zookeeper server addresses.
	Paths   []string       `yaml:"paths"`             // Base znode paths to watch; must start with '/'.
	Timeout model.Duration `yaml:"timeout,omitempty"` // Zookeeper session timeout.
}
// NewDiscovererMetrics implements discovery.Config.
// Serverset SD exposes no per-discoverer metrics, so a no-op is returned.
func (*ServersetSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &discovery.NoopDiscovererMetrics{}
}
// Name returns the name of the Config.
func (*ServersetSDConfig) Name() string { return "serverset" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *ServersetSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewServersetDiscovery(c, opts.Logger)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface. It applies the
// package defaults, decodes the document, and validates that at least one
// server and one absolute path are configured.
func (c *ServersetSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*c = DefaultServersetSDConfig
	// The plain alias strips UnmarshalYAML to avoid infinite recursion.
	type plain ServersetSDConfig
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}
	switch {
	case len(c.Servers) == 0:
		return errors.New("serverset SD config must contain at least one Zookeeper server")
	case len(c.Paths) == 0:
		return errors.New("serverset SD config must contain at least one path")
	}
	for _, path := range c.Paths {
		if !strings.HasPrefix(path, "/") {
			return fmt.Errorf("serverset SD config paths must begin with '/': %s", path)
		}
	}
	return nil
}
// NerveSDConfig is the configuration for AirBnB's Nerve in Zookeeper based discovery.
type NerveSDConfig struct {
	Servers []string       `yaml:"servers"`           // Zookeeper server addresses.
	Paths   []string       `yaml:"paths"`             // Base znode paths to watch; must start with '/'.
	Timeout model.Duration `yaml:"timeout,omitempty"` // Zookeeper session timeout.
}
// NewDiscovererMetrics implements discovery.Config.
// Nerve SD exposes no per-discoverer metrics, so a no-op is returned.
func (*NerveSDConfig) NewDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &discovery.NoopDiscovererMetrics{}
}
// Name returns the name of the Config.
func (*NerveSDConfig) Name() string { return "nerve" }
// NewDiscoverer returns a Discoverer for the Config.
func (c *NerveSDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewNerveDiscovery(c, opts.Logger)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface. It applies the
// package defaults, decodes the document, and validates that at least one
// server and one absolute path are configured.
func (c *NerveSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
	*c = DefaultNerveSDConfig
	// The plain alias strips UnmarshalYAML to avoid infinite recursion.
	type plain NerveSDConfig
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}
	switch {
	case len(c.Servers) == 0:
		return errors.New("nerve SD config must contain at least one Zookeeper server")
	case len(c.Paths) == 0:
		return errors.New("nerve SD config must contain at least one path")
	}
	for _, path := range c.Paths {
		if !strings.HasPrefix(path, "/") {
			return fmt.Errorf("nerve SD config paths must begin with '/': %s", path)
		}
	}
	return nil
}
// Discovery implements the Discoverer interface for discovering
// targets from Zookeeper.
type Discovery struct {
	conn *zk.Conn // Shared Zookeeper connection.
	// sources maps znode path to the last target group emitted for it.
	sources map[string]*targetgroup.Group
	// updates is the fan-in channel fed by all per-path channels.
	updates chan treecache.ZookeeperTreeCacheEvent
	// pathUpdates holds one event channel per configured base path.
	pathUpdates []chan treecache.ZookeeperTreeCacheEvent
	// treeCaches watch each base path and emit events on pathUpdates.
	treeCaches []*treecache.ZookeeperTreeCache
	// parse converts raw znode data into a label set (serverset or nerve format).
	parse func(data []byte, path string) (model.LabelSet, error)
	logger *slog.Logger
}
// NewNerveDiscovery returns a new Discovery for the given Nerve config.
func NewNerveDiscovery(conf *NerveSDConfig, logger *slog.Logger) (*Discovery, error) {
	return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseNerveMember)
}
// NewServersetDiscovery returns a new Discovery for the given serverset config.
func NewServersetDiscovery(conf *ServersetSDConfig, logger *slog.Logger) (*Discovery, error) {
	return NewDiscovery(conf.Servers, time.Duration(conf.Timeout), conf.Paths, logger, parseServersetMember)
}
// NewDiscovery returns a new discovery along Zookeeper parses with
// the given parse function. It connects to the given servers and sets up one
// tree cache per base path; the returned error is the connection error, if any.
func NewDiscovery(
	srvs []string,
	timeout time.Duration,
	paths []string,
	logger *slog.Logger,
	pf func(data []byte, path string) (model.LabelSet, error),
) (*Discovery, error) {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	// Route the Zookeeper client's own log output through our logger.
	conn, _, err := zk.Connect(srvs, timeout, func(c *zk.Conn) {
		c.SetLogger(treecache.NewZookeeperLogger(logger))
	})
	if err != nil {
		return nil, err
	}
	d := &Discovery{
		conn:    conn,
		updates: make(chan treecache.ZookeeperTreeCacheEvent),
		sources: map[string]*targetgroup.Group{},
		parse:   pf,
		logger:  logger,
	}
	// One tree cache and one event channel per configured base path.
	for _, p := range paths {
		events := make(chan treecache.ZookeeperTreeCacheEvent)
		d.pathUpdates = append(d.pathUpdates, events)
		d.treeCaches = append(d.treeCaches, treecache.NewZookeeperTreeCache(conn, p, events, logger))
	}
	return d, nil
}
// Run implements the Discoverer interface.
//
// It fans per-path tree-cache events into d.updates, converts each event
// into a single-source target group, and sends it on ch. It blocks until
// ctx is canceled, then tears down the tree caches and the connection.
func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {
	defer func() {
		// Stop the caches first so they close their event channels ...
		for _, tc := range d.treeCaches {
			tc.Stop()
		}
		for _, pathUpdate := range d.pathUpdates {
			// Drain event channel in case the treecache leaks goroutines otherwise.
			for range pathUpdate {
			}
		}
		d.conn.Close()
	}()

	// Forward each per-path channel onto the single updates channel.
	// The forwarders exit on ctx cancellation or channel close.
	for _, pathUpdate := range d.pathUpdates {
		go func(update chan treecache.ZookeeperTreeCacheEvent) {
			for event := range update {
				select {
				case d.updates <- event:
				case <-ctx.Done():
					return
				}
			}
		}(pathUpdate)
	}

	for {
		select {
		case <-ctx.Done():
			return
		case event := <-d.updates:
			tg := &targetgroup.Group{
				Source: event.Path,
			}
			if event.Data != nil {
				labelSet, err := d.parse(*event.Data, event.Path)
				if err == nil {
					tg.Targets = []model.LabelSet{labelSet}
					d.sources[event.Path] = tg
				} else {
					// Unparsable data: drop the source. tg is still sent
					// (with no targets) so downstream removes the group.
					delete(d.sources, event.Path)
				}
			} else {
				// Node deleted: emit an empty group for this source.
				delete(d.sources, event.Path)
			}
			select {
			case <-ctx.Done():
				return
			case ch <- []*targetgroup.Group{tg}:
			}
		}
	}
}
// Meta label names attached to targets discovered from serverset znodes.
const (
	serversetLabelPrefix         = model.MetaLabelPrefix + "serverset_"
	serversetStatusLabel         = serversetLabelPrefix + "status"
	serversetPathLabel           = serversetLabelPrefix + "path"
	serversetEndpointLabelPrefix = serversetLabelPrefix + "endpoint"
	serversetShardLabel          = serversetLabelPrefix + "shard"
)
// serversetMember is the JSON document stored in a serverset znode
// (Finagle/Aurora serverset format).
type serversetMember struct {
	ServiceEndpoint     serversetEndpoint
	AdditionalEndpoints map[string]serversetEndpoint
	Status              string `json:"status"`
	Shard               int    `json:"shard"`
}
// serversetEndpoint is a single host:port endpoint within a serverset member.
type serversetEndpoint struct {
	Host string
	Port int
}
// parseServersetMember decodes a serverset znode payload into the label set
// for one target. path is the znode path, used both in error messages and
// as the value of the path meta label.
func parseServersetMember(data []byte, path string) (model.LabelSet, error) {
	var member serversetMember
	if err := json.Unmarshal(data, &member); err != nil {
		return nil, fmt.Errorf("error unmarshaling serverset member %q: %w", path, err)
	}
	ep := member.ServiceEndpoint
	labels := model.LabelSet{
		serversetPathLabel: model.LabelValue(path),
		model.AddressLabel: model.LabelValue(net.JoinHostPort(ep.Host, strconv.Itoa(ep.Port))),

		serversetEndpointLabelPrefix + "_host": model.LabelValue(ep.Host),
		serversetEndpointLabelPrefix + "_port": model.LabelValue(strconv.Itoa(ep.Port)),

		serversetStatusLabel: model.LabelValue(member.Status),
		serversetShardLabel:  model.LabelValue(strconv.Itoa(member.Shard)),
	}
	// Each named additional endpoint contributes a host and port label pair,
	// with the name sanitized into a valid label-name suffix.
	for name, endpoint := range member.AdditionalEndpoints {
		cleanName := model.LabelName(strutil.SanitizeLabelName(name))
		labels[serversetEndpointLabelPrefix+"_host_"+cleanName] = model.LabelValue(endpoint.Host)
		labels[serversetEndpointLabelPrefix+"_port_"+cleanName] = model.LabelValue(strconv.Itoa(endpoint.Port))
	}
	return labels, nil
}
// Meta label names attached to targets discovered from Nerve znodes.
const (
	nerveLabelPrefix         = model.MetaLabelPrefix + "nerve_"
	nervePathLabel           = nerveLabelPrefix + "path"
	nerveEndpointLabelPrefix = nerveLabelPrefix + "endpoint"
)
// nerveMember is the JSON document stored in an Airbnb Nerve znode.
type nerveMember struct {
	Host string `json:"host"`
	Port int    `json:"port"`
	Name string `json:"name"`
}
// parseNerveMember decodes a Nerve znode payload into the label set for one
// target. path is the znode path, used in error messages and as the value of
// the path meta label.
func parseNerveMember(data []byte, path string) (model.LabelSet, error) {
	var member nerveMember
	if err := json.Unmarshal(data, &member); err != nil {
		return nil, fmt.Errorf("error unmarshaling nerve member %q: %w", path, err)
	}
	port := strconv.Itoa(member.Port)
	return model.LabelSet{
		nervePathLabel:     model.LabelValue(path),
		model.AddressLabel: model.LabelValue(net.JoinHostPort(member.Host, port)),

		nerveEndpointLabelPrefix + "_host": model.LabelValue(member.Host),
		nerveEndpointLabelPrefix + "_port": model.LabelValue(port),
		nerveEndpointLabelPrefix + "_name": model.LabelValue(member.Name),
	}, nil
}
|
a1cfe3d055b356458381098d97222d5da7acdc95
|
discovery/zookeeper/zookeeper_test.go
|
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package zookeeper
import (
"testing"
"time"
"github.com/prometheus/common/model"
"github.com/stretchr/testify/require"
"go.uber.org/goleak"
)
// TestMain verifies no goroutines are leaked by any test in this package.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
// TestNewDiscoveryError checks that NewDiscovery surfaces the error from
// connection setup when given an unreachable server address.
func TestNewDiscoveryError(t *testing.T) {
	_, err := NewDiscovery(
		[]string{"unreachable.test"},
		time.Second, []string{"/"},
		nil,
		func(data []byte, path string) (model.LabelSet, error) { return nil, nil })
	require.Error(t, err)
}
|
c2b41ce7a3ca7b3ccb83b9811e501b0944380aff
|
7c2b3ca106bf35c6031a97c7678c8e4afa60029b06ab54a82cecfb1d685dbe96
|
golang/go
|
p256_asm.go
|
src/crypto/internal/fips/nistec/p256_asm.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the Go wrapper for the constant-time, 64-bit assembly
// implementation of P256. The optimizations performed here are described in
// detail in:
// S.Gueron and V.Krasnov, "Fast prime field elliptic-curve cryptography with
// 256-bit primes"
// https://link.springer.com/article/10.1007%2Fs13389-014-0090-x
// https://eprint.iacr.org/2013/816.pdf
//go:build (amd64 || arm64 || ppc64le || s390x) && !purego
package nistec
import (
"crypto/internal/fipsdeps/byteorder"
"errors"
"math/bits"
"runtime"
"unsafe"
)
// p256Element is a P-256 base field element in [0, P-1] in the Montgomery
// domain (with R 2²⁵⁶) as four limbs in little-endian order value.
type p256Element [4]uint64
// p256One is one in the Montgomery domain (i.e. R mod p).
var p256One = p256Element{0x0000000000000001, 0xffffffff00000000,
	0xffffffffffffffff, 0x00000000fffffffe}

// p256Zero is zero, identical in and out of the Montgomery domain.
var p256Zero = p256Element{}

// p256P is 2²⁵⁶ - 2²²⁴ + 2¹⁹² + 2⁹⁶ - 1 in the Montgomery domain.
var p256P = p256Element{0xffffffffffffffff, 0x00000000ffffffff,
	0x0000000000000000, 0xffffffff00000001}
// P256Point is a P-256 point. The zero value should not be assumed to be valid
// (although it is in this implementation).
type P256Point struct {
	// (X:Y:Z) are Jacobian coordinates where x = X/Z² and y = Y/Z³. The point
	// at infinity can be represented by any set of coordinates with Z = 0.
	x, y, z p256Element
}
// NewP256Point returns a new P256Point representing the point at infinity.
func NewP256Point() *P256Point {
	p := new(P256Point)
	p.x = p256One
	p.y = p256One
	p.z = p256Zero
	return p
}
// SetGenerator sets p to the canonical generator and returns p.
// The coordinates are the standard NIST P-256 base point, pre-converted
// to the Montgomery domain.
func (p *P256Point) SetGenerator() *P256Point {
	p.x = p256Element{0x79e730d418a9143c, 0x75ba95fc5fedb601,
		0x79fb732b77622510, 0x18905f76a53755c6}
	p.y = p256Element{0xddf25357ce95560a, 0x8b4ab8e4ba19e45c,
		0xd2e88688dd21f325, 0x8571ff1825885d85}
	p.z = p256One
	return p
}
// Set sets p = q and returns p.
func (p *P256Point) Set(q *P256Point) *P256Point {
	p.x = q.x
	p.y = q.y
	p.z = q.z
	return p
}
// Encoded lengths per SEC 1, Version 2.0, Section 2.3.3: one byte of point
// format prefix, then one or two 32-byte field elements.
const p256ElementLength = 32
const p256UncompressedLength = 1 + 2*p256ElementLength
const p256CompressedLength = 1 + p256ElementLength
// SetBytes sets p to the compressed, uncompressed, or infinity value encoded in
// b, as specified in SEC 1, Version 2.0, Section 2.3.4. If the point is not on
// the curve, it returns nil and an error, and the receiver is unchanged.
// Otherwise, it returns p.
func (p *P256Point) SetBytes(b []byte) (*P256Point, error) {
	// p256Mul operates in the Montgomery domain with R = 2²⁵⁶ mod p. Thus rr
	// here is R in the Montgomery domain, or R×R mod p. See comment in
	// P256OrdInverse about how this is used.
	rr := p256Element{0x0000000000000003, 0xfffffffbffffffff,
		0xfffffffffffffffe, 0x00000004fffffffd}

	switch {
	// Point at infinity.
	case len(b) == 1 && b[0] == 0:
		return p.Set(NewP256Point()), nil
	// Uncompressed form.
	case len(b) == p256UncompressedLength && b[0] == 4:
		var r P256Point
		p256BigToLittle(&r.x, (*[32]byte)(b[1:33]))
		p256BigToLittle(&r.y, (*[32]byte)(b[33:65]))
		// Reject non-canonical encodings (coordinates >= p).
		if p256LessThanP(&r.x) == 0 || p256LessThanP(&r.y) == 0 {
			return nil, errors.New("invalid P256 element encoding")
		}
		// Multiplying by R×R brings the coordinates into the Montgomery domain.
		p256Mul(&r.x, &r.x, &rr)
		p256Mul(&r.y, &r.y, &rr)
		if err := p256CheckOnCurve(&r.x, &r.y); err != nil {
			return nil, err
		}
		r.z = p256One
		return p.Set(&r), nil
	// Compressed form.
	case len(b) == p256CompressedLength && (b[0] == 2 || b[0] == 3):
		var r P256Point
		p256BigToLittle(&r.x, (*[32]byte)(b[1:33]))
		if p256LessThanP(&r.x) == 0 {
			return nil, errors.New("invalid P256 element encoding")
		}
		p256Mul(&r.x, &r.x, &rr)

		// y² = x³ - 3x + b
		p256Polynomial(&r.y, &r.x)
		if !p256Sqrt(&r.y, &r.y) {
			return nil, errors.New("invalid P256 compressed point encoding")
		}

		// Select the positive or negative root, as indicated by the least
		// significant bit, based on the encoding type byte.
		yy := new(p256Element)
		p256FromMont(yy, &r.y)
		cond := int(yy[0]&1) ^ int(b[0]&1)
		p256NegCond(&r.y, cond)

		r.z = p256One
		return p.Set(&r), nil
	default:
		return nil, errors.New("invalid P256 point encoding")
	}
}
// p256Polynomial sets y2 to x³ - 3x + b, and returns y2.
// All values are in the Montgomery domain.
func p256Polynomial(y2, x *p256Element) *p256Element {
	x3 := new(p256Element)
	p256Sqr(x3, x, 1)
	p256Mul(x3, x3, x)

	// threeX = -(3x), computed as -(x + x + x).
	threeX := new(p256Element)
	p256Add(threeX, x, x)
	p256Add(threeX, threeX, x)
	p256NegCond(threeX, 1)

	// p256B is the curve coefficient b in the Montgomery domain.
	p256B := &p256Element{0xd89cdf6229c4bddf, 0xacf005cd78843090,
		0xe5a220abf7212ed6, 0xdc30061d04874834}

	p256Add(x3, x3, threeX)
	p256Add(x3, x3, p256B)

	*y2 = *x3
	return y2
}
// p256CheckOnCurve returns nil if (x, y) satisfies the curve equation,
// and an error otherwise. x and y are in the Montgomery domain.
func p256CheckOnCurve(x, y *p256Element) error {
	// y² = x³ - 3x + b
	rhs := p256Polynomial(new(p256Element), x)
	lhs := new(p256Element)
	p256Sqr(lhs, y, 1)
	if p256Equal(lhs, rhs) != 1 {
		return errors.New("P256 point not on curve")
	}
	return nil
}
// p256LessThanP returns 1 if x < p, and 0 otherwise. Note that a p256Element is
// not allowed to be equal to or greater than p, so if this function returns 0
// then x is invalid.
func p256LessThanP(x *p256Element) int {
	// Subtract p limb by limb; x < p exactly when the final borrow is set.
	var borrow uint64
	for i := range x {
		_, borrow = bits.Sub64(x[i], p256P[i], borrow)
	}
	return int(borrow)
}
// p256BigToLittle decodes a 32-byte big-endian value into little-endian limbs.
func p256BigToLittle(l *p256Element, b *[32]byte) {
	bytesToLimbs((*[4]uint64)(l), b)
}
// bytesToLimbs decodes a 32-byte big-endian value into four little-endian
// uint64 limbs: limb 0 holds the least significant eight bytes.
func bytesToLimbs(l *[4]uint64, b *[32]byte) {
	for i := range l {
		l[i] = byteorder.BEUint64(b[24-8*i:])
	}
}
// p256LittleToBig encodes little-endian limbs as a 32-byte big-endian value.
func p256LittleToBig(b *[32]byte, l *p256Element) {
	limbsToBytes(b, (*[4]uint64)(l))
}
// limbsToBytes encodes four little-endian uint64 limbs as a 32-byte
// big-endian value: limb 0 lands in the last eight bytes.
func limbsToBytes(b *[32]byte, l *[4]uint64) {
	for i, limb := range l {
		byteorder.BEPutUint64(b[24-8*i:], limb)
	}
}
// p256Add sets res = x + y mod p, in constant time.
func p256Add(res, x, y *p256Element) {
	var c, b uint64
	// Fixed-size stack arrays instead of make([]uint64, 4): this function is
	// on the hot path of field arithmetic and the slices forced avoidable
	// allocations and bounds checks.
	var t1, t2 [4]uint64
	t1[0], c = bits.Add64(x[0], y[0], 0)
	t1[1], c = bits.Add64(x[1], y[1], c)
	t1[2], c = bits.Add64(x[2], y[2], c)
	t1[3], c = bits.Add64(x[3], y[3], c)
	t2[0], b = bits.Sub64(t1[0], p256P[0], 0)
	t2[1], b = bits.Sub64(t1[1], p256P[1], b)
	t2[2], b = bits.Sub64(t1[2], p256P[2], b)
	t2[3], b = bits.Sub64(t1[3], p256P[3], b)
	// Three options:
	//   - a+b < p
	//     then c is 0, b is 1, and t1 is correct
	//   - p <= a+b < 2^256
	//     then c is 0, b is 0, and t2 is correct
	//   - 2^256 <= a+b
	//     then c is 1, b is 1, and t2 is correct
	t2Mask := (c ^ b) - 1
	res[0] = (t1[0] & ^t2Mask) | (t2[0] & t2Mask)
	res[1] = (t1[1] & ^t2Mask) | (t2[1] & t2Mask)
	res[2] = (t1[2] & ^t2Mask) | (t2[2] & t2Mask)
	res[3] = (t1[3] & ^t2Mask) | (t2[3] & t2Mask)
}
// p256Sqrt sets e to a square root of x. If x is not a square, p256Sqrt returns
// false and e is unchanged. e and x can overlap.
func p256Sqrt(e, x *p256Element) (isSquare bool) {
	t0, t1 := new(p256Element), new(p256Element)

	// Since p = 3 mod 4, exponentiation by (p + 1) / 4 yields a square root candidate.
	//
	// The sequence of 7 multiplications and 253 squarings is derived from the
	// following addition chain generated with github.com/mmcloughlin/addchain v0.4.0.
	//
	//	_10       = 2*1
	//	_11       = 1 + _10
	//	_1100     = _11 << 2
	//	_1111     = _11 + _1100
	//	_11110000 = _1111 << 4
	//	_11111111 = _1111 + _11110000
	//	x16       = _11111111 << 8 + _11111111
	//	x32       = x16 << 16 + x16
	//	return      ((x32 << 32 + 1) << 96 + 1) << 94
	//
	p256Sqr(t0, x, 1)
	p256Mul(t0, x, t0)
	p256Sqr(t1, t0, 2)
	p256Mul(t0, t0, t1)
	p256Sqr(t1, t0, 4)
	p256Mul(t0, t0, t1)
	p256Sqr(t1, t0, 8)
	p256Mul(t0, t0, t1)
	p256Sqr(t1, t0, 16)
	p256Mul(t0, t0, t1)
	p256Sqr(t0, t0, 32)
	p256Mul(t0, x, t0)
	p256Sqr(t0, t0, 96)
	p256Mul(t0, x, t0)
	p256Sqr(t0, t0, 94)
	// Verify the candidate: it is a root only if squaring it gives back x.
	p256Sqr(t1, t0, 1)
	if p256Equal(t1, x) != 1 {
		return false
	}
	*e = *t0
	return true
}
// The following assembly functions are implemented in p256_asm_*.s

// Montgomery multiplication. Sets res = in1 * in2 * R⁻¹ mod p.
//
//go:noescape
func p256Mul(res, in1, in2 *p256Element)

// Montgomery square, repeated n times (n >= 1).
//
//go:noescape
func p256Sqr(res, in *p256Element, n int)

// Montgomery multiplication by R⁻¹, or 1 outside the domain.
// Sets res = in * R⁻¹, bringing res out of the Montgomery domain.
//
//go:noescape
func p256FromMont(res, in *p256Element)

// If cond is not 0, sets val = -val mod p.
//
//go:noescape
func p256NegCond(val *p256Element, cond int)

// If cond is 0, sets res = b, otherwise sets res = a.
//
//go:noescape
func p256MovCond(res, a, b *P256Point, cond int)
// p256Table is a table of the first 16 multiples of a point. Points are stored
// at an index offset of -1 so [8]P is at index 7, P is at 0, and [16]P is at 15.
// [0]P is the point at infinity and it's not stored.
type p256Table [16]P256Point

// p256Select sets res to the point at index idx in the table.
// idx must be in [0, 15]. It executes in constant time.
//
//go:noescape
func p256Select(res *P256Point, table *p256Table, idx int)
// p256AffinePoint is a point in affine coordinates (x, y). x and y are still
// Montgomery domain elements. The point can't be the point at infinity.
type p256AffinePoint struct {
	x, y p256Element
}
// p256AffineTable is a table of the first 32 multiples of a point. Points are
// stored at an index offset of -1 like in p256Table, and [0]P is not stored.
type p256AffineTable [32]p256AffinePoint
// p256Precomputed is a series of precomputed multiples of G, the canonical
// generator. The first p256AffineTable contains multiples of G. The second one
// multiples of [2⁶]G, the third one of [2¹²]G, and so on, where each successive
// table is the previous table doubled six times. Six is the width of the
// sliding window used in p256ScalarBaseMult, and having each table already
// pre-doubled lets us avoid the doublings between windows entirely. This table
// aliases into p256PrecomputedEmbed.
var p256Precomputed *[43]p256AffineTable

func init() {
	p256PrecomputedPtr := unsafe.Pointer(&p256PrecomputedEmbed)
	if runtime.GOARCH == "s390x" {
		// The embedded table stores limbs little-endian; on s390x each
		// 8-byte limb is decoded with LEUint64 into a fresh table so the
		// in-memory uint64 values are correct for that architecture.
		var newTable [43 * 32 * 2 * 4]uint64
		for i, x := range (*[43 * 32 * 2 * 4][8]byte)(p256PrecomputedPtr) {
			newTable[i] = byteorder.LEUint64(x[:])
		}
		p256PrecomputedPtr = unsafe.Pointer(&newTable)
	}
	p256Precomputed = (*[43]p256AffineTable)(p256PrecomputedPtr)
}
// p256SelectAffine sets res to the point at index idx in the table.
// idx must be in [0, 31]. It executes in constant time.
//
//go:noescape
func p256SelectAffine(res *p256AffinePoint, table *p256AffineTable, idx int)

// Point addition with an affine point and constant time conditions.
// If zero is 0, sets res = in2. If sel is 0, sets res = in1.
// If sign is not 0, sets res = in1 + -in2. Otherwise, sets res = in1 + in2
//
//go:noescape
func p256PointAddAffineAsm(res, in1 *P256Point, in2 *p256AffinePoint, sign, sel, zero int)

// Point addition. Sets res = in1 + in2. Returns one if the two input points
// were equal and zero otherwise. If in1 or in2 are the point at infinity, res
// and the return value are undefined.
//
//go:noescape
func p256PointAddAsm(res, in1, in2 *P256Point) int

// Point doubling. Sets res = in + in. in can be the point at infinity.
//
//go:noescape
func p256PointDoubleAsm(res, in *P256Point)
// p256OrdElement is a P-256 scalar field element in [0, ord(G)-1] in the
// Montgomery domain (with R 2²⁵⁶) as four uint64 limbs in little-endian order.
type p256OrdElement [4]uint64
// p256OrdReduce ensures s is in the range [0, ord(G)-1], in constant time.
func p256OrdReduce(s *p256OrdElement) {
	// Since 2 * ord(G) > 2²⁵⁶, we can just conditionally subtract ord(G),
	// keeping the result if it doesn't underflow.
	t0, b := bits.Sub64(s[0], 0xf3b9cac2fc632551, 0)
	t1, b := bits.Sub64(s[1], 0xbce6faada7179e84, b)
	t2, b := bits.Sub64(s[2], 0xffffffffffffffff, b)
	t3, b := bits.Sub64(s[3], 0xffffffff00000000, b)
	tMask := b - 1 // zero if subtraction underflowed
	// Constant-time select: keep s if the subtraction underflowed,
	// otherwise take the reduced value t.
	s[0] ^= (t0 ^ s[0]) & tMask
	s[1] ^= (t1 ^ s[1]) & tMask
	s[2] ^= (t2 ^ s[2]) & tMask
	s[3] ^= (t3 ^ s[3]) & tMask
}
// p256OrdLittleToBig encodes scalar limbs as a 32-byte big-endian value.
func p256OrdLittleToBig(b *[32]byte, l *p256OrdElement) {
	limbsToBytes(b, (*[4]uint64)(l))
}

// p256OrdBigToLittle decodes a 32-byte big-endian value into scalar limbs.
func p256OrdBigToLittle(l *p256OrdElement, b *[32]byte) {
	bytesToLimbs((*[4]uint64)(l), b)
}
// Add sets q = p1 + p2, and returns q. The points may overlap.
func (q *P256Point) Add(r1, r2 *P256Point) *P256Point {
	// p256PointAddAsm is undefined for equal inputs or points at infinity,
	// so compute both the sum and the double, then select the correct
	// result with constant-time moves.
	var sum, double P256Point
	r1IsInfinity := r1.isInfinity()
	r2IsInfinity := r2.isInfinity()
	pointsEqual := p256PointAddAsm(&sum, r1, r2)
	p256PointDoubleAsm(&double, r1)
	p256MovCond(&sum, &double, &sum, pointsEqual)
	p256MovCond(&sum, r1, &sum, r2IsInfinity)
	p256MovCond(&sum, r2, &sum, r1IsInfinity)
	return q.Set(&sum)
}
// Double sets q = p + p, and returns q. The points may overlap.
func (q *P256Point) Double(p *P256Point) *P256Point {
	var d P256Point
	p256PointDoubleAsm(&d, p)
	q.Set(&d)
	return q
}
// ScalarBaseMult sets r = scalar * generator, where scalar is a 32-byte big
// endian value, and returns r. If scalar is not 32 bytes long, ScalarBaseMult
// returns an error and the receiver is unchanged.
func (r *P256Point) ScalarBaseMult(scalar []byte) (*P256Point, error) {
	if len(scalar) != 32 {
		return nil, errors.New("invalid scalar length")
	}
	s := new(p256OrdElement)
	p256OrdBigToLittle(s, (*[32]byte)(scalar))
	p256OrdReduce(s)
	r.p256BaseMult(s)
	return r, nil
}
// ScalarMult sets r = scalar * q, where scalar is a 32-byte big endian value,
// and returns r. If scalar is not 32 bytes long, ScalarMult returns an
// error and the receiver is unchanged.
func (r *P256Point) ScalarMult(q *P256Point, scalar []byte) (*P256Point, error) {
	if len(scalar) != 32 {
		return nil, errors.New("invalid scalar length")
	}
	scalarReversed := new(p256OrdElement)
	p256OrdBigToLittle(scalarReversed, (*[32]byte)(scalar))
	p256OrdReduce(scalarReversed)
	r.Set(q).p256ScalarMult(scalarReversed)
	return r, nil
}
// uint64IsZero returns 1 if x is zero and zero otherwise, without branching
// (constant time): all bits of ^x are folded down into bit 0.
func uint64IsZero(x uint64) int {
	x = ^x
	for shift := uint(32); shift >= 1; shift >>= 1 {
		x &= x >> shift
	}
	return int(x & 1)
}
// p256Equal returns 1 if a and b are equal and 0 otherwise, in constant time.
func p256Equal(a, b *p256Element) int {
	var diff uint64
	for i := 0; i < 4; i++ {
		diff |= a[i] ^ b[i]
	}
	return uint64IsZero(diff)
}
// isInfinity returns 1 if p is the point at infinity and 0 otherwise
// (i.e. whether Z is zero), in constant time.
func (p *P256Point) isInfinity() int {
	return p256Equal(&p.z, &p256Zero)
}
// Bytes returns the uncompressed or infinity encoding of p, as specified in
// SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the point at
// infinity is shorter than all other encodings.
func (p *P256Point) Bytes() []byte {
	// This function is outlined to make the allocations inline in the caller
	// rather than happen on the heap.
	var out [p256UncompressedLength]byte
	return p.bytes(&out)
}
// bytes is the outlined body of Bytes; out is caller-provided scratch space.
func (p *P256Point) bytes(out *[p256UncompressedLength]byte) []byte {
	// The proper representation of the point at infinity is a single zero byte.
	if p.isInfinity() == 1 {
		return append(out[:0], 0)
	}

	x, y := new(p256Element), new(p256Element)
	p.affineFromMont(x, y)

	out[0] = 4 // Uncompressed form.
	p256LittleToBig((*[32]byte)(out[1:33]), x)
	p256LittleToBig((*[32]byte)(out[33:65]), y)

	return out[:]
}
// affineFromMont sets (x, y) to the affine coordinates of p, converted out of the
// Montgomery domain.
func (p *P256Point) affineFromMont(x, y *p256Element) {
	// x = X/Z², y = Y/Z³: compute z⁻¹, then z⁻² and z⁻³, and multiply.
	p256Inverse(y, &p.z)
	p256Sqr(x, y, 1)
	p256Mul(y, y, x)

	p256Mul(x, &p.x, x)
	p256Mul(y, &p.y, y)

	p256FromMont(x, x)
	p256FromMont(y, y)
}
// BytesX returns the encoding of the x-coordinate of p, as specified in SEC 1,
// Version 2.0, Section 2.3.5, or an error if p is the point at infinity.
func (p *P256Point) BytesX() ([]byte, error) {
	// This function is outlined to make the allocations inline in the caller
	// rather than happen on the heap.
	var out [p256ElementLength]byte
	return p.bytesX(&out)
}
// bytesX is the outlined body of BytesX; out is caller-provided scratch space.
func (p *P256Point) bytesX(out *[p256ElementLength]byte) ([]byte, error) {
	if p.isInfinity() == 1 {
		return nil, errors.New("P256 point is the point at infinity")
	}

	// Affine x = X/Z², converted out of the Montgomery domain.
	x := new(p256Element)
	p256Inverse(x, &p.z)
	p256Sqr(x, x, 1)
	p256Mul(x, &p.x, x)
	p256FromMont(x, x)
	p256LittleToBig((*[32]byte)(out[:]), x)

	return out[:], nil
}
// BytesCompressed returns the compressed or infinity encoding of p, as
// specified in SEC 1, Version 2.0, Section 2.3.3. Note that the encoding of the
// point at infinity is shorter than all other encodings.
func (p *P256Point) BytesCompressed() []byte {
	// This function is outlined to make the allocations inline in the caller
	// rather than happen on the heap.
	var out [p256CompressedLength]byte
	return p.bytesCompressed(&out)
}
// bytesCompressed is the outlined body of BytesCompressed.
func (p *P256Point) bytesCompressed(out *[p256CompressedLength]byte) []byte {
	// The point at infinity encodes as a single zero byte.
	if p.isInfinity() == 1 {
		return append(out[:0], 0)
	}

	x, y := new(p256Element), new(p256Element)
	p.affineFromMont(x, y)

	// Prefix byte 0x02 or 0x03 records the parity of y.
	out[0] = 2 | byte(y[0]&1)
	p256LittleToBig((*[32]byte)(out[1:33]), x)

	return out[:]
}
// Select sets q to p1 if cond == 1, and to p2 if cond == 0.
// It executes in constant time.
func (q *P256Point) Select(p1, p2 *P256Point, cond int) *P256Point {
	p256MovCond(q, p1, p2, cond)
	return q
}
// p256Inverse sets out to in⁻¹ mod p. If in is zero, out will be zero.
func p256Inverse(out, in *p256Element) {
	// Inversion is calculated through exponentiation by p - 2, per Fermat's
	// little theorem.
	//
	// The sequence of 12 multiplications and 255 squarings is derived from the
	// following addition chain generated with github.com/mmcloughlin/addchain
	// v0.4.0.
	//
	//	_10     = 2*1
	//	_11     = 1 + _10
	//	_110    = 2*_11
	//	_111    = 1 + _110
	//	_111000 = _111 << 3
	//	_111111 = _111 + _111000
	//	x12     = _111111 << 6 + _111111
	//	x15     = x12 << 3 + _111
	//	x16     = 2*x15 + 1
	//	x32     = x16 << 16 + x16
	//	i53     = x32 << 15
	//	x47     = x15 + i53
	//	i263    = ((i53 << 17 + 1) << 143 + x47) << 47
	//	return    (x47 + i263) << 2 + 1
	//
	var z = new(p256Element)
	var t0 = new(p256Element)
	var t1 = new(p256Element)

	p256Sqr(z, in, 1)
	p256Mul(z, in, z)
	p256Sqr(z, z, 1)
	p256Mul(z, in, z)
	p256Sqr(t0, z, 3)
	p256Mul(t0, z, t0)
	p256Sqr(t1, t0, 6)
	p256Mul(t0, t0, t1)
	p256Sqr(t0, t0, 3)
	p256Mul(z, z, t0)
	p256Sqr(t0, z, 1)
	p256Mul(t0, in, t0)
	p256Sqr(t1, t0, 16)
	p256Mul(t0, t0, t1)
	p256Sqr(t0, t0, 15)
	p256Mul(z, z, t0)
	p256Sqr(t0, t0, 17)
	p256Mul(t0, in, t0)
	p256Sqr(t0, t0, 143)
	p256Mul(t0, z, t0)
	p256Sqr(t0, t0, 47)
	p256Mul(z, z, t0)
	p256Sqr(z, z, 2)
	p256Mul(out, in, z)
}
// boothW5 recodes a 6-bit window value into a signed-digit (magnitude, sign)
// pair for a 5-bit Booth window: magnitude is in [0, 16], sign is 0 or 1.
func boothW5(in uint) (int, int) {
	neg := ^((in >> 5) - 1) // all-ones iff the window's top (sign) bit is set
	// Select the two's-complement magnitude when negative, in itself otherwise.
	mag := ((1<<6 - in - 1) & neg) | (in & ^neg)
	mag = mag>>1 + mag&1
	return int(mag), int(neg & 1)
}
// boothW6 recodes a 7-bit window value into a signed-digit (magnitude, sign)
// pair for a 6-bit Booth window: magnitude is in [0, 32], sign is 0 or 1.
func boothW6(in uint) (int, int) {
	neg := ^((in >> 6) - 1) // all-ones iff the window's top (sign) bit is set
	// Select the two's-complement magnitude when negative, in itself otherwise.
	mag := ((1<<7 - in - 1) & neg) | (in & ^neg)
	mag = mag>>1 + mag&1
	return int(mag), int(neg & 1)
}
// p256BaseMult sets p = scalar * G using the precomputed tables, scanning the
// scalar in 43 signed 6-bit Booth windows. It executes in constant time.
func (p *P256Point) p256BaseMult(scalar *p256OrdElement) {
	var t0 p256AffinePoint

	// First window: low bits of the scalar, shifted by one for Booth recoding.
	wvalue := (scalar[0] << 1) & 0x7f
	sel, sign := boothW6(uint(wvalue))

	p256SelectAffine(&t0, &p256Precomputed[0], sel)
	p.x, p.y, p.z = t0.x, t0.y, p256One
	p256NegCond(&p.y, sign)

	index := uint(5)
	zero := sel

	for i := 1; i < 43; i++ {
		if index < 192 {
			// Window straddles a limb boundary: stitch bits from two limbs.
			wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x7f
		} else {
			wvalue = (scalar[index/64] >> (index % 64)) & 0x7f
		}
		index += 6
		sel, sign = boothW6(uint(wvalue))
		p256SelectAffine(&t0, &p256Precomputed[i], sel)
		// zero tracks whether any previous window was nonzero, so the first
		// real addition assigns rather than adds.
		p256PointAddAffineAsm(p, p, &t0, sign, sel, zero)
		zero |= sel
	}

	// If the whole scalar was zero, set to the point at infinity.
	p256MovCond(p, p, NewP256Point(), zero)
}
// p256ScalarMult sets p = scalar * p using a window of 5 bits and a lookup
// table of the first 16 multiples of p. It executes in constant time.
func (p *P256Point) p256ScalarMult(scalar *p256OrdElement) {
	// precomp is a table of precomputed points that stores powers of p
	// from p^1 to p^16.
	var precomp p256Table
	var t0, t1, t2, t3 P256Point

	// Prepare the table. Entries are stored at index multiple-1;
	// doublings and additions fill the table in the fewest operations.
	precomp[0] = *p // 1

	p256PointDoubleAsm(&t0, p)
	p256PointDoubleAsm(&t1, &t0)
	p256PointDoubleAsm(&t2, &t1)
	p256PointDoubleAsm(&t3, &t2)
	precomp[1] = t0  // 2
	precomp[3] = t1  // 4
	precomp[7] = t2  // 8
	precomp[15] = t3 // 16

	p256PointAddAsm(&t0, &t0, p)
	p256PointAddAsm(&t1, &t1, p)
	p256PointAddAsm(&t2, &t2, p)
	precomp[2] = t0 // 3
	precomp[4] = t1 // 5
	precomp[8] = t2 // 9

	p256PointDoubleAsm(&t0, &t0)
	p256PointDoubleAsm(&t1, &t1)
	precomp[5] = t0 // 6
	precomp[9] = t1 // 10

	p256PointAddAsm(&t2, &t0, p)
	p256PointAddAsm(&t1, &t1, p)
	precomp[6] = t2  // 7
	precomp[10] = t1 // 11

	p256PointDoubleAsm(&t0, &t0)
	p256PointDoubleAsm(&t2, &t2)
	precomp[11] = t0 // 12
	precomp[13] = t2 // 14

	p256PointAddAsm(&t0, &t0, p)
	p256PointAddAsm(&t2, &t2, p)
	precomp[12] = t0 // 13
	precomp[14] = t2 // 15

	// Start scanning the window from top bit
	index := uint(254)
	var sel, sign int

	wvalue := (scalar[index/64] >> (index % 64)) & 0x3f
	sel, _ = boothW5(uint(wvalue))

	p256Select(p, &precomp, sel)
	zero := sel

	for index > 4 {
		index -= 5
		// Five doublings shift the accumulator one window to the left.
		p256PointDoubleAsm(p, p)
		p256PointDoubleAsm(p, p)
		p256PointDoubleAsm(p, p)
		p256PointDoubleAsm(p, p)
		p256PointDoubleAsm(p, p)

		if index < 192 {
			// Window straddles a limb boundary: stitch bits from two limbs.
			wvalue = ((scalar[index/64] >> (index % 64)) + (scalar[index/64+1] << (64 - (index % 64)))) & 0x3f
		} else {
			wvalue = (scalar[index/64] >> (index % 64)) & 0x3f
		}

		sel, sign = boothW5(uint(wvalue))

		p256Select(&t0, &precomp, sel)
		p256NegCond(&t0.y, sign)
		p256PointAddAsm(&t1, p, &t0)
		// Constant-time selection: keep p on a zero window, and take t0
		// directly if all previous windows were zero.
		p256MovCond(&t1, &t1, p, sel)
		p256MovCond(p, &t1, &t0, zero)
		zero |= sel
	}

	// Final (lowest) window, Booth-shifted by one.
	p256PointDoubleAsm(p, p)
	p256PointDoubleAsm(p, p)
	p256PointDoubleAsm(p, p)
	p256PointDoubleAsm(p, p)
	p256PointDoubleAsm(p, p)

	wvalue = (scalar[0] << 1) & 0x3f
	sel, sign = boothW5(uint(wvalue))

	p256Select(&t0, &precomp, sel)
	p256NegCond(&t0.y, sign)
	p256PointAddAsm(&t1, p, &t0)
	p256MovCond(&t1, &t1, p, sel)
	p256MovCond(p, &t1, &t0, zero)
}
|
08e771cc10015114e8fbe6e09e7935fd40a0785c
|
src/crypto/internal/fips/nistec/p256_asm_test.go
|
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (amd64 || arm64 || ppc64le || s390x) && !purego && linux
package nistec
import (
"syscall"
"testing"
"unsafe"
)
// Lightly adapted from the bytes test package. Allocate a pair of T one at the start of a page, another at the
// end. Any access beyond or before the page boundary should cause a fault. This is linux specific.
//
// The middle page of a three-page anonymous mapping stays readable/writable
// while the pages on either side are mprotect'd to PROT_NONE, so any
// out-of-bounds table read by the constant-time selection assembly faults.
func dangerousObjs[T any](t *testing.T) (start *T, end *T) {
	pagesize := syscall.Getpagesize()
	b, err := syscall.Mmap(0, 0, 3*pagesize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE)
	if err != nil {
		t.Fatalf("mmap failed %s", err)
	}
	err = syscall.Mprotect(b[:pagesize], syscall.PROT_NONE)
	if err != nil {
		t.Fatalf("mprotect low failed %s\n", err)
	}
	err = syscall.Mprotect(b[2*pagesize:], syscall.PROT_NONE)
	if err != nil {
		t.Fatalf("mprotect high failed %s\n", err)
	}
	b = b[pagesize : 2*pagesize]
	// end is placed so the object's last byte abuts the protected high page.
	end = (*T)(unsafe.Pointer(&b[len(b)-(int)(unsafe.Sizeof(*end))]))
	start = (*T)(unsafe.Pointer(&b[0]))
	return start, end
}
// TestP256SelectAffinePageBoundary checks that the constant-time table
// selection never reads outside the table, even at a page boundary.
func TestP256SelectAffinePageBoundary(t *testing.T) {
	var out p256AffinePoint
	begintp, endtp := dangerousObjs[p256AffineTable](t)
	// idx is documented to be in [0, 31]; the original loop stopped at 30,
	// leaving the last table entry — the one right against the protected
	// page — untested. Cover the full range.
	for i := 0; i < 32; i++ {
		p256SelectAffine(&out, begintp, i)
		p256SelectAffine(&out, endtp, i)
	}
}
// TestP256SelectPageBoundary checks that the constant-time table selection
// never reads outside the table, even at a page boundary.
func TestP256SelectPageBoundary(t *testing.T) {
	var out P256Point
	begintp, endtp := dangerousObjs[p256Table](t)
	// idx is documented to be in [0, 15]; the original loop stopped at 14,
	// leaving the last table entry — the one right against the protected
	// page — untested. Cover the full range.
	for i := 0; i < 16; i++ {
		p256Select(&out, begintp, i)
		p256Select(&out, endtp, i)
	}
}
|
71edb9575c43ce3a8a4aeaad21f10173e5778fc3
|
46988d2705294b127235a78253eb0f5fa2c9efa790b8af8cd1ea6f4f7ef1ffe4
|
gohugoio/hugo
|
predicate_identity.go
|
identity/predicate_identity.go
|
// Copyright 2024 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package provides ways to identify values in Hugo. Used for dependency tracking etc.
package identity
import (
"fmt"
"sync/atomic"
hglob "github.com/gohugoio/hugo/hugofs/glob"
)
// NewGlobIdentity creates a new Identity that
// is probably dependent on any other Identity
// that matches the given pattern.
func NewGlobIdentity(pattern string) Identity {
	g, err := hglob.GetGlob(pattern)
	if err != nil {
		// Invalid patterns are programmer errors; fail loudly.
		panic(err)
	}
	return NewPredicateIdentity(
		func(other Identity) bool { return g.Match(other.IdentifierBase()) },
		nil,
	)
}
// predicateIdentityCounter assigns each predicate identity a unique id suffix.
var predicateIdentityCounter = &atomic.Uint32{}

// predicateIdentity is an Identity whose dependency relations are decided by
// the two predicate functions rather than by identifier comparison.
type predicateIdentity struct {
	id                 string
	probablyDependent  func(Identity) bool
	probablyDependency func(Identity) bool
}

// Compile-time interface checks.
var (
	_ IsProbablyDependencyProvider = &predicateIdentity{}
	_ IsProbablyDependentProvider  = &predicateIdentity{}
)
// NewPredicateIdentity creates a new Identity that implements both IsProbablyDependencyProvider and IsProbablyDependentProvider
// using the provided functions, both of which are optional.
func NewPredicateIdentity(
	probablyDependent func(Identity) bool,
	probablyDependency func(Identity) bool,
) *predicateIdentity {
	// Substitute an always-false predicate for any nil argument.
	never := func(Identity) bool { return false }
	if probablyDependent == nil {
		probablyDependent = never
	}
	if probablyDependency == nil {
		probablyDependency = never
	}
	return &predicateIdentity{
		id:                 fmt.Sprintf("predicate%d", predicateIdentityCounter.Add(1)),
		probablyDependent:  probablyDependent,
		probablyDependency: probablyDependency,
	}
}
// IdentifierBase returns this identity's unique generated id.
func (id *predicateIdentity) IdentifierBase() string {
	return id.id
}
// IsProbablyDependent reports whether this identity probably depends on other,
// by delegating to the configured predicate.
func (id *predicateIdentity) IsProbablyDependent(other Identity) bool {
	return id.probablyDependent(other)
}
// IsProbablyDependency reports whether other probably depends on this identity,
// by delegating to the configured predicate.
func (id *predicateIdentity) IsProbablyDependency(other Identity) bool {
	return id.probablyDependency(other)
}
|
bad247867201a77a414678cfd49e89cca5e5ed96
|
identity/predicate_identity_test.go
|
// Copyright 2024 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package provides ways to identify values in Hugo. Used for dependency tracking etc.
package identity
import (
"testing"
qt "github.com/frankban/quicktest"
)
// TestGlobIdentity checks that a glob identity is dependent on matching
// string identities, and only in that direction.
func TestGlobIdentity(t *testing.T) {
	c := qt.New(t)
	gid := NewGlobIdentity("/a/b/*")
	c.Assert(isNotDependent(gid, StringIdentity("/a/b/c")), qt.IsFalse)
	c.Assert(isNotDependent(gid, StringIdentity("/a/c/d")), qt.IsTrue)
	c.Assert(isNotDependent(StringIdentity("/a/b/c"), gid), qt.IsTrue)
	c.Assert(isNotDependent(StringIdentity("/a/c/d"), gid), qt.IsTrue)
}
// isNotDependent reports whether a fresh Finder finds no dependency of a on b.
func isNotDependent(a, b Identity) bool {
	return NewFinder(FinderConfig{}).Contains(a, b, -1) == 0
}
func TestPredicateIdentity(t *testing.T) {
	c := qt.New(t)
	isDependent := func(id Identity) bool {
		return id.IdentifierBase() == "foo"
	}
	isDependency := func(id Identity) bool {
		return id.IdentifierBase() == "baz"
	}
	id := NewPredicateIdentity(isDependent, isDependency)
	// Only identities with base "foo" are considered dependents.
	c.Assert(id.IsProbablyDependent(StringIdentity("foo")), qt.IsTrue)
	c.Assert(id.IsProbablyDependent(StringIdentity("bar")), qt.IsFalse)
	// A predicate identity never matches itself or another predicate
	// identity: their auto-generated bases ("predicate<N>") never equal "foo".
	c.Assert(id.IsProbablyDependent(id), qt.IsFalse)
	c.Assert(id.IsProbablyDependent(NewPredicateIdentity(isDependent, nil)), qt.IsFalse)
	// Only identities with base "baz" are considered dependencies.
	c.Assert(id.IsProbablyDependency(StringIdentity("baz")), qt.IsTrue)
	c.Assert(id.IsProbablyDependency(StringIdentity("foo")), qt.IsFalse)
}
|
3a54dee750a394730da325b0e21db63bd6e3d54c
|
ccf9a46cbf5e4eefd3254e6c80d6a5cfd98a62a8198fdb77fe07304b116930d0
|
kubernetes/kubernetes
|
controller_ref.go
|
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/ptr"
)
// IsControlledBy checks if the object has a controllerRef set to the given owner
func IsControlledBy(obj Object, owner Object) bool {
	controller := GetControllerOfNoCopy(obj)
	return controller != nil && controller.UID == owner.GetUID()
}
// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller
func GetControllerOf(controllee Object) *OwnerReference {
	ref := GetControllerOfNoCopy(controllee)
	if ref == nil {
		return nil
	}
	// Deep-copy the reference so callers cannot mutate the controllee's
	// ownerReferences slice through the returned pointer.
	cp := *ref
	// Controller is guaranteed non-nil here: GetControllerOfNoCopy only
	// returns references whose Controller field is set and true.
	cp.Controller = ptr.To(*ref.Controller)
	if ref.BlockOwnerDeletion != nil {
		cp.BlockOwnerDeletion = ptr.To(*ref.BlockOwnerDeletion)
	}
	return &cp
}
// GetControllerOfNoCopy returns a pointer to the controllerRef if controllee has a controller
func GetControllerOfNoCopy(controllee Object) *OwnerReference {
	refs := controllee.GetOwnerReferences()
	for i := range refs {
		if c := refs[i].Controller; c != nil && *c {
			// Pointer into the slice itself — no allocation; callers that
			// need a mutable copy should use GetControllerOf instead.
			return &refs[i]
		}
	}
	return nil
}
// NewControllerRef creates an OwnerReference pointing to the given owner.
// Both Controller and BlockOwnerDeletion are set to true, marking the owner
// as the managing controller of whatever object the reference is attached to.
func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference {
	return &OwnerReference{
		APIVersion:         gvk.GroupVersion().String(),
		Kind:               gvk.Kind,
		Name:               owner.GetName(),
		UID:                owner.GetUID(),
		BlockOwnerDeletion: ptr.To(true),
		Controller:         ptr.To(true),
	}
}
|
5005beb12db24d833e230ffc723af3b3a1536225
|
staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref_test.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"testing"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type metaObj struct {
ObjectMeta
TypeMeta
}
func TestNewControllerRef(t *testing.T) {
gvk := schema.GroupVersionKind{
Group: "group",
Version: "v1",
Kind: "Kind",
}
obj1 := &metaObj{
ObjectMeta: ObjectMeta{
Name: "name",
UID: "uid1",
},
}
controllerRef := NewControllerRef(obj1, gvk)
if controllerRef.UID != obj1.UID {
t.Errorf("Incorrect UID: %s", controllerRef.UID)
}
if controllerRef.Controller == nil || *controllerRef.Controller != true {
t.Error("Controller must be set to true")
}
if controllerRef.BlockOwnerDeletion == nil || *controllerRef.BlockOwnerDeletion != true {
t.Error("BlockOwnerDeletion must be set to true")
}
if controllerRef.APIVersion == "" ||
controllerRef.Kind == "" ||
controllerRef.Name == "" {
t.Errorf("All controllerRef fields must be set: %v", controllerRef)
}
}
func TestGetControllerOf(t *testing.T) {
gvk := schema.GroupVersionKind{
Group: "group",
Version: "v1",
Kind: "Kind",
}
obj1 := &metaObj{
ObjectMeta: ObjectMeta{
UID: "uid1",
Name: "name1",
},
}
controllerRef := NewControllerRef(obj1, gvk)
controllerRef.BlockOwnerDeletion = nil
var falseRef = false
obj2 := &metaObj{
ObjectMeta: ObjectMeta{
UID: "uid2",
Name: "name1",
OwnerReferences: []OwnerReference{
{
Name: "owner1",
Controller: &falseRef,
},
*controllerRef,
{
Name: "owner2",
Controller: &falseRef,
},
},
},
}
if GetControllerOf(obj1) != nil {
t.Error("GetControllerOf must return null")
}
c := GetControllerOf(obj2)
if c.Name != controllerRef.Name || c.UID != controllerRef.UID {
t.Errorf("Incorrect result of GetControllerOf: %v", c)
}
// test that all pointers are also deep copied
if (c.Controller == controllerRef.Controller) ||
(c.BlockOwnerDeletion != nil && c.BlockOwnerDeletion == controllerRef.BlockOwnerDeletion) {
t.Errorf("GetControllerOf did not return deep copy: %v", c)
}
}
func BenchmarkGetControllerOf(b *testing.B) {
gvk := schema.GroupVersionKind{
Group: "group",
Version: "v1",
Kind: "Kind",
}
obj1 := &metaObj{
ObjectMeta: ObjectMeta{
UID: "9d0cdf8a-dedc-11e9-bf91-42010a800167",
Name: "my-object",
},
}
controllerRef := NewControllerRef(obj1, gvk)
controllerRef2 := *controllerRef
controllerRef2.Controller = nil
obj2 := &metaObj{
ObjectMeta: ObjectMeta{
UID: "uid2",
Name: "name1",
OwnerReferences: []OwnerReference{controllerRef2, controllerRef2, *controllerRef},
},
}
b.ReportAllocs()
b.ResetTimer()
for n := 0; n < b.N; n++ {
c := GetControllerOf(obj2)
if c.Name != controllerRef.Name || c.UID != controllerRef.UID {
b.Errorf("Incorrect result of GetControllerOf: %v", c)
}
}
}
func TestIsControlledBy(t *testing.T) {
gvk := schema.GroupVersionKind{
Group: "group",
Version: "v1",
Kind: "Kind",
}
obj1 := &metaObj{
ObjectMeta: ObjectMeta{
UID: "uid1",
},
}
obj2 := &metaObj{
ObjectMeta: ObjectMeta{
UID: "uid2",
OwnerReferences: []OwnerReference{
*NewControllerRef(obj1, gvk),
},
},
}
obj3 := &metaObj{
ObjectMeta: ObjectMeta{
UID: "uid3",
OwnerReferences: []OwnerReference{
*NewControllerRef(obj2, gvk),
},
},
}
if !IsControlledBy(obj2, obj1) || !IsControlledBy(obj3, obj2) {
t.Error("Incorrect IsControlledBy result: false")
}
if IsControlledBy(obj3, obj1) {
t.Error("Incorrect IsControlledBy result: true")
}
}
|
3abfb9aa919969e37afa13b07ed2b88059e1687e
|
d66fa34792e35b1f0282db0437b1013d34ad27a1578617974470b0c50151ef16
|
kubernetes/kubernetes
|
filter.go
|
pkg/proxy/conntrack/filter.go
|
//go:build linux
// +build linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conntrack
import (
"net"
"github.com/vishvananda/netlink"
"k8s.io/klog/v2"
)
// connectionTuple holds the addresses and ports identifying one direction of
// a conntrack flow. Zero-valued fields (nil IPs, port 0) act as wildcards
// when used for filtering.
type connectionTuple struct {
	srcIP   net.IP
	srcPort uint16
	dstIP   net.IP
	dstPort uint16
}

// conntrackFilter selects conntrack flows by L4 protocol and/or by the
// original (forward) and reply (reverse) direction tuples. A nil tuple or a
// zero protocol leaves that aspect unfiltered.
type conntrackFilter struct {
	protocol uint8
	original *connectionTuple
	reply    *connectionTuple
}

// Compile-time assertion that conntrackFilter implements netlink's
// CustomConntrackFilter interface.
var _ netlink.CustomConntrackFilter = (*conntrackFilter)(nil)
// MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter
// false otherwise.
func (f *conntrackFilter) MatchConntrackFlow(flow *netlink.ConntrackFlow) bool {
	// An empty filter matches nothing; returning false here prevents an
	// unconfigured filter from deleting every conntrack entry on the host.
	if f.protocol == 0 && f.original == nil && f.reply == nil {
		return false
	}

	// -p, --protonum proto [Layer 4 Protocol, eg. 'tcp']
	if f.protocol != 0 && f.protocol != flow.Forward.Protocol {
		return false
	}

	// filter on original direction (--orig-src/--orig-dst/--orig-port-src/--orig-port-dst)
	if !matchTuple(f.original, &flow.Forward) {
		return false
	}

	// filter on reply direction (--reply-src/--reply-dst/--reply-port-src/--reply-port-dst)
	if !matchTuple(f.reply, &flow.Reverse) {
		return false
	}

	// appending a new line to the flow makes klog print multiline log which is easier to debug and understand.
	klog.V(4).InfoS("Deleting conntrack entry", "flow", flow.String()+"\n")
	return true
}

// matchTuple reports whether the given direction of a conntrack flow matches
// the filter tuple. A nil filter matches anything; within a non-nil filter,
// nil IPs and zero ports act as wildcards.
func matchTuple(filter *connectionTuple, tuple *netlink.IPTuple) bool {
	if filter == nil {
		return true
	}
	if filter.srcIP != nil && !filter.srcIP.Equal(tuple.SrcIP) {
		return false
	}
	if filter.dstIP != nil && !filter.dstIP.Equal(tuple.DstIP) {
		return false
	}
	if filter.srcPort != 0 && filter.srcPort != tuple.SrcPort {
		return false
	}
	if filter.dstPort != 0 && filter.dstPort != tuple.DstPort {
		return false
	}
	return true
}
|
75dd3f0c57fc8b03c02c01babe24b020313555fe
|
pkg/proxy/conntrack/filter_test.go
|
//go:build linux
// +build linux
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conntrack
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
netutils "k8s.io/utils/net"
)
func applyFilter(flowList []netlink.ConntrackFlow, ipv4Filter *conntrackFilter, ipv6Filter *conntrackFilter) (ipv4Match, ipv6Match int) {
for _, flow := range flowList {
if ipv4Filter.MatchConntrackFlow(&flow) == true {
ipv4Match++
}
if ipv6Filter.MatchConntrackFlow(&flow) == true {
ipv6Match++
}
}
return ipv4Match, ipv6Match
}
func TestConntrackFilter(t *testing.T) {
var flowList []netlink.ConntrackFlow
flow1 := netlink.ConntrackFlow{}
flow1.FamilyType = unix.AF_INET
flow1.Forward.SrcIP = netutils.ParseIPSloppy("10.0.0.1")
flow1.Forward.DstIP = netutils.ParseIPSloppy("20.0.0.1")
flow1.Forward.SrcPort = 1000
flow1.Forward.DstPort = 2000
flow1.Forward.Protocol = 17
flow1.Reverse.SrcIP = netutils.ParseIPSloppy("20.0.0.1")
flow1.Reverse.DstIP = netutils.ParseIPSloppy("192.168.1.1")
flow1.Reverse.SrcPort = 2000
flow1.Reverse.DstPort = 1000
flow1.Reverse.Protocol = 17
flow2 := netlink.ConntrackFlow{}
flow2.FamilyType = unix.AF_INET
flow2.Forward.SrcIP = netutils.ParseIPSloppy("10.0.0.2")
flow2.Forward.DstIP = netutils.ParseIPSloppy("20.0.0.2")
flow2.Forward.SrcPort = 5000
flow2.Forward.DstPort = 6000
flow2.Forward.Protocol = 6
flow2.Reverse.SrcIP = netutils.ParseIPSloppy("20.0.0.2")
flow2.Reverse.DstIP = netutils.ParseIPSloppy("192.168.1.1")
flow2.Reverse.SrcPort = 6000
flow2.Reverse.DstPort = 5000
flow2.Reverse.Protocol = 6
flow3 := netlink.ConntrackFlow{}
flow3.FamilyType = unix.AF_INET6
flow3.Forward.SrcIP = netutils.ParseIPSloppy("eeee:eeee:eeee:eeee:eeee:eeee:eeee:eeee")
flow3.Forward.DstIP = netutils.ParseIPSloppy("dddd:dddd:dddd:dddd:dddd:dddd:dddd:dddd")
flow3.Forward.SrcPort = 1000
flow3.Forward.DstPort = 2000
flow3.Forward.Protocol = 132
flow3.Reverse.SrcIP = netutils.ParseIPSloppy("dddd:dddd:dddd:dddd:dddd:dddd:dddd:dddd")
flow3.Reverse.DstIP = netutils.ParseIPSloppy("eeee:eeee:eeee:eeee:eeee:eeee:eeee:eeee")
flow3.Reverse.SrcPort = 2000
flow3.Reverse.DstPort = 1000
flow3.Reverse.Protocol = 132
flowList = append(flowList, flow1, flow2, flow3)
testCases := []struct {
name string
filterV4 *conntrackFilter
filterV6 *conntrackFilter
expectedV4Matches int
expectedV6Matches int
}{
{
name: "Empty filter",
filterV4: &conntrackFilter{},
filterV6: &conntrackFilter{},
expectedV4Matches: 0,
expectedV6Matches: 0,
},
{
name: "Protocol filter",
filterV4: &conntrackFilter{protocol: 6},
filterV6: &conntrackFilter{protocol: 17},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
{
name: "Original Source IP filter",
filterV4: &conntrackFilter{original: &connectionTuple{srcIP: netutils.ParseIPSloppy("10.0.0.1")}},
filterV6: &conntrackFilter{original: &connectionTuple{srcIP: netutils.ParseIPSloppy("eeee:eeee:eeee:eeee:eeee:eeee:eeee:eeee")}},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
{
name: "Original Destination IP filter",
filterV4: &conntrackFilter{original: &connectionTuple{dstIP: netutils.ParseIPSloppy("20.0.0.1")}},
filterV6: &conntrackFilter{original: &connectionTuple{dstIP: netutils.ParseIPSloppy("dddd:dddd:dddd:dddd:dddd:dddd:dddd:dddd")}},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
{
name: "Original Source Port Filter",
filterV4: &conntrackFilter{protocol: 6, original: &connectionTuple{srcPort: 5000}},
filterV6: &conntrackFilter{protocol: 132, original: &connectionTuple{srcPort: 1000}},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
{
name: "Original Destination Port Filter",
filterV4: &conntrackFilter{protocol: 6, original: &connectionTuple{dstPort: 6000}},
filterV6: &conntrackFilter{protocol: 132, original: &connectionTuple{dstPort: 2000}},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
{
name: "Reply Source IP filter",
filterV4: &conntrackFilter{reply: &connectionTuple{srcIP: netutils.ParseIPSloppy("20.0.0.1")}},
filterV6: &conntrackFilter{reply: &connectionTuple{srcIP: netutils.ParseIPSloppy("dddd:dddd:dddd:dddd:dddd:dddd:dddd:dddd")}},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
{
name: "Reply Destination IP filter",
filterV4: &conntrackFilter{reply: &connectionTuple{dstIP: netutils.ParseIPSloppy("192.168.1.1")}},
filterV6: &conntrackFilter{reply: &connectionTuple{dstIP: netutils.ParseIPSloppy("dddd:dddd:dddd:dddd:dddd:dddd:dddd:dddd")}},
expectedV4Matches: 2,
expectedV6Matches: 0,
},
{
name: "Reply Source Port filter",
filterV4: &conntrackFilter{protocol: 17, reply: &connectionTuple{srcPort: 2000}},
filterV6: &conntrackFilter{protocol: 132, reply: &connectionTuple{srcPort: 2000}},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
{
name: "Reply Destination Port filter",
filterV4: &conntrackFilter{protocol: 6, reply: &connectionTuple{dstPort: 5000}},
filterV6: &conntrackFilter{protocol: 132, reply: &connectionTuple{dstPort: 1000}},
expectedV4Matches: 1,
expectedV6Matches: 1,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
v4Matches, v6Matches := applyFilter(flowList, tc.filterV4, tc.filterV6)
require.Equal(t, tc.expectedV4Matches, v4Matches)
require.Equal(t, tc.expectedV6Matches, v6Matches)
})
}
}
|
a8b8a4b3f8704230bfb15a31186e313cc72f4530
|
9a91b55e7dc8b650e794cdf0ee8f0f4ededbfd02dfe2a83eb7573d8ed89d3efa
|
hashicorp/terraform
|
migration.go
|
internal/cloud/migration.go
|
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cloud
import (
"github.com/hashicorp/terraform/internal/command/workdir"
"github.com/hashicorp/terraform/internal/configs"
)
// Most of the logic for migrating into and out of "cloud mode" actually lives
// in the "command" package as part of the general backend init mechanisms,
// but we have some cloud-specific helper functionality here.
// ConfigChangeMode is a rough way to think about different situations that
// our backend change and state migration codepaths need to distinguish in
// the context of Cloud integration mode.
// ConfigChangeMode is represented as a distinctive rune so its String()
// output (via stringer) is easy to spot in debug logs.
type ConfigChangeMode rune

//go:generate go run golang.org/x/tools/cmd/stringer -type ConfigChangeMode
const (
	// ConfigMigrationIn represents when the configuration calls for using
	// Cloud mode but the working directory state disagrees.
	ConfigMigrationIn ConfigChangeMode = '↘'
	// ConfigMigrationOut represents when the working directory state calls
	// for using Cloud mode but the configuration disagrees.
	ConfigMigrationOut ConfigChangeMode = '↖'
	// ConfigChangeInPlace represents when both the working directory state
	// and the config call for using Cloud mode, and so there might be
	// (but won't necessarily be) cloud settings changing, but we don't
	// need to do any actual migration.
	ConfigChangeInPlace ConfigChangeMode = '↻'
	// ConfigChangeIrrelevant represents when the config and working directory
	// state disagree but neither calls for using Cloud mode, and so the
	// Cloud integration is not involved in dealing with this.
	ConfigChangeIrrelevant ConfigChangeMode = '🤷'
)
// DetectConfigChangeType encapsulates the fiddly logic for deciding what kind
// of Cloud configuration change we seem to be making, based on the existing
// working directory state (if any) and the current configuration.
//
// This is a pretty specialized sort of thing focused on finicky details of
// the way we currently model working directory settings and config, so its
// signature probably won't survive any non-trivial refactoring of how
// the CLI layer thinks about backends/state storage.
func DetectConfigChangeType(wdState *workdir.BackendState, config *configs.Backend, haveLocalStates bool) ConfigChangeMode {
	// Although externally the cloud integration isn't really a "backend",
	// internally we treat it a bit like one just to preserve all of our
	// existing interfaces that assume backends. "cloud" is the placeholder
	// name we use for it, even though that isn't a backend that's actually
	// available for selection in the usual way.
	wdIsCloud := wdState != nil && wdState.Type == "cloud"
	configIsCloud := config != nil && config.Type == "cloud"

	// "uninit" here means that the working directory is totally uninitialized,
	// even taking into account the possibility of implied local state that
	// therefore doesn't typically require explicit "terraform init".
	wdIsUninit := wdState == nil && !haveLocalStates

	switch {
	case configIsCloud && (wdIsCloud || wdIsUninit):
		// Config has cloud and the working directory either already agrees
		// or is completely uninitialized, so we assume we're doing the
		// initial activation of this working directory for an
		// already-migrated-to-cloud remote state.
		return ConfigChangeInPlace
	case configIsCloud:
		// Otherwise, we seem to be migrating into cloud mode from a backend.
		return ConfigMigrationIn
	case wdIsCloud:
		// The working directory is already cloud but the config isn't:
		// we're migrating away from cloud to a backend.
		return ConfigMigrationOut
	default:
		// Anything else is unrelated to cloud mode and so outside of our
		// scope here.
		return ConfigChangeIrrelevant
	}
}
// InvolvesCloud reports whether this change mode starts from, ends at, or
// stays in Cloud mode.
func (m ConfigChangeMode) InvolvesCloud() bool {
	return m == ConfigMigrationIn || m == ConfigMigrationOut || m == ConfigChangeInPlace
}
// IsCloudMigration reports whether this change mode migrates state into or
// out of Cloud mode (as opposed to an in-place or irrelevant change).
func (m ConfigChangeMode) IsCloudMigration() bool {
	return m == ConfigMigrationIn || m == ConfigMigrationOut
}
|
f42e8735674542f761ae3e43074058f1cb9e7c67
|
internal/cloud/migration_test.go
|
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cloud
import (
"testing"
"github.com/hashicorp/terraform/internal/command/workdir"
"github.com/hashicorp/terraform/internal/configs"
)
func TestDetectConfigChangeType(t *testing.T) {
tests := map[string]struct {
stateType string
configType string
localStates bool
want ConfigChangeMode
wantInvolvesCloud bool
wantIsCloudMigration bool
}{
"init cloud": {
``, `cloud`, false,
ConfigChangeInPlace,
true, false,
},
"reinit cloud": {
`cloud`, `cloud`, false,
ConfigChangeInPlace,
true, false,
},
"migrate default local to cloud with existing local state": {
``, `cloud`, true,
ConfigMigrationIn,
true, true,
},
"migrate local to cloud": {
`local`, `cloud`, false,
ConfigMigrationIn,
true, true,
},
"migrate remote to cloud": {
`local`, `cloud`, false,
ConfigMigrationIn,
true, true,
},
"migrate cloud to local": {
`cloud`, `local`, false,
ConfigMigrationOut,
true, true,
},
"migrate cloud to remote": {
`cloud`, `remote`, false,
ConfigMigrationOut,
true, true,
},
"migrate cloud to default local": {
`cloud`, ``, false,
ConfigMigrationOut,
true, true,
},
// Various other cases can potentially be valid (decided by the
// Terraform CLI layer) but are irrelevant for Cloud mode purposes.
"init default local": {
``, ``, false,
ConfigChangeIrrelevant,
false, false,
},
"init default local with existing local state": {
``, ``, true,
ConfigChangeIrrelevant,
false, false,
},
"init remote backend": {
``, `remote`, false,
ConfigChangeIrrelevant,
false, false,
},
"init remote backend with existing local state": {
``, `remote`, true,
ConfigChangeIrrelevant,
false, false,
},
"reinit remote backend": {
`remote`, `remote`, false,
ConfigChangeIrrelevant,
false, false,
},
"migrate local to remote backend": {
`local`, `remote`, false,
ConfigChangeIrrelevant,
false, false,
},
"migrate remote to default local": {
`remote`, ``, false,
ConfigChangeIrrelevant,
false, false,
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
var state *workdir.BackendState
var config *configs.Backend
if test.stateType != "" {
state = &workdir.BackendState{
Type: test.stateType,
// everything else is irrelevant for our purposes here
}
}
if test.configType != "" {
config = &configs.Backend{
Type: test.configType,
// everything else is irrelevant for our purposes here
}
}
got := DetectConfigChangeType(state, config, test.localStates)
if got != test.want {
t.Errorf(
"wrong result\nstate type: %s\nconfig type: %s\nlocal states: %t\n\ngot: %s\nwant: %s",
test.stateType, test.configType, test.localStates,
got, test.want,
)
}
if got, want := got.InvolvesCloud(), test.wantInvolvesCloud; got != want {
t.Errorf(
"wrong InvolvesCloud result\ngot: %t\nwant: %t",
got, want,
)
}
if got, want := got.IsCloudMigration(), test.wantIsCloudMigration; got != want {
t.Errorf(
"wrong IsCloudMigration result\ngot: %t\nwant: %t",
got, want,
)
}
})
}
}
|
bfc83dae1a37c68be15c8654c2178a335c5ca9e5
|
2b659e4d276e4439596e1e6e7b440ca2cf35084032f29a89d10651ca9a2c4d16
|
kubernetes/kubernetes
|
fieldmanager.go
|
staging/src/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager.go
|
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
"sigs.k8s.io/structured-merge-diff/v4/merge"
)
// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates
// if the number of update managers exceeds this, the oldest entries will be merged until the number is below the maximum.
// TODO(jennybuckley): Determine if this is really the best value. Ideally we wouldn't unnecessarily merge too many entries.
const DefaultMaxUpdateManagers int = 10
// DefaultTrackOnCreateProbability defines the default probability that the field management of an object
// starts being tracked from the object's creation, instead of from the first time the object is applied to.
const DefaultTrackOnCreateProbability float32 = 1
var atMostEverySecond = NewAtMostEvery(time.Second)
// FieldManager updates the managed fields and merges applied
// configurations.
type FieldManager struct {
	fieldManager Manager // delegate that performs the actual merge/update logic
	subresource  string  // subresource name, or "" when managing the main resource
}
// NewFieldManager creates a new FieldManager that decodes, manages, then re-encodes managedFields
// on update and apply requests. The subresource name, when non-empty, makes
// Update ignore managedFields supplied in the request object.
func NewFieldManager(f Manager, subresource string) *FieldManager {
	return &FieldManager{fieldManager: f, subresource: subresource}
}
// NewDefaultFieldManager is a helper function which wraps a Manager with the
// default chain of decorator managers (innermost first: strip meta, update
// managedFields, build manager info, cap the number of update managers,
// probabilistically skip non-applied objects, last-applied handling, and
// version checking).
func NewDefaultFieldManager(f Manager, typeConverter TypeConverter, objectConverter runtime.ObjectConvertor, objectCreater runtime.ObjectCreater, kind schema.GroupVersionKind, subresource string) *FieldManager {
	return NewFieldManager(
		NewVersionCheckManager(
			NewLastAppliedUpdater(
				NewLastAppliedManager(
					NewProbabilisticSkipNonAppliedManager(
						NewCapManagersManager(
							NewBuildManagerInfoManager(
								NewManagedFieldsUpdater(
									NewStripMetaManager(f),
								), kind.GroupVersion(), subresource,
							), DefaultMaxUpdateManagers,
						), objectCreater, DefaultTrackOnCreateProbability,
					), typeConverter, objectConverter, kind.GroupVersion(),
				),
			), kind,
		), subresource,
	)
}
// decodeLiveOrNew decodes managedFields for an update request, preferring
// the entries on the new (request) object but falling back to the live
// object's entries when the request's are to be ignored, empty, or
// undecodable. Decode failures on the live object degrade to an empty
// Managed rather than an error.
func decodeLiveOrNew(liveObj, newObj runtime.Object, ignoreManagedFieldsFromRequestObject bool) (Managed, error) {
	liveAccessor, err := meta.Accessor(liveObj)
	if err != nil {
		return nil, err
	}
	// We take the managedFields of the live object in case the request tries to
	// manually set managedFields via a subresource.
	if ignoreManagedFieldsFromRequestObject {
		return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields()))
	}
	// If the object doesn't have metadata, we should just return without trying to
	// set the managedFields at all, so creates/updates/patches will work normally.
	newAccessor, err := meta.Accessor(newObj)
	if err != nil {
		return nil, err
	}
	// An explicitly empty (or single-empty-entry) list is the user asking to
	// reset field management entirely.
	if isResetManagedFields(newAccessor.GetManagedFields()) {
		return NewEmptyManaged(), nil
	}
	// If the managed field is empty or we failed to decode it,
	// let's try the live object. This is to prevent clients who
	// don't understand managedFields from deleting it accidentally.
	managed, err := DecodeManagedFields(newAccessor.GetManagedFields())
	if err != nil || len(managed.Fields()) == 0 {
		return emptyManagedFieldsOnErr(DecodeManagedFields(liveAccessor.GetManagedFields()))
	}
	return managed, nil
}
// emptyManagedFieldsOnErr swallows decode failures: on error it returns a
// fresh empty Managed and a nil error, so callers fall back to tracking
// nothing instead of failing the request.
func emptyManagedFieldsOnErr(managed Managed, err error) (Managed, error) {
	if err == nil {
		return managed, nil
	}
	return NewEmptyManaged(), nil
}
// Update is used when the object has already been merged (non-apply
// use-case), and simply updates the managed fields in the output
// object.
func (f *FieldManager) Update(liveObj, newObj runtime.Object, manager string) (object runtime.Object, err error) {
	// First try to decode the managed fields provided in the update,
	// This is necessary to allow directly updating managed fields.
	isSubresource := f.subresource != ""
	managed, err := decodeLiveOrNew(liveObj, newObj, isSubresource)
	if err != nil {
		// Deliberately non-fatal: if managed fields can't be decoded, hand
		// back the new object untouched rather than rejecting the update.
		return newObj, nil
	}
	RemoveObjectManagedFields(newObj)
	if object, managed, err = f.fieldManager.Update(liveObj, newObj, managed, manager); err != nil {
		return nil, err
	}
	if err = EncodeObjectManagedFields(object, managed); err != nil {
		return nil, fmt.Errorf("failed to encode managed fields: %v", err)
	}
	return object, nil
}
// UpdateNoErrors is the same as Update, but it will not return
// errors. If an error happens, the object is returned with
// managedFields cleared.
func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager string) runtime.Object {
	obj, err := f.Update(liveObj, newObj, manager)
	if err != nil {
		// Rate-limited (at most once per second, via atMostEverySecond) so a
		// hot write path cannot flood the logs with these errors.
		atMostEverySecond.Do(func() {
			ns, name := "unknown", "unknown"
			if accessor, err := meta.Accessor(newObj); err == nil {
				ns = accessor.GetNamespace()
				name = accessor.GetName()
			}
			klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind",
				newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name)
		})
		// Explicitly remove managedFields on failure, so that
		// we can't have garbage in it.
		RemoveObjectManagedFields(newObj)
		return newObj
	}
	return obj
}
// Returns true if the managedFields indicate that the user is trying to
// reset the managedFields, i.e. if the list is non-nil but empty, or if
// the list has one empty item.
func isResetManagedFields(managedFields []metav1.ManagedFieldsEntry) bool {
	switch len(managedFields) {
	case 0:
		// Distinguish "absent" (nil, not a reset) from an explicit empty list.
		return managedFields != nil
	case 1:
		return reflect.DeepEqual(managedFields[0], metav1.ManagedFieldsEntry{})
	default:
		return false
	}
}
// Apply is used when server-side apply is called, as it merges the
// object and updates the managed fields.
func (f *FieldManager) Apply(liveObj, appliedObj runtime.Object, manager string, force bool) (object runtime.Object, err error) {
	// If the object doesn't have metadata, apply isn't allowed.
	accessor, err := meta.Accessor(liveObj)
	if err != nil {
		return nil, fmt.Errorf("couldn't get accessor: %v", err)
	}
	// Decode the managed fields in the live object, since it isn't allowed in the patch.
	managed, err := DecodeManagedFields(accessor.GetManagedFields())
	if err != nil {
		return nil, fmt.Errorf("failed to decode managed fields: %v", err)
	}
	object, managed, err = f.fieldManager.Apply(liveObj, appliedObj, managed, manager, force)
	if err != nil {
		// Translate field-ownership conflicts into the API-facing conflict
		// error type; every other error passes through unchanged.
		if conflicts, ok := err.(merge.Conflicts); ok {
			return nil, NewConflictError(conflicts)
		}
		return nil, err
	}
	if err = EncodeObjectManagedFields(object, managed); err != nil {
		return nil, fmt.Errorf("failed to encode managed fields: %v", err)
	}
	return object, nil
}
|
eca04a711638b389c6f281aade15ceef78ad3857
|
staging/src/k8s.io/apimachinery/pkg/util/managedfields/internal/fieldmanager_test.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal_test
import (
"encoding/json"
"os"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/util/managedfields/internal"
"k8s.io/kube-openapi/pkg/validation/spec"
)
var fakeTypeConverter = func() internal.TypeConverter {
data, err := os.ReadFile(filepath.Join(
strings.Repeat(".."+string(filepath.Separator), 8),
"api", "openapi-spec", "swagger.json"))
if err != nil {
panic(err)
}
convertedDefs := map[string]*spec.Schema{}
spec := spec.Swagger{}
if err := json.Unmarshal(data, &spec); err != nil {
panic(err)
}
for k, v := range spec.Definitions {
vCopy := v
convertedDefs[k] = &vCopy
}
typeConverter, err := internal.NewTypeConverter(convertedDefs, false)
if err != nil {
panic(err)
}
return typeConverter
}()
|
1ae01dab6e91e9974ad2303f3d145cf7c8e8247e
|
52f86873461eadc6a43add111d1545050c917fb1dcee2fbfd6fcbc10c7d7e5e7
|
hashicorp/terraform
|
get_cache.go
|
internal/plugin/discovery/get_cache.go
|
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package discovery
// PluginCache is an interface implemented by objects that are able to maintain
// a cache of plugins.
type PluginCache interface {
	// CachedPluginPath returns a path where the requested plugin is already
	// cached, or an empty string if the requested plugin is not yet cached.
	CachedPluginPath(kind string, name string, version Version) string

	// InstallDir returns the directory that new plugins should be installed into
	// in order to populate the cache. This directory should be used as the
	// first argument to getter.Get when downloading plugins with go-getter.
	//
	// After installing into this directory, use CachedPluginPath to obtain the
	// path where the plugin was installed.
	InstallDir() string
}
// NewLocalPluginCache returns a PluginCache that looks up and installs
// plugins within the given local directory.
func NewLocalPluginCache(dir string) PluginCache {
	c := &pluginCache{Dir: dir}
	return c
}
// pluginCache is the concrete PluginCache implementation returned by
// NewLocalPluginCache, caching plugins in a single local directory.
type pluginCache struct {
	// Dir is the base directory that cached plugin binaries live in; it is
	// both the search root for lookups and the install target.
	Dir string
}
// CachedPluginPath scans the cache directory for a plugin of the given
// kind matching both name and version, returning the path of the cached
// binary or an empty string when there is no cached copy.
func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string {
	matches := FindPlugins(kind, []string{c.Dir}).WithName(name).WithVersion(version)
	if matches.Count() == 0 {
		// Cache miss: nothing has been installed for this plugin/version.
		return ""
	}

	// Normally there is exactly one candidate here; if several match for
	// some reason, the newest is chosen arbitrarily.
	return matches.Newest().Path
}
// InstallDir returns the cache's base directory, which is where new
// plugins should be installed in order to populate the cache.
func (c *pluginCache) InstallDir() string {
	return c.Dir
}
|
97b2b6edbb572c60c532d5987d92fbb3056254d0
|
internal/plugin/discovery/get_cache_test.go
|
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package discovery
import (
"testing"
)
// TestLocalPluginCache verifies that a directory-backed PluginCache reports
// a hit only for plugin versions actually present in testdata/plugin-cache.
func TestLocalPluginCache(t *testing.T) {
	cache := NewLocalPluginCache("testdata/plugin-cache")

	foo1Path := cache.CachedPluginPath("provider", "foo", VersionStr("v0.0.1").MustParse())
	if foo1Path == "" {
		t.Errorf("foo v0.0.1 not found; should have been found")
	}

	foo2Path := cache.CachedPluginPath("provider", "foo", VersionStr("v0.0.2").MustParse())
	if foo2Path != "" {
		t.Errorf("foo v0.0.2 found at %s; should not have been found", foo2Path)
	}

	baz1Path := cache.CachedPluginPath("provider", "baz", VersionStr("v0.0.1").MustParse())
	if baz1Path != "" {
		t.Errorf("baz v0.0.1 found at %s; should not have been found", baz1Path)
	}

	baz2Path := cache.CachedPluginPath("provider", "baz", VersionStr("v0.0.2").MustParse())
	// Fixed copy-paste bug: the original condition re-tested baz1Path here,
	// so an unexpected cached baz v0.0.2 could never be reported.
	if baz2Path != "" {
		t.Errorf("baz v0.0.2 found at %s; should not have been found", baz2Path)
	}
}
|
f5a7b7a6d98bb937da8fa9df70843e5c39a3e193
|
End of preview. Expand
in Data Studio
No dataset card yet
- Downloads last month
- 14