Search is not available for this dataset
max_stars_repo_path
stringlengths
4
435
max_stars_repo_name
stringlengths
4
107
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
10
1.05M
score
float64
-0.76
3.84
int_score
int64
0
4
src/app/api/endpoint/static.go
josephspurrier/govueapp
5
5
package endpoint import ( "net/http" "os" "path/filepath" "strings" ) // StaticEndpoint . type StaticEndpoint struct { Core } // SetupStatic . func SetupStatic(core Core) { p := new(StaticEndpoint) p.Core = core p.Router.Get("/api/v1", p.Index) p.Router.Get("/api/static...", p.Static) } // Index . // swagger:route GET /api/v1 healthcheck Ready // // API is ready. // // Responses: // 200: OKResponse func (p StaticEndpoint) Index(w http.ResponseWriter, r *http.Request) (int, error) { return p.Response.OK(w, "ready") } // Static . func (p StaticEndpoint) Static(w http.ResponseWriter, r *http.Request) (int, error) { if r.URL.Path == "/api/static/" { return http.StatusNotFound, nil } // Get the location of the executable. basepath, err := os.Executable() if err != nil { return http.StatusInternalServerError, nil } // If static folder is found to the executable, serve the file. staticPath := filepath.Join(basepath, "static") if stat, err := os.Stat(staticPath); err == nil && stat.IsDir() { // The static directory is found. } else if len(os.Getenv("GOPATH")) > 0 { // Else get the GOPATH. basepath = filepath.Join(os.Getenv("GOPATH"), "src/app/api") } // Serve the file to the user. http.ServeFile(w, r, filepath.Join(basepath, strings.TrimPrefix(r.URL.Path, "/api/"))) return http.StatusOK, nil }
1.453125
1
internal/crypto/libsodium/crypter.go
darora/wal-g
2,154
13
package libsodium // #cgo CFLAGS: -I../../../tmp/libsodium/include // #cgo LDFLAGS: -L../../../tmp/libsodium/lib -lsodium // #include <sodium.h> import "C" import ( "fmt" "io" "io/ioutil" "strings" "sync" "github.com/pkg/errors" "github.com/wal-g/wal-g/internal/crypto" ) const ( chunkSize = 8192 libsodiumKeybytes = 32 minimalKeyLength = 25 ) // libsodium should always be initialised func init() { C.sodium_init() } // Crypter is libsodium Crypter implementation type Crypter struct { key []byte KeyInline string KeyPath string KeyTransform string mutex sync.RWMutex } func (crypter *Crypter) Name() string { return "Libsodium" } // CrypterFromKey creates Crypter from key func CrypterFromKey(key string, keyTransform string) crypto.Crypter { return &Crypter{KeyInline: key, KeyTransform: keyTransform} } // CrypterFromKeyPath creates Crypter from key path func CrypterFromKeyPath(path string, keyTransform string) crypto.Crypter { return &Crypter{KeyPath: path, KeyTransform: keyTransform} } func (crypter *Crypter) setup() (err error) { crypter.mutex.RLock() if crypter.key != nil { crypter.mutex.RUnlock() return nil } crypter.mutex.RUnlock() crypter.mutex.Lock() defer crypter.mutex.Unlock() if crypter.key != nil { return nil } if crypter.KeyInline == "" && crypter.KeyPath == "" { return errors.New("libsodium Crypter: must have a key or key path") } keyString := crypter.KeyInline if keyString == "" { // read from file keyFileContents, err := ioutil.ReadFile(crypter.KeyPath) if err != nil { return fmt.Errorf("libsodium Crypter: unable to read key from file: %v", err) } keyString = strings.TrimSpace(string(keyFileContents)) } key, err := keyTransform(keyString, crypter.KeyTransform, libsodiumKeybytes) if err != nil { return fmt.Errorf("libsodium Crypter: during key transform: %v", err) } crypter.key = key return nil } // Encrypt creates encryption writer from ordinary writer func (crypter *Crypter) Encrypt(writer io.Writer) (io.WriteCloser, error) { if err := crypter.setup(); err 
!= nil { return nil, err } return NewWriter(writer, crypter.key), nil } // Decrypt creates decrypted reader from ordinary reader func (crypter *Crypter) Decrypt(reader io.Reader) (io.Reader, error) { if err := crypter.setup(); err != nil { return nil, err } return NewReader(reader, crypter.key), nil } var _ error = &ErrShortKey{} type ErrShortKey struct { keyLength int } func (e ErrShortKey) Error() string { return fmt.Sprintf("key length must not be less than %v, got %v", minimalKeyLength, e.keyLength) } func newErrShortKey(keyLength int) *ErrShortKey { return &ErrShortKey{ keyLength: keyLength, } }
1.5
2
main.go
kevinlebrun/tvshows
2
21
package main

import (
	"flag"
	"fmt"
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"os"
	"strconv"
	"strings"

	"github.com/PuerkitoBio/goquery"
)

// Show is a TV show together with its (optionally loaded) episodes.
type Show struct {
	Name     string
	Episodes []Episode
}

// Episode is a single episode of a show.
type Episode struct {
	Name   string
	Season int64
	Num    int64
	Aired  bool
}

// Catalog is a client for the www.pogdesign.co.uk/cat TV calendar.
type Catalog struct {
	Client *http.Client
}

// NewCatalog returns a Catalog whose HTTP client keeps session cookies.
func NewCatalog() *Catalog {
	jar, _ := cookiejar.New(nil)
	client := &http.Client{Jar: jar}
	return &Catalog{client}
}

// Auth logs in to the catalog; the session cookie is stored in the
// client's cookie jar for subsequent requests.
func (c *Catalog) Auth(username, password string) error {
	form := make(url.Values)
	// BUG FIX: the original posted hard-coded placeholder values and
	// ignored the username/password parameters entirely, so login with
	// the supplied credentials could never succeed.
	form.Add("username", username)
	form.Add("password", password)
	form.Add("sub_login", "Account Login")

	data := strings.NewReader(form.Encode())
	req, err := http.NewRequest("POST", "http://www.pogdesign.co.uk/cat/", data)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")

	resp, err := c.Client.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}

// Followed returns the shows the authenticated user follows
// (episodes are not populated).
func (c *Catalog) Followed() ([]Show, error) {
	req, err := http.NewRequest("GET", "http://www.pogdesign.co.uk/cat/profile/all-shows", nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, err
	}

	shows := make([]Show, 0)
	doc.Find("a.prfimg.prfmed").Each(func(i int, s *goquery.Selection) {
		// Drop the episode-count badge so only the show name remains.
		s.Find("span > strong").Remove()
		show := Show{
			Name: strings.Trim(s.Find("span").Text(), " \n\t"),
		}
		shows = append(shows, show)
	})
	return shows, nil
}

// Unwatched returns the followed shows together with their unwatched episodes.
func (c *Catalog) Unwatched() ([]Show, error) {
	req, err := http.NewRequest("GET", "http://www.pogdesign.co.uk/cat/profile/unwatched-episodes", nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, err
	}

	shows := make([]Show, 0)
	doc.Find("a.prfimg.prfmed").Each(func(i int, s *goquery.Selection) {
		if url, exists := s.Attr("href"); exists {
			episodes, err := c.UnwatchedEpisodesByURL(url)
			if err != nil {
				panic(err)
			}
			show := Show{
				Name:     strings.Trim(s.Find("span").Text(), " \n\t"),
				Episodes: episodes,
			}
			shows = append(shows, show)
		}
	})
	return shows, nil
}

// UnwatchedEpisodesByURL scrapes the unwatched episodes from a show's
// site-relative URL.
func (c *Catalog) UnwatchedEpisodesByURL(url string) ([]Episode, error) {
	req, err := http.NewRequest("GET", "http://www.pogdesign.co.uk"+url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, err
	}

	episodes := make([]Episode, 0)
	doc.Find(".ep.info").Each(func(i int, s *goquery.Selection) {
		num, _ := strconv.ParseInt(s.Find(".pnumber").Text(), 10, 64)
		// The season number is carried by the id of the preceding <h2>.
		season, _ := strconv.ParseInt(s.PrevAllFiltered("h2.xxla").Eq(0).AttrOr("id", ""), 10, 64)

		// Clone before stripping markup so the live document is untouched.
		name := s.Clone()
		name.Find("span").Remove()
		name.Find("label").Remove()

		episode := Episode{
			Name:   strings.Trim(name.Text(), " \n\t"),
			Num:    num,
			Season: season,
			Aired:  s.Children().Eq(1).Text() == "AIRED",
		}
		episodes = append(episodes, episode)
	})
	return episodes, nil
}

func main() {
	var err error
	var shows []Show

	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s <command>\n", os.Args[0])
		flag.PrintDefaults()
	}

	var (
		username = flag.String("username", "", "www.pogdesign.co.uk/cat username")
		password = flag.String("password", "", "www.pogdesign.co.uk/cat password")
	)
	flag.Parse()

	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}
	command := flag.Arg(0)

	catalog := NewCatalog()
	err = catalog.Auth(*username, *password)
	if err != nil {
		panic(err)
	}

	switch command {
	case "followed":
		shows, err = catalog.Followed()
		if err != nil {
			panic(err)
		}
		for _, show := range shows {
			fmt.Println(show.Name)
		}
	case "unwatched":
		shows, err = catalog.Unwatched()
		if err != nil {
			panic(err)
		}
		for _, show := range shows {
			for _, episode := range show.Episodes {
				if episode.Aired {
					fmt.Printf("%s s%02d e%02d [%s]\n", show.Name, episode.Season, episode.Num, episode.Name)
				}
			}
		}
	default:
		fmt.Printf("Unknown command %q\n", command)
		os.Exit(1)
	}
}
1.421875
1
src/crypto/aes/cipher_asm.go
zos-go/go
22
29
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64 s390x

package aes

// defined in asm_$GOARCH.s
func hasAsm() bool
func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)

// useAsm is decided once at startup by probing the CPU/platform.
var useAsm = hasAsm()

// encryptBlock encrypts one 16-byte block, dispatching to the assembly
// implementation when available and to the pure-Go fallback otherwise.
func encryptBlock(xk []uint32, dst, src []byte) {
	if useAsm {
		// len(xk)/4-1 recovers the round count from the expanded key size.
		encryptBlockAsm(len(xk)/4-1, &xk[0], &dst[0], &src[0])
	} else {
		encryptBlockGo(xk, dst, src)
	}
}

// decryptBlock decrypts one 16-byte block; dispatch mirrors encryptBlock.
func decryptBlock(xk []uint32, dst, src []byte) {
	if useAsm {
		decryptBlockAsm(len(xk)/4-1, &xk[0], &dst[0], &src[0])
	} else {
		decryptBlockGo(xk, dst, src)
	}
}

// expandKey expands key into the encryption and decryption key schedules.
func expandKey(key []byte, enc, dec []uint32) {
	if useAsm {
		// Round counts per AES key size: 128-bit -> 10, 192-bit -> 12,
		// 256-bit -> 14. Default matches the 128-bit case.
		rounds := 10
		switch len(key) {
		case 128 / 8:
			rounds = 10
		case 192 / 8:
			rounds = 12
		case 256 / 8:
			rounds = 14
		}
		expandKeyAsm(rounds, &key[0], &enc[0], &dec[0])
	} else {
		expandKeyGo(key, enc, dec)
	}
}
1.835938
2
db/security/auth.go
fossabot/noah
3
37
/*
 * Copyright (c) 2019 Ready Stock
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package security

import (
	"crypto/tls"

	"github.com/readystock/noah/db/util/protoutil"

	"github.com/pkg/errors"
)

const (
	// NodeUser is used by nodes for intra-cluster traffic.
	NodeUser = "node"
	// RootUser is the default cluster administrator.
	RootUser = "root"
)

// UserAuthHook authenticates a user based on their username and whether their
// connection originates from a client or another node in the cluster.
type UserAuthHook func(string, bool) error

// GetCertificateUser extract the username from a client certificate.
// It requires a TLS connection with at least one peer certificate.
func GetCertificateUser(tlsState *tls.ConnectionState) (string, error) {
	if tlsState == nil {
		return "", errors.Errorf("request is not using TLS")
	}
	if len(tlsState.PeerCertificates) == 0 {
		return "", errors.Errorf("no client certificates in request")
	}
	// The go server handshake code verifies the first certificate, using
	// any following certificates as intermediates. See:
	// https://github.com/golang/go/blob/go1.8.1/src/crypto/tls/handshake_server.go#L723:L742
	return tlsState.PeerCertificates[0].Subject.CommonName, nil
}

// RequestWithUser must be implemented by `roachpb.Request`s which are
// arguments to methods that are not permitted to skip user checks.
type RequestWithUser interface {
	GetUser() string
}

// ProtoAuthHook builds an authentication hook based on the security
// mode and client certificate.
// The protoutil.Message passed to the hook must implement RequestWithUser.
func ProtoAuthHook(
	insecureMode bool, tlsState *tls.ConnectionState,
) (func(protoutil.Message, bool) error, error) {
	userHook, err := UserAuthCertHook(insecureMode, tlsState)
	if err != nil {
		return nil, err
	}

	return func(request protoutil.Message, clientConnection bool) error {
		// RequestWithUser must be implemented.
		requestWithUser, ok := request.(RequestWithUser)
		if !ok {
			return errors.Errorf("unknown request type: %T", request)
		}

		// Delegate the actual user check to the certificate-based hook.
		if err := userHook(requestWithUser.GetUser(), clientConnection); err != nil {
			return errors.Errorf("%s error in request: %s", err, request)
		}
		return nil
	}, nil
}

// UserAuthCertHook builds an authentication hook based on the security
// mode and client certificate.
// In secure mode the certificate user is extracted up front; the returned
// hook then validates each requested user against it.
func UserAuthCertHook(insecureMode bool, tlsState *tls.ConnectionState) (UserAuthHook, error) {
	var certUser string

	if !insecureMode {
		var err error
		certUser, err = GetCertificateUser(tlsState)
		if err != nil {
			return nil, err
		}
	}

	return func(requestedUser string, clientConnection bool) error {
		// TODO(marc): we may eventually need stricter user syntax rules.
		if len(requestedUser) == 0 {
			return errors.New("user is missing")
		}

		// Intra-node (non-client) traffic is restricted to the node user.
		if !clientConnection && requestedUser != NodeUser {
			return errors.Errorf("user %s is not allowed", requestedUser)
		}

		// If running in insecure mode, we have nothing to verify it against.
		if insecureMode {
			return nil
		}

		// The client certificate user must match the requested user,
		// except if the certificate user is NodeUser, which is allowed to
		// act on behalf of all other users.
		if !(certUser == NodeUser || certUser == requestedUser) {
			return errors.Errorf("requested user is %s, but certificate is for %s", requestedUser, certUser)
		}

		return nil
	}, nil
}

// UserAuthPasswordHook builds an authentication hook based on the security
// mode, password, and its potentially matching hash.
// Password auth is only available to client connections; RootUser must use
// certificate authentication.
func UserAuthPasswordHook(insecureMode bool, password string, hashedPassword []byte) UserAuthHook {
	return func(requestedUser string, clientConnection bool) error {
		if len(requestedUser) == 0 {
			return errors.New("user is missing")
		}

		if !clientConnection {
			return errors.New("password authentication is only available for client connections")
		}

		if insecureMode {
			return nil
		}

		if requestedUser == RootUser {
			return errors.Errorf("user %s must use certificate authentication instead of password authentication", RootUser)
		}

		// If the requested user has an empty password, disallow authentication.
		// CompareHashAndPassword is defined elsewhere in this package.
		if len(password) == 0 || CompareHashAndPassword(hashedPassword, password) != nil {
			return errors.New("invalid password")
		}

		return nil
	}
}
1.492188
1
utils/entity/cve_sa/request.go
zhang-jian-jun/cve-sa-backend
2
45
package cveSa

// RequestData is the search request for CVE/SA listings.
type RequestData struct {
	KeyWord     string      `json:"keyword"`
	Type        string      `json:"type"`
	Year        interface{} `json:"year"` // accepts number or string in incoming JSON
	Status      string      `json:"status"`
	PackageName string      `json:"packageName"`
	Pages       Pages       `json:"pages"`
}

// Pages carries pagination parameters.
type Pages struct {
	Page int `json:"page"`
	Size int `json:"size"`
}

// OeCompSearchRequest is the compatibility search request.
type OeCompSearchRequest struct {
	Os           string `json:"os"`
	Architecture string `json:"architecture"`
	KeyWord      string `json:"keyword"`
	Lang         string `json:"lang"`
	Cpu          string `json:"cpu"`
	Pages        Pages  `json:"pages"`
}

// RequestOsv is the OS-vendor (osv) search request.
type RequestOsv struct {
	KeyWord string `json:"keyword"`
	OsvName string `json:"osvName"`
	Type    string `json:"type"`
	Pages   Pages  `json:"pages"`
}

// Osv describes an OS-vendor release and its test results.
type Osv struct {
	Arch                 string   `json:"arch"`
	OsvName              string   `json:"osv_name"`
	OsVersion            string   `json:"os_version"`
	OsDownloadLink       string   `json:"os_download_link"`
	Type                 string   `json:"type"`
	Date                 string   `json:"date"`
	Details              string   `json:"details"`
	FriendlyLink         string   `json:"friendly_link"`
	TotalResult          string   `json:"total_result"`
	CheckSum             string   `json:"checksum"`
	BaseOpeneulerVersion string   `json:"base_openeuler_version"`
	ToolsResult          []Record `json:"tools_result"`
	PlatformResult       []Record `json:"platform_result"`
}

// Record is a single named test result with its pass percentage.
type Record struct {
	Name    string `json:"name"`
	Percent string `json:"percent"`
	Result  string `json:"result"`
}
0.742188
1
vendor/github.com/google/gopacket/layers/tcpip.go
rhuss/dash2alex
52
53
// Copyright 2012 Google, Inc. All rights reserved.
// Copyright 2009-2011 <NAME>. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.

package layers

import (
	"errors"
	"fmt"

	"github.com/google/gopacket"
)

// Checksum computation for TCP/UDP.
type tcpipchecksum struct {
	pseudoheader tcpipPseudoHeader // network layer supplying the pseudo-header; nil until set
}

// tcpipPseudoHeader is implemented by network layers (IPv4/IPv6) that can
// contribute the address portion of the TCP/UDP pseudo-header checksum.
type tcpipPseudoHeader interface {
	pseudoheaderChecksum() (uint32, error)
}

// pseudoheaderChecksum folds the IPv4 source and destination addresses
// into a partial one's-complement sum (high byte shifted by 8).
func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
	if err := ip.AddressTo4(); err != nil {
		return 0, err
	}
	csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
	csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
	csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
	csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
	return csum, nil
}

// pseudoheaderChecksum folds the IPv6 source and destination addresses
// into a partial one's-complement sum, 16 bits at a time.
func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
	if err := ip.AddressTo16(); err != nil {
		return 0, err
	}
	for i := 0; i < 16; i += 2 {
		csum += uint32(ip.SrcIP[i]) << 8
		csum += uint32(ip.SrcIP[i+1])
		csum += uint32(ip.DstIP[i]) << 8
		csum += uint32(ip.DstIP[i+1])
	}
	return csum, nil
}

// Calculate the TCP/IP checksum defined in rfc1071. The passed-in csum is any
// initial checksum data that's already been computed.
func tcpipChecksum(data []byte, csum uint32) uint16 {
	// to handle odd lengths, we loop to length - 1, incrementing by 2, then
	// handle the last byte specifically by checking against the original
	// length.
	length := len(data) - 1
	for i := 0; i < length; i += 2 {
		// For our test packet, doing this manually is about 25% faster
		// (740 ns vs. 1000ns) than doing it by calling binary.BigEndian.Uint16.
		csum += uint32(data[i]) << 8
		csum += uint32(data[i+1])
	}
	if len(data)%2 == 1 {
		// Odd length: the final byte is padded with a zero low byte.
		csum += uint32(data[length]) << 8
	}
	// Fold the carries back into the low 16 bits (one's-complement sum),
	// then invert to get the final checksum.
	for csum > 0xffff {
		csum = (csum >> 16) + (csum & 0xffff)
	}
	return ^uint16(csum + (csum >> 16))
}

// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the
// serialized TCP or UDP header plus its payload, with the checksum zero'd
// out. headerProtocol is the IP protocol number of the upper-layer header.
func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) {
	if c.pseudoheader == nil {
		return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
	}
	length := uint32(len(headerAndPayload))
	csum, err := c.pseudoheader.pseudoheaderChecksum()
	if err != nil {
		return 0, err
	}
	// Remaining pseudo-header fields: protocol number and segment length.
	csum += uint32(headerProtocol)
	csum += length & 0xffff
	csum += length >> 16
	return tcpipChecksum(headerAndPayload, csum), nil
}

// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
// This is needed for computing the checksum when serializing, since TCP/IP transport
// layer checksums depends on fields in the IPv4 or IPv6 layer that contains it.
// The passed in layer must be an *IPv4 or *IPv6.
func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
	switch v := l.(type) {
	case *IPv4:
		i.pseudoheader = v
	case *IPv6:
		i.pseudoheader = v
	default:
		return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
	}
	return nil
}
2.125
2
service/iam/api_op_AttachRolePolicy.go
int-tt/aws-sdk-go-v2
1
61
// Code generated by smithy-go-codegen DO NOT EDIT.

package iam

import (
	"context"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/awslabs/smithy-go/middleware"
	smithyhttp "github.com/awslabs/smithy-go/transport/http"
)

// Attaches the specified managed policy to the specified IAM role. When you attach
// a managed policy to a role, the managed policy becomes part of the role's
// permission (access) policy. You cannot use a managed policy as the role's trust
// policy. The role's trust policy is created at the same time as the role, using
// CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy.
// Use this API to attach a managed policy to a role. To embed an inline policy in
// a role, use PutRolePolicy. For more information about policies, see Managed
// Policies and Inline Policies
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
// in the IAM User Guide.
func (c *Client) AttachRolePolicy(ctx context.Context, params *AttachRolePolicyInput, optFns ...func(*Options)) (*AttachRolePolicyOutput, error) {
	if params == nil {
		params = &AttachRolePolicyInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "AttachRolePolicy", params, optFns, addOperationAttachRolePolicyMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*AttachRolePolicyOutput)
	out.ResultMetadata = metadata
	return out, nil
}

type AttachRolePolicyInput struct {

	// The Amazon Resource Name (ARN) of the IAM policy you want to attach. For more
	// information about ARNs, see Amazon Resource Names (ARNs) and AWS Service
	// Namespaces
	// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
	// the AWS General Reference.
	//
	// This member is required.
	PolicyArn *string

	// The name (friendly name, not ARN) of the role to attach the policy to. This
	// parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a
	// string of characters consisting of upper and lowercase alphanumeric characters
	// with no spaces. You can also include any of the following characters: _+=,.@-
	//
	// This member is required.
	RoleName *string
}

type AttachRolePolicyOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

// addOperationAttachRolePolicyMiddlewares assembles the operation's middleware
// stack: serialization/deserialization first, then the standard client
// middlewares (logging, retries, signing, validation, metadata), in the
// order generated by smithy-go-codegen.
func addOperationAttachRolePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsAwsquery_serializeOpAttachRolePolicy{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAttachRolePolicy{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addOpAttachRolePolicyValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAttachRolePolicy(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	return nil
}

// newServiceMetadataMiddleware_opAttachRolePolicy records the service/operation
// identity used for signing and endpoint resolution.
func newServiceMetadataMiddleware_opAttachRolePolicy(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "iam",
		OperationName: "AttachRolePolicy",
	}
}
1.382813
1
memcache_test.go
ahampton/memcache
0
69
/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package memcache provides a client for the memcached cache server.
package memcache

import (
	"fmt"
	"net"
	"os"
	"os/exec"
	"strings"
	"testing"
	"time"
)

const testServer = "localhost:11211"

// totalOpen reports the number of idle connections across all servers
// in the client's free-connection pool.
func (c *Client) totalOpen() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	count := 0
	for _, v := range c.freeconn {
		count += len(v)
	}
	return count
}

// newLocalhostServer returns a client against a memcached running on
// testServer, flushing it first; the test is skipped (via tb.Skipf)
// when no server is listening there.
func newLocalhostServer(tb testing.TB) *Client {
	c, err := net.Dial("tcp", testServer)
	if err != nil {
		tb.Skipf("skipping test; no server running at %s", testServer)
		return nil
	}
	c.Write([]byte("flush_all\r\n"))
	c.Close()
	client, err := New(testServer)
	if err != nil {
		tb.Fatal(err)
	}
	return client
}

// newUnixServer starts a memcached child process listening on a fresh
// unix socket and returns it with a connected client; the test is
// skipped (via tb.Skip) when the memcached binary is unavailable.
func newUnixServer(tb testing.TB) (*exec.Cmd, *Client) {
	sock := fmt.Sprintf("/tmp/test-gomemcache-%d.sock", os.Getpid())
	os.Remove(sock)
	cmd := exec.Command("memcached", "-s", sock)
	if err := cmd.Start(); err != nil {
		tb.Skip("skipping test; couldn't find memcached")
		return nil, nil
	}

	// Wait a bit for the socket to appear.
	for i := 0; i < 10; i++ {
		if _, err := os.Stat(sock); err == nil {
			break
		}
		time.Sleep(time.Duration(25*i) * time.Millisecond)
	}

	c, err := New(sock)
	if err != nil {
		tb.Fatal(err)
	}

	return cmd, c
}

// TestLocalhost runs the full client test suite against a local TCP server.
func TestLocalhost(t *testing.T) {
	testWithClient(t, newLocalhostServer(t))
}

// Run the memcached binary as a child process and connect to its unix socket.
func TestUnixSocket(t *testing.T) { cmd, c := newUnixServer(t) defer cmd.Wait() defer cmd.Process.Kill() testWithClient(t, c) } func testWithClient(t *testing.T, c *Client) { checkErr := func(err error, format string, args ...interface{}) { if err != nil { t.Fatalf(format, args...) } } mustSet := func(it *Item) { if err := c.Set(it); err != nil { t.Fatalf("failed to Set %#v: %v", *it, err) } } // Set foo := &Item{Key: "foo", Value: []byte("fooval"), Flags: 123} err := c.Set(foo) checkErr(err, "first set(foo): %v", err) err = c.Set(foo) checkErr(err, "second set(foo): %v", err) // Get it, err := c.Get("foo") checkErr(err, "get(foo): %v", err) if it.Key != "foo" { t.Errorf("get(foo) Key = %q, want foo", it.Key) } if string(it.Value) != "fooval" { t.Errorf("get(foo) Value = %q, want fooval", string(it.Value)) } if it.Flags != 123 { t.Errorf("get(foo) Flags = %v, want 123", it.Flags) } // Get non-existant _, err = c.Get("not-exists") if err != ErrCacheMiss { t.Errorf("get(not-exists): expecting %v, got %v instead", ErrCacheMiss, err) } // Get and set a unicode key quxKey := "Hello_世界" qux := &Item{Key: quxKey, Value: []byte("hello world")} err = c.Set(qux) checkErr(err, "first set(Hello_世界): %v", err) it, err = c.Get(quxKey) checkErr(err, "get(Hello_世界): %v", err) if it.Key != quxKey { t.Errorf("get(Hello_世界) Key = %q, want Hello_世界", it.Key) } if string(it.Value) != "hello world" { t.Errorf("get(Hello_世界) Value = %q, want hello world", string(it.Value)) } // Set malformed keys malFormed := &Item{Key: "foo bar", Value: []byte("foobarval")} err = c.Set(malFormed) if err != ErrMalformedKey { t.Errorf("set(foo bar) should return ErrMalformedKey instead of %v", err) } malFormed = &Item{Key: "foo" + string(0x7f), Value: []byte("foobarval")} err = c.Set(malFormed) if err != ErrMalformedKey { t.Errorf("set(foo<0x7f>) should return ErrMalformedKey instead of %v", err) } // SetQuietly quiet := &Item{Key: "quiet", Value: []byte("Shhh")} err = c.SetQuietly(quiet) checkErr(err, 
"setQuietly: %v", err) it, err = c.Get(quiet.Key) checkErr(err, "setQuietly: get: %v", err) if it.Key != quiet.Key { t.Errorf("setQuietly: get: Key = %q, want %s", it.Key, quiet.Key) } if string(it.Value) != string(quiet.Value) { t.Errorf("setQuietly: get: Value = %q, want %q", string(it.Value), string(quiet.Value)) } // Add bar := &Item{Key: "bar", Value: []byte("barval")} err = c.Add(bar) checkErr(err, "first add(bar): %v", err) if err := c.Add(bar); err != ErrNotStored { t.Fatalf("second add(bar) want ErrNotStored, got %v", err) } // GetMulti m, err := c.GetMulti([]string{"foo", "bar"}) checkErr(err, "GetMulti: %v", err) if g, e := len(m), 2; g != e { t.Errorf("GetMulti: got len(map) = %d, want = %d", g, e) } if _, ok := m["foo"]; !ok { t.Fatalf("GetMulti: didn't get key 'foo'") } if _, ok := m["bar"]; !ok { t.Fatalf("GetMulti: didn't get key 'bar'") } if g, e := string(m["foo"].Value), "fooval"; g != e { t.Errorf("GetMulti: foo: got %q, want %q", g, e) } if g, e := string(m["bar"].Value), "barval"; g != e { t.Errorf("GetMulti: bar: got %q, want %q", g, e) } // SetMulti baz1 := &Item{Key: "baz1", Value: []byte("baz1val")} baz2 := &Item{Key: "baz2", Value: []byte("baz2val"), Flags: 123} err = c.SetMulti([]*Item{baz1, baz2}) checkErr(err, "first SetMulti: %v", err) err = c.SetMulti([]*Item{baz1, baz2}) checkErr(err, "second SetMulti: %v", err) m, err = c.GetMulti([]string{baz1.Key, baz2.Key}) checkErr(err, "SetMulti: %v", err) if g, e := len(m), 2; g != e { t.Errorf("SetMulti: got len(map) = %d, want = %d", g, e) } if _, ok := m[baz1.Key]; !ok { t.Fatalf("SetMulti: didn't get key '%s'", baz1.Key) } if _, ok := m[baz2.Key]; !ok { t.Fatalf("SetMulti: didn't get key '%s'", baz2.Key) } if g, e := string(m[baz1.Key].Value), string(baz1.Value); g != e { t.Errorf("SetMulti: got %q, want %q", g, e) } if g, e := string(m[baz2.Key].Value), string(baz2.Value); g != e { t.Errorf("SetMulti: got %q, want %q", g, e) } if m[baz1.Key].Flags != baz1.Flags { t.Errorf("SetMulti: 
Flags = %v, want %v", m[baz1.Key].Flags, baz1.Flags) } if m[baz2.Key].Flags != baz2.Flags { t.Errorf("SetMulti: Flags = %v, want %v", m[baz2.Key].Flags, baz2.Flags) } // SetMultiQuietly quiet1 := &Item{Key: "quiet1", Value: []byte("quiet1val")} quiet2 := &Item{Key: "quiet2", Value: []byte("quiet2val"), Flags: 123} err = c.SetMulti([]*Item{quiet1, quiet2}) checkErr(err, "first SetMultiQuietly: %v", err) err = c.SetMulti([]*Item{quiet1, quiet2}) checkErr(err, "second SetMultiQuietly: %v", err) m, err = c.GetMulti([]string{quiet1.Key, quiet2.Key}) checkErr(err, "SetMultiQuietly: %v", err) if g, e := len(m), 2; g != e { t.Errorf("SetMultiQuietly: got len(map) = %d, want = %d", g, e) } if _, ok := m[quiet1.Key]; !ok { t.Fatalf("SetMultiQuietly: didn't get key '%s'", quiet1.Key) } if _, ok := m[quiet2.Key]; !ok { t.Fatalf("SetMultiQuietly: didn't get key '%s'", quiet2.Key) } if g, e := string(m[quiet1.Key].Value), string(quiet1.Value); g != e { t.Errorf("SetMultiQuietly: got %q, want %q", g, e) } if g, e := string(m[quiet2.Key].Value), string(quiet2.Value); g != e { t.Errorf("SetMultiQuietly: got %q, want %q", g, e) } if m[quiet1.Key].Flags != quiet1.Flags { t.Errorf("SetMultiQuietly: Flags = %v, want %v", m[quiet1.Key].Flags, quiet1.Flags) } if m[quiet2.Key].Flags != quiet2.Flags { t.Errorf("SetMultiQuietly: Flags = %v, want %v", m[quiet2.Key].Flags, quiet2.Flags) } // Delete key := "foo" item, err := c.Get(key) checkErr(err, "pre-Delete: %v", err) if item == nil { t.Error("pre-Delete want item, got nil") } err = c.Delete(key) checkErr(err, "Delete: %v", err) _, err = c.Get(key) if err != ErrCacheMiss { t.Error("post-Delete want ErrCacheMiss, got nil") } err = c.Delete(key) if err != ErrCacheMiss { t.Error("post-Delete want ErrCacheMiss, got nil") } // DeleteQuietly key = "quiet" item, err = c.Get(key) checkErr(err, "pre-DeleteQuietly: %v", err) if item == nil { t.Error("pre-DeleteQuietly want item, got nil") } err = c.DeleteQuietly(key) checkErr(err, "DeleteQuietly: 
%v", err) _, err = c.Get(key) if err != ErrCacheMiss { t.Errorf("post-DeleteQuietly want ErrCacheMiss, got %v", err) } err = c.DeleteQuietly(key) if err != nil { t.Errorf("post-DeleteQuietly want nil err, got %v", err) } // DeleteMulti keys := []string{"baz1", "baz2"} items, err := c.GetMulti(keys) checkErr(err, "pre-DeleteMulti: %v", err) if len(items) != len(keys) { t.Errorf("pre-DeleteMulti want results, got %v", items) } err = c.DeleteMulti(keys) checkErr(err, "DeleteMulti: %v", err) items, err = c.GetMulti(keys) checkErr(err, "post-DeleteMulti: %v", err) if len(items) != 0 { t.Errorf("post-DeleteMulti want no results, got %v", items) } err = c.DeleteMulti(keys) if err == nil { t.Error("post-DeleteMulti want err, got nil") } // DeleteMultiQuietly keys = []string{"quiet1", "quiet2"} items, err = c.GetMulti(keys) checkErr(err, "pre-DeleteMultiQuietly: %v", err) if len(items) != len(keys) { t.Errorf("pre-DeleteMultiQuietly want results, got %v", items) } err = c.DeleteMultiQuietly(keys) checkErr(err, "DeleteMultiQuietly: %v", err) items, err = c.GetMulti(keys) checkErr(err, "post-DeleteMultiQuietly: %v", err) if len(items) != 0 { t.Errorf("post-DeleteMultiQuietly want no results, got %v", items) } err = c.DeleteMultiQuietly(keys) if err != nil { t.Errorf("post-DeleteMultiQuietly want nil err, got %v", err) } // Incr/Decr mustSet(&Item{Key: "num", Value: []byte("42")}) n, err := c.Increment("num", 8) checkErr(err, "Increment num + 8: %v", err) if n != 50 { t.Fatalf("Increment num + 8: want=50, got=%d", n) } n, err = c.Decrement("num", 49) checkErr(err, "Decrement: %v", err) if n != 1 { t.Fatalf("Decrement 49: want=1, got=%d", n) } err = c.Delete("num") checkErr(err, "delete num: %v", err) n, err = c.Increment("num", 1) if err != ErrCacheMiss { t.Fatalf("increment post-delete: want ErrCacheMiss, got %v", err) } mustSet(&Item{Key: "num", Value: []byte("not-numeric")}) n, err = c.Increment("num", 1) if err != ErrBadIncrDec { t.Fatalf("increment non-number: want %v, 
got %v", ErrBadIncrDec, err) } // Invalid key if err := c.Set(&Item{Key: strings.Repeat("f", 251), Value: []byte("bar")}); err != ErrMalformedKey { t.Errorf("expecting ErrMalformedKey when using key too long, got nil") } // Flush _, err = c.Get("bar") checkErr(err, "get(bar): %v", err) err = c.Flush(0) checkErr(err, "flush: %v", err) _, err = c.Get("bar") if err != ErrCacheMiss { t.Fatalf("post-flush: want ErrCacheMiss, got %v", err) } }
1.820313
2
Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go
lhuard1A/origin
37
77
// Copyright (c) 2012-2015 <NAME>. All rights reserved. // Use of this source code is governed by a MIT license found in the LICENSE file. package codec import ( "math/rand" "time" ) // NoopHandle returns a no-op handle. It basically does nothing. // It is only useful for benchmarking, as it gives an idea of the // overhead from the codec framework. // // LIBRARY USERS: *** DO NOT USE *** func NoopHandle(slen int) *noopHandle { h := noopHandle{} h.rand = rand.New(rand.NewSource(time.Now().UnixNano())) h.B = make([][]byte, slen) h.S = make([]string, slen) for i := 0; i < len(h.S); i++ { b := make([]byte, i+1) for j := 0; j < len(b); j++ { b[j] = 'a' + byte(i) } h.B[i] = b h.S[i] = string(b) } return &h } // noopHandle does nothing. // It is used to simulate the overhead of the codec framework. type noopHandle struct { BasicHandle binaryEncodingType noopDrv // noopDrv is unexported here, so we can get a copy of it when needed. } type noopDrv struct { i int S []string B [][]byte mks []bool // stack. if map (true), else if array (false) mk bool // top of stack. what container are we on? map or array? ct valueType // last request for IsContainerType. cb bool // last response for IsContainerType. 
rand *rand.Rand } func (h *noopDrv) r(v int) int { return h.rand.Intn(v) } func (h *noopDrv) m(v int) int { h.i++; return h.i % v } func (h *noopDrv) newEncDriver(_ *Encoder) encDriver { return h } func (h *noopDrv) newDecDriver(_ *Decoder) decDriver { return h } // --- encDriver // stack functions (for map and array) func (h *noopDrv) start(b bool) { // println("start", len(h.mks)+1) h.mks = append(h.mks, b) h.mk = b } func (h *noopDrv) end() { // println("end: ", len(h.mks)-1) h.mks = h.mks[:len(h.mks)-1] if len(h.mks) > 0 { h.mk = h.mks[len(h.mks)-1] } else { h.mk = false } } func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {} func (h *noopDrv) EncodeNil() {} func (h *noopDrv) EncodeInt(i int64) {} func (h *noopDrv) EncodeUint(i uint64) {} func (h *noopDrv) EncodeBool(b bool) {} func (h *noopDrv) EncodeFloat32(f float32) {} func (h *noopDrv) EncodeFloat64(f float64) {} func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {} func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) } func (h *noopDrv) EncodeMapStart(length int) { h.start(false) } func (h *noopDrv) EncodeEnd() { h.end() } func (h *noopDrv) EncodeString(c charEncoding, v string) {} func (h *noopDrv) EncodeSymbol(v string) {} func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {} func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {} // ---- decDriver func (h *noopDrv) initReadNext() {} func (h *noopDrv) CheckBreak() bool { return false } func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false } func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {} func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) } func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) } func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) } func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 } func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] } // func 
(h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) } func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] } func (h *noopDrv) ReadEnd() { h.end() } // toggle map/slice func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) } func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) } func (h *noopDrv) IsContainerType(vt valueType) bool { // return h.m(2) == 0 // handle kStruct if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap { h.cb = !h.cb h.ct = vt return h.cb } // go in a loop and check it. h.ct = vt h.cb = h.m(7) == 0 return h.cb } func (h *noopDrv) TryDecodeAsNil() bool { if h.mk { return false } else { return h.m(8) == 0 } } func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 { return 0 } func (h *noopDrv) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) { // use h.r (random) not h.m() because h.m() could cause the same value to be given. var sk int if h.mk { // if mapkey, do not support values of nil OR bytes, array, map or rawext sk = h.r(7) + 1 } else { sk = h.r(12) } switch sk { case 0: vt = valueTypeNil case 1: vt, v = valueTypeBool, false case 2: vt, v = valueTypeBool, true case 3: vt, v = valueTypeInt, h.DecodeInt(64) case 4: vt, v = valueTypeUint, h.DecodeUint(64) case 5: vt, v = valueTypeFloat, h.DecodeFloat(true) case 6: vt, v = valueTypeFloat, h.DecodeFloat(false) case 7: vt, v = valueTypeString, h.DecodeString() case 8: vt, v = valueTypeBytes, h.B[h.m(len(h.B))] case 9: vt, decodeFurther = valueTypeArray, true case 10: vt, decodeFurther = valueTypeMap, true default: vt, v = valueTypeExt, &RawExt{Tag: h.DecodeUint(64), Data: h.B[h.m(len(h.B))]} } h.ct = vt return }
2.125
2
handlebars/base_test.go
imantung/raymond
1
85
package handlebars import ( "fmt" "strings" "io/ioutil" "path" "strconv" "testing" "github.com/imantung/mario" "github.com/imantung/mario/ast" ) // cf. https://github.com/aymerick/go-fuzz-tests/raymond const dumpTpl = false var dumpTplNb = 0 type Test struct { name string input string data interface{} privData map[string]interface{} helpers map[string]interface{} partials map[string]string output interface{} } func launchTests(t *testing.T, tests []Test) { t.Parallel() for _, test := range tests { var err error var tpl *mario.Template if dumpTpl { filename := strconv.Itoa(dumpTplNb) if err := ioutil.WriteFile(path.Join(".", "dump_tpl", filename), []byte(test.input), 0644); err != nil { panic(err) } dumpTplNb++ } // parse template tpl, err = mario.New().Parse(test.input) if err != nil { t.Errorf("Test '%s' failed - Failed to parse template\ninput:\n\t'%s'\nerror:\n\t%s", test.name, test.input, err) } else { for name, fn := range test.helpers { tpl.WithHelperFunc(name, fn) } for name, source := range test.partials { tpl.WithPartial(name, mario.Must(mario.New().Parse(source))) } // setup private data frame var privData *mario.DataFrame if test.privData != nil { privData = mario.NewDataFrame() for k, v := range test.privData { privData.Set(k, v) } } // render template var b strings.Builder if err := tpl.ExecuteWith(&b, test.data, privData); err != nil { t.Errorf("Test '%s' failed\ninput:\n\t'%s'\ndata:\n\t%s\nerror:\n\t%s\nAST:\n\t%s", test.name, test.input, mario.Str(test.data), err, ast.Print(tpl.Program())) } else { output := b.String() // check output var expectedArr []string expectedArr, ok := test.output.([]string) if ok { match := false for _, expectedStr := range expectedArr { if expectedStr == output { match = true break } } if !match { t.Errorf("Test '%s' failed\ninput:\n\t'%s'\ndata:\n\t%s\npartials:\n\t%s\nexpected\n\t%q\ngot\n\t%q\nAST:\n%s", test.name, test.input, mario.Str(test.data), mario.Str(test.partials), expectedArr, output, 
ast.Print(tpl.Program())) } } else { expectedStr, ok := test.output.(string) if !ok { panic(fmt.Errorf("Erroneous test output description: %q", test.output)) } if expectedStr != output { t.Errorf("Test '%s' failed\ninput:\n\t'%s'\ndata:\n\t%s\npartials:\n\t%s\nexpected\n\t%q\ngot\n\t%q\nAST:\n%s", test.name, test.input, mario.Str(test.data), mario.Str(test.partials), expectedStr, output, ast.Print(tpl.Program())) } } } } } }
1.703125
2
pkg/render/manager.go
IoannisMatzaris/operator
0
93
// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package render import ( "fmt" "strconv" "strings" ocsv1 "github.com/openshift/api/security/v1" v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/common" "github.com/tigera/operator/pkg/components" "github.com/tigera/operator/pkg/render/common/authentication" tigerakvc "github.com/tigera/operator/pkg/render/common/authentication/tigera/key_validator_config" "github.com/tigera/operator/pkg/render/common/configmap" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" rkibana "github.com/tigera/operator/pkg/render/common/kibana" rmeta "github.com/tigera/operator/pkg/render/common/meta" "github.com/tigera/operator/pkg/render/common/podaffinity" "github.com/tigera/operator/pkg/render/common/podsecuritycontext" "github.com/tigera/operator/pkg/render/common/podsecuritypolicy" "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/tls/certificatemanagement" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" ) const ( managerPort = 9443 managerTargetPort = 9443 ManagerServiceName = "tigera-manager" ManagerNamespace = "tigera-manager" 
ManagerServiceIP = "localhost" ManagerServiceAccount = "tigera-manager" ManagerClusterRole = "tigera-manager-role" ManagerClusterRoleBinding = "tigera-manager-binding" ManagerTLSSecretName = "manager-tls" ManagerInternalTLSSecretName = "internal-manager-tls" ManagerClusterSettings = "cluster-settings" ManagerUserSettings = "user-settings" ManagerClusterSettingsLayerTigera = "cluster-settings.layer.tigera-infrastructure" ManagerClusterSettingsViewDefault = "cluster-settings.view.default" ElasticsearchManagerUserSecret = "tigera-ee-manager-elasticsearch-access" TlsSecretHashAnnotation = "hash.operator.tigera.io/tls-secret" KibanaTLSHashAnnotation = "hash.operator.tigera.io/kibana-secrets" ElasticsearchUserHashAnnotation = "hash.operator.tigera.io/elasticsearch-user" PrometheusTLSSecretName = "calico-node-prometheus-tls" ) // ManagementClusterConnection configuration constants const ( VoltronName = "tigera-voltron" VoltronTunnelSecretName = "tigera-management-cluster-connection" defaultVoltronPort = "9443" defaultTunnelVoltronPort = "9449" ) func Manager(cfg *ManagerConfiguration) (Component, error) { var tlsSecrets []*corev1.Secret tlsAnnotations := cfg.TrustedCertBundle.HashAnnotations() tlsAnnotations[KibanaTLSHashAnnotation] = rmeta.SecretsAnnotationHash(cfg.KibanaSecrets...) tlsAnnotations[cfg.TLSKeyPair.HashAnnotationKey()] = cfg.TLSKeyPair.HashAnnotationValue() if cfg.KeyValidatorConfig != nil { tlsSecrets = append(tlsSecrets, cfg.KeyValidatorConfig.RequiredSecrets(ManagerNamespace)...) 
for key, value := range cfg.KeyValidatorConfig.RequiredAnnotations() { tlsAnnotations[key] = value } } if cfg.ManagementCluster != nil { tlsAnnotations[cfg.InternalTrafficSecret.HashAnnotationKey()] = cfg.InternalTrafficSecret.HashAnnotationValue() tlsAnnotations[cfg.TunnelSecret.HashAnnotationKey()] = cfg.InternalTrafficSecret.HashAnnotationValue() } return &managerComponent{ cfg: cfg, tlsSecrets: tlsSecrets, tlsAnnotations: tlsAnnotations, }, nil } // ManagerConfiguration contains all the config information needed to render the component. type ManagerConfiguration struct { KeyValidatorConfig authentication.KeyValidatorConfig ESSecrets []*corev1.Secret KibanaSecrets []*corev1.Secret TrustedCertBundle certificatemanagement.TrustedBundle ESClusterConfig *relasticsearch.ClusterConfig TLSKeyPair certificatemanagement.KeyPairInterface PullSecrets []*corev1.Secret Openshift bool Installation *operatorv1.InstallationSpec ManagementCluster *operatorv1.ManagementCluster TunnelSecret certificatemanagement.KeyPairInterface InternalTrafficSecret certificatemanagement.KeyPairInterface ClusterDomain string ESLicenseType ElasticsearchLicenseType Replicas *int32 } type managerComponent struct { cfg *ManagerConfiguration tlsSecrets []*corev1.Secret tlsAnnotations map[string]string managerImage string proxyImage string esProxyImage string } func (c *managerComponent) ResolveImages(is *operatorv1.ImageSet) error { reg := c.cfg.Installation.Registry path := c.cfg.Installation.ImagePath prefix := c.cfg.Installation.ImagePrefix var err error c.managerImage, err = components.GetReference(components.ComponentManager, reg, path, prefix, is) errMsgs := []string{} if err != nil { errMsgs = append(errMsgs, err.Error()) } c.proxyImage, err = components.GetReference(components.ComponentManagerProxy, reg, path, prefix, is) if err != nil { errMsgs = append(errMsgs, err.Error()) } c.esProxyImage, err = components.GetReference(components.ComponentEsProxy, reg, path, prefix, is) if err != nil { 
errMsgs = append(errMsgs, err.Error()) } if len(errMsgs) != 0 { return fmt.Errorf(strings.Join(errMsgs, ",")) } return nil } func (c *managerComponent) SupportedOSType() rmeta.OSType { return rmeta.OSTypeLinux } func (c *managerComponent) Objects() ([]client.Object, []client.Object) { objs := []client.Object{ CreateNamespace(ManagerNamespace, c.cfg.Installation.KubernetesProvider), } objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(ManagerNamespace, c.cfg.PullSecrets...)...)...) objs = append(objs, managerServiceAccount(), managerClusterRole(c.cfg.ManagementCluster != nil, false, c.cfg.Openshift), managerClusterRoleBinding(), managerClusterWideSettingsGroup(), managerUserSpecificSettingsGroup(), managerClusterWideTigeraLayer(), managerClusterWideDefaultView(), ) objs = append(objs, c.getTLSObjects()...) objs = append(objs, c.managerService(), ) // If we're running on openshift, we need to add in an SCC. if c.cfg.Openshift { objs = append(objs, c.securityContextConstraints()) } else { // If we're not running openshift, we need to add pod security policies. objs = append(objs, c.managerPodSecurityPolicy()) } objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(ManagerNamespace, c.cfg.ESSecrets...)...)...) objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(ManagerNamespace, c.cfg.KibanaSecrets...)...)...) objs = append(objs, c.managerDeployment()) if c.cfg.KeyValidatorConfig != nil { objs = append(objs, configmap.ToRuntimeObjects(c.cfg.KeyValidatorConfig.RequiredConfigMaps(ManagerNamespace)...)...) } return objs, nil } func (c *managerComponent) Ready() bool { return true } // managerDeployment creates a deployment for the Tigera Secure manager component. 
func (c *managerComponent) managerDeployment() *appsv1.Deployment { var initContainers []corev1.Container if c.cfg.TLSKeyPair.UseCertificateManagement() { initContainers = append(initContainers, c.cfg.TLSKeyPair.InitContainer(ManagerNamespace)) } podTemplate := relasticsearch.DecorateAnnotations(&corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: "tigera-manager", Namespace: ManagerNamespace, Labels: map[string]string{ "k8s-app": "tigera-manager", }, Annotations: c.tlsAnnotations, }, Spec: relasticsearch.PodSpecDecorate(corev1.PodSpec{ NodeSelector: c.cfg.Installation.ControlPlaneNodeSelector, ServiceAccountName: ManagerServiceAccount, Tolerations: c.managerTolerations(), ImagePullSecrets: secret.GetReferenceList(c.cfg.PullSecrets), InitContainers: initContainers, Containers: []corev1.Container{ relasticsearch.ContainerDecorate(c.managerContainer(), c.cfg.ESClusterConfig.ClusterName(), ElasticsearchManagerUserSecret, c.cfg.ClusterDomain, c.SupportedOSType()), relasticsearch.ContainerDecorate(c.managerEsProxyContainer(), c.cfg.ESClusterConfig.ClusterName(), ElasticsearchManagerUserSecret, c.cfg.ClusterDomain, c.SupportedOSType()), c.managerProxyContainer(), }, Volumes: c.managerVolumes(), }), }, c.cfg.ESClusterConfig, c.cfg.ESSecrets).(*corev1.PodTemplateSpec) if c.cfg.Replicas != nil && *c.cfg.Replicas > 1 { podTemplate.Spec.Affinity = podaffinity.NewPodAntiAffinity("tigera-manager", ManagerNamespace) } d := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: "tigera-manager", Namespace: ManagerNamespace, Labels: map[string]string{ "k8s-app": "tigera-manager", }, }, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "k8s-app": "tigera-manager", }, }, Replicas: c.cfg.Replicas, Strategy: appsv1.DeploymentStrategy{ Type: appsv1.RecreateDeploymentStrategyType, }, Template: *podTemplate, }, } return d } // managerVolumes returns the volumes for 
the Tigera Secure manager component. func (c *managerComponent) managerVolumeMounts() []corev1.VolumeMount { if c.cfg.KeyValidatorConfig != nil { return c.cfg.KeyValidatorConfig.RequiredVolumeMounts() } return []corev1.VolumeMount{} } // managerVolumes returns the volumes for the Tigera Secure manager component. func (c *managerComponent) managerVolumes() []corev1.Volume { v := []corev1.Volume{ c.cfg.TLSKeyPair.Volume(), c.cfg.TrustedCertBundle.Volume(), { Name: KibanaPublicCertSecret, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: KibanaPublicCertSecret, }, }, }, } if c.cfg.ManagementCluster != nil { v = append(v, c.cfg.InternalTrafficSecret.Volume(), c.cfg.TunnelSecret.Volume(), ) } if c.cfg.KeyValidatorConfig != nil { v = append(v, c.cfg.KeyValidatorConfig.RequiredVolumes()...) } return v } // managerProbe returns the probe for the manager container. func (c *managerComponent) managerProbe() *corev1.Probe { return &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/", Port: intstr.FromInt(managerPort), Scheme: corev1.URISchemeHTTPS, }, }, InitialDelaySeconds: 90, PeriodSeconds: 10, } } // managerEsProxyProbe returns the probe for the ES proxy container. func (c *managerComponent) managerEsProxyProbe() *corev1.Probe { return &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/tigera-elasticsearch/version", Port: intstr.FromInt(managerPort), Scheme: corev1.URISchemeHTTPS, }, }, InitialDelaySeconds: 90, PeriodSeconds: 10, } } // managerProxyProbe returns the probe for the proxy container. func (c *managerComponent) managerProxyProbe() *corev1.Probe { return &corev1.Probe{ Handler: corev1.Handler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/voltron/api/health", Port: intstr.FromInt(managerPort), Scheme: corev1.URISchemeHTTPS, }, }, InitialDelaySeconds: 90, PeriodSeconds: 10, } } // managerEnvVars returns the envvars for the manager container. 
func (c *managerComponent) managerEnvVars() []corev1.EnvVar { envs := []corev1.EnvVar{ {Name: "CNX_PROMETHEUS_API_URL", Value: fmt.Sprintf("/api/v1/namespaces/%s/services/calico-node-prometheus:9090/proxy/api/v1", common.TigeraPrometheusNamespace)}, {Name: "CNX_COMPLIANCE_REPORTS_API_URL", Value: "/compliance/reports"}, {Name: "CNX_QUERY_API_URL", Value: "/api/v1/namespaces/tigera-system/services/https:tigera-api:8080/proxy"}, {Name: "CNX_ELASTICSEARCH_API_URL", Value: "/tigera-elasticsearch"}, {Name: "CNX_ELASTICSEARCH_KIBANA_URL", Value: fmt.Sprintf("/%s", KibanaBasePath)}, {Name: "CNX_ENABLE_ERROR_TRACKING", Value: "false"}, {Name: "CNX_ALP_SUPPORT", Value: "true"}, {Name: "CNX_CLUSTER_NAME", Value: "cluster"}, {Name: "CNX_POLICY_RECOMMENDATION_SUPPORT", Value: "true"}, {Name: "ENABLE_MULTI_CLUSTER_MANAGEMENT", Value: strconv.FormatBool(c.cfg.ManagementCluster != nil)}, } envs = append(envs, c.managerOAuth2EnvVars()...) return envs } // managerContainer returns the manager container. func (c *managerComponent) managerContainer() corev1.Container { tm := corev1.Container{ Name: "tigera-manager", Image: c.managerImage, Env: c.managerEnvVars(), LivenessProbe: c.managerProbe(), SecurityContext: podsecuritycontext.NewBaseContext(), VolumeMounts: c.managerVolumeMounts(), } return tm } // managerOAuth2EnvVars returns the OAuth2/OIDC envvars depending on the authentication type. 
func (c *managerComponent) managerOAuth2EnvVars() []corev1.EnvVar { var envs []corev1.EnvVar if c.cfg.KeyValidatorConfig == nil { envs = []corev1.EnvVar{{Name: "CNX_WEB_AUTHENTICATION_TYPE", Value: "Token"}} } else { envs = []corev1.EnvVar{ {Name: "CNX_WEB_AUTHENTICATION_TYPE", Value: "OIDC"}, {Name: "CNX_WEB_OIDC_CLIENT_ID", Value: c.cfg.KeyValidatorConfig.ClientID()}} switch c.cfg.KeyValidatorConfig.(type) { case *DexKeyValidatorConfig: envs = append(envs, corev1.EnvVar{Name: "CNX_WEB_OIDC_AUTHORITY", Value: c.cfg.KeyValidatorConfig.Issuer()}) case *tigerakvc.KeyValidatorConfig: envs = append(envs, corev1.EnvVar{Name: "CNX_WEB_OIDC_AUTHORITY", Value: ""}) } } return envs } // managerProxyContainer returns the container for the manager proxy container. func (c *managerComponent) managerProxyContainer() corev1.Container { var keyPath, certPath, intKeyPath, intCertPath, tunnelKeyPath, tunnelCertPath string if c.cfg.TLSKeyPair != nil { keyPath, certPath = c.cfg.TLSKeyPair.VolumeMountKeyFilePath(), c.cfg.TLSKeyPair.VolumeMountCertificateFilePath() } if c.cfg.InternalTrafficSecret != nil { intKeyPath, intCertPath = c.cfg.InternalTrafficSecret.VolumeMountKeyFilePath(), c.cfg.InternalTrafficSecret.VolumeMountCertificateFilePath() } if c.cfg.TunnelSecret != nil { tunnelKeyPath, tunnelCertPath = c.cfg.TunnelSecret.VolumeMountKeyFilePath(), c.cfg.TunnelSecret.VolumeMountCertificateFilePath() } env := []corev1.EnvVar{ {Name: "VOLTRON_PORT", Value: defaultVoltronPort}, {Name: "VOLTRON_COMPLIANCE_ENDPOINT", Value: fmt.Sprintf("https://compliance.%s.svc.%s", ComplianceNamespace, c.cfg.ClusterDomain)}, {Name: "VOLTRON_LOGLEVEL", Value: "Info"}, {Name: "VOLTRON_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)}, {Name: "VOLTRON_KIBANA_BASE_PATH", Value: fmt.Sprintf("/%s/", KibanaBasePath)}, {Name: "VOLTRON_KIBANA_CA_BUNDLE_PATH", Value: "/certs/kibana/tls.crt"}, {Name: "VOLTRON_PACKET_CAPTURE_CA_BUNDLE_PATH", Value: 
c.cfg.TrustedCertBundle.MountPath()}, {Name: "VOLTRON_PROMETHEUS_CA_BUNDLE_PATH", Value: c.cfg.TrustedCertBundle.MountPath()}, {Name: "VOLTRON_COMPLIANCE_CA_BUNDLE_PATH", Value: c.cfg.TrustedCertBundle.MountPath()}, {Name: "VOLTRON_HTTPS_KEY", Value: keyPath}, {Name: "VOLTRON_HTTPS_CERT", Value: certPath}, {Name: "VOLTRON_TUNNEL_KEY", Value: tunnelKeyPath}, {Name: "VOLTRON_TUNNEL_CERT", Value: tunnelCertPath}, {Name: "VOLTRON_INTERNAL_HTTPS_KEY", Value: intKeyPath}, {Name: "VOLTRON_INTERNAL_HTTPS_CERT", Value: intCertPath}, {Name: "VOLTRON_ENABLE_MULTI_CLUSTER_MANAGEMENT", Value: strconv.FormatBool(c.cfg.ManagementCluster != nil)}, {Name: "VOLTRON_TUNNEL_PORT", Value: defaultTunnelVoltronPort}, {Name: "VOLTRON_DEFAULT_FORWARD_SERVER", Value: "tigera-secure-es-gateway-http.tigera-elasticsearch.svc:9200"}, } if c.cfg.KeyValidatorConfig != nil { env = append(env, c.cfg.KeyValidatorConfig.RequiredEnv("VOLTRON_")...) } if _, ok := c.cfg.TrustedCertBundle.HashAnnotations()[complianceServerTLSHashAnnotation]; !ok { env = append(env, corev1.EnvVar{Name: "VOLTRON_ENABLE_COMPLIANCE", Value: "false"}) } return corev1.Container{ Name: VoltronName, Image: c.proxyImage, Env: env, VolumeMounts: c.volumeMountsForProxyManager(), LivenessProbe: c.managerProxyProbe(), SecurityContext: podsecuritycontext.NewBaseContext(), } } func (c *managerComponent) volumeMountsForProxyManager() []corev1.VolumeMount { var mounts = []corev1.VolumeMount{ {Name: ManagerTLSSecretName, MountPath: "/manager-tls", ReadOnly: true}, {Name: KibanaPublicCertSecret, MountPath: "/certs/kibana", ReadOnly: true}, c.cfg.TrustedCertBundle.VolumeMount(), } if c.cfg.ManagementCluster != nil { mounts = append(mounts, c.cfg.InternalTrafficSecret.VolumeMount()) mounts = append(mounts, c.cfg.TunnelSecret.VolumeMount()) } if c.cfg.KeyValidatorConfig != nil { mounts = append(mounts, c.cfg.KeyValidatorConfig.RequiredVolumeMounts()...) 
} return mounts } // managerEsProxyContainer returns the ES proxy container func (c *managerComponent) managerEsProxyContainer() corev1.Container { env := []corev1.EnvVar{ {Name: "ELASTIC_LICENSE_TYPE", Value: string(c.cfg.ESLicenseType)}, {Name: "ELASTIC_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)}, } var volumeMounts []corev1.VolumeMount if c.cfg.ManagementCluster != nil { volumeMounts = append(volumeMounts, c.cfg.TrustedCertBundle.VolumeMount()) env = append(env, corev1.EnvVar{Name: "VOLTRON_CA_PATH", Value: certificatemanagement.TrustedCertBundleMountPath}) } if c.cfg.KeyValidatorConfig != nil { env = append(env, c.cfg.KeyValidatorConfig.RequiredEnv("")...) volumeMounts = append(volumeMounts, c.cfg.KeyValidatorConfig.RequiredVolumeMounts()...) } return corev1.Container{ Name: "tigera-es-proxy", Image: c.esProxyImage, LivenessProbe: c.managerEsProxyProbe(), SecurityContext: podsecuritycontext.NewBaseContext(), Env: env, VolumeMounts: volumeMounts, } } // managerTolerations returns the tolerations for the Tigera Secure manager deployment pods. func (c *managerComponent) managerTolerations() []corev1.Toleration { return append(c.cfg.Installation.ControlPlaneTolerations, rmeta.TolerateMaster, rmeta.TolerateCriticalAddonsOnly) } // managerService returns the service exposing the Tigera Secure web app. func (c *managerComponent) managerService() *corev1.Service { return &corev1.Service{ TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{ Name: "tigera-manager", Namespace: ManagerNamespace, }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { Port: managerPort, Protocol: corev1.ProtocolTCP, TargetPort: intstr.FromInt(managerTargetPort), }, }, Selector: map[string]string{ "k8s-app": "tigera-manager", }, }, } } // managerServiceAccount creates the serviceaccount used by the Tigera Secure web app. 
func managerServiceAccount() *corev1.ServiceAccount { return &corev1.ServiceAccount{ TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}, ObjectMeta: metav1.ObjectMeta{Name: ManagerServiceAccount, Namespace: ManagerNamespace}, } } // managerClusterRole returns a clusterrole that allows authn/authz review requests. func managerClusterRole(managementCluster, managedCluster, openshift bool) *rbacv1.ClusterRole { cr := &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: ManagerClusterRole, }, Rules: []rbacv1.PolicyRule{ { APIGroups: []string{"authorization.k8s.io"}, Resources: []string{"subjectaccessreviews"}, Verbs: []string{"create"}, }, { APIGroups: []string{"authentication.k8s.io"}, Resources: []string{"tokenreviews"}, Verbs: []string{"create"}, }, { APIGroups: []string{"projectcalico.org"}, Resources: []string{ "networksets", "globalnetworksets", "globalnetworkpolicies", "tier.globalnetworkpolicies", "networkpolicies", "tier.networkpolicies", "stagedglobalnetworkpolicies", "tier.stagedglobalnetworkpolicies", "stagednetworkpolicies", "tier.stagednetworkpolicies", "stagedkubernetesnetworkpolicies", }, Verbs: []string{"list"}, }, { APIGroups: []string{"projectcalico.org"}, Resources: []string{ "tiers", }, Verbs: []string{"get", "list"}, }, { APIGroups: []string{"projectcalico.org"}, Resources: []string{ "hostendpoints", }, Verbs: []string{"list"}, }, { APIGroups: []string{"projectcalico.org"}, Resources: []string{ "felixconfigurations", }, ResourceNames: []string{ "default", }, Verbs: []string{"get"}, }, { APIGroups: []string{"projectcalico.org"}, Resources: []string{ "alertexceptions", }, Verbs: []string{"get", "list", "update"}, }, { APIGroups: []string{"networking.k8s.io"}, Resources: []string{"networkpolicies"}, Verbs: []string{"get", "list"}, }, { APIGroups: []string{""}, Resources: []string{"serviceaccounts", "namespaces", "nodes", "events", 
"services", "pods"}, Verbs: []string{"list"}, }, { APIGroups: []string{"apps"}, Resources: []string{"replicasets", "statefulsets", "daemonsets"}, Verbs: []string{"list"}, }, // When a request is made in the manager UI, they are proxied through the Voltron backend server. If the // request is targeting a k8s api or when it is targeting a managed cluster, Voltron will authenticate the // user based on the auth header and then impersonate the user. { APIGroups: []string{""}, Resources: []string{"users", "groups", "serviceaccounts"}, Verbs: []string{"impersonate"}, }, }, } if !managedCluster { cr.Rules = append(cr.Rules, rbacv1.PolicyRule{ APIGroups: []string{"projectcalico.org"}, Resources: []string{"managedclusters"}, Verbs: []string{"list", "get", "watch", "update"}, }, ) } if !openshift { // Allow access to the pod security policy in case this is enforced on the cluster cr.Rules = append(cr.Rules, rbacv1.PolicyRule{ APIGroups: []string{"policy"}, Resources: []string{"podsecuritypolicies"}, Verbs: []string{"use"}, ResourceNames: []string{"tigera-manager"}, }, ) } return cr } // managerClusterRoleBinding returns a clusterrolebinding that gives the tigera-manager serviceaccount // the permissions in the tigera-manager-role. func managerClusterRoleBinding() *rbacv1.ClusterRoleBinding { return &rbacv1.ClusterRoleBinding{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{Name: ManagerClusterRoleBinding}, RoleRef: rbacv1.RoleRef{ APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: ManagerClusterRole, }, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", Name: ManagerServiceAccount, Namespace: ManagerNamespace, }, }, } } // TODO: Can we get rid of this and instead just bind to default ones? 
func (c *managerComponent) securityContextConstraints() *ocsv1.SecurityContextConstraints { privilegeEscalation := false return &ocsv1.SecurityContextConstraints{ TypeMeta: metav1.TypeMeta{Kind: "SecurityContextConstraints", APIVersion: "security.openshift.io/v1"}, ObjectMeta: metav1.ObjectMeta{Name: ManagerNamespace}, AllowHostDirVolumePlugin: true, AllowHostIPC: false, AllowHostNetwork: false, AllowHostPID: true, AllowHostPorts: false, AllowPrivilegeEscalation: &privilegeEscalation, AllowPrivilegedContainer: false, FSGroup: ocsv1.FSGroupStrategyOptions{Type: ocsv1.FSGroupStrategyRunAsAny}, RunAsUser: ocsv1.RunAsUserStrategyOptions{Type: ocsv1.RunAsUserStrategyRunAsAny}, ReadOnlyRootFilesystem: false, SELinuxContext: ocsv1.SELinuxContextStrategyOptions{Type: ocsv1.SELinuxStrategyMustRunAs}, SupplementalGroups: ocsv1.SupplementalGroupsStrategyOptions{Type: ocsv1.SupplementalGroupsStrategyRunAsAny}, Users: []string{fmt.Sprintf("system:serviceaccount:%s:tigera-manager", ManagerNamespace)}, Volumes: []ocsv1.FSType{"*"}, } } func (c *managerComponent) getTLSObjects() []client.Object { objs := []client.Object{} for _, s := range c.tlsSecrets { objs = append(objs, s) } return objs } func (c *managerComponent) managerPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy { psp := podsecuritypolicy.NewBasePolicy() psp.GetObjectMeta().SetName("tigera-manager") return psp } // managerClusterWideSettingsGroup returns a UISettingsGroup with the description "cluster-wide settings" // // Calico Enterprise only func managerClusterWideSettingsGroup() *v3.UISettingsGroup { return &v3.UISettingsGroup{ TypeMeta: metav1.TypeMeta{Kind: "UISettingsGroup", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ Name: ManagerClusterSettings, }, Spec: v3.UISettingsGroupSpec{ Description: "Cluster Settings", }, } } // managerUserSpecificSettingsGroup returns a UISettingsGroup with the description "user settings" // // Calico Enterprise only func managerUserSpecificSettingsGroup() 
*v3.UISettingsGroup { return &v3.UISettingsGroup{ TypeMeta: metav1.TypeMeta{Kind: "UISettingsGroup", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ Name: ManagerUserSettings, }, Spec: v3.UISettingsGroupSpec{ Description: "User Settings", FilterType: v3.FilterTypeUser, }, } } // managerClusterWideTigeraLayer returns a UISettings layer belonging to the cluster-wide settings group that contains // all of the tigera namespaces. // // Calico Enterprise only func managerClusterWideTigeraLayer() *v3.UISettings { namespaces := []string{ "tigera-compliance", "tigera-dex", "tigera-dpi", "tigera-eck-operator", "tigera-elasticsearch", "tigera-fluentd", "tigera-guardian", "tigera-intrusion-detection", "tigera-kibana", "tigera-manager", "tigera-operator", "tigera-packetcapture", "tigera-prometheus", "tigera-system", "calico-system", } nodes := make([]v3.UIGraphNode, len(namespaces)) for i := range namespaces { ns := namespaces[i] nodes[i] = v3.UIGraphNode{ ID: "namespace/" + ns, Type: "namespace", Name: ns, } } return &v3.UISettings{ TypeMeta: metav1.TypeMeta{Kind: "UISettings", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ Name: ManagerClusterSettingsLayerTigera, }, Spec: v3.UISettingsSpec{ Group: "cluster-settings", Description: "Tigera Infrastructure", Layer: &v3.UIGraphLayer{ Nodes: nodes, }, }, } } // managerClusterWideDefaultView returns a UISettings view belonging to the cluster-wide settings group that shows // everything and uses the tigera-infrastructure layer. 
// // Calico Enterprise only func managerClusterWideDefaultView() *v3.UISettings { return &v3.UISettings{ TypeMeta: metav1.TypeMeta{Kind: "UISettings", APIVersion: "projectcalico.org/v3"}, ObjectMeta: metav1.ObjectMeta{ Name: ManagerClusterSettingsViewDefault, }, Spec: v3.UISettingsSpec{ Group: "cluster-settings", Description: "Default", View: &v3.UIGraphView{ Nodes: []v3.UIGraphNodeView{{ UIGraphNode: v3.UIGraphNode{ ID: "layer/cluster-settings.layer.tigera-infrastructure", Type: "layer", Name: "cluster-settings.layer.tigera-infrastructure", }, }}, }, }, } }
1.132813
1
internal/pkg/edgectl/install/ui.go
Asher-Wang/ambassador
0
101
package edgectl import ( "bufio" "bytes" "context" "fmt" "io" "log" "os" "os/exec" "regexp" "strings" "time" "github.com/pkg/errors" ) var validEmailAddress = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") func getEmailAddress(defaultEmail string, log *log.Logger) string { prompt := fmt.Sprintf("Email address [%s]: ", defaultEmail) errorFallback := defaultEmail if defaultEmail == "" { prompt = "Email address: " errorFallback = "<EMAIL>" } for { fmt.Print(prompt) scanner := bufio.NewScanner(os.Stdin) scanner.Scan() text := scanner.Text() if err := scanner.Err(); err != nil { log.Printf("Email query failed: %+v", err) return errorFallback } text = strings.TrimSpace(text) if defaultEmail != "" && text == "" { return defaultEmail } if validEmailAddress.MatchString(text) { return text } fmt.Printf("Sorry, %q does not appear to be a valid email address. Please check it and try again.\n", text) } } func (i *Installer) AskEmail() (string, Result) { // Attempt to grab a reasonable default for the user's email address defaultEmail, err := i.Capture("get email", true, "", "git", "config", "--global", "user.email") if err != nil { i.log.Print(err) defaultEmail = "" } else { defaultEmail = strings.TrimSpace(defaultEmail) if !validEmailAddress.MatchString(defaultEmail) { defaultEmail = "" } } // Ask for the user's email address i.ShowRequestEmail() // Do the goroutine dance to let the user hit Ctrl-C at the email prompt gotEmail := make(chan string) var emailAddress string go func() { gotEmail <- getEmailAddress(defaultEmail, i.log) close(gotEmail) }() select { case emailAddress = <-gotEmail: // Continue case <-i.ctx.Done(): return "", i.resEmailRequestError(errors.New("Interrupted")) } i.log.Printf("Using email address %q", emailAddress) return emailAddress, Result{} } // LoopFailedError is a fatal error for loopUntil(...) 
type LoopFailedError string

// Error implements error
func (s LoopFailedError) Error() string {
	return string(s)
}

// loopConfig bundles the timing parameters for one loopUntil call.
type loopConfig struct {
	sleepTime    time.Duration // How long to sleep between calls
	progressTime time.Duration // How long until we explain why we're waiting
	timeout      time.Duration // How long until we give up
}

// lc2: quick checks — poll every 0.5s, explain at 15s, give up after 2 minutes.
var lc2 = &loopConfig{
	sleepTime:    500 * time.Millisecond,
	progressTime: 15 * time.Second,
	timeout:      120 * time.Second,
}

// lc5: medium waits — poll every 3s, explain at 30s, give up after 5 minutes.
var lc5 = &loopConfig{
	sleepTime:    3 * time.Second,
	progressTime: 30 * time.Second,
	timeout:      5 * time.Minute,
}

// lc10: long waits — poll every 3s, explain at 30s, give up after 10 minutes.
var lc10 = &loopConfig{
	sleepTime:    3 * time.Second,
	progressTime: 30 * time.Second,
	timeout:      10 * time.Minute,
}

// loopUntil repeatedly calls a function until it succeeds, using a
// (presently-fixed) loop period and timeout.
//
// how() is retried until it returns nil (success), returns a
// LoopFailedError (immediate, non-retryable failure), or the overall
// timeout expires / i.ctx is cancelled. The elapsed wait is logged on exit.
func (i *Installer) loopUntil(what string, how func() error, lc *loopConfig) error {
	ctx, cancel := context.WithTimeout(i.ctx, lc.timeout)
	defer cancel()
	start := time.Now()
	i.log.Printf("Waiting for %s", what)
	defer func() { i.log.Printf("Wait for %s took %.1f seconds", what, time.Since(start).Seconds()) }()
	progTimer := time.NewTimer(lc.progressTime)
	defer progTimer.Stop()
	for {
		err := how()
		if err == nil {
			return nil // Success
		} else if _, ok := err.(LoopFailedError); ok {
			return err // Immediate failure
		}
		// Wait and try again
		select {
		case <-progTimer.C:
			// The timer is never reset, so this fires at most once: the
			// "why we're waiting" explanation is shown a single time.
			i.ShowWaiting(what)
		case <-time.After(lc.sleepTime):
			// Try again
		case <-ctx.Done():
			i.ShowTimedOut(what)
			return errors.Errorf("timed out waiting for %s (or interrupted)", what)
		}
	}
}

// ShowWrapped displays to the user (via the show logger) the text items passed
// in with word wrapping applied. Leading and trailing newlines are dropped in
// each text item (to make it easier to use multiline constants), but newlines
// within each item are preserved. Use an empty string item to include a blank
// line in the output between other items.
func (i *Installer) ShowWrapped(texts ...string) { for _, text := range texts { text = strings.Trim(text, "\n") // Drop leading and trailing newlines for _, para := range strings.Split(text, "\n") { // Preserve newlines in the text for _, line := range doWordWrap(para, "", 79) { // But wrap text too i.show.Println(line) } } } } func doWordWrap(text string, prefix string, lineWidth int) []string { words := strings.Fields(strings.TrimSpace(text)) if len(words) == 0 { return []string{""} } lines := make([]string, 0) wrapped := prefix + words[0] for _, word := range words[1:] { if len(word)+1 > lineWidth-len(wrapped) { lines = append(lines, wrapped) wrapped = prefix + word } else { wrapped += " " + word } } if len(wrapped) > 0 { lines = append(lines, wrapped) } return lines } // Capture calls a command and returns its stdout func (i *Installer) Capture(name string, logToStdout bool, input string, args ...string) (res string, err error) { res = "" resAsBytes := &bytes.Buffer{} i.log.Printf("$ %s", strings.Join(args, " ")) cmd := exec.Command(args[0], args[1:]...) cmd.Stdin = strings.NewReader(input) if logToStdout { cmd.Stdout = io.MultiWriter(NewLoggingWriter(i.cmdOut), resAsBytes) } else { cmd.Stdout = resAsBytes } cmd.Stderr = NewLoggingWriter(i.cmdErr) err = cmd.Run() if err != nil { err = errors.Wrap(err, name) } res = resAsBytes.String() return }
1.859375
2
packages/arb-rpc-node/aggregator/aggregator.go
EazyReal/arbitrum
1
109
/* * Copyright 2020-2021, Offchain Labs, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package aggregator import ( "context" "github.com/offchainlabs/arbitrum/packages/arb-util/arblog" "math/big" "github.com/offchainlabs/arbitrum/packages/arb-rpc-node/batcher" "github.com/offchainlabs/arbitrum/packages/arb-rpc-node/snapshot" "github.com/offchainlabs/arbitrum/packages/arb-rpc-node/txdb" "github.com/offchainlabs/arbitrum/packages/arb-util/core" "github.com/pkg/errors" ethcommon "github.com/ethereum/go-ethereum/common" ethcore "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/arbitrum/packages/arb-evm/evm" "github.com/offchainlabs/arbitrum/packages/arb-util/common" "github.com/offchainlabs/arbitrum/packages/arb-util/machine" ) var logger = arblog.Logger.With().Str("component", "aggregator").Logger() type Server struct { chainId *big.Int batch batcher.TransactionBatcher db *txdb.TxDB scope event.SubscriptionScope } // NewServer returns a new instance of the Server class func NewServer( batch batcher.TransactionBatcher, chainId *big.Int, db *txdb.TxDB, ) *Server { return &Server{ chainId: chainId, batch: batch, db: db, } } // SendTransaction takes a request signed transaction l2message from a Client // and puts it in a queue to be included in the next 
transaction batch
func (m *Server) SendTransaction(ctx context.Context, tx *types.Transaction) error {
	return m.batch.SendTransaction(ctx, tx)
}

// GetBlockCount returns the number of L2 blocks recorded in the database.
func (m *Server) GetBlockCount() (uint64, error) {
	latest, err := m.db.BlockCount()
	if err != nil {
		return 0, err
	}
	return latest, nil
}

// BlockNum resolves an RPC block specifier to a concrete block height.
// "latest" and "pending" both map to the newest block in the database;
// any other negative value is rejected.
func (m *Server) BlockNum(block *rpc.BlockNumber) (uint64, error) {
	if block == nil {
		return 0, errors.New("block number must not be null")
	} else if *block == rpc.LatestBlockNumber || *block == rpc.PendingBlockNumber {
		latest, err := m.db.LatestBlock()
		if err != nil {
			return 0, err
		}
		return latest.Header.Number.Uint64(), nil
	} else if *block >= 0 {
		return uint64(*block), nil
	} else {
		return 0, errors.Errorf("unsupported BlockNumber: %v", block.Int64())
	}
}

// LatestBlockHeader returns the header of the newest block, or (nil, nil)
// when the database holds no blocks yet.
func (m *Server) LatestBlockHeader() (*types.Header, error) {
	latest, err := m.db.LatestBlock()
	if err != nil || latest == nil {
		return nil, err
	}
	return latest.Header, nil
}

// GetRequestResult returns the value output by the VM in response to the
// l2message with the given hash
func (m *Server) GetRequestResult(requestId common.Hash) (*evm.TxResult, core.InboxState, *big.Int, error) {
	return m.db.GetRequest(requestId)
}

// GetL2ToL1Proof builds a Merkle proof for entry index within the given
// outgoing-message batch; errors if the batch does not exist.
func (m *Server) GetL2ToL1Proof(batchNumber *big.Int, index uint64) (*evm.MerkleRootProof, error) {
	batch, err := m.db.GetMessageBatch(batchNumber)
	if err != nil {
		return nil, err
	}
	if batch == nil {
		return nil, errors.New("batch doesn't exist")
	}
	return batch.GenerateProof(index)
}

// ChainId returns the chain ID this aggregator serves.
func (m *Server) ChainId() *big.Int {
	return m.chainId
}

// BlockInfoByNumber looks up block metadata by height.
func (m *Server) BlockInfoByNumber(height uint64) (*machine.BlockInfo, error) {
	return m.db.GetBlock(height)
}

// BlockLogFromInfo resolves the L2 block log for the given block info.
func (m *Server) BlockLogFromInfo(block *machine.BlockInfo) (*evm.BlockInfo, error) {
	return m.db.GetL2Block(block)
}

// BlockInfoByHash looks up block metadata by block hash.
func (m *Server) BlockInfoByHash(hash common.Hash) (*machine.BlockInfo, error) {
	return m.db.GetBlockWithHash(hash)
}

// GetMachineBlockResults returns the block info together with every
// transaction result contained in the block.
func (m *Server) GetMachineBlockResults(block *machine.BlockInfo) (*evm.BlockInfo, []*evm.TxResult, error) { return
m.db.GetBlockResults(block) } func (m *Server) GetTxInBlockAtIndexResults(res *machine.BlockInfo, index uint64) (*evm.TxResult, error) { avmLog, err := core.GetZeroOrOneLog(m.db.Lookup, new(big.Int).SetUint64(res.InitialLogIndex()+index)) if err != nil || avmLog.Value == nil { return nil, err } evmRes, err := evm.NewTxResultFromValue(avmLog.Value) if err != nil { return nil, err } if evmRes.IncomingRequest.L2BlockNumber.Cmp(res.Header.Number) != 0 { return nil, nil } return evmRes, nil } func (m *Server) GetSnapshot(ctx context.Context, blockHeight uint64) (*snapshot.Snapshot, error) { return m.db.GetSnapshot(ctx, blockHeight) } func (m *Server) LatestSnapshot(ctx context.Context) (*snapshot.Snapshot, error) { return m.db.LatestSnapshot(ctx) } func (m *Server) PendingSnapshot(ctx context.Context) (*snapshot.Snapshot, error) { pending, err := m.batch.PendingSnapshot(ctx) if err != nil { return nil, err } if pending == nil { return m.LatestSnapshot(ctx) } return pending, nil } func (m *Server) Aggregator() *common.Address { return m.batch.Aggregator() } func (m *Server) PendingTransactionCount(ctx context.Context, account common.Address) (*uint64, error) { return m.batch.PendingTransactionCount(ctx, account) } func (m *Server) ChainDb() ethdb.Database { return nil } func (m *Server) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) { select { case <-ctx.Done(): return nil, errors.New("context cancelled") default: } height, err := m.BlockNum(&blockNumber) if err != nil { return nil, err } info, err := m.db.GetBlock(height) if err != nil || info == nil { return nil, err } return info.Header, nil } func (m *Server) HeaderByHash(_ context.Context, blockHash ethcommon.Hash) (*types.Header, error) { info, err := m.BlockInfoByHash(common.NewHashFromEth(blockHash)) if err != nil || info == nil { return nil, err } return info.Header, nil } func (m *Server) GetReceipts(_ context.Context, blockHash ethcommon.Hash) (types.Receipts, error) { 
info, err := m.db.GetBlockWithHash(common.NewHashFromEth(blockHash)) if err != nil || info == nil { return nil, err } _, results, err := m.GetMachineBlockResults(info) if err != nil || results == nil { return nil, err } receipts := make(types.Receipts, 0, len(results)) for _, res := range results { receipts = append(receipts, res.ToEthReceipt(common.NewHashFromEth(blockHash))) } return receipts, nil } func (m *Server) GetLogs(_ context.Context, blockHash ethcommon.Hash) ([][]*types.Log, error) { info, err := m.db.GetBlockWithHash(common.NewHashFromEth(blockHash)) if err != nil || info == nil { return nil, err } _, results, err := m.GetMachineBlockResults(info) if err != nil || results == nil { return nil, err } logs := make([][]*types.Log, 0, len(results)) for _, res := range results { logs = append(logs, res.EthLogs(common.NewHashFromEth(blockHash))) } return logs, nil } func (m *Server) BloomStatus() (uint64, uint64) { return 0, 0 } func (m *Server) ServiceFilter(_ context.Context, _ *bloombits.MatcherSession) { // Currently not implemented } func (m *Server) SubscribeNewTxsEvent(ch chan<- ethcore.NewTxsEvent) event.Subscription { return m.scope.Track(m.db.SubscribeNewTxsEvent(ch)) } func (m *Server) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { return m.scope.Track(m.db.SubscribePendingLogsEvent(ch)) } func (m *Server) SubscribeChainEvent(ch chan<- ethcore.ChainEvent) event.Subscription { return m.scope.Track(m.db.SubscribeChainEvent(ch)) } func (m *Server) SubscribeChainHeadEvent(ch chan<- ethcore.ChainEvent) event.Subscription { return m.scope.Track(m.db.SubscribeChainHeadEvent(ch)) } func (m *Server) SubscribeChainSideEvent(ch chan<- ethcore.ChainEvent) event.Subscription { return m.scope.Track(m.db.SubscribeChainSideEvent(ch)) } func (m *Server) SubscribeRemovedLogsEvent(ch chan<- ethcore.RemovedLogsEvent) event.Subscription { return m.scope.Track(m.db.SubscribeRemovedLogsEvent(ch)) } func (m *Server) SubscribeLogsEvent(ch chan<- 
[]*types.Log) event.Subscription { return m.scope.Track(m.db.SubscribeLogsEvent(ch)) } func (m *Server) SubscribeBlockProcessingEvent(ch chan<- []*types.Log) event.Subscription { return m.scope.Track(m.db.SubscribeBlockProcessingEvent(ch)) } func (m *Server) GetLookup() core.ArbCoreLookup { return m.db.Lookup }
1.148438
1
types/methods.go
mdempsky/amigo
65
117
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package types

// This file defines utilities for population of method sets.

import (
	"fmt"
)

// MethodValue returns the Function implementing method sel, building
// wrapper methods on demand. It returns nil if sel denotes an
// abstract (interface) method.
//
// Precondition: sel.Kind() == MethodVal.
//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) MethodValue(sel *Selection) *Function {
	if sel.Kind() != MethodVal {
		panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
	}
	T := sel.Recv()
	if isInterface(T) {
		return nil // abstract method
	}
	if prog.mode&LogSource != 0 {
		// Logged via defer so the closing entry appears even on panic.
		defer logStack("MethodValue %s %v", T, sel)()
	}

	// addMethod requires methodsMu; held for the lookup-or-create of the
	// method set and the wrapper construction as one atomic step.
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()

	return prog.addMethod(prog.createMethodSet(T), sel)
}

// LookupMethod returns the implementation of the method of type T
// identified by (pkg, name).  It returns nil if the method exists but
// is abstract, and panics if T has no such method.
//
func (prog *Program) LookupMethod(T Type, pkg *Package, name string) *Function {
	sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
	if sel == nil {
		panic(fmt.Sprintf("%s has no method %s", T, Id(pkg, name)))
	}
	return prog.MethodValue(sel)
}

// ssaMethodSet contains the (concrete) methods of a non-interface type.
type ssaMethodSet struct {
	mapping  map[string]*Function // populated lazily
	complete bool                 // mapping contains all methods
}

// Precondition: !isInterface(T).
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) func (prog *Program) createMethodSet(T Type) *ssaMethodSet { mset, ok := prog.methodSets.At(T).(*ssaMethodSet) if !ok { mset = &ssaMethodSet{mapping: make(map[string]*Function)} prog.methodSets.Set(T, mset) } return mset } // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) func (prog *Program) addMethod(mset *ssaMethodSet, sel *Selection) *Function { if sel.Kind() == MethodExpr { panic(sel) } id := sel.Obj().Id() fn := mset.mapping[id] if fn == nil { obj := sel.Obj().(*Func) needsPromotion := len(sel.Index()) > 1 needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv()) if needsPromotion || needsIndirection { fn = prog.makeWrapper(sel) } else { fn = prog.declaredFunc(obj) } if fn.Signature.Recv() == nil { panic(fn) // missing receiver } mset.mapping[id] = fn } return fn } // RuntimeTypes returns a new unordered slice containing all // concrete types in the program for which a complete (non-empty) // method set is required at run-time. // // Thread-safe. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) // func (prog *Program) RuntimeTypes() []Type { prog.methodsMu.Lock() defer prog.methodsMu.Unlock() var res []Type prog.methodSets.Iterate(func(T Type, v interface{}) { if v.(*ssaMethodSet).complete { res = append(res, T) } }) return res } // declaredFunc returns the concrete function/method denoted by obj. // Panic ensues if there is none. // func (prog *Program) declaredFunc(obj *Func) *Function { if v := prog.packageLevelValue(obj); v != nil { return v.(*Function) } panic("no concrete method: " + obj.String()) } // needMethodsOf ensures that runtime type information (including the // complete method set) is available for the specified type T and all // its subcomponents. // // needMethodsOf must be called for at least every type that is an // operand of some MakeInterface instruction, and for the type of // every exported package member. // // Precondition: T is not a method signature (*Signature with Recv()!=nil). 
// // Thread-safe. (Called via emitConv from multiple builder goroutines.) // // TODO(adonovan): make this faster. It accounts for 20% of SSA build time. // // EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu) // func (prog *Program) needMethodsOf(T Type) { prog.methodsMu.Lock() prog.needMethods(T, false) prog.methodsMu.Unlock() } // Precondition: T is not a method signature (*Signature with Recv()!=nil). // Recursive case: skip => don't create methods for T. // // EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu) // func (prog *Program) needMethods(T Type, skip bool) { // Each package maintains its own set of types it has visited. if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok { // needMethods(T) was previously called if !prevSkip || skip { return // already seen, with same or false 'skip' value } } prog.runtimeTypes.Set(T, skip) tmset := prog.MethodSets.MethodSet(T) if !skip && !isInterface(T) && tmset.Len() > 0 { // Create methods of T. mset := prog.createMethodSet(T) if !mset.complete { mset.complete = true n := tmset.Len() for i := 0; i < n; i++ { prog.addMethod(mset, tmset.At(i)) } } } // Recursion over signatures of each method. for i := 0; i < tmset.Len(); i++ { sig := tmset.At(i).Type().(*Signature) prog.needMethods(sig.Params(), false) prog.needMethods(sig.Results(), false) } switch t := T.(type) { case *Basic: // nop case *Interface, *TypeParam: // nop---handled by recursion over method set. case *Pointer: prog.needMethods(t.Elem(), false) case *Slice: prog.needMethods(t.Elem(), false) case *Chan: prog.needMethods(t.Elem(), false) case *Map: prog.needMethods(t.Key(), false) prog.needMethods(t.Elem(), false) case *Signature: if t.Recv() != nil { panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv())) } prog.needMethods(t.Params(), false) prog.needMethods(t.Results(), false) case *Named: // A pointer-to-named type can be derived from a named // type via reflection. It may have methods too. 
prog.needMethods(NewPointer(T), false) // Consider 'type T struct{S}' where S has methods. // Reflection provides no way to get from T to struct{S}, // only to S, so the method set of struct{S} is unwanted, // so set 'skip' flag during recursion. prog.needMethods(t.Underlying(), true) case *Array: prog.needMethods(t.Elem(), false) case *Struct: for i, n := 0, t.NumFields(); i < n; i++ { prog.needMethods(t.Field(i).Type(), false) } case *Tuple: for i, n := 0, t.Len(); i < n; i++ { prog.needMethods(t.At(i).Type(), false) } default: panic(T) } }
1.75
2
pkg/api/http/job/job/job.go
onedomain/lastbackend
0
125
// // KULADO INC. CONFIDENTIAL // __________________ // // [2014] - [2019] KULADO INC. // All Rights Reserved. // // NOTICE: All information contained herein is, and remains // the property of KULADO INC. and its suppliers, // if any. The intellectual and technical concepts contained // herein are proprietary to KULADO INC. // and its suppliers and may be covered by Russian Federation and Foreign Patents, // patents in process, and are protected by trade secret or copyright law. // Dissemination of this information or reproduction of this material // is strictly forbidden unless prior written permission is obtained // from KULADO INC.. // package job import ( "context" "github.com/onedomain/lastbackend/pkg/api/envs" "github.com/onedomain/lastbackend/pkg/api/types/v1/request" "github.com/onedomain/lastbackend/pkg/distribution" "github.com/onedomain/lastbackend/pkg/distribution/errors" "github.com/onedomain/lastbackend/pkg/distribution/types" "github.com/onedomain/lastbackend/pkg/log" "github.com/onedomain/lastbackend/pkg/util/resource" "net/http" ) const ( logPrefix = "api:handler:job" logLevel = 3 ) func Fetch(ctx context.Context, namespace, name string) (*types.Job, *errors.Err) { jm := distribution.NewJobModel(ctx, envs.Get().GetStorage()) job, err := jm.Get(types.NewJobSelfLink(namespace, name).String()) if err != nil { log.V(logLevel).Errorf("%s:fetch:> err: %s", logPrefix, err.Error()) return nil, errors.New("job").InternalServerError(err) } if job == nil { err := errors.New("job not found") log.V(logLevel).Errorf("%s:fetch:> err: %s", logPrefix, err.Error()) return nil, errors.New("job").NotFound() } return job, nil } func Apply(ctx context.Context, ns *types.Namespace, mf *request.JobManifest) (*types.Job, *errors.Err) { if mf.Meta.Name == nil { return nil, errors.New("job").BadParameter("meta.name") } job, err := Fetch(ctx, ns.Meta.Name, *mf.Meta.Name) if err != nil { if err.Code != http.StatusText(http.StatusNotFound) { return nil, 
errors.New("job").InternalServerError() } } if job == nil { return Create(ctx, ns, mf) } return Update(ctx, ns, job, mf) } func Create(ctx context.Context, ns *types.Namespace, mf *request.JobManifest) (*types.Job, *errors.Err) { jm := distribution.NewJobModel(ctx, envs.Get().GetStorage()) nm := distribution.NewNamespaceModel(ctx, envs.Get().GetStorage()) if mf.Meta.Name != nil { job, err := jm.Get(types.NewJobSelfLink(ns.Meta.Name, *mf.Meta.Name).String()) if err != nil { log.V(logLevel).Errorf("%s:create:> get job by name `%s` in namespace `%s` err: %s", logPrefix, mf.Meta.Name, ns.Meta.Name, err.Error()) return nil, errors.New("job").InternalServerError() } if job != nil { log.V(logLevel).Warnf("%s:create:> job name `%s` in namespace `%s` not unique", logPrefix, mf.Meta.Name, ns.Meta.Name) return nil, errors.New("job").NotUnique("name") } } job := new(types.Job) mf.SetJobMeta(job) job.Meta.SelfLink = *types.NewJobSelfLink(ns.Meta.Name, *mf.Meta.Name) job.Meta.Namespace = ns.Meta.Name if err := mf.SetJobSpec(job); err != nil { return nil, errors.New("job").BadRequest(err.Error()) } if ns.Spec.Resources.Limits.RAM != 0 || ns.Spec.Resources.Limits.CPU != 0 { for _, c := range job.Spec.Task.Template.Containers { if c.Resources.Limits.RAM == 0 { c.Resources.Limits.RAM, _ = resource.DecodeMemoryResource(types.DEFAULT_RESOURCE_LIMITS_RAM) } if c.Resources.Limits.CPU == 0 { c.Resources.Limits.CPU, _ = resource.DecodeCpuResource(types.DEFAULT_RESOURCE_LIMITS_CPU) } } } if err := ns.AllocateResources(job.Spec.GetResourceRequest()); err != nil { log.V(logLevel).Errorf("%s:create:> %s", logPrefix, err.Error()) return nil, errors.New("job").BadRequest(err.Error()) } else { if err := nm.Update(ns); err != nil { log.V(logLevel).Errorf("%s:update:> update namespace err: %s", logPrefix, err.Error()) return nil, errors.New("job").InternalServerError() } } job, err := jm.Create(job) if err != nil { log.V(logLevel).Errorf("%s:create:> create job err: %s", logPrefix, err.Error()) 
return nil, errors.New("job").InternalServerError() } return job, nil } func Update(ctx context.Context, ns *types.Namespace, job *types.Job, mf *request.JobManifest) (*types.Job, *errors.Err) { jm := distribution.NewJobModel(ctx, envs.Get().GetStorage()) nm := distribution.NewNamespaceModel(ctx, envs.Get().GetStorage()) resources := job.Spec.GetResourceRequest() mf.SetJobMeta(job) if err := mf.SetJobSpec(job); err != nil { return nil, errors.New("job").BadRequest(err.Error()) } requestedResources := job.Spec.GetResourceRequest() if !resources.Equal(requestedResources) { allocatedResources := ns.Status.Resources.Allocated ns.ReleaseResources(resources) if err := ns.AllocateResources(job.Spec.GetResourceRequest()); err != nil { ns.Status.Resources.Allocated = allocatedResources log.V(logLevel).Errorf("%s:update:> %s", logPrefix, err.Error()) return nil, errors.New("job").BadRequest(err.Error()) } else { if err := nm.Update(ns); err != nil { log.V(logLevel).Errorf("%s:update:> update namespace err: %s", logPrefix, err.Error()) return nil, errors.New("job").InternalServerError() } } } if err := jm.Set(job); err != nil { log.V(logLevel).Errorf("%s:update:> update job err: %s", logPrefix, err.Error()) return nil, errors.New("job").InternalServerError() } return job, nil }
1.21875
1
internal/services/iam/resource_iam_activation_email_test.go
jdelucaa/terraform-provider-hsdp
26
133
package iam_test import ( "fmt" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/philips-software/terraform-provider-hsdp/internal/acctest" ) func TestResourceIAMActivationEmail_basic(t *testing.T) { resourceName := "hsdp_iam_activation_email.test" userID := "foo" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ProviderFactories: acctest.ProviderFactories, Steps: []resource.TestStep{ { Config: testAccResourceIAMActivationEmailConfig(userID), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "user_id", userID), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, }, }) } func testAccResourceIAMActivationEmailConfig(id string) string { return fmt.Sprintf(` resource "hsdp_iam_activation_email" "test" { user_id = %[1]q }`, id) }
0.980469
1
core/codec/capture_test.go
v2pro/wallaby
5
141
package codec import ( "testing" "net/http" "github.com/stretchr/testify/require" "net/http/httputil" "bytes" "fmt" "bufio" ) func Test_bufio(t *testing.T) { should := require.New(t) req, err := http.NewRequest("GET", "/", nil) should.Nil(err) reqBytes, err := httputil.DumpRequest(req, true) should.Nil(err) buf := &bytes.Buffer{} buf.Write(reqBytes) buf.Write(reqBytes) reader := bufio.NewReaderSize(buf, 2048) fmt.Println(http.ReadRequest(reader)) fmt.Println(reader.Buffered()) fmt.Println(http.ReadRequest(reader)) fmt.Println(reader.Buffered()) }
1.289063
1
core/v1/core.go
tuxlinuxien/lesspass
10
149
// Package v1 provides core functions to build LessPass password. package v1 import ( "crypto/hmac" "crypto/sha256" "fmt" "strconv" "golang.org/x/crypto/pbkdf2" ) const ( iterations = 8192 keylen = 32 ) // EncryptLogin encrypts login with pbkdf2. func EncryptLogin(login, password string) []byte { var out = pbkdf2.Key([]byte(password), []byte(login), iterations, keylen, sha256.New) return []byte(fmt.Sprintf("%x", out)) } // RenderPassword returns the generated password. func RenderPassword(encLogin []byte, site string, len, counter int, template string) string { derivedEncryptedLogin := deriveEncryptedLogin(encLogin, site, len, counter) return prettyPrint(derivedEncryptedLogin, template) } func createHmac(encLogin []byte, salt string) []byte { mac := hmac.New(sha256.New, encLogin) mac.Write([]byte(salt)) return []byte(fmt.Sprintf("%x", mac.Sum(nil))) } func deriveEncryptedLogin(encLogin []byte, site string, length, counter int) []byte { var salt = site + strconv.Itoa(counter) return createHmac(encLogin, salt)[0:length] } func getPasswordChar(charType byte, index int) byte { var passwordsChars = map[byte]string{ 'V': "AEIOUY", 'C': "BC<KEY>", 'v': "aeiouy", 'c': "bcdfghjklmnpqrstvwxz", 'A': "AE<KEY>", 'a': "<KEY>", 'n': "0123456789", 's': "@&%?,=[]_:-+*$#!'^~;()/.", 'x': "AEIOUYaeiouyBCDFGHJKLMNPQRSTVWXZbcdfghjklmnpqrstvwxz0123456789@&%?,=[]_:-+*$#!'^~;()/.", } var passwordChar = passwordsChars[charType] return passwordChar[index%len(passwordChar)] } func getCharType(template string, index int) byte { return template[index%len(template)] } func prettyPrint(hash []byte, template string) string { var out = "" for i, c := range hash { tplStr := getCharType(template, i) out += string(getPasswordChar(tplStr, int(c))) } return out }
1.875
2
utils/switchable/snapshot.go
Enlighten-Fund/go-opera
0
157
package switchable import ( "sync" "github.com/Fantom-foundation/lachesis-base/kvdb" "github.com/ethereum/go-ethereum/common" ) type Snapshot struct { kvdb.Snapshot mu sync.RWMutex } func (s *Snapshot) SwitchTo(snap kvdb.Snapshot) kvdb.Snapshot { s.mu.Lock() defer s.mu.Unlock() old := s.Snapshot s.Snapshot = snap return old } func Wrap(snap kvdb.Snapshot) *Snapshot { s := &Snapshot{} s.SwitchTo(snap) return s } // Has checks if key is in the exists. func (s *Snapshot) Has(key []byte) (bool, error) { s.mu.RLock() defer s.mu.RUnlock() return s.Snapshot.Has(key) } // Get returns key-value pair by key. func (s *Snapshot) Get(key []byte) ([]byte, error) { s.mu.RLock() defer s.mu.RUnlock() return s.Snapshot.Get(key) } func (s *Snapshot) Release() { s.mu.Lock() defer s.mu.Unlock() s.Snapshot.Release() } // NewIterator creates a binary-alphabetical iterator over a subset // of database content with a particular key prefix, starting at a particular // initial key (or after, if it does not exist). func (s *Snapshot) NewIterator(prefix []byte, start []byte) kvdb.Iterator { s.mu.RLock() defer s.mu.RUnlock() return &switchableIterator{ mu: &s.mu, upd: &s.Snapshot, cur: s.Snapshot, parentIt: s.Snapshot.NewIterator(prefix, start), prefix: prefix, start: start, } } /* * Iterator */ type switchableIterator struct { mu *sync.RWMutex upd *kvdb.Snapshot cur kvdb.Snapshot parentIt kvdb.Iterator prefix, start []byte key, value []byte } func (it *switchableIterator) mayReopen() { if it.cur != *it.upd { // reopen iterator if DB was switched it.cur = *it.upd if it.key != nil { it.start = common.CopyBytes(it.key[len(it.prefix):]) } it.parentIt = it.cur.NewIterator(it.prefix, it.start) if it.key != nil { _ = it.parentIt.Next() // skip previous key } } } // Next scans key-value pair by key in lexicographic order. Looks in cache first, // then - in DB. 
func (it *switchableIterator) Next() bool { it.mu.RLock() defer it.mu.RUnlock() it.mayReopen() ok := it.parentIt.Next() if !ok { it.key = nil it.value = nil return false } it.key = it.parentIt.Key() it.value = it.parentIt.Value() return true } // Error returns any accumulated error. Exhausting all the key/value pairs // is not considered to be an error. A memory iterator cannot encounter errors. func (it *switchableIterator) Error() error { it.mu.RLock() defer it.mu.RUnlock() it.mayReopen() return it.parentIt.Error() } // Key returns the key of the current key/value pair, or nil if done. The caller // should not modify the contents of the returned slice, and its contents may // change on the next call to Next. func (it *switchableIterator) Key() []byte { return it.key } // Value returns the value of the current key/value pair, or nil if done. The // caller should not modify the contents of the returned slice, and its contents // may change on the next call to Next. func (it *switchableIterator) Value() []byte { return it.value } // Release releases associated resources. Release should always succeed and can // be called multiple times without causing error. func (it *switchableIterator) Release() { it.mu.RLock() defer it.mu.RUnlock() it.mayReopen() it.parentIt.Release() }
1.8125
2
pkg/jwt/time_test.go
josestg/justforfun
0
165
package jwt import ( "reflect" "testing" "time" ) func TestNewTime(t *testing.T) { t1 := NewTime(time.Now()) t2 := new(Time) b, err := t1.MarshalJSON() if err != nil { t.Fatalf("expecting error nil but got %v", err) } if err := t2.UnmarshalJSON(b); err != nil { t.Fatalf("expecting error nil but got %v", err) } if !reflect.DeepEqual(t1, t2) { t.Fatalf("expecting t1 and t2 are equal") } }
1.109375
1
server/leaderboard_rank_cache.go
kokizzu/nakama
0
173
// Copyright 2018 The Nakama Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "database/sql" "sync" "time" "github.com/heroiclabs/nakama/v3/internal/skiplist" "github.com/heroiclabs/nakama-common/api" "github.com/gofrs/uuid" "go.uber.org/zap" ) type LeaderboardRankCache interface { Get(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) int64 Fill(leaderboardId string, expiryUnix int64, records []*api.LeaderboardRecord) Insert(leaderboardId string, expiryUnix int64, sortOrder int, ownerID uuid.UUID, score, subscore int64) int64 Delete(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) bool DeleteLeaderboard(leaderboardId string, expiryUnix int64) bool TrimExpired(nowUnix int64) bool } type LeaderboardWithExpiry struct { LeaderboardId string Expiry int64 } type RankAsc struct { OwnerId uuid.UUID Score int64 Subscore int64 } func (r *RankAsc) Less(other interface{}) bool { ro := other.(*RankAsc) if r.Score < ro.Score { return true } if r.Score > ro.Score { return false } if r.Subscore < ro.Subscore { return true } if r.Subscore > ro.Subscore { return false } return r.OwnerId.String() < ro.OwnerId.String() } type RankDesc struct { OwnerId uuid.UUID Score int64 Subscore int64 } func (r *RankDesc) Less(other interface{}) bool { ro := other.(*RankDesc) if ro.Score < r.Score { return true } if ro.Score > r.Score { return false } if ro.Subscore < r.Subscore { return true } if ro.Subscore > r.Subscore { return false } return 
ro.OwnerId.String() < r.OwnerId.String() } type RankCache struct { sync.RWMutex owners map[uuid.UUID]skiplist.Interface cache *skiplist.SkipList } type LocalLeaderboardRankCache struct { sync.RWMutex blacklistAll bool blacklistIds map[string]struct{} cache map[LeaderboardWithExpiry]*RankCache } var _ LeaderboardRankCache = &LocalLeaderboardRankCache{} func NewLocalLeaderboardRankCache(startupLogger *zap.Logger, db *sql.DB, config *LeaderboardConfig, leaderboardCache LeaderboardCache) LeaderboardRankCache { cache := &LocalLeaderboardRankCache{ blacklistIds: make(map[string]struct{}, len(config.BlacklistRankCache)), blacklistAll: len(config.BlacklistRankCache) == 1 && config.BlacklistRankCache[0] == "*", cache: make(map[LeaderboardWithExpiry]*RankCache, 0), } // If caching is disabled completely do not preload any records. if cache.blacklistAll { startupLogger.Info("Skipping leaderboard rank cache initialization") return cache } startupLogger.Info("Initializing leaderboard rank cache") nowTime := time.Now().UTC() go func() { skippedLeaderboards := make([]string, 0, 10) leaderboards := leaderboardCache.GetAllLeaderboards() cachedLeaderboards := make([]string, 0, len(leaderboards)) for _, leaderboard := range leaderboards { if _, ok := cache.blacklistIds[leaderboard.Id]; ok { startupLogger.Debug("Skip caching leaderboard ranks", zap.String("leaderboard_id", leaderboard.Id)) skippedLeaderboards = append(skippedLeaderboards, leaderboard.Id) continue } cachedLeaderboards = append(cachedLeaderboards, leaderboard.Id) startupLogger.Debug("Caching leaderboard ranks", zap.String("leaderboard_id", leaderboard.Id)) // Current expiry for this leaderboard. 
// This matches calculateTournamentDeadlines var expiryUnix int64 if leaderboard.ResetSchedule != nil { expiryUnix = leaderboard.ResetSchedule.Next(nowTime).UTC().Unix() if leaderboard.EndTime > 0 && expiryUnix > leaderboard.EndTime { expiryUnix = leaderboard.EndTime } } else { expiryUnix = leaderboard.EndTime } // Prepare structure to receive rank data. key := LeaderboardWithExpiry{LeaderboardId: leaderboard.Id, Expiry: expiryUnix} cache.Lock() rankCache, found := cache.cache[key] if !found { rankCache = &RankCache{ owners: make(map[uuid.UUID]skiplist.Interface), cache: skiplist.New(), } cache.cache[key] = rankCache } cache.Unlock() // Look up all active records for this leaderboard. query := ` SELECT owner_id, score, subscore FROM leaderboard_record WHERE leaderboard_id = $1 AND expiry_time = $2` rows, err := db.Query(query, leaderboard.Id, time.Unix(expiryUnix, 0).UTC()) if err != nil { startupLogger.Error("Failed to caching leaderboard ranks", zap.String("leaderboard_id", leaderboard.Id), zap.Error(err)) continue } // Process the records. for rows.Next() { var ownerIDStr string var score int64 var subscore int64 if err = rows.Scan(&ownerIDStr, &score, &subscore); err != nil { startupLogger.Error("Failed to scan leaderboard rank data", zap.String("leaderboard_id", leaderboard.Id), zap.Error(err)) break } ownerID, err := uuid.FromString(ownerIDStr) if err != nil { startupLogger.Error("Failed to parse scanned leaderboard rank data", zap.String("leaderboard_id", leaderboard.Id), zap.String("owner_id", ownerIDStr), zap.Error(err)) break } // Prepare new rank data for this leaderboard entry. 
var rankData skiplist.Interface if leaderboard.SortOrder == LeaderboardSortOrderDescending { rankData = &RankDesc{ OwnerId: ownerID, Score: score, Subscore: subscore, } } else { rankData = &RankAsc{ OwnerId: ownerID, Score: score, Subscore: subscore, } } rankCache.Lock() if _, alreadyInserted := rankCache.owners[ownerID]; alreadyInserted { rankCache.Unlock() continue } rankCache.owners[ownerID] = rankData rankCache.cache.Insert(rankData) rankCache.Unlock() } _ = rows.Close() } startupLogger.Info("Leaderboard rank cache initialization completed successfully", zap.Strings("cached", cachedLeaderboards), zap.Strings("skipped", skippedLeaderboards)) }() return cache } func (l *LocalLeaderboardRankCache) Get(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) int64 { if l.blacklistAll { // If all rank caching is disabled. return 0 } if _, ok := l.blacklistIds[leaderboardId]; ok { // If rank caching is disabled for this particular leaderboard. return 0 } // Find rank map for this leaderboard/expiry pair. key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix} l.RLock() rankCache, ok := l.cache[key] l.RUnlock() if !ok { return 0 } // Find rank data for this owner. rankCache.RLock() rankData, ok := rankCache.owners[ownerID] if !ok { rankCache.RUnlock() return 0 } rank := rankCache.cache.GetRank(rankData) rankCache.RUnlock() return int64(rank) } func (l *LocalLeaderboardRankCache) Fill(leaderboardId string, expiryUnix int64, records []*api.LeaderboardRecord) { if l.blacklistAll { // If all rank caching is disabled. return } if _, ok := l.blacklistIds[leaderboardId]; ok { // If rank caching is disabled for this particular leaderboard. return } if len(records) == 0 { // Nothing to do. return } // Find rank map for this leaderboard/expiry pair. key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix} l.RLock() rankCache, ok := l.cache[key] l.RUnlock() if !ok { return } // Find rank data for each owner. 
rankCache.RLock() for _, record := range records { ownerID, err := uuid.FromString(record.OwnerId) if err != nil { continue } rankData, ok := rankCache.owners[ownerID] if !ok { continue } record.Rank = int64(rankCache.cache.GetRank(rankData)) } rankCache.RUnlock() } func (l *LocalLeaderboardRankCache) Insert(leaderboardId string, expiryUnix int64, sortOrder int, ownerID uuid.UUID, score, subscore int64) int64 { if l.blacklistAll { // If all rank caching is disabled. return 0 } if _, ok := l.blacklistIds[leaderboardId]; ok { // If rank caching is disabled for this particular leaderboard. return 0 } // No existing rank map for this leaderboard/expiry pair, prepare to create a new one. key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix} l.RLock() rankCache, ok := l.cache[key] l.RUnlock() if !ok { newRankCache := &RankCache{ owners: make(map[uuid.UUID]skiplist.Interface), cache: skiplist.New(), } l.Lock() // Last check if rank map was created by another writer just after last read. rankCache, ok = l.cache[key] if !ok { rankCache = newRankCache l.cache[key] = rankCache } l.Unlock() } // Prepare new rank data for this leaderboard entry. var rankData skiplist.Interface if sortOrder == LeaderboardSortOrderDescending { rankData = &RankDesc{ OwnerId: ownerID, Score: score, Subscore: subscore, } } else { rankData = &RankAsc{ OwnerId: ownerID, Score: score, Subscore: subscore, } } // Check for and remove any previous rank entry, then insert the new rank data and get its rank. rankCache.Lock() if oldRankData, ok := rankCache.owners[ownerID]; ok { rankCache.cache.Delete(oldRankData) } rankCache.owners[ownerID] = rankData rankCache.cache.Insert(rankData) rank := rankCache.cache.GetRank(rankData) rankCache.Unlock() return int64(rank) } func (l *LocalLeaderboardRankCache) Delete(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) bool { if l.blacklistAll { // If all rank caching is disabled. 
return false } if _, ok := l.blacklistIds[leaderboardId]; ok { // If rank caching is disabled for this particular leaderboard. return false } // Find the rank map for this leaderboard/expiry pair. key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix} l.RLock() rankCache, ok := l.cache[key] l.RUnlock() if !ok { // No rank cache for this leaderboard and expiry combination. return true } // Remove any existing rank entry. rankCache.Lock() rankData, ok := rankCache.owners[ownerID] if !ok { rankCache.Unlock() return true } delete(rankCache.owners, ownerID) rankCache.cache.Delete(rankData) rankCache.Unlock() return true } func (l *LocalLeaderboardRankCache) DeleteLeaderboard(leaderboardId string, expiryUnix int64) bool { if l.blacklistAll { // If all rank caching is disabled. return false } if _, ok := l.blacklistIds[leaderboardId]; ok { // If rank caching is disabled for this particular leaderboard. return false } // Delete the rank map for this leaderboard/expiry pair. key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix} l.Lock() delete(l.cache, key) l.Unlock() return true } func (l *LocalLeaderboardRankCache) TrimExpired(nowUnix int64) bool { if l.blacklistAll { // If all rank caching is disabled. return false } // Used for the timer. l.Lock() for k := range l.cache { if k.Expiry != 0 && k.Expiry <= nowUnix { delete(l.cache, k) } } l.Unlock() return true }
1.632813
2
pkgs/sops-pgp-hook/hook_test.go
starcraft66/sops-nix
360
181
package main import ( "bytes" "fmt" "io/ioutil" "os" "os/exec" "path" "path/filepath" "runtime" "strings" "testing" ) // ok fails the test if an err is not nil. func ok(tb testing.TB, err error) { if err != nil { _, file, line, _ := runtime.Caller(1) fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error()) tb.FailNow() } } func TestShellHook(t *testing.T) { assets := os.Getenv("TEST_ASSETS") if assets == "" { _, filename, _, _ := runtime.Caller(0) assets = path.Join(path.Dir(filename), "test-assets") } tempdir, err := ioutil.TempDir("", "testdir") ok(t, err) defer os.RemoveAll(tempdir) cmd := exec.Command("nix-shell", "shell.nix", "--run", "echo SOPS_PGP_FP=$SOPS_PGP_FP") cmd.Env = append(os.Environ(), fmt.Sprintf("GNUPGHOME=%s", tempdir)) var stdoutBuf, stderrBuf bytes.Buffer cmd.Stdout = &stdoutBuf cmd.Stderr = &stderrBuf cmd.Dir = assets err = cmd.Run() stdout := stdoutBuf.String() stderr := stderrBuf.String() fmt.Printf("$ %s\nstdout: \n%s\nstderr: \n%s\n", strings.Join(cmd.Args, " "), stdout, stderr) ok(t, err) expectedKeys := []string{ "C6DA56E69A7C756564A8AFEB4A6B05B714D13EFD", "4EC40F8E04A945339F7F7C0032C5225271038E3F", "7FB89715AADA920D65D25E63F9BA9DEBD03F57C0", "<KEY>", } for _, key := range expectedKeys { if !strings.Contains(stdout, key) { t.Fatalf("'%v' not in '%v'", key, stdout) } } // it should ignore subkeys from ./keys/key-with-subkeys.asc subkey := "94F174F588090494E73D0835A79B1680BC4D9A54" if strings.Contains(stdout, subkey) { t.Fatalf("subkey found in %s", stdout) } expectedStderr := "./non-existing-key.gpg does not exists" if !strings.Contains(stderr, expectedStderr) { t.Fatalf("'%v' not in '%v'", expectedStderr, stdout) } }
1.414063
1
examples/firmata_gpio_max7219.go
stevebargelt/gobot
0
189
// +build example // // Do not build by default. /* How to setup This examples requires you to daisy-chain 4 led matrices based on MAX7219. It will turn on one led at a time, from the first led at the first matrix to the last led of the last matrix. How to run Pass serial port to use as the first param: go run examples/firmata_gpio_max7219.go /dev/ttyACM0 */ package main import ( "os" "time" "github.com/stevebargelt/gobot" "github.com/stevebargelt/gobot/drivers/gpio" "github.com/stevebargelt/gobot/platforms/firmata" ) func main() { firmataAdaptor := firmata.NewAdaptor(os.Args[1]) max := gpio.NewMAX7219Driver(firmataAdaptor, "11", "10", "9", 4) var digit byte = 1 // digit address goes from 0x01 (MAX7219Digit0) to 0x08 (MAX7219Digit8) var bits byte = 1 var module uint count := 0 work := func() { gobot.Every(100*time.Millisecond, func() { max.ClearAll() max.One(module, digit, bits) bits = bits << 1 count++ if count > 7 { count = 0 digit++ bits = 1 if digit > 8 { digit = 1 module++ if module >= 4 { module = 0 count = 0 } } } }) } robot := gobot.NewRobot("Max7219Bot", []gobot.Connection{esp8266}, []gobot.Device{max}, work, ) robot.Start() }
2.25
2
service/autopilot/v1/assistant/model_builds/api_op_client.go
RJPearson94/twilio-sdk-go
14
197
// Package model_builds contains auto-generated files. DO NOT MODIFY package model_builds import "github.com/RJPearson94/twilio-sdk-go/client" // Client for managing model build resources // See https://www.twilio.com/docs/autopilot/api/model-build for more details type Client struct { client *client.Client assistantSid string } // ClientProperties are the properties required to manage the model builds resources type ClientProperties struct { AssistantSid string } // New creates a new instance of the model builds client func New(client *client.Client, properties ClientProperties) *Client { return &Client{ client: client, assistantSid: properties.AssistantSid, } }
0.761719
1
cmd/siva/impl/unpack_test.go
vmarkovtsev/go-siva
94
205
package impl import ( "io/ioutil" "os" "path/filepath" "runtime" . "gopkg.in/check.v1" ) type UnpackSuite struct { folder string } var _ = Suite(&UnpackSuite{}) func (s *UnpackSuite) SetUpTest(c *C) { var err error s.folder, err = ioutil.TempDir("", "siva-cmd-unpack") c.Assert(err, IsNil) } func (s *UnpackSuite) TearDownTest(c *C) { err := os.RemoveAll(s.folder) c.Assert(err, IsNil) } func (s *UnpackSuite) TestBasic(c *C) { cmd := &CmdUnpack{} cmd.Output.Path = filepath.Join(s.folder, "files") cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "perms.siva") cmd.Overwrite = true err := cmd.Execute(nil) c.Assert(err, IsNil) dir, err := ioutil.ReadDir(cmd.Output.Path) c.Assert(err, IsNil) c.Assert(dir, HasLen, 3) perms := []string{"-rwxr-xr-x", "-rw-------", "-rw-r--r--"} if runtime.GOOS == "windows" { perms = []string{"-rw-rw-rw-", "-rw-rw-rw-", "-rw-rw-rw-"} } for i, f := range dir { c.Assert(f.Name(), Equals, files[i].Name) data, err := ioutil.ReadFile(filepath.Join(s.folder, "files", f.Name())) c.Assert(err, IsNil) c.Assert(string(data), Equals, files[i].Body) c.Assert(f.Mode().String(), Equals, perms[i]) } } func (s *UnpackSuite) TestIgnorePerms(c *C) { cmd := &CmdUnpack{} cmd.Output.Path = filepath.Join(s.folder, "files") cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "perms.siva") cmd.IgnorePerms = true err := cmd.Execute(nil) c.Assert(err, IsNil) dir, err := ioutil.ReadDir(cmd.Output.Path) c.Assert(err, IsNil) c.Assert(dir, HasLen, 3) for _, f := range dir { c.Assert(f.Mode(), Equals, os.FileMode(defaultPerms)) } } func (s *UnpackSuite) TestMatch(c *C) { cmd := &CmdUnpack{} cmd.Output.Path = filepath.Join(s.folder, "files") cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "basic.siva") cmd.Match = "gopher(.*)" err := cmd.Execute(nil) c.Assert(err, IsNil) dir, err := ioutil.ReadDir(cmd.Output.Path) c.Assert(err, IsNil) c.Assert(dir, HasLen, 1) c.Assert(dir[0].Name(), Equals, "gopher.txt") } func (s *UnpackSuite) TestOverwrite(c 
*C) { cmd := &CmdUnpack{} cmd.Output.Path = filepath.Join(s.folder, "files") cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "duplicate.siva") cmd.Overwrite = true err := cmd.Execute(nil) c.Assert(err, IsNil) dir, err := ioutil.ReadDir(cmd.Output.Path) c.Assert(err, IsNil) c.Assert(dir, HasLen, 3) } func (s *UnpackSuite) TestZipSlip(c *C) { cmd := &CmdUnpack{} cmd.Output.Path = filepath.Join(s.folder, "files/inside") cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "zipslip.siva") err := cmd.Execute(nil) c.Assert(err, NotNil) _, err = os.Stat(filepath.Join(s.folder, "files")) c.Assert(err, NotNil) c.Assert(os.IsNotExist(err), Equals, true) }
1.265625
1
cmd/repositories/update_repository.go
krok-o/krokctl
0
213
package repositories import ( "fmt" "github.com/krok-o/krok/pkg/models" "github.com/spf13/cobra" "github.com/krok-o/krokctl/cmd" "github.com/krok-o/krokctl/pkg/formatter" ) var ( // UpdateRepositoryCmd creates a repository with the given values. UpdateRepositoryCmd = &cobra.Command{ Use: "repository", Short: "Update repository", Run: runUpdateRepositoryCmd, } updateRepoArgs struct { name string id int } ) func init() { cmd.UpdateCmd.AddCommand(UpdateRepositoryCmd) f := UpdateRepositoryCmd.PersistentFlags() f.StringVar(&updateRepoArgs.name, "name", "", "The name of the repository.") f.IntVar(&updateRepoArgs.id, "id", -1, "The ID of the repository to update.") if err := UpdateRepositoryCmd.MarkPersistentFlagRequired("id"); err != nil { cmd.CLILog.Fatal().Err(err).Msg("Failed to mark required flag.") } } func runUpdateRepositoryCmd(c *cobra.Command, args []string) { cmd.CLILog.Debug().Msg("Creating repository...") repo := &models.Repository{ Name: updateRepoArgs.name, ID: updateRepoArgs.id, } repo, err := cmd.KC.RepositoryClient.Update(repo) if err != nil { cmd.CLILog.Fatal().Err(err).Msg("Failed to update repository.") } fmt.Print(formatter.FormatRepository(repo, cmd.KrokArgs.Formatter)) }
1.703125
2
pkg/snowflake/system_get_snowflake_platform_info_test.go
gary-beautypie/terraform-provider-snowflake
0
221
package snowflake import ( "testing" "github.com/stretchr/testify/require" ) func TestSystemGetSnowflakePlatformInfoQuery(t *testing.T) { r := require.New(t) sb := SystemGetSnowflakePlatformInfoQuery() r.Equal(sb, `SELECT SYSTEM$GET_SNOWFLAKE_PLATFORM_INFO() AS "info"`) } func TestSystemGetSnowflakePlatformInfoGetStructuredConfigAws(t *testing.T) { r := require.New(t) raw := &RawSnowflakePlatformInfo{ Info: `{"snowflake-vpc-id": ["vpc-1", "vpc-2"]}`, } c, e := raw.GetStructuredConfig() r.Nil(e) r.Equal([]string{"vpc-1", "vpc-2"}, c.AwsVpcIds) r.Equal([]string(nil), c.AzureVnetSubnetIds) } func TestSystemGetSnowflakePlatformInfoGetStructuredConfigAzure(t *testing.T) { r := require.New(t) raw := &RawSnowflakePlatformInfo{ Info: `{"snowflake-vnet-subnet-id": ["/subscription/1/1", "/subscription/1/2"]}`, } c, e := raw.GetStructuredConfig() r.Nil(e) r.Equal([]string{"/subscription/1/1", "/subscription/1/2"}, c.AzureVnetSubnetIds) r.Equal([]string(nil), c.AwsVpcIds) }
1.257813
1
opamp/observiq/identity.go
observIQ/observiq-otel-collector
1
229
// Copyright observIQ, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package observiq import ( "runtime" ios "github.com/observiq/observiq-otel-collector/internal/os" "github.com/observiq/observiq-otel-collector/internal/version" "github.com/observiq/observiq-otel-collector/opamp" "github.com/open-telemetry/opamp-go/protobufs" "go.uber.org/zap" ) // identity contains identifying information about the Collector type identity struct { agentID string agentName *string serviceName string version string labels *string oSArch string oSDetails string oSFamily string hostname string mac string } // newIdentity constructs a new identity for this collector func newIdentity(logger *zap.Logger, config opamp.Config) *identity { // Grab various fields from OS hostname, err := ios.Hostname() if err != nil { logger.Warn("Failed to retrieve hostname for collector. Creating partial identity", zap.Error(err)) } name, err := ios.Name() if err != nil { logger.Warn("Failed to retrieve host details on collector. 
Creating partial identity", zap.Error(err)) } return &identity{ agentID: config.AgentID, agentName: config.AgentName, serviceName: "com.observiq.collector", // Hardcoded defines this type of agent to the server version: version.Version(), labels: config.Labels, oSArch: runtime.GOARCH, oSDetails: name, oSFamily: runtime.GOOS, hostname: hostname, mac: ios.MACAddress(), } } // Copy creates a deep copy of this identity func (i identity) Copy() *identity { identCpy := &identity{ agentID: i.agentID, serviceName: i.serviceName, version: i.version, oSArch: i.oSArch, oSDetails: i.oSDetails, oSFamily: i.oSFamily, hostname: i.hostname, mac: i.mac, } if i.agentName != nil { identCpy.agentName = new(string) *identCpy.agentName = *i.agentName } if i.labels != nil { identCpy.labels = new(string) *identCpy.labels = *i.labels } return identCpy } func (i *identity) ToAgentDescription() *protobufs.AgentDescription { identifyingAttributes := []*protobufs.KeyValue{ opamp.StringKeyValue("service.instance.id", i.agentID), opamp.StringKeyValue("service.name", i.serviceName), opamp.StringKeyValue("service.version", i.version), } if i.agentName != nil { identifyingAttributes = append(identifyingAttributes, opamp.StringKeyValue("service.instance.name", *i.agentName)) } else { identifyingAttributes = append(identifyingAttributes, opamp.StringKeyValue("service.instance.name", i.hostname)) } nonIdentifyingAttributes := []*protobufs.KeyValue{ opamp.StringKeyValue("os.arch", i.oSArch), opamp.StringKeyValue("os.details", i.oSDetails), opamp.StringKeyValue("os.family", i.oSFamily), opamp.StringKeyValue("host.name", i.hostname), opamp.StringKeyValue("host.mac_address", i.mac), } if i.labels != nil { nonIdentifyingAttributes = append(nonIdentifyingAttributes, opamp.StringKeyValue("service.labels", *i.labels)) } agentDesc := &protobufs.AgentDescription{ IdentifyingAttributes: identifyingAttributes, NonIdentifyingAttributes: nonIdentifyingAttributes, } return agentDesc }
1.3125
1
timerange.go
sent-hil/timerange
0
237
package timerange import ( "errors" "fmt" "strings" "time" ) var ( ErrValueIsEmpty = errors.New("ERROR: value is empty.") ErrInvalidRange = errors.New("ERROR: values in time range are invalid.") ErrDateOrdering = errors.New("ERROR: start date is after end date") ) var ( DefaultTimeLayout = "2006/01/02" DefaultRangeSeperator = ".." ) type Timerange struct { TimeValues []time.Time TimeLayout string RangeSeparator string } func Parse(value string) ([]time.Time, error) { t := NewTimerange() if err := t.Set(value); err != nil { return nil, err } return t.TimeValues, nil } func NewTimerange() *Timerange { return &Timerange{ TimeValues: []time.Time{}, TimeLayout: DefaultTimeLayout, RangeSeparator: DefaultRangeSeperator, } } func (t *Timerange) String() string { return fmt.Sprint(t.TimeValues) } func (t *Timerange) Set(value string) error { if value == "" { return ErrValueIsEmpty } var ( timeValues []time.Time err error ) if t.hasRangeSeperator(value) { timeValues, err = t.parseRangeIntoTimeValues(value) } else { var tt time.Time tt, err = t.parseTimeFromValue(value) timeValues = []time.Time{tt} } if err != nil { return err } t.TimeValues = append(t.TimeValues, timeValues...) 
return nil } func (t *Timerange) hasRangeSeperator(value string) bool { return strings.Contains(value, t.RangeSeparator) } func (t *Timerange) parseTimeFromValue(value string) (time.Time, error) { return time.Parse(t.TimeLayout, value) } func (t *Timerange) parseRangeIntoTimeValues(rangeValue string) (timeValues []time.Time, err error) { split := strings.Split(rangeValue, t.RangeSeparator) if len(split) != 2 { return nil, ErrInvalidRange } startValue, endValue := split[0], split[1] startDate, err := t.parseTimeFromValue(startValue) if err != nil { return nil, err } endDate, err := t.parseTimeFromValue(endValue) if err != nil { return nil, err } duration := startDate.Sub(endDate).Hours() if duration >= 0 { return nil, ErrDateOrdering } durationInDays := (duration / 24) * -1 for i := 0; i <= int(durationInDays); i++ { timeValues = append(timeValues, startDate.Add(time.Duration(i)*time.Duration(24)*time.Hour)) } return timeValues, nil }
1.9375
2
internal/pkg/options/flags.go
cucxabong/Reloader
2
245
package options var ( // ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload" // SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload" // ReloaderAutoAnnotation is an annotation to detect changes in secrets ReloaderAutoAnnotation = "reloader.stakater.com/auto" // LogFormat is the log format to use (json, or empty string for default) LogFormat = "" )
0.412109
0
utils/x/crypto/openpgp/packet/public_key_v3.go
c2matrix/mqant
2
253
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package packet import ( "crypto" "crypto/md5" "crypto/rsa" "encoding/binary" "fmt" "hash" "io" "math/big" "strconv" "time" "github.com/liangdas/mqant/utils/x/crypto/openpgp/errors" ) // PublicKeyV3 represents older, version 3 public keys. These keys are less secure and // should not be used for signing or encrypting. They are supported here only for // parsing version 3 key material and validating signatures. // See RFC 4880, section 5.5.2. type PublicKeyV3 struct { CreationTime time.Time DaysToExpire uint16 PubKeyAlgo PublicKeyAlgorithm PublicKey *rsa.PublicKey Fingerprint [16]byte KeyId uint64 IsSubkey bool n, e parsedMPI } // newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. // Included here for testing purposes only. RFC 4880, section 5.5.2: // "an implementation MUST NOT generate a V3 key, but MAY accept it." 
func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { pk := &PublicKeyV3{ CreationTime: creationTime, PublicKey: pub, n: fromBig(pub.N), e: fromBig(big.NewInt(int64(pub.E))), } pk.setFingerPrintAndKeyId() return pk } func (pk *PublicKeyV3) parse(r io.Reader) (err error) { // RFC 4880, section 5.5.2 var buf [8]byte if _, err = readFull(r, buf[:]); err != nil { return } if buf[0] < 2 || buf[0] > 3 { return errors.UnsupportedError("public key version") } pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: err = pk.parseRSA(r) default: err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) } if err != nil { return } pk.setFingerPrintAndKeyId() return } func (pk *PublicKeyV3) setFingerPrintAndKeyId() { // RFC 4880, section 12.2 fingerPrint := md5.New() fingerPrint.Write(pk.n.bytes) fingerPrint.Write(pk.e.bytes) fingerPrint.Sum(pk.Fingerprint[:0]) pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) } // parseRSA parses RSA public key material from the given Reader. See RFC 4880, // section 5.5.2. func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { return } if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { return } // RFC 4880 Section 12.2 requires the low 8 bytes of the // modulus to form the key id. 
if len(pk.n.bytes) < 8 { return errors.StructuralError("v3 public key modulus is too short") } if len(pk.e.bytes) > 3 { err = errors.UnsupportedError("large public exponent") return } rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} for i := 0; i < len(pk.e.bytes); i++ { rsa.E <<= 8 rsa.E |= int(pk.e.bytes[i]) } pk.PublicKey = rsa return } // SerializeSignaturePrefix writes the prefix for this public key to the given Writer. // The prefix is used when calculating a signature over this public key. See // RFC 4880, section 5.2.4. func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { var pLength uint16 switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: pLength += 2 + uint16(len(pk.n.bytes)) pLength += 2 + uint16(len(pk.e.bytes)) default: panic("unknown public key algorithm") } pLength += 6 w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) return } func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { length := 8 // 8 byte header switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: length += 2 + len(pk.n.bytes) length += 2 + len(pk.e.bytes) default: panic("unknown public key algorithm") } packetType := packetTypePublicKey if pk.IsSubkey { packetType = packetTypePublicSubkey } if err = serializeHeader(w, packetType, length); err != nil { return } return pk.serializeWithoutHeaders(w) } // serializeWithoutHeaders marshals the PublicKey to w in the form of an // OpenPGP public key packet, not including the packet header. 
func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { var buf [8]byte // Version 3 buf[0] = 3 // Creation time t := uint32(pk.CreationTime.Unix()) buf[1] = byte(t >> 24) buf[2] = byte(t >> 16) buf[3] = byte(t >> 8) buf[4] = byte(t) // Days to expire buf[5] = byte(pk.DaysToExpire >> 8) buf[6] = byte(pk.DaysToExpire) // Public key algorithm buf[7] = byte(pk.PubKeyAlgo) if _, err = w.Write(buf[:]); err != nil { return } switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: return writeMPIs(w, pk.n, pk.e) } return errors.InvalidArgumentError("bad public-key algorithm") } // CanSign returns true iff this public key can generate signatures func (pk *PublicKeyV3) CanSign() bool { return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly } // VerifySignatureV3 returns nil iff sig is a valid signature, made by this // public key, of the data hashed into signed. signed is mutated by this call. func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { if !pk.CanSign() { return errors.InvalidArgumentError("public key cannot generate signatures") } suffix := make([]byte, 5) suffix[0] = byte(sig.SigType) binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) signed.Write(suffix) hashBytes := signed.Sum(nil) if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { return errors.SignatureError("hash tag doesn't match") } if pk.PubKeyAlgo != sig.PubKeyAlgo { return errors.InvalidArgumentError("public key and signature use different algorithms") } switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { return errors.SignatureError("RSA verification failure") } return default: // V3 public keys only support RSA. 
panic("shouldn't happen") } } // VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this // public key, that id is the identity of pub. func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { h, err := userIdSignatureV3Hash(id, pk, sig.Hash) if err != nil { return err } return pk.VerifySignatureV3(h, sig) } // VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this // public key, of signed. func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { h, err := keySignatureHash(pk, signed, sig.Hash) if err != nil { return err } return pk.VerifySignatureV3(h, sig) } // userIdSignatureV3Hash returns a Hash of the message that needs to be signed // to assert that pk is a valid key for id. func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { if !hfn.Available() { return nil, errors.UnsupportedError("hash function") } h = hfn.New() // RFC 4880, section 5.2.4 pk.SerializeSignaturePrefix(h) pk.serializeWithoutHeaders(h) h.Write([]byte(id)) return } // KeyIdString returns the public key's fingerprint in capital hex // (e.g. "6C7EE1B8621CC013"). func (pk *PublicKeyV3) KeyIdString() string { return fmt.Sprintf("%X", pk.KeyId) } // KeyIdShortString returns the short form of public key's fingerprint // in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). func (pk *PublicKeyV3) KeyIdShortString() string { return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) } // BitLength returns the bit length for the given public key. func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: bitLength = pk.n.bitLength default: err = errors.InvalidArgumentError("bad public-key algorithm") } return }
1.789063
2
ecr/ref_test.go
choo-stripe/amazon-ecr-containerd-resolver
0
261
/* * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You * may not use this file except in compliance with the License. A copy of * the License is located at * * http://aws.amazon.com/apache2.0/ * * or in the "license" file accompanying this file. This file is * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF * ANY KIND, either express or implied. See the License for the specific * language governing permissions and limitations under the License. */ package ecr import ( "errors" "fmt" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/service/ecr" "github.com/stretchr/testify/assert" ) func TestRefRepresentations(t *testing.T) { cases := []struct { ref string arn string spec ECRSpec err error }{ { ref: "invalid", err: invalidARN, }, { ref: "ecr.aws/arn:nope", err: errors.New("arn: not enough sections"), }, { ref: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar", err: invalidARN, }, { ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar", arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar", spec: ECRSpec{ arn: arn.ARN{ Partition: "aws", Region: "us-west-2", AccountID: "123456789012", Service: "ecr", Resource: "repository/foo/bar", }, Repository: "foo/bar", }, }, { ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar:latest", arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar", spec: ECRSpec{ arn: arn.ARN{ Partition: "aws", Region: "us-west-2", AccountID: "123456789012", Service: "ecr", Resource: "repository/foo/bar", }, Repository: "foo/bar", Object: "latest", }, }, { ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar:latest@sha256:digest", arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar", spec: ECRSpec{ arn: arn.ARN{ Partition: "aws", Region: "us-west-2", AccountID: "123456789012", Service: "ecr", Resource: 
"repository/foo/bar", }, Repository: "foo/bar", Object: "latest@sha256:digest", }, }, { ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar@sha256:digest", arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar", spec: ECRSpec{ arn: arn.ARN{ Partition: "aws", Region: "us-west-2", AccountID: "123456789012", Service: "ecr", Resource: "repository/foo/bar", }, Repository: "foo/bar", Object: "@sha256:digest", }, }, } for _, tc := range cases { t.Run(fmt.Sprintf("ParseRef-%s", tc.ref), func(t *testing.T) { spec, err := ParseRef(tc.ref) assert.Equal(t, tc.spec, spec) if tc.err == nil { assert.Nil(t, err) } else { assert.Equal(t, tc.err, err) } }) if tc.err != nil { continue } t.Run(fmt.Sprintf("Canonical-%s", tc.ref), func(t *testing.T) { assert.Equal(t, tc.ref, tc.spec.Canonical()) }) t.Run(fmt.Sprintf("ARN-%s", tc.ref), func(t *testing.T) { assert.Equal(t, tc.arn, tc.spec.ARN()) }) } } func TestImageID(t *testing.T) { cases := []struct { name string spec ECRSpec imageID *ecr.ImageIdentifier }{ { name: "blank", spec: ECRSpec{ Repository: "foo/bar", }, imageID: &ecr.ImageIdentifier{}, }, { name: "tag", spec: ECRSpec{ Repository: "foo/bar", Object: "latest", }, imageID: &ecr.ImageIdentifier{ ImageTag: aws.String("latest"), }, }, { name: "digest", spec: ECRSpec{ Repository: "foo/bar", Object: "@sha256:digest", }, imageID: &ecr.ImageIdentifier{ ImageDigest: aws.String("sha256:digest"), }, }, { name: "tag+digest", spec: ECRSpec{ Repository: "foo/bar", Object: "latest@sha256:digest", }, imageID: &ecr.ImageIdentifier{ ImageTag: aws.String("latest"), ImageDigest: aws.String("sha256:digest"), }, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { assert.Equal(t, tc.imageID, tc.spec.ImageID()) }) } }
1.132813
1
src/internal/poll/fd_unix.go
kavindyasinthasilva/go
3
269
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build aix darwin dragonfly freebsd js,wasm linux netbsd openbsd solaris package poll import ( "io" "runtime" "sync/atomic" "syscall" ) // FD is a file descriptor. The net and os packages use this type as a // field of a larger type representing a network connection or OS file. type FD struct { // Lock sysfd and serialize access to Read and Write methods. fdmu fdMutex // System file descriptor. Immutable until Close. Sysfd int // I/O poller. pd pollDesc // Writev cache. iovecs *[]syscall.Iovec // Semaphore signaled when file is closed. csema uint32 // Non-zero if this file has been set to blocking mode. isBlocking uint32 // Whether this is a streaming descriptor, as opposed to a // packet-based descriptor like a UDP socket. Immutable. IsStream bool // Whether a zero byte read indicates EOF. This is false for a // message based socket connection. ZeroReadIsEOF bool // Whether this is a file rather than a network socket. isFile bool } // Init initializes the FD. The Sysfd field should already be set. // This can be called multiple times on a single FD. // The net argument is a network name from the net package (e.g., "tcp"), // or "file". // Set pollable to true if fd should be managed by runtime netpoll. func (fd *FD) Init(net string, pollable bool) error { // We don't actually care about the various network types. if net == "file" { fd.isFile = true } if !pollable { fd.isBlocking = 1 return nil } err := fd.pd.init(fd) if err != nil { // If we could not initialize the runtime poller, // assume we are using blocking mode. fd.isBlocking = 1 } return err } // Destroy closes the file descriptor. This is called when there are // no remaining references. func (fd *FD) destroy() error { // Poller may want to unregister fd in readiness notification mechanism, // so this must be executed before CloseFunc. 
fd.pd.close() err := CloseFunc(fd.Sysfd) fd.Sysfd = -1 runtime_Semrelease(&fd.csema) return err } // Close closes the FD. The underlying file descriptor is closed by the // destroy method when there are no remaining references. func (fd *FD) Close() error { if !fd.fdmu.increfAndClose() { return errClosing(fd.isFile) } // Unblock any I/O. Once it all unblocks and returns, // so that it cannot be referring to fd.sysfd anymore, // the final decref will close fd.sysfd. This should happen // fairly quickly, since all the I/O is non-blocking, and any // attempts to block in the pollDesc will return errClosing(fd.isFile). fd.pd.evict() // The call to decref will call destroy if there are no other // references. err := fd.decref() // Wait until the descriptor is closed. If this was the only // reference, it is already closed. Only wait if the file has // not been set to blocking mode, as otherwise any current I/O // may be blocking, and that would block the Close. // No need for an atomic read of isBlocking, increfAndClose means // we have exclusive access to fd. if fd.isBlocking == 0 { runtime_Semacquire(&fd.csema) } return err } // Shutdown wraps the shutdown network call. func (fd *FD) Shutdown(how int) error { if err := fd.incref(); err != nil { return err } defer fd.decref() return syscall.Shutdown(fd.Sysfd, how) } // SetBlocking puts the file into blocking mode. func (fd *FD) SetBlocking() error { if err := fd.incref(); err != nil { return err } defer fd.decref() // Atomic store so that concurrent calls to SetBlocking // do not cause a race condition. isBlocking only ever goes // from 0 to 1 so there is no real race here. atomic.StoreUint32(&fd.isBlocking, 1) return syscall.SetNonblock(fd.Sysfd, false) } // Darwin and FreeBSD can't read or write 2GB+ files at a time, // even on 64-bit systems. // The same is true of socket implementations on many systems. // See golang.org/issue/7812 and golang.org/issue/16266. 
// Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned. const maxRW = 1 << 30 // Read implements io.Reader. func (fd *FD) Read(p []byte) (int, error) { if err := fd.readLock(); err != nil { return 0, err } defer fd.readUnlock() if len(p) == 0 { // If the caller wanted a zero byte read, return immediately // without trying (but after acquiring the readLock). // Otherwise syscall.Read returns 0, nil which looks like // io.EOF. // TODO(bradfitz): make it wait for readability? (Issue 15735) return 0, nil } if err := fd.pd.prepareRead(fd.isFile); err != nil { return 0, err } if fd.IsStream && len(p) > maxRW { p = p[:maxRW] } for { n, err := syscall.Read(fd.Sysfd, p) if err != nil { n = 0 if err == syscall.EAGAIN && fd.pd.pollable() { if err = fd.pd.waitRead(fd.isFile); err == nil { continue } } // On MacOS we can see EINTR here if the user // pressed ^Z. See issue #22838. if runtime.GOOS == "darwin" && err == syscall.EINTR { continue } } err = fd.eofError(n, err) return n, err } } // Pread wraps the pread system call. func (fd *FD) Pread(p []byte, off int64) (int, error) { // Call incref, not readLock, because since pread specifies the // offset it is independent from other reads. // Similarly, using the poller doesn't make sense for pread. if err := fd.incref(); err != nil { return 0, err } if fd.IsStream && len(p) > maxRW { p = p[:maxRW] } n, err := syscall.Pread(fd.Sysfd, p, off) if err != nil { n = 0 } fd.decref() err = fd.eofError(n, err) return n, err } // ReadFrom wraps the recvfrom network call. 
func (fd *FD) ReadFrom(p []byte) (int, syscall.Sockaddr, error) { if err := fd.readLock(); err != nil { return 0, nil, err } defer fd.readUnlock() if err := fd.pd.prepareRead(fd.isFile); err != nil { return 0, nil, err } for { n, sa, err := syscall.Recvfrom(fd.Sysfd, p, 0) if err != nil { n = 0 if err == syscall.EAGAIN && fd.pd.pollable() { if err = fd.pd.waitRead(fd.isFile); err == nil { continue } } } err = fd.eofError(n, err) return n, sa, err } } // ReadMsg wraps the recvmsg network call. func (fd *FD) ReadMsg(p []byte, oob []byte) (int, int, int, syscall.Sockaddr, error) { if err := fd.readLock(); err != nil { return 0, 0, 0, nil, err } defer fd.readUnlock() if err := fd.pd.prepareRead(fd.isFile); err != nil { return 0, 0, 0, nil, err } for { n, oobn, flags, sa, err := syscall.Recvmsg(fd.Sysfd, p, oob, 0) if err != nil { // TODO(dfc) should n and oobn be set to 0 if err == syscall.EAGAIN && fd.pd.pollable() { if err = fd.pd.waitRead(fd.isFile); err == nil { continue } } } err = fd.eofError(n, err) return n, oobn, flags, sa, err } } // Write implements io.Writer. func (fd *FD) Write(p []byte) (int, error) { if err := fd.writeLock(); err != nil { return 0, err } defer fd.writeUnlock() if err := fd.pd.prepareWrite(fd.isFile); err != nil { return 0, err } var nn int for { max := len(p) if fd.IsStream && max-nn > maxRW { max = nn + maxRW } n, err := syscall.Write(fd.Sysfd, p[nn:max]) if n > 0 { nn += n } if nn == len(p) { return nn, err } if err == syscall.EAGAIN && fd.pd.pollable() { if err = fd.pd.waitWrite(fd.isFile); err == nil { continue } } if err != nil { return nn, err } if n == 0 { return nn, io.ErrUnexpectedEOF } } } // Pwrite wraps the pwrite system call. func (fd *FD) Pwrite(p []byte, off int64) (int, error) { // Call incref, not writeLock, because since pwrite specifies the // offset it is independent from other writes. // Similarly, using the poller doesn't make sense for pwrite. 
if err := fd.incref(); err != nil { return 0, err } defer fd.decref() var nn int for { max := len(p) if fd.IsStream && max-nn > maxRW { max = nn + maxRW } n, err := syscall.Pwrite(fd.Sysfd, p[nn:max], off+int64(nn)) if n > 0 { nn += n } if nn == len(p) { return nn, err } if err != nil { return nn, err } if n == 0 { return nn, io.ErrUnexpectedEOF } } } // WriteTo wraps the sendto network call. func (fd *FD) WriteTo(p []byte, sa syscall.Sockaddr) (int, error) { if err := fd.writeLock(); err != nil { return 0, err } defer fd.writeUnlock() if err := fd.pd.prepareWrite(fd.isFile); err != nil { return 0, err } for { err := syscall.Sendto(fd.Sysfd, p, 0, sa) if err == syscall.EAGAIN && fd.pd.pollable() { if err = fd.pd.waitWrite(fd.isFile); err == nil { continue } } if err != nil { return 0, err } return len(p), nil } } // WriteMsg wraps the sendmsg network call. func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) { if err := fd.writeLock(); err != nil { return 0, 0, err } defer fd.writeUnlock() if err := fd.pd.prepareWrite(fd.isFile); err != nil { return 0, 0, err } for { n, err := syscall.SendmsgN(fd.Sysfd, p, oob, sa, 0) if err == syscall.EAGAIN && fd.pd.pollable() { if err = fd.pd.waitWrite(fd.isFile); err == nil { continue } } if err != nil { return n, 0, err } return n, len(oob), err } } // Accept wraps the accept network call. func (fd *FD) Accept() (int, syscall.Sockaddr, string, error) { if err := fd.readLock(); err != nil { return -1, nil, "", err } defer fd.readUnlock() if err := fd.pd.prepareRead(fd.isFile); err != nil { return -1, nil, "", err } for { s, rsa, errcall, err := accept(fd.Sysfd) if err == nil { return s, rsa, "", err } switch err { case syscall.EAGAIN: if fd.pd.pollable() { if err = fd.pd.waitRead(fd.isFile); err == nil { continue } } case syscall.ECONNABORTED: // This means that a socket on the listen // queue was closed before we Accept()ed it; // it's a silly error, so try again. 
continue } return -1, nil, errcall, err } } // Seek wraps syscall.Seek. func (fd *FD) Seek(offset int64, whence int) (int64, error) { if err := fd.incref(); err != nil { return 0, err } defer fd.decref() return syscall.Seek(fd.Sysfd, offset, whence) } // ReadDirent wraps syscall.ReadDirent. // We treat this like an ordinary system call rather than a call // that tries to fill the buffer. func (fd *FD) ReadDirent(buf []byte) (int, error) { if err := fd.incref(); err != nil { return 0, err } defer fd.decref() for { n, err := syscall.ReadDirent(fd.Sysfd, buf) if err != nil { n = 0 if err == syscall.EAGAIN && fd.pd.pollable() { if err = fd.pd.waitRead(fd.isFile); err == nil { continue } } } // Do not call eofError; caller does not expect to see io.EOF. return n, err } } // Fchdir wraps syscall.Fchdir. func (fd *FD) Fchdir() error { if err := fd.incref(); err != nil { return err } defer fd.decref() return syscall.Fchdir(fd.Sysfd) } // Fstat wraps syscall.Fstat func (fd *FD) Fstat(s *syscall.Stat_t) error { if err := fd.incref(); err != nil { return err } defer fd.decref() return syscall.Fstat(fd.Sysfd, s) } // tryDupCloexec indicates whether F_DUPFD_CLOEXEC should be used. // If the kernel doesn't support it, this is set to 0. var tryDupCloexec = int32(1) // DupCloseOnExec dups fd and marks it close-on-exec. func DupCloseOnExec(fd int) (int, string, error) { if atomic.LoadInt32(&tryDupCloexec) == 1 { r0, e1 := fcntl(fd, syscall.F_DUPFD_CLOEXEC, 0) if e1 == nil { return r0, "", nil } switch e1.(syscall.Errno) { case syscall.EINVAL, syscall.ENOSYS: // Old kernel, or js/wasm (which returns // ENOSYS). Fall back to the portable way from // now on. atomic.StoreInt32(&tryDupCloexec, 0) default: return -1, "fcntl", e1 } } return dupCloseOnExecOld(fd) } // dupCloseOnExecUnixOld is the traditional way to dup an fd and // set its O_CLOEXEC bit, using two system calls. 
func dupCloseOnExecOld(fd int) (int, string, error) { syscall.ForkLock.RLock() defer syscall.ForkLock.RUnlock() newfd, err := syscall.Dup(fd) if err != nil { return -1, "dup", err } syscall.CloseOnExec(newfd) return newfd, "", nil } // Dup duplicates the file descriptor. func (fd *FD) Dup() (int, string, error) { if err := fd.incref(); err != nil { return -1, "", err } defer fd.decref() return DupCloseOnExec(fd.Sysfd) } // On Unix variants only, expose the IO event for the net code. // WaitWrite waits until data can be read from fd. func (fd *FD) WaitWrite() error { return fd.pd.waitWrite(fd.isFile) } // WriteOnce is for testing only. It makes a single write call. func (fd *FD) WriteOnce(p []byte) (int, error) { if err := fd.writeLock(); err != nil { return 0, err } defer fd.writeUnlock() return syscall.Write(fd.Sysfd, p) } // RawControl invokes the user-defined function f for a non-IO // operation. func (fd *FD) RawControl(f func(uintptr)) error { if err := fd.incref(); err != nil { return err } defer fd.decref() f(uintptr(fd.Sysfd)) return nil } // RawRead invokes the user-defined function f for a read operation. func (fd *FD) RawRead(f func(uintptr) bool) error { if err := fd.readLock(); err != nil { return err } defer fd.readUnlock() if err := fd.pd.prepareRead(fd.isFile); err != nil { return err } for { if f(uintptr(fd.Sysfd)) { return nil } if err := fd.pd.waitRead(fd.isFile); err != nil { return err } } } // RawWrite invokes the user-defined function f for a write operation. func (fd *FD) RawWrite(f func(uintptr) bool) error { if err := fd.writeLock(); err != nil { return err } defer fd.writeUnlock() if err := fd.pd.prepareWrite(fd.isFile); err != nil { return err } for { if f(uintptr(fd.Sysfd)) { return nil } if err := fd.pd.waitWrite(fd.isFile); err != nil { return err } } }
1.765625
2
appgo/pkg/mus/caller.go
goecology/ecology
4
277
package mus import ( "time" "github.com/gin-gonic/gin" "github.com/go-resty/resty/v2" "github.com/i2eco/muses/pkg/cache/mixcache" mmysql "github.com/i2eco/muses/pkg/database/mysql" "github.com/i2eco/muses/pkg/logger" "github.com/i2eco/muses/pkg/open/github" "github.com/i2eco/muses/pkg/oss" musgin "github.com/i2eco/muses/pkg/server/gin" "github.com/i2eco/muses/pkg/session/ginsession" "github.com/jinzhu/gorm" ) var ( Cfg musgin.Cfg Logger *logger.Client Gin *gin.Engine Db *gorm.DB Session gin.HandlerFunc Oss *oss.Client Mixcache *mixcache.Client GithubClient *github.Client JsonRestyClient *resty.Client FormRestyClient *resty.Client ) // Init 初始化muses相关容器 func Init() error { Cfg = musgin.Config() Db = mmysql.Caller("ecology") Logger = logger.Caller("system") Gin = musgin.Caller() Oss = oss.Caller("ecology") Mixcache = mixcache.Caller("ecology") Session = ginsession.Caller() FormRestyClient = resty.New().SetDebug(true).SetTimeout(3*time.Second).SetHeader("Content-Type", "multipart/form-data") JsonRestyClient = resty.New().SetDebug(true).SetTimeout(10*time.Second).SetHeader("Content-Type", "application/json;charset=utf-8") GithubClient = github.Caller() return nil }
1.195313
1
libs/cosmos-sdk/x/gov/types/msgs_test.go
tokenchain/exchain
162
285
package types import ( "strings" "testing" "github.com/stretchr/testify/require" sdk "github.com/okex/exchain/libs/cosmos-sdk/types" ) var ( coinsPos = sdk.NewCoins(sdk.NewInt64Coin(sdk.DefaultBondDenom, 1000)) coinsZero = sdk.NewCoins() coinsMulti = sdk.NewCoins(sdk.NewInt64Coin(sdk.DefaultBondDenom, 1000), sdk.NewInt64Coin("foo", 10000)) addrs = []sdk.AccAddress{ sdk.AccAddress("test1"), sdk.AccAddress("test2"), } ) func init() { coinsMulti.Sort() } // test ValidateBasic for MsgCreateValidator func TestMsgSubmitProposal(t *testing.T) { tests := []struct { title, description string proposalType string proposerAddr sdk.AccAddress initialDeposit sdk.Coins expectPass bool }{ {"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsPos, true}, {"", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsPos, false}, {"Test Proposal", "", ProposalTypeText, addrs[0], coinsPos, false}, {"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, sdk.AccAddress{}, coinsPos, false}, {"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsZero, true}, {"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsMulti, true}, {strings.Repeat("#", MaxTitleLength*2), "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsMulti, false}, {"Test Proposal", strings.Repeat("#", MaxDescriptionLength*2), ProposalTypeText, addrs[0], coinsMulti, false}, } for i, tc := range tests { msg := NewMsgSubmitProposal( ContentFromProposalType(tc.title, tc.description, tc.proposalType), tc.initialDeposit, tc.proposerAddr, ) if tc.expectPass { require.NoError(t, msg.ValidateBasic(), "test: %v", i) } else { require.Error(t, msg.ValidateBasic(), "test: %v", i) } } } func TestMsgDepositGetSignBytes(t *testing.T) { addr := sdk.AccAddress("addr1") msg := NewMsgDeposit(addr, 0, coinsPos) res := msg.GetSignBytes() expected := 
`{"type":"cosmos-sdk/MsgDeposit","value":{"amount":[{"amount":"1000.000000000000000000","denom":"okt"}],"depositor":"cosmos1v9jxgu33kfsgr5","proposal_id":"0"}}` require.Equal(t, expected, string(res)) } // test ValidateBasic for MsgDeposit func TestMsgDeposit(t *testing.T) { tests := []struct { proposalID uint64 depositorAddr sdk.AccAddress depositAmount sdk.Coins expectPass bool }{ {0, addrs[0], coinsPos, true}, {1, sdk.AccAddress{}, coinsPos, false}, {1, addrs[0], coinsZero, true}, {1, addrs[0], coinsMulti, true}, } for i, tc := range tests { msg := NewMsgDeposit(tc.depositorAddr, tc.proposalID, tc.depositAmount) if tc.expectPass { require.NoError(t, msg.ValidateBasic(), "test: %v", i) } else { require.Error(t, msg.ValidateBasic(), "test: %v", i) } } } // test ValidateBasic for MsgDeposit func TestMsgVote(t *testing.T) { tests := []struct { proposalID uint64 voterAddr sdk.AccAddress option VoteOption expectPass bool }{ {0, addrs[0], OptionYes, true}, {0, sdk.AccAddress{}, OptionYes, false}, {0, addrs[0], OptionNo, true}, {0, addrs[0], OptionNoWithVeto, true}, {0, addrs[0], OptionAbstain, true}, {0, addrs[0], VoteOption(0x13), false}, } for i, tc := range tests { msg := NewMsgVote(tc.voterAddr, tc.proposalID, tc.option) if tc.expectPass { require.Nil(t, msg.ValidateBasic(), "test: %v", i) } else { require.NotNil(t, msg.ValidateBasic(), "test: %v", i) } } }
1.507813
2
agent/input/telegram/conn.go
2733284198/go-micro
1
293
package telegram import ( "errors" "strings" "sync" "github.com/forestgiant/sliceutil" "github.com/micro/go-micro/v2/agent/input" log "github.com/micro/go-micro/v2/logger" tgbotapi "gopkg.in/telegram-bot-api.v4" ) type telegramConn struct { input *telegramInput recv <-chan tgbotapi.Update exit chan bool syncCond *sync.Cond mutex sync.Mutex } func newConn(input *telegramInput) (*telegramConn, error) { conn := &telegramConn{ input: input, } conn.syncCond = sync.NewCond(&conn.mutex) go conn.run() return conn, nil } func (tc *telegramConn) run() { u := tgbotapi.NewUpdate(0) u.Timeout = 60 updates, err := tc.input.api.GetUpdatesChan(u) if err != nil { return } tc.recv = updates tc.syncCond.Signal() select { case <-tc.exit: return } } func (tc *telegramConn) Close() error { return nil } func (tc *telegramConn) Recv(event *input.Event) error { if event == nil { return errors.New("event cannot be nil") } for { if tc.recv == nil { tc.mutex.Lock() tc.syncCond.Wait() } update := <-tc.recv if update.Message == nil || (len(tc.input.whitelist) > 0 && !sliceutil.Contains(tc.input.whitelist, update.Message.From.UserName)) { continue } if event.Meta == nil { event.Meta = make(map[string]interface{}) } event.Type = input.TextEvent event.From = update.Message.From.UserName event.To = tc.input.api.Self.UserName event.Data = []byte(update.Message.Text) event.Meta["chatId"] = update.Message.Chat.ID event.Meta["chatType"] = update.Message.Chat.Type event.Meta["messageId"] = update.Message.MessageID return nil } } func (tc *telegramConn) Send(event *input.Event) error { messageText := strings.TrimSpace(string(event.Data)) chatId := event.Meta["chatId"].(int64) chatType := ChatType(event.Meta["chatType"].(string)) msgConfig := tgbotapi.NewMessage(chatId, messageText) msgConfig.ParseMode = tgbotapi.ModeHTML if sliceutil.Contains([]ChatType{Group, Supergroup}, chatType) { msgConfig.ReplyToMessageID = event.Meta["messageId"].(int) } _, err := tc.input.api.Send(msgConfig) if err != nil { // 
probably it could be because of nested HTML tags -- telegram doesn't allow nested tags log.Error("[telegram][Send] error:", err) msgConfig.Text = "This bot couldn't send the response (Internal error)" tc.input.api.Send(msgConfig) } return nil }
1.367188
1
pkg/scheme/value.go
liuzhen21/core
21
301
/* Copyright 2021 The tKeel Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package scheme import ( "encoding/json" logf "github.com/tkeel-io/core/pkg/logfield" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" xerrors "github.com/tkeel-io/core/pkg/errors" "github.com/tkeel-io/core/pkg/util" "github.com/tkeel-io/kit/log" ) const ( PropertyTypeInt = "int" PropertyTypeBool = "bool" PropertyTypeFloat = "float" PropertyTypeDouble = "double" PropertyTypeString = "string" PropertyTypeArray = "array" PropertyTypeStruct = "struct" DefineFieldArrayLength = "length" DefineFieldArrayElemCfg = "elem_type" DefineFieldStructFields = "fields" ) type Config struct { ID string `json:"id" mapstructure:"id"` Type string `json:"type" mapstructure:"type"` Name string `json:"name" mapstructure:"name"` Weight int `json:"weight" mapstructure:"weight"` Enabled bool `json:"enabled" mapstructure:"enabled"` EnabledSearch bool `json:"enabled_search" mapstructure:"enabled_search"` EnabledTimeSeries bool `json:"enabled_time_series" mapstructure:"enabled_time_series"` Description string `json:"description" mapstructure:"description"` Define map[string]interface{} `json:"define" mapstructure:"define"` LastTime int64 `json:"last_time" mapstructure:"last_time"` } func (cfg *Config) getArrayDefine() DefineArray { length, _ := cfg.Define[DefineFieldArrayLength].(int) etype, _ := cfg.Define[DefineFieldArrayElemCfg].(Config) return DefineArray{Length: length, ElemType: etype} } func (cfg *Config) getStructDefine() 
DefineStruct { fields, ok := cfg.Define[DefineFieldStructFields].(map[string]Config) if !ok { fields = make(map[string]Config) cfg.Define[DefineFieldStructFields] = fields } return DefineStruct{Fields: fields} } func (cfg *Config) GetConfig(segs []string, index int) (int, *Config, error) { return cfg.getConfig(segs, index) } func (cfg *Config) getConfig(segs []string, index int) (int, *Config, error) { if len(segs) > index { if cfg.Type != PropertyTypeStruct { return index, cfg, xerrors.ErrPatchTypeInvalid } define := cfg.getStructDefine() c, ok := define.Fields[segs[index]] if !ok { return index, cfg, xerrors.ErrPatchPathLack } cc := &c return cc.getConfig(segs, index+1) } return index, cfg, nil } func (cfg *Config) AppendField(c Config) error { if cfg.Type != PropertyTypeStruct { return xerrors.ErrInvalidNodeType } define := cfg.getStructDefine() define.Fields[c.ID] = c return nil } func (cfg *Config) RemoveField(id string) error { if cfg.Type != PropertyTypeStruct { return xerrors.ErrInvalidNodeType } define := cfg.getStructDefine() delete(define.Fields, id) return nil } type DefineStruct struct { Fields map[string]Config `json:"fields" mapstructure:"fields"` } func newDefineStruct() DefineStruct { return DefineStruct{Fields: make(map[string]Config)} } type DefineArray struct { Length int `json:"length" mapstructure:"length"` ElemType Config `json:"elem_type" mapstructure:"elem_type"` } func Parse(bytes []byte) (map[string]*Config, error) { // parse state config again. configs := make(map[string]interface{}) if err := json.Unmarshal(bytes, &configs); nil != err { log.L().Error("json unmarshal", logf.Error(err), logf.String("configs", string(bytes))) return nil, errors.Wrap(err, "json unmarshal") } var err error var cfg Config cfgs := make(map[string]*Config) for key, val := range configs { if cfg, err = ParseConfigFrom(val); nil != err { // TODO: dispose error. 
log.L().Error("parse configs", logf.Error(err)) continue } cfgs[key] = &cfg } return cfgs, nil } func ParseFrom(bytes []byte) (*Config, error) { v := make(map[string]interface{}) if err := json.Unmarshal(bytes, &v); nil != err { log.L().Error("unmarshal Config", logf.Error(err)) return nil, errors.Wrap(err, "unmarshal Config") } cfg, err := ParseConfigFrom(v) return &cfg, errors.Wrap(err, "parse Config") } func ParseConfigFrom(data interface{}) (cfg Config, err error) { cfgRequest := Config{} if err = mapstructure.Decode(data, &cfgRequest); nil != err { return cfg, errors.Wrap(err, "decode property config failed") } else if cfgRequest, err = parseField(cfgRequest); nil != err { return cfg, errors.Wrap(err, "parse config failed") } return cfgRequest, nil } func parseField(in Config) (out Config, err error) { switch in.Type { case PropertyTypeInt: case PropertyTypeBool: case PropertyTypeFloat: case PropertyTypeDouble: case PropertyTypeString: case PropertyTypeArray: arrDefine := DefineArray{} if err = mapstructure.Decode(in.Define, &arrDefine); nil != err { return out, errors.Wrap(err, "parse property config failed") } else if arrDefine.Length <= 0 { return out, xerrors.ErrEntityConfigInvalid } arrDefine.ElemType, err = parseField(arrDefine.ElemType) in.Define["elem_type"] = arrDefine.ElemType case PropertyTypeStruct: jsonDefine, jsonDefine2 := newDefineStruct(), newDefineStruct() if err = mapstructure.Decode(in.Define, &jsonDefine); nil != err { return out, errors.Wrap(err, "parse property config failed") } for cfgID, field := range jsonDefine.Fields { var cfg Config if cfg, err = parseField(field); nil != err { return out, errors.Wrap(err, "parse property config failed") } cfg.ID = cfgID jsonDefine2.Fields[cfgID] = cfg } in.Define["fields"] = jsonDefine2.Fields default: return out, xerrors.ErrEntityConfigInvalid } in.LastTime = lastTimestamp(in.LastTime) return in, errors.Wrap(err, "parse property config failed") } func lastTimestamp(timestamp int64) int64 { if 
timestamp == 0 { timestamp = util.UnixMilli() } return timestamp }
0.964844
1
apis/contact/func_all_test.go
jpbede/go-autodns-
0
309
package contact_test import ( "context" "github.com/stretchr/testify/assert" "go.bnck.me/autodns/apis/contact" "go.bnck.me/autodns/internal/transport" "net/http" "net/http/httptest" "testing" ) func TestClient_All(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { assert.Equal(t, "/contact/_search", req.URL.String()) assert.Equal(t, http.MethodPost, req.Method) rw.Write([]byte("{\"stid\":\"20210312-app1-demo-55340\",\"status\":{\"code\":\"S0304\",\"text\":\"Die Daten des Domain-Kontaktes wurden erfolgreich ermittelt.\",\"type\":\"SUCCESS\"},\"object\":{\"type\":\"Contact\",\"summary\":1},\"data\":[{\"created\":\"2021-03-12T15:06:54.000+0100\",\"updated\":\"2021-03-12T22:07:49.000+0100\",\"id\":31364475,\"owner\":{\"context\":1,\"user\":\"2021_03_11_jpbe_la\"},\"alias\":\"<NAME>\",\"type\":\"PERSON\",\"organization\":\"\",\"title\":\"\",\"city\":\"Musterhausen\",\"country\":\"DE\",\"state\":\"DE\",\"fname\":\"Jan-Philipp\",\"lname\":\"Benecke\",\"address\":[\"Musterstraße 1\"],\"pcode\":\"12345\"}]}")) })) tc := transport.New(srv.URL) tc.HTTPClient = srv.Client() tc.Credentials = &transport.APICredentials{Username: "abc", Password: "<PASSWORD>", Context: 1} cl := contact.New(tc) resp, err := cl.All(context.Background()) assert.NoError(t, err) assert.NotNil(t, resp) assert.Len(t, resp, 1) } func TestClient_All_InvalidJson(t *testing.T) { srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { rw.Write([]byte("no json")) })) tc := transport.New(srv.URL) tc.HTTPClient = srv.Client() tc.Credentials = &transport.APICredentials{Username: "abc", Password: "<PASSWORD>", Context: 1} cl := contact.New(tc) _, err := cl.All(context.Background()) assert.Error(t, err) assert.EqualError(t, err, "invalid character 'o' in literal null (expecting 'u')") }
1.46875
1
vms/platformvm/static_service_test.go
lasthyphen/avalanchego
0
317
// (c) 2019-2020, Dijets Desk, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm import ( "testing" "github.com/lasthyphen/avalanchego/ids" "github.com/lasthyphen/avalanchego/utils/constants" "github.com/lasthyphen/avalanchego/utils/formatting" "github.com/lasthyphen/avalanchego/utils/json" ) func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { id := ids.ShortID{1, 2, 3} nodeID := id.PrefixedString(constants.NodeIDPrefix) hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := formatting.FormatBech32(hrp, id.Bytes()) if err != nil { t.Fatal(err) } utxo := APIUTXO{ Address: addr, Amount: 0, } weight := json.Uint64(987654321) validator := APIPrimaryValidator{ APIStaker: APIStaker{ EndTime: 15, Weight: &weight, NodeID: nodeID, }, RewardOwner: &APIOwner{ Threshold: 1, Addresses: []string{addr}, }, Staked: []APIUTXO{{ Amount: weight, Address: addr, }}, } args := BuildGenesisArgs{ UTXOs: []APIUTXO{ utxo, }, Validators: []APIPrimaryValidator{ validator, }, Time: 5, Encoding: formatting.Hex, } reply := BuildGenesisReply{} ss := StaticService{} if err := ss.BuildGenesis(nil, &args, &reply); err == nil { t.Fatalf("Should have errored due to an invalid balance") } } func TestBuildGenesisInvalidAmount(t *testing.T) { id := ids.ShortID{1, 2, 3} nodeID := id.PrefixedString(constants.NodeIDPrefix) hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := formatting.FormatBech32(hrp, id.Bytes()) if err != nil { t.Fatal(err) } utxo := APIUTXO{ Address: addr, Amount: 123456789, } weight := json.Uint64(0) validator := APIPrimaryValidator{ APIStaker: APIStaker{ StartTime: 0, EndTime: 15, NodeID: nodeID, }, RewardOwner: &APIOwner{ Threshold: 1, Addresses: []string{addr}, }, Staked: []APIUTXO{{ Amount: weight, Address: addr, }}, } args := BuildGenesisArgs{ UTXOs: []APIUTXO{ utxo, }, Validators: []APIPrimaryValidator{ validator, }, Time: 5, Encoding: formatting.Hex, } reply := BuildGenesisReply{} ss := StaticService{} if err := 
ss.BuildGenesis(nil, &args, &reply); err == nil { t.Fatalf("Should have errored due to an invalid amount") } } func TestBuildGenesisInvalidEndtime(t *testing.T) { id := ids.ShortID{1, 2, 3} nodeID := id.PrefixedString(constants.NodeIDPrefix) hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := formatting.FormatBech32(hrp, id.Bytes()) if err != nil { t.Fatal(err) } utxo := APIUTXO{ Address: addr, Amount: 123456789, } weight := json.Uint64(987654321) validator := APIPrimaryValidator{ APIStaker: APIStaker{ StartTime: 0, EndTime: 5, NodeID: nodeID, }, RewardOwner: &APIOwner{ Threshold: 1, Addresses: []string{addr}, }, Staked: []APIUTXO{{ Amount: weight, Address: addr, }}, } args := BuildGenesisArgs{ UTXOs: []APIUTXO{ utxo, }, Validators: []APIPrimaryValidator{ validator, }, Time: 5, Encoding: formatting.Hex, } reply := BuildGenesisReply{} ss := StaticService{} if err := ss.BuildGenesis(nil, &args, &reply); err == nil { t.Fatalf("Should have errored due to an invalid end time") } } func TestBuildGenesisReturnsSortedValidators(t *testing.T) { id := ids.ShortID{1} nodeID := id.PrefixedString(constants.NodeIDPrefix) hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := formatting.FormatBech32(hrp, id.Bytes()) if err != nil { t.Fatal(err) } utxo := APIUTXO{ Address: addr, Amount: 123456789, } weight := json.Uint64(987654321) validator1 := APIPrimaryValidator{ APIStaker: APIStaker{ StartTime: 0, EndTime: 20, NodeID: nodeID, }, RewardOwner: &APIOwner{ Threshold: 1, Addresses: []string{addr}, }, Staked: []APIUTXO{{ Amount: weight, Address: addr, }}, } validator2 := APIPrimaryValidator{ APIStaker: APIStaker{ StartTime: 3, EndTime: 15, NodeID: nodeID, }, RewardOwner: &APIOwner{ Threshold: 1, Addresses: []string{addr}, }, Staked: []APIUTXO{{ Amount: weight, Address: addr, }}, } validator3 := APIPrimaryValidator{ APIStaker: APIStaker{ StartTime: 1, EndTime: 10, NodeID: nodeID, }, RewardOwner: &APIOwner{ Threshold: 1, Addresses: []string{addr}, }, Staked: []APIUTXO{{ 
Amount: weight, Address: addr, }}, } args := BuildGenesisArgs{ DjtxAssetID: ids.ID{'d', 'u', 'm', 'm', 'y', ' ', 'I', 'D'}, UTXOs: []APIUTXO{ utxo, }, Validators: []APIPrimaryValidator{ validator1, validator2, validator3, }, Time: 5, Encoding: formatting.Hex, } reply := BuildGenesisReply{} ss := StaticService{} if err := ss.BuildGenesis(nil, &args, &reply); err != nil { t.Fatalf("BuildGenesis should not have errored but got error: %s", err) } genesisBytes, err := formatting.Decode(reply.Encoding, reply.Bytes) if err != nil { t.Fatalf("Problem decoding BuildGenesis response: %s", err) } genesis := &Genesis{} if _, err := Codec.Unmarshal(genesisBytes, genesis); err != nil { t.Fatal(err) } validators := genesis.Validators if len(validators) != 3 { t.Fatal("Validators should contain 3 validators") } }
1.445313
1
pkg/dbmate/postgres.go
brutallino/dbmate
1
325
package dbmate import ( "bytes" "database/sql" "fmt" "net/url" "strings" "github.com/lib/pq" ) func init() { RegisterDriver(PostgresDriver{}, "postgres") RegisterDriver(PostgresDriver{}, "postgresql") } // PostgresDriver provides top level database functions type PostgresDriver struct { } // Open creates a new database connection func (drv PostgresDriver) Open(u *url.URL) (*sql.DB, error) { return sql.Open("postgres", u.String()) } func (drv PostgresDriver) openPostgresDB(u *url.URL) (*sql.DB, error) { // connect to postgres database postgresURL := *u postgresURL.Path = "postgres" return drv.Open(&postgresURL) } // CreateDatabase creates the specified database func (drv PostgresDriver) CreateDatabase(u *url.URL) error { name := databaseName(u) fmt.Printf("Creating: %s\n", name) db, err := drv.openPostgresDB(u) if err != nil { return err } defer mustClose(db) _, err = db.Exec(fmt.Sprintf("create database %s", pq.QuoteIdentifier(name))) return err } // DropDatabase drops the specified database (if it exists) func (drv PostgresDriver) DropDatabase(u *url.URL) error { name := databaseName(u) fmt.Printf("Dropping: %s\n", name) db, err := drv.openPostgresDB(u) if err != nil { return err } defer mustClose(db) _, err = db.Exec(fmt.Sprintf("drop database if exists %s", pq.QuoteIdentifier(name))) return err } func postgresSchemaMigrationsDump(db *sql.DB) ([]byte, error) { // load applied migrations migrations, err := queryColumn(db, "select quote_literal(version) from public.schema_migrations order by version asc") if err != nil { return nil, err } // build schema_migrations table data var buf bytes.Buffer buf.WriteString("\n--\n-- Dbmate schema migrations\n--\n\n") if len(migrations) > 0 { buf.WriteString("INSERT INTO public.schema_migrations (version) VALUES\n (" + strings.Join(migrations, "),\n (") + ");\n") } return buf.Bytes(), nil } // DumpSchema returns the current database schema func (drv PostgresDriver) DumpSchema(u *url.URL, db *sql.DB) ([]byte, error) { // load 
schema schema, err := runCommand("pg_dump", "--format=plain", "--encoding=UTF8", "--schema-only", "--no-privileges", "--no-owner", u.String()) if err != nil { return nil, err } migrations, err := postgresSchemaMigrationsDump(db) if err != nil { return nil, err } schema = append(schema, migrations...) return trimLeadingSQLComments(schema) } // DatabaseExists determines whether the database exists func (drv PostgresDriver) DatabaseExists(u *url.URL) (bool, error) { name := databaseName(u) db, err := drv.openPostgresDB(u) if err != nil { return false, err } defer mustClose(db) exists := false err = db.QueryRow("select true from pg_database where datname = $1", name). Scan(&exists) if err == sql.ErrNoRows { return false, nil } return exists, err } // CreateMigrationsTable creates the schema_migrations table func (drv PostgresDriver) CreateMigrationsTable(db *sql.DB) error { _, err := db.Exec("create table if not exists public.schema_migrations " + "(version varchar(255) primary key)") return err } // SelectMigrations returns a list of applied migrations // with an optional limit (in descending order) func (drv PostgresDriver) SelectMigrations(db *sql.DB, limit int) (map[string]bool, error) { query := "select version from public.schema_migrations order by version desc" if limit >= 0 { query = fmt.Sprintf("%s limit %d", query, limit) } rows, err := db.Query(query) if err != nil { return nil, err } defer mustClose(rows) migrations := map[string]bool{} for rows.Next() { var version string if err := rows.Scan(&version); err != nil { return nil, err } migrations[version] = true } return migrations, nil } // InsertMigration adds a new migration record func (drv PostgresDriver) InsertMigration(db Transaction, version string) error { _, err := db.Exec("insert into public.schema_migrations (version) values ($1)", version) return err } // DeleteMigration removes a migration record func (drv PostgresDriver) DeleteMigration(db Transaction, version string) error { _, err := 
db.Exec("delete from public.schema_migrations where version = $1", version) return err } // Ping verifies a connection to the database server. It does not verify whether the // specified database exists. func (drv PostgresDriver) Ping(u *url.URL) error { db, err := drv.openPostgresDB(u) if err != nil { return err } defer mustClose(db) return db.Ping() }
1.851563
2
vendor/github.com/coreos/go-semver/semver/semver_test.go
Simran-B/arangodb-starter
121
333
// Copyright 2013-2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semver import ( "bytes" "encoding/json" "errors" "flag" "fmt" "math/rand" "reflect" "testing" "time" "gopkg.in/yaml.v2" ) type fixture struct { GreaterVersion string LesserVersion string } var fixtures = []fixture{ fixture{"0.0.0", "0.0.0-foo"}, fixture{"0.0.1", "0.0.0"}, fixture{"1.0.0", "0.9.9"}, fixture{"0.10.0", "0.9.0"}, fixture{"0.99.0", "0.10.0"}, fixture{"2.0.0", "1.2.3"}, fixture{"0.0.0", "0.0.0-foo"}, fixture{"0.0.1", "0.0.0"}, fixture{"1.0.0", "0.9.9"}, fixture{"0.10.0", "0.9.0"}, fixture{"0.99.0", "0.10.0"}, fixture{"2.0.0", "1.2.3"}, fixture{"0.0.0", "0.0.0-foo"}, fixture{"0.0.1", "0.0.0"}, fixture{"1.0.0", "0.9.9"}, fixture{"0.10.0", "0.9.0"}, fixture{"0.99.0", "0.10.0"}, fixture{"2.0.0", "1.2.3"}, fixture{"1.2.3", "1.2.3-asdf"}, fixture{"1.2.3", "1.2.3-4"}, fixture{"1.2.3", "1.2.3-4-foo"}, fixture{"1.2.3-5-foo", "1.2.3-5"}, fixture{"1.2.3-5", "1.2.3-4"}, fixture{"1.2.3-5-foo", "1.2.3-5-Foo"}, fixture{"3.0.0", "2.7.2+asdf"}, fixture{"3.0.0+foobar", "2.7.2"}, fixture{"1.2.3-a.10", "1.2.3-a.5"}, fixture{"1.2.3-a.b", "1.2.3-a.5"}, fixture{"1.2.3-a.b", "1.2.3-a"}, fixture{"1.2.3-a.b.c.10.d.5", "1.2.3-a.b.c.5.d.100"}, fixture{"1.0.0", "1.0.0-rc.1"}, fixture{"1.0.0-rc.2", "1.0.0-rc.1"}, fixture{"1.0.0-rc.1", "1.0.0-beta.11"}, fixture{"1.0.0-beta.11", "1.0.0-beta.2"}, fixture{"1.0.0-beta.2", "1.0.0-beta"}, fixture{"1.0.0-beta", "1.0.0-alpha.beta"}, 
fixture{"1.0.0-alpha.beta", "1.0.0-alpha.1"}, fixture{"1.0.0-alpha.1", "1.0.0-alpha"}, fixture{"1.2.3-rc.1-1-1hash", "1.2.3-rc.2"}, } func TestCompare(t *testing.T) { for _, v := range fixtures { gt, err := NewVersion(v.GreaterVersion) if err != nil { t.Error(err) } lt, err := NewVersion(v.LesserVersion) if err != nil { t.Error(err) } if gt.LessThan(*lt) { t.Errorf("%s should not be less than %s", gt, lt) } if gt.Equal(*lt) { t.Errorf("%s should not be equal to %s", gt, lt) } if gt.Compare(*lt) <= 0 { t.Errorf("%s should be greater than %s", gt, lt) } if !lt.LessThan(*gt) { t.Errorf("%s should be less than %s", lt, gt) } if !lt.Equal(*lt) { t.Errorf("%s should be equal to %s", lt, lt) } if lt.Compare(*gt) > 0 { t.Errorf("%s should not be greater than %s", lt, gt) } } } func testString(t *testing.T, orig string, version *Version) { if orig != version.String() { t.Errorf("%s != %s", orig, version) } } func TestString(t *testing.T) { for _, v := range fixtures { gt, err := NewVersion(v.GreaterVersion) if err != nil { t.Error(err) } testString(t, v.GreaterVersion, gt) lt, err := NewVersion(v.LesserVersion) if err != nil { t.Error(err) } testString(t, v.LesserVersion, lt) } } func shuffleStringSlice(src []string) []string { dest := make([]string, len(src)) rand.Seed(time.Now().Unix()) perm := rand.Perm(len(src)) for i, v := range perm { dest[v] = src[i] } return dest } func TestSort(t *testing.T) { sortedVersions := []string{"1.0.0", "1.0.2", "1.2.0", "3.1.1"} unsortedVersions := shuffleStringSlice(sortedVersions) semvers := []*Version{} for _, v := range unsortedVersions { sv, err := NewVersion(v) if err != nil { t.Fatal(err) } semvers = append(semvers, sv) } Sort(semvers) for idx, sv := range semvers { if sv.String() != sortedVersions[idx] { t.Fatalf("incorrect sort at index %v", idx) } } } func TestBumpMajor(t *testing.T) { version, _ := NewVersion("1.0.0") version.BumpMajor() if version.Major != 2 { t.Fatalf("bumping major on 1.0.0 resulted in %v", version) } 
version, _ = NewVersion("1.5.2") version.BumpMajor() if version.Minor != 0 && version.Patch != 0 { t.Fatalf("bumping major on 1.5.2 resulted in %v", version) } version, _ = NewVersion("1.0.0+build.1-alpha.1") version.BumpMajor() if version.PreRelease != "" && version.Metadata != "" { t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) } } func TestBumpMinor(t *testing.T) { version, _ := NewVersion("1.0.0") version.BumpMinor() if version.Major != 1 { t.Fatalf("bumping minor on 1.0.0 resulted in %v", version) } if version.Minor != 1 { t.Fatalf("bumping major on 1.0.0 resulted in %v", version) } version, _ = NewVersion("1.0.0+build.1-alpha.1") version.BumpMinor() if version.PreRelease != "" && version.Metadata != "" { t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) } } func TestBumpPatch(t *testing.T) { version, _ := NewVersion("1.0.0") version.BumpPatch() if version.Major != 1 { t.Fatalf("bumping minor on 1.0.0 resulted in %v", version) } if version.Minor != 0 { t.Fatalf("bumping major on 1.0.0 resulted in %v", version) } if version.Patch != 1 { t.Fatalf("bumping major on 1.0.0 resulted in %v", version) } version, _ = NewVersion("1.0.0+build.1-alpha.1") version.BumpPatch() if version.PreRelease != "" && version.Metadata != "" { t.Fatalf("bumping major on 1.0.0+build.1-alpha.1 resulted in %v", version) } } func TestMust(t *testing.T) { tests := []struct { versionStr string version *Version recov interface{} }{ { versionStr: "1.0.0", version: &Version{Major: 1}, }, { versionStr: "version number", recov: errors.New("version number is not in dotted-tri format"), }, } for _, tt := range tests { func() { defer func() { recov := recover() if !reflect.DeepEqual(tt.recov, recov) { t.Fatalf("incorrect panic for %q: want %v, got %v", tt.versionStr, tt.recov, recov) } }() version := Must(NewVersion(tt.versionStr)) if !reflect.DeepEqual(tt.version, version) { t.Fatalf("incorrect version for %q: want %+v, got %+v", tt.versionStr, 
tt.version, version) } }() } } type fixtureJSON struct { GreaterVersion *Version LesserVersion *Version } func TestJSON(t *testing.T) { fj := make([]fixtureJSON, len(fixtures)) for i, v := range fixtures { var err error fj[i].GreaterVersion, err = NewVersion(v.GreaterVersion) if err != nil { t.Fatal(err) } fj[i].LesserVersion, err = NewVersion(v.LesserVersion) if err != nil { t.Fatal(err) } } fromStrings, err := json.Marshal(fixtures) if err != nil { t.Fatal(err) } fromVersions, err := json.Marshal(fj) if err != nil { t.Fatal(err) } if !bytes.Equal(fromStrings, fromVersions) { t.Errorf("Expected: %s", fromStrings) t.Errorf("Unexpected: %s", fromVersions) } fromJson := make([]fixtureJSON, 0, len(fj)) err = json.Unmarshal(fromStrings, &fromJson) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(fromJson, fj) { t.Error("Expected: ", fj) t.Error("Unexpected: ", fromJson) } } func TestYAML(t *testing.T) { document, err := yaml.Marshal(fixtures) if err != nil { t.Fatal(err) } expected := make([]fixtureJSON, len(fixtures)) for i, v := range fixtures { var err error expected[i].GreaterVersion, err = NewVersion(v.GreaterVersion) if err != nil { t.Fatal(err) } expected[i].LesserVersion, err = NewVersion(v.LesserVersion) if err != nil { t.Fatal(err) } } fromYAML := make([]fixtureJSON, 0, len(fixtures)) err = yaml.Unmarshal(document, &fromYAML) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(fromYAML, expected) { t.Error("Expected: ", expected) t.Error("Unexpected: ", fromYAML) } } func TestBadInput(t *testing.T) { bad := []string{ "1.2", "1.2.3x", "0x1.3.4", "-1.2.3", "1.2.3.4", } for _, b := range bad { if _, err := NewVersion(b); err == nil { t.Error("Improperly accepted value: ", b) } } } func TestFlag(t *testing.T) { v := Version{} f := flag.NewFlagSet("version", flag.ContinueOnError) f.Var(&v, "version", "set version") if err := f.Set("version", "1.2.3"); err != nil { t.Fatal(err) } if v.String() != "1.2.3" { t.Errorf("Set wrong value %q", v) } } func 
ExampleVersion_LessThan() { vA := New("1.2.3") vB := New("3.2.1") fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB)) // Output: // 1.2.3 < 3.2.1 == true }
1.226563
1
bitcarbs/main.go
cirello-io/exp
1
341
package main import ( "encoding/json" "fmt" "io/ioutil" "log" "net/http" "os" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/sheets/v4" ) var TokenFileFn string var CredsFn string var SpreadsheetID string // Retrieve a token, saves the token, then returns the generated client. func getClient(config *oauth2.Config) *http.Client { tok, err := tokenFromFile(TokenFileFn) if err != nil { tok = getTokenFromWeb(config) saveToken(TokenFileFn, tok) } return config.Client(context.Background(), tok) } func getTokenFromWeb(config *oauth2.Config) *oauth2.Token { authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline) fmt.Printf("Go to the following link in your browser then type the "+ "authorization code: \n%v\n", authURL) var authCode string if _, err := fmt.Scan(&authCode); err != nil { log.Fatalf("Unable to read authorization code: %v", err) } tok, err := config.Exchange(context.TODO(), authCode) if err != nil { log.Fatalf("Unable to retrieve token from web: %v", err) } return tok } func tokenFromFile(file string) (*oauth2.Token, error) { f, err := os.Open(file) if err != nil { return nil, err } defer f.Close() tok := &oauth2.Token{} err = json.NewDecoder(f).Decode(tok) return tok, err } func saveToken(path string, token *oauth2.Token) { fmt.Printf("Saving credential file to: %s\n", path) f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) if err != nil { log.Fatalf("Unable to cache oauth token: %v", err) } defer f.Close() json.NewEncoder(f).Encode(token) } func main() { b, err := ioutil.ReadFile(CredsFn) if err != nil { log.Fatalf("Unable to read client secret file: %v", err) } config, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/spreadsheets.readonly") if err != nil { log.Fatalf("Unable to parse client secret file to config: %v", err) } client := getClient(config) srv, err := sheets.New(client) if err != nil { log.Fatalf("Unable to retrieve Sheets client: %v", err) } 
readRange := "Sheet1!B1" resp, err := srv.Spreadsheets.Values.Get(SpreadsheetID, readRange).Do() if err != nil { log.Fatalf("Unable to retrieve data from sheet: %v", err) } if len(resp.Values) == 0 { os.Exit(1) } for _, row := range resp.Values { fmt.Println(row[0]) } }
1.351563
1
x/slashing/legacy/v040/migrate_test.go
khdegraaf/cosmos-sdk
3
349
package v040_test import ( "encoding/json" "testing" "github.com/stretchr/testify/require" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/simapp" sdk "github.com/cosmos/cosmos-sdk/types" v039slashing "github.com/cosmos/cosmos-sdk/x/slashing/legacy/v039" v040slashing "github.com/cosmos/cosmos-sdk/x/slashing/legacy/v040" ) func TestMigrate(t *testing.T) { encodingConfig := simapp.MakeEncodingConfig() clientCtx := client.Context{}. WithInterfaceRegistry(encodingConfig.InterfaceRegistry). WithTxConfig(encodingConfig.TxConfig). WithLegacyAmino(encodingConfig.Amino). WithJSONMarshaler(encodingConfig.Marshaler) addr1, err := sdk.ConsAddressFromBech32("cosmosvalcons104cjmxkrg8y8lmrp25de02e4zf00zle4mzs685") require.NoError(t, err) addr2, err := sdk.ConsAddressFromBech32("<KEY>") require.NoError(t, err) gs := v039slashing.GenesisState{ Params: v039slashing.DefaultParams(), SigningInfos: map[string]v039slashing.ValidatorSigningInfo{ "<KEY>": { Address: addr2, IndexOffset: 615501, MissedBlocksCounter: 1, Tombstoned: false, }, "cosmosvalcons104cjmxkrg8y8lmrp25de02e4zf00zle4mzs685": { Address: addr1, IndexOffset: 2, MissedBlocksCounter: 2, Tombstoned: false, }, }, MissedBlocks: map[string][]v039slashing.MissedBlock{ "cosmosvalcons10e4c5p6qk0sycy9u6u43t7csmlx9fyadr9yxph": { { Index: 2, Missed: true, }, }, "cosmosvalcons104cjmxkrg8y8lmrp25de02e4zf00zle4mzs685": { { Index: 3, Missed: true, }, { Index: 4, Missed: true, }, }, }, } migrated := v040slashing.Migrate(gs) // Check that in `signing_infos` and `missed_blocks`, the address // cosmosvalcons104cjmxkrg8y8lmrp25de02e4zf00zle4mzs685 // should always come before the address // cosmosvalcons10e4c5p6qk0sycy9u6u43t7csmlx9fyadr9yxph // (in alphabetic order, basically). 
expected := `{ "missed_blocks": [ { "address": "cosmosvalcons104cjmxkrg8y8lmrp25de02e4zf00zle4mzs685", "missed_blocks": [ { "index": "3", "missed": true }, { "index": "4", "missed": true } ] }, { "address": "cosmosvalcons10e4c5p6qk0sycy9u6u43t7csmlx9fyadr9yxph", "missed_blocks": [ { "index": "2", "missed": true } ] } ], "params": { "downtime_jail_duration": "600s", "min_signed_per_window": "0.500000000000000000", "signed_blocks_window": "100", "slash_fraction_double_sign": "0.050000000000000000", "slash_fraction_downtime": "0.010000000000000000" }, "signing_infos": [ { "address": "cosmosvalcons104cjmxkrg8y8lmrp25de02e4zf00zle4mzs685", "validator_signing_info": { "address": "cosmosvalcons104cjmxkrg8y8lmrp25de02e4zf00zle4mzs685", "index_offset": "2", "jailed_until": "0001-01-01T00:00:00Z", "missed_blocks_counter": "2", "start_height": "0", "tombstoned": false } }, { "address": "cosmosvalcons10e4c5p6qk0sycy9u6u43t7csmlx9fyadr9yxph", "validator_signing_info": { "address": "cosmosvalcons10e4c5p6qk0sycy9u6u43t7csmlx9fyadr9yxph", "index_offset": "615501", "jailed_until": "0001-01-01T00:00:00Z", "missed_blocks_counter": "1", "start_height": "0", "tombstoned": false } } ] }` bz, err := clientCtx.JSONMarshaler.MarshalJSON(migrated) require.NoError(t, err) // Indent the JSON bz correctly. var jsonObj map[string]interface{} err = json.Unmarshal(bz, &jsonObj) require.NoError(t, err) indentedBz, err := json.MarshalIndent(jsonObj, "", " ") require.NoError(t, err) require.Equal(t, expected, string(indentedBz)) }
1.351563
1
routes/routes.go
tomyweiss/postee
87
357
package routes type InputRoute struct { Name string `json:"name"` Input string `json:"input"` InputFiles []string `json:"input-files"` Outputs []string `json:"outputs"` Plugins Plugins `json:"plugins"` Template string `json:"template"` Scheduling chan struct{} } type Plugins struct { AggregateMessageNumber int `json:"aggregate-message-number"` AggregateMessageTimeout string `json:"aggregate-message-timeout"` AggregateTimeoutSeconds int UniqueMessageProps []string `json:"unique-message-props"` UniqueMessageTimeout string `json:"unique-message-timeout"` UniqueMessageTimeoutSeconds int } func (route *InputRoute) IsSchedulerRun() bool { return route.Scheduling != nil } func (route *InputRoute) StartScheduler() { route.Scheduling = make(chan struct{}) } func (route *InputRoute) StopScheduler() { if route.Scheduling != nil { close(route.Scheduling) } }
1.460938
1
doc.go
ananyagoel2/argparse
19
365
// Package argparse is a Golang command line argument parsing library, taking heavy influance from Python's argparse module. // // Using argparse, it is possible to easily create command-line interfaces, such // as: // // > exc --help // // usage: main [-h] [-v] [-e] [-x ...] [-n] [-f] [-k] [p PATTERN] [s SPLIT] [c CHAR] // // Construct and execute arguments from Stdin // // positional arguments: // [p PATTERN] Stdin regex grouping pattern // [s SPLIT] Delimiting regex for Stdin // [c CHAR] Replacement string for argument parsing // // optional arguments: // -h, --help Show program help // -v, --version Show program version // -e, --empty Allow empty text // -x, --exec Pasrable command string // -n, --dry-run Output commands instead of executing // -f, --force Force continue command execution upon errored commands // -k, --keep-newline Allow trailing newline from Stdin // // Much of the heavy lifting for creating a cmd-line interface is managed by argparse, // so you can focus on getting your program created and running. // // For example, the code required to create the above interface is as follows: // // import ( // "github.com/clagraff/argparse" // ) // // func main() { // p := argparse.NewParser("Construct and execute arguments from Stdin").Version("0.0.0") // p.AddHelp().AddVersion() // Enable `--help` & `-h` to display usage text to the user. 
// // pattern := argparse.NewArg("p pattern", "pattern", "Stdin regex grouping pattern").Default(".*") // split := argparse.NewArg("s split", "split", "Delimiting regex for Stdin").Default("\n") // nonEmpty := argparse.NewOption("e empty", "empty", "Allow empty text") // keepNewline := argparse.NewFlag("k keep-newline", "keep-newline", "Allow trailing newline from Stdin").Default("false") // command := argparse.NewOption("x exec", "exec", "Pasrable command string").Nargs("r").Action(argparse.Store) // replacementChar := argparse.NewArg("c char", "char", "Replacement string for argument parsing").Default("%") // dryRun := argparse.NewFlag("n dry-run", "dry", "Output commands instead of executing") // ignoreErrors := argparse.NewFlag("f force", "force", "Force continue command execution upon errored commands") // // p.AddOptions(pattern, split, nonEmpty, command, replacementChar, dryRun, ignoreErrors, keepNewline) // // ns, _, err := p.Parse(os.Args[1:]...) // switch err.(type) { // case argparse.ShowHelpErr: // return // case error: // fmt.Println(err, "\n") // p.ShowHelp() // return // } // // // To get started, all you need is a Parser and a few Options! package argparse
2.484375
2
console.go
linpaul2004/mahjong-helper
0
373
package main import ( "os/exec" "os" "runtime" ) var clearFuncMap = map[string]func(){} func init() { clearFuncMap["linux"] = func() { cmd := exec.Command("clear") cmd.Stdout = os.Stdout cmd.Run() } clearFuncMap["darwin"] = clearFuncMap["linux"] clearFuncMap["windows"] = func() { // TODO: 檢查是否有 cls 命令,若沒有提示用 Windows 自帶的 cmd.exe 打開助手 cmd := exec.Command("cmd", "/c", "cls") cmd.Stdout = os.Stdout cmd.Run() } } func clearConsole() { if clearFunc, ok := clearFuncMap[runtime.GOOS]; ok { clearFunc() } }
1.359375
1
internal/build/error.go
iggy/tilt
1,956
381
package build import ( "fmt" "github.com/pkg/errors" "k8s.io/client-go/util/exec" "github.com/tilt-dev/tilt/internal/container" "github.com/tilt-dev/tilt/internal/docker" "github.com/tilt-dev/tilt/pkg/model" ) // https://success.docker.com/article/what-causes-a-container-to-exit-with-code-137 const TaskKillExitCode = 137 func WrapCodeExitError(err error, cID container.ID, cmd model.Cmd) error { exitErr, isExitErr := err.(exec.CodeExitError) if isExitErr { return RunStepFailure{ Cmd: cmd, ExitCode: exitErr.ExitStatus(), } } return errors.Wrapf(err, "executing %v on container %s", cmd, cID.ShortStr()) } // Convert a Docker exec error into our own internal error type. func WrapContainerExecError(err error, cID container.ID, cmd model.Cmd) error { exitErr, isExitErr := err.(docker.ExitError) if isExitErr { if exitErr.ExitCode == TaskKillExitCode { // If we got a 137 error code, that's not the user's fault. // The k8s infrastructure killed the job. return fmt.Errorf("executing %v on container %s: killed by container engine", cmd, cID.ShortStr()) } return RunStepFailure{ Cmd: cmd, ExitCode: exitErr.ExitCode, } } return errors.Wrapf(err, "executing %v on container %s", cmd, cID.ShortStr()) } // Indicates that the update failed because one of the user's Runs failed // (i.e. exited non-zero) -- as opposed to an infrastructure issue. 
type RunStepFailure struct { Cmd model.Cmd ExitCode int } func (e RunStepFailure) Empty() bool { return e.Cmd.Empty() && e.ExitCode == 0 } func (e RunStepFailure) Error() string { return fmt.Sprintf("Run step %q failed with exit code: %d", e.Cmd.String(), e.ExitCode) } func IsRunStepFailure(err error) bool { _, ok := MaybeRunStepFailure(err) return ok } func MaybeRunStepFailure(err error) (RunStepFailure, bool) { e := err for { if e == nil { break } rsf, ok := e.(RunStepFailure) if ok { return rsf, true } cause := errors.Cause(e) if cause == e { // no more causes to drill into // (If err does not implement Causer, `Cause(err)` returns back the original error) break } e = cause } return RunStepFailure{}, false } var _ error = RunStepFailure{}
1.429688
1
tests/sourcemanager_test.go
DataWorkbench/sourcemanager
0
389
package tests import ( "context" "encoding/json" "strings" "testing" "github.com/DataWorkbench/glog" "github.com/stretchr/testify/require" "github.com/DataWorkbench/common/grpcwrap" "github.com/DataWorkbench/common/qerror" "github.com/DataWorkbench/common/utils/idgenerator" "github.com/DataWorkbench/gproto/pkg/datasourcepb" "github.com/DataWorkbench/gproto/pkg/flinkpb" "github.com/DataWorkbench/gproto/pkg/model" "github.com/DataWorkbench/gproto/pkg/request" "github.com/DataWorkbench/gproto/pkg/response" "github.com/DataWorkbench/gproto/pkg/smpb" ) var MysqlManager request.CreateSource //name mysql var MysqlSource request.CreateTable var MysqlDest request.CreateTable var ClickHouseManager request.CreateSource var ClickHouseSource request.CreateTable var ClickHouseDest request.CreateTable var KafkaManager request.CreateSource var KafkaSource request.CreateTable var PGManager request.CreateSource //var PGSource request.CreateTable //var PGDest request.CreateTable var S3Manager request.CreateSource var HbaseManager request.CreateSource var FtpManager request.CreateSource var HDFSManager request.CreateSource var NewSpaceManager request.CreateSource // name mysql var NameExistsManager request.CreateSource //create failed var NameErrorManager request.CreateSource //create failed var SourceTypeErrorManager request.CreateSource //create failed var TablePG request.CreateTable var TableNameExists request.CreateTable var TableNameError request.CreateTable var TableJsonError request.CreateTable var TableManagerError request.CreateTable var TableMysqlDimensionSource request.CreateTable var TableMysqlDimensionDest request.CreateTable var TableMysqlCommonSource request.CreateTable var TableMysqlCommonDest request.CreateTable var TableS3Source request.CreateTable var TableS3Dest request.CreateTable var TableUDFSource request.CreateTable var TableUDFDest request.CreateTable var TableHbaseSource request.CreateTable var TableHbaseDest request.CreateTable var TableFtpSource 
request.CreateTable var TableFtpDest request.CreateTable func typeToJsonString(v interface{}) string { s, _ := json.Marshal(&v) return string(s) } var client smpb.SourcemanagerClient var ctx context.Context var initDone bool var spaceid string var newspaceid string func mainInit(t *testing.T) { if initDone == true { return } initDone = true spaceid = "wks-0000000000000001" newspaceid = "wks-0000000000000002" // Mysql // https://segmentfault.com/a/1190000039048901 MysqlManager = request.CreateSource{SourceId: "som-00000000000mysql", SpaceId: spaceid, SourceType: model.DataSource_MySQL, Name: "mysql", Comment: "", Url: &datasourcepb.DataSourceURL{Mysql: &datasourcepb.MySQLURL{User: "root", Password: "password", Host: "127.0.0.1", Database: "data_workbench", Port: 3306}}} MysqlSource = request.CreateTable{TableId: "sot-00000mysqlsource", SourceId: MysqlManager.SourceId, SpaceId: spaceid, Name: "ms", Comment: "mysql", TableKind: model.TableInfo_Source, TableSchema: &flinkpb.TableSchema{Mysql: &flinkpb.MySQLTable{SqlColumn: []*flinkpb.SqlColumnType{&flinkpb.SqlColumnType{Column: "id", Type: "bigint", PrimaryKey: "t"}, &flinkpb.SqlColumnType{Column: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}}}} MysqlDest = request.CreateTable{TableId: "sot-0000000mysqldest", SourceId: MysqlManager.SourceId, SpaceId: spaceid, Name: "md", Comment: "mysql dest", TableKind: model.TableInfo_Destination, TableSchema: &flinkpb.TableSchema{Mysql: &flinkpb.MySQLTable{SqlColumn: []*flinkpb.SqlColumnType{&flinkpb.SqlColumnType{Column: "id", Type: "bigint", PrimaryKey: "t"}, &flinkpb.SqlColumnType{Column: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}}}} // ClickHouse // create table cks(paycount bigint, paymoney varchar(10)) ENGINE=TinyLog; // create table zz(id bigint, id1 bigint, t timestamp, v varchar(10), primary key (id)) engine=MergeTree; ClickHouseManager = request.CreateSource{SourceId: "som-000000clickhouse", SpaceId: spaceid, SourceType: 
model.DataSource_ClickHouse, Name: "clickhouse", Comment: "clickhouse", Url: &datasourcepb.DataSourceURL{Clickhouse: &datasourcepb.ClickHouseURL{User: "default", Password: "", Host: "127.0.0.1", Port: 8123, Database: "default"}}} ClickHouseSource = request.CreateTable{TableId: "sot-clickhousesource", SourceId: ClickHouseManager.SourceId, SpaceId: spaceid, Name: "cks", Comment: "cksource", TableKind: model.TableInfo_Source, TableSchema: &flinkpb.TableSchema{Clickhouse: &flinkpb.ClickHouseTable{SqlColumn: []*flinkpb.SqlColumnType{&flinkpb.SqlColumnType{Column: "paycount", Type: "bigint", PrimaryKey: "t"}, &flinkpb.SqlColumnType{Column: "paymoney", Type: "varchar", Length: "10", Comment: "xxx", PrimaryKey: "f"}}}}} ClickHouseDest = request.CreateTable{TableId: "sot-00clickhousedest", SourceId: ClickHouseManager.SourceId, SpaceId: spaceid, Name: "ckd", Comment: "ckdest", TableKind: model.TableInfo_Destination, TableSchema: &flinkpb.TableSchema{Clickhouse: &flinkpb.ClickHouseTable{SqlColumn: []*flinkpb.SqlColumnType{&flinkpb.SqlColumnType{Column: "paycount", Type: "bigint", PrimaryKey: "t"}, &flinkpb.SqlColumnType{Column: "paymoney", Type: "varchar", Length: "10", Comment: "xxx", PrimaryKey: "f"}}}}} // PostgreSQL PGManager = request.CreateSource{SourceId: "som-000000postgresql", SpaceId: spaceid, SourceType: model.DataSource_PostgreSQL, Name: "pg", Comment: "", Url: &datasourcepb.DataSourceURL{Postgresql: &datasourcepb.PostgreSQLURL{User: "lzzhang", Password: "<PASSWORD>", Host: "127.0.0.1", Database: "lzzhang", Port: 5432}}} //PGSource = request.CreateTable{TableId: "sot-postgresqlsource", SourceId: PGManager.SourceId, SpaceId: spaceid, Name: "pgs", Comment: "pgs", TableKind: model.TableInfo_Source, TableSchema: &flinkpb.TableSchema{MySQL: &model.MySQLTableDefine{SqlColumn: []*flinkpb.SqlColumnType{&flinkpb.SqlColumnType{Column: "id", Type: "bigint", PrimaryKey: "t"}, &flinkpb.SqlColumnType{Column: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}}}} //PGDest = 
request.CreateTable{TableId: "sot-00postgresqldest", SourceId: PGManager.SourceId, SpaceId: spaceid, Name: "pgd", Comment: "pgd", TableKind: model.TableInfo_Destination, TableSchema: &flinkpb.TableSchema{MySQL: &model.MySQLTableDefine{SqlColumn: []*flinkpb.SqlColumnType{&flinkpb.SqlColumnType{Column: "id", Type: "bigint", PrimaryKey: "t"}, &flinkpb.SqlColumnType{Column: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}}}} // kafka {"paycount": 2, "paymoney": "EUR"} {"paycount": 1, "paymoney": "USD"} KafkaManager = request.CreateSource{SourceId: "som-00000000000kafka", SpaceId: spaceid, SourceType: model.DataSource_Kafka, Name: "kafka", Comment: "", Url: &datasourcepb.DataSourceURL{Kafka: &datasourcepb.KafkaURL{KafkaBrokers: "dataworkbench-kafka-for-test:9092"}}} KafkaSource = request.CreateTable{TableId: "sot-00000kafkasource", SourceId: KafkaManager.SourceId, SpaceId: spaceid, Name: "billing", Comment: "", TableKind: model.TableInfo_Source, TableSchema: &flinkpb.TableSchema{Kafka: &flinkpb.KafkaTable{SqlColumn: []*flinkpb.SqlColumnType{&flinkpb.SqlColumnType{Column: "paycount", Type: "bigint", PrimaryKey: "f"}, &flinkpb.SqlColumnType{Column: "paymoney", Type: "string", Comment: "", PrimaryKey: "f"}}, TimeColumn: []*flinkpb.SqlTimeColumnType{&flinkpb.SqlTimeColumnType{Column: "tproctime", Type: "proctime"}}, Topic: "workbench", Format: "json", ConnectorOptions: []*flinkpb.ConnectorOption{&flinkpb.ConnectorOption{Name: "'json.fail-on-missing-field'", Value: "'false'"}, &flinkpb.ConnectorOption{Name: "'json.ignore-parse-errors'", Value: "'true'"}}}}} S3Manager = request.CreateSource{SourceId: "som-00000000000000s3", SpaceId: spaceid, SourceType: model.DataSource_S3, Name: "s3", Url: &datasourcepb.DataSourceURL{S3: &datasourcepb.S3URL{}}} //Url: &datasourcepb.DataSourceURL{S3: &model.S3Url{AccessKey: "<KEY>", SecretKey: "<KEY>", EndPoint: "http://s3.gd2.qingstor.com"}}} HbaseManager = request.CreateSource{SourceId: "som-00000000000hbase", SpaceId: spaceid, 
SourceType: model.DataSource_HBase, Name: "hbase", Url: &datasourcepb.DataSourceURL{Hbase: &datasourcepb.HBaseURL{Zookeeper: "hbase:2181", ZNode: "/hbase"}}} FtpManager = request.CreateSource{SourceId: "som-0000000000000ftp", SpaceId: spaceid, SourceType: model.DataSource_Ftp, Name: "ftp", Url: &datasourcepb.DataSourceURL{Ftp: &datasourcepb.FtpURL{Host: "192.168.3.11", Port: 21}}} HDFSManager = request.CreateSource{SourceId: "som-000000000000hdfs", SpaceId: spaceid, SourceType: model.DataSource_HDFS, Name: "hdfs", Url: &datasourcepb.DataSourceURL{Hdfs: &datasourcepb.HDFSURL{Nodes: &datasourcepb.HDFSURL_HDFSNodeURL{NameNode: "127.0.0.1", Port: 8020}}}} NewSpaceManager = request.CreateSource{SourceId: "som-00000000newspace", SpaceId: newspaceid, SourceType: model.DataSource_MySQL, Name: "mysql", Comment: "newspace", Url: &datasourcepb.DataSourceURL{Mysql: &datasourcepb.MySQLURL{User: "root", Password: "password", Host: "127.0.0.1", Database: "data_workbench", Port: 3306}}} NameExistsManager = request.CreateSource{SourceId: "som-000000nameexists", SpaceId: spaceid, SourceType: model.DataSource_MySQL, Name: "mysql", Url: &datasourcepb.DataSourceURL{Mysql: &datasourcepb.MySQLURL{User: "root", Password: "password", Host: "127.0.0.1", Database: "data_workbench", Port: 3306}}} NameErrorManager = request.CreateSource{SourceId: "som-000000nameerror", SpaceId: spaceid, SourceType: model.DataSource_MySQL, Name: "mysql.mysql", Url: &datasourcepb.DataSourceURL{Mysql: &datasourcepb.MySQLURL{User: "root", Password: "password", Host: "127.0.0.1", Database: "data_workbench", Port: 3306}}} SourceTypeErrorManager = request.CreateSource{SourceId: "som-0sourcetypeerror", SpaceId: spaceid, SourceType: 10000, Name: "SourceTypeError", Url: &datasourcepb.DataSourceURL{Mysql: &datasourcepb.MySQLURL{User: "root", Password: "password", Host: "127.0.0.1", Database: "data_workbench", Port: 3306}}} //// Source Tables //TablePG = request.CreateTable{ID: "sot-0123456789012345", SourceId: 
PGManager.ID, Name: "pd", Comment: "postgresql", Url: typeToJsonString(constants.FlinkTableDefinePostgreSQL{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "id", Type: "bigint", PrimaryKey: "t"}, constants.SqlColumnType{Name: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}})} //TableNameExists = request.CreateTable{ID: "sot-0123456789012351", SourceId: MysqlManager.ID, Name: "ms", Comment: "to pd", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "id", Type: "bigint", PrimaryKey: "t"}, constants.SqlColumnType{Name: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}})} //TableNameError = request.CreateTable{ID: "sot-0123456789012352", SourceId: MysqlManager.ID, Name: "ms.ms", Comment: "to pd", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "id", Type: "bigint", PrimaryKey: "t"}, constants.SqlColumnType{Name: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}})} //TableJsonError = request.CreateTable{ID: "sot-0123456789012353", SourceId: MysqlManager.ID, Name: "ms1", Comment: "to pd", Url: "sss,xx,xx, xx"} //TableManagerError = request.CreateTable{ID: "sot-0123456789012354", SourceId: "sot-xxxxyyyyzzzzxxxx", Name: "ms2", Comment: "to pd", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "id", Type: "bigint", PrimaryKey: "t"}, constants.SqlColumnType{Name: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}})} //TableMysqlDimensionSource = request.CreateTable{ID: "sot-0123456789012355", SourceId: MysqlManager.ID, Name: "mw", Comment: "join dimension table", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "rate", Type: "bigint", PrimaryKey: "f"}, constants.SqlColumnType{Name: "dbmoney", Type: "varchar", Length: "8", Comment: "xxx"}}})} 
//TableMysqlDimensionDest = request.CreateTable{ID: "sot-0123456789012356", SourceId: MysqlManager.ID, Name: "mwd", Comment: "join dimension table", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "total", Type: "bigint", PrimaryKey: "f"}}})} //TableMysqlCommonSource = request.CreateTable{ID: "sot-0123456789012357", SourceId: MysqlManager.ID, Name: "mc", Comment: "join common table", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "rate", Type: "bigint", PrimaryKey: "f"}, constants.SqlColumnType{Name: "dbmoney", Type: "varchar", Comment: "xxx", PrimaryKey: "f", Length: "8"}}})} //TableMysqlCommonDest = request.CreateTable{ID: "sot-0123456789012358", SourceId: MysqlManager.ID, Name: "mcd", Comment: "join common table", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "total", Type: "bigint"}}})} //'connector.write.flush.max-rows' = '1' //TableS3Source = request.CreateTable{ID: "sot-0123456789012359", SourceId: S3Manager.ID, Name: "s3s", Comment: "s3 source", Url: typeToJsonString(constants.FlinkTableDefineS3{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "id", Type: "bigint", PrimaryKey: "t"}, constants.SqlColumnType{Name: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}, Path: "s3a://filesystem/source", Format: "json"})} //TableS3Dest = request.CreateTable{ID: "sot-0123456789012360", SourceId: S3Manager.ID, Name: "s3d", Comment: "s3 destination", Url: typeToJsonString(constants.FlinkTableDefineS3{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "id", Type: "bigint", PrimaryKey: "t"}, constants.SqlColumnType{Name: "id1", Type: "bigint", Comment: "xxx", PrimaryKey: "f"}}, Path: "s3a://filesystem/destination", Format: "json"})} //TableUDFSource = request.CreateTable{ID: "sot-0123456789012362", SourceId: 
MysqlManager.ID, Name: "udfs", Comment: "udfs", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "a", Type: "varchar", PrimaryKey: "f", Length: "10"}}})} //TableUDFDest = request.CreateTable{ID: "sot-0123456789012363", SourceId: MysqlManager.ID, Name: "udfd", Comment: "udfd", Url: typeToJsonString(constants.FlinkTableDefineMysql{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "a", Type: "varchar", PrimaryKey: "f", Length: "10"}}})} //TableHbaseSource = request.CreateTable{ID: "sot-0123456789012364", SourceId: HbaseManager.ID, Name: "testsource", Comment: "hbase source", Url: typeToJsonString(constants.FlinkTableDefineHbase{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "rowkey", Type: "STRING", PrimaryKey: "f", Length: ""}, constants.SqlColumnType{Name: "columna", Type: "ROW<a STRING>"}}})} //TableHbaseDest = request.CreateTable{ID: "sot-0123456789012365", SourceId: HbaseManager.ID, Name: "testdest", Comment: "hbase dest", Url: typeToJsonString(constants.FlinkTableDefineHbase{SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "rowkey", Type: "STRING", PrimaryKey: "f", Length: ""}, constants.SqlColumnType{Name: "columna", Type: "ROW<a STRING>"}}})} //TableFtpSource = request.CreateTable{ID: "sot-0123456789012366", SourceId: FtpManager.ID, Name: "ftpsource", Comment: "ftp source", Url: typeToJsonString(constants.FlinkTableDefineFtp{Path: "/u/", Format: "csv", SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "readName", Type: "string", Comment: "xxx"}, constants.SqlColumnType{Name: "cellPhone", Type: "string", Comment: "xxx"}, {Name: "universityName", Type: "string", Comment: "xxx"}, {Name: "city", Type: "string", Comment: "xxx"}, {Name: "street", Type: "string", Comment: "xxx"}, {Name: "ip", Type: "string", Comment: "xxx"}, {Name: "pt", Type: "AS PROCTIME()"}}, ConnectorOptions: []string{"'username' = 'ftptest'", "'password' 
= '<PASSWORD>'"}})} //TableFtpDest = request.CreateTable{ID: "sot-0123456789012367", SourceId: FtpManager.ID, Name: "ftpdest", Comment: "ftp dest", Url: typeToJsonString(constants.FlinkTableDefineFtp{Path: "/sink.csv", Format: "csv", SqlColumn: []constants.SqlColumnType{constants.SqlColumnType{Name: "readName", Type: "string", Comment: "xxx"}, constants.SqlColumnType{Name: "cellPhone", Type: "string", Comment: "xxx"}, {Name: "universityName", Type: "string", Comment: "xxx"}, {Name: "city", Type: "string", Comment: "xxx"}, {Name: "street", Type: "string", Comment: "xxx"}, {Name: "ip", Type: "string", Comment: "xxx"}}, ConnectorOptions: []string{"'username' = 'ftptest'", "'password' = '<PASSWORD>'"}})} address := "127.0.0.1:9104" lp := glog.NewDefault() ctx = glog.WithContext(context.Background(), lp) conn, err := grpcwrap.NewConn(ctx, &grpcwrap.ClientConfig{ Address: address, }) require.Nil(t, err, "%+v", err) client = smpb.NewSourcemanagerClient(conn) logger := glog.NewDefault() worker := idgenerator.New("") reqId, _ := worker.Take() ln := logger.Clone() ln.WithFields().AddString("rid", reqId) ctx = grpcwrap.ContextWithRequest(context.Background(), ln, reqId) } func errorCode(err error) string { //rpc error: code = Unknown desc = InvalidSourceName return strings.Split(err.Error(), " ")[7] } // Source Manager func Test_CreateSource(t *testing.T) { mainInit(t) Clean(t) var err error _, err = client.Create(ctx, &MysqlManager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &ClickHouseManager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &PGManager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &KafkaManager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &S3Manager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &HbaseManager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &FtpManager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &HDFSManager) require.Nil(t, err, "%+v", err) 
_, err = client.Create(ctx, &NewSpaceManager) require.Nil(t, err, "%+v", err) _, err = client.Create(ctx, &NameErrorManager) require.Equal(t, qerror.InvalidSourceName.Code(), errorCode(err)) _, err = client.Create(ctx, &NameExistsManager) require.Equal(t, qerror.ResourceAlreadyExists.Code(), errorCode(err)) _, err = client.Create(ctx, &SourceTypeErrorManager) require.Equal(t, qerror.NotSupportSourceType.Code(), errorCode(err)) } func managerDescribe(t *testing.T, id string) *response.DescribeSource { var d request.DescribeSource var err error var rep *response.DescribeSource if id == "" { d.SourceId = MysqlManager.SourceId rep, err = client.Describe(ctx, &d) require.Nil(t, err, "%+v", err) require.Equal(t, rep.Info.SourceId, d.SourceId) } else { d.SourceId = id rep, err = client.Describe(ctx, &d) require.Nil(t, err, "%+v", err) require.Equal(t, rep.Info.SourceId, d.SourceId) return rep } return nil } func Test_DescribeSource(t *testing.T) { mainInit(t) managerDescribe(t, "") } func Test_UpdateSource(t *testing.T) { mainInit(t) var i request.UpdateSource var err error i.Name = MysqlManager.Name i.SourceId = MysqlManager.SourceId i.Comment = "update ok" i.SourceType = MysqlManager.SourceType i.Url = MysqlManager.Url _, err = client.Update(ctx, &i) require.Nil(t, err, "%+v", err) require.Equal(t, i.Comment, managerDescribe(t, i.SourceId).Info.Comment) } func Test_PingSource(t *testing.T) { mainInit(t) var p request.PingSource var err error p.SourceType = MysqlManager.SourceType p.Url = MysqlManager.Url _, err = client.PingSource(ctx, &p) require.Nil(t, err, "%+v", err) //p.SourceType = PGManager.SourceType //p.Url = PGManager.Url //_, err = client.PingSource(ctx, &p) //require.NotNil(t, err, "%+v", err) //p.SourceType = ClickHouseManager.SourceType //p.Url = ClickHouseManager.Url //_, err = client.PingSource(ctx, &p) //require.Nil(t, err, "%+v", err) //p.SourceType = KafkaManager.SourceType //p.Url = KafkaManager.Url //_, err = client.PingSource(ctx, &p) 
//require.Nil(t, err, "%+v", err) p.SourceType = S3Manager.SourceType p.Url = S3Manager.Url _, err = client.PingSource(ctx, &p) require.NotNil(t, err, "%+v", err) //p.SourceType = HbaseManager.SourceType //p.Url = HbaseManager.Url //_, err = client.PingSource(ctx, &p) //require.Nil(t, err, "%+v", err) //p.SourceType = FtpManager.SourceType //p.Url = FtpManager.Url //_, err = client.PingSource(ctx, &p) //require.Nil(t, err, "%+v", err) //p.SourceType = HDFSManager.SourceType //p.Url = HDFSManager.Url //_, err = client.PingSource(ctx, &p) //require.Nil(t, err, "%+v", err) } func Test_DisableSource(t *testing.T) { mainInit(t) var v request.DisableSource var err error v.SourceIds = []string{MysqlManager.SourceId, KafkaManager.SourceId} _, err = client.Disable(ctx, &v) require.Nil(t, err, "%+v", err) var i request.UpdateSource i.Name = MysqlManager.Name i.SourceId = MysqlManager.SourceId i.Comment = "update ok" i.SourceType = MysqlManager.SourceType i.Url = MysqlManager.Url _, err = client.Update(ctx, &i) require.NotNil(t, err, "%+v", err) require.Equal(t, qerror.SourceIsDisable.Code(), errorCode(err)) } func Test_EnableSource(t *testing.T) { mainInit(t) var v request.EnableSource var err error v.SourceIds = []string{MysqlManager.SourceId, KafkaManager.SourceId} _, err = client.Enable(ctx, &v) require.Nil(t, err, "%+v", err) } func Test_SourceKind(t *testing.T) { mainInit(t) _, err := client.SourceKind(ctx, &model.EmptyStruct{}) require.Nil(t, err, "%+v", err) } func Test_DataFormat(t *testing.T) { mainInit(t) _, err := client.DataFormat(ctx, &model.EmptyStruct{}) require.Nil(t, err, "%+v", err) } func Test_DataType(t *testing.T) { mainInit(t) _, err := client.DataType(ctx, &model.EmptyStruct{}) require.Nil(t, err, "%+v", err) } func Test_CreateTable(t *testing.T) { var err error mainInit(t) _, err = client.CreateTable(ctx, &MysqlSource) require.Nil(t, err, "%+v", err) _, err = client.CreateTable(ctx, &MysqlDest) require.Nil(t, err, "%+v", err) _, err = 
client.CreateTable(ctx, &ClickHouseSource) require.Nil(t, err, "%+v", err) _, err = client.CreateTable(ctx, &ClickHouseDest) require.Nil(t, err, "%+v", err) _, err = client.CreateTable(ctx, &KafkaSource) require.Nil(t, err, "%+v", err) } func tablesDescribe(t *testing.T, id string) *model.TableInfo { var i request.DescribeTable var err error var rep *response.DescribeTable if id == "" { i.TableId = MysqlSource.TableId rep, err = client.DescribeTable(ctx, &i) require.Nil(t, err, "%+v", err) } else { i.TableId = id rep, err = client.DescribeTable(ctx, &i) require.Nil(t, err, "%+v", err) return rep.Info } return nil } func Test_DescribeTable(t *testing.T) { mainInit(t) tablesDescribe(t, "") } func Test_UpdateTable(t *testing.T) { var i request.UpdateTable var err error mainInit(t) i.Comment = "Update" i.TableId = MysqlSource.TableId i.Name = MysqlSource.Name i.TableSchema = MysqlSource.TableSchema i.TableKind = MysqlSource.TableKind _, err = client.UpdateTable(ctx, &i) require.Nil(t, err, "%+v", err) require.Equal(t, i.Comment, tablesDescribe(t, i.TableId).Comment) } func tablesDelete(t *testing.T, id string) { var i request.DeleteTable var err error if id == "" { i.TableIds = []string{MysqlSource.TableId, MysqlDest.TableId, ClickHouseSource.TableId, ClickHouseDest.TableId, KafkaSource.TableId} _, err = client.DeleteTable(ctx, &i) require.Nil(t, err, "%+v", err) } else { i.TableIds = []string{id} _, err = client.DeleteTable(ctx, &i) require.Nil(t, err, "%+v", err) } } func managerLists(t *testing.T, SpaceId string) *response.ListSource { var i request.ListSource var rep *response.ListSource var err error if SpaceId == "" { i.SpaceId = spaceid i.Limit = 100 i.Offset = 0 rep, err = client.List(ctx, &i) require.Nil(t, err, "%+v", err) i.SpaceId = newspaceid i.Limit = 100 i.Offset = 0 i.Search = "my" rep, err = client.List(ctx, &i) require.Nil(t, err, "%+v", err) require.Equal(t, 1, len(rep.Infos)) require.Equal(t, int64(1), rep.Total) return nil } else { i.SpaceId = 
SpaceId i.Limit = 100 i.Offset = 0 rep, err = client.List(ctx, &i) require.Nil(t, err, "%+v", err) return rep } return nil } func Test_ListSource(t *testing.T) { mainInit(t) managerLists(t, "") } func tablesLists(t *testing.T, SourceId string) *response.ListTable { var i request.ListTable var err error var rep *response.ListTable if SourceId == "" { i.SpaceId = spaceid i.Limit = 100 i.Offset = 0 rep, err = client.ListTable(ctx, &i) require.Nil(t, err, "%+v", err) i.SourceId = MysqlManager.SourceId i.Limit = 100 i.Offset = 0 i.Search = "m" rep, err = client.ListTable(ctx, &i) require.Nil(t, err, "%+v", err) require.Equal(t, 2, len(rep.Infos)) require.Equal(t, int64(2), rep.Total) i.SpaceId = spaceid i.SourceId = MysqlManager.SourceId i.TableKind = model.TableInfo_Source i.Limit = 100 i.Offset = 0 i.Search = "m" rep, err = client.ListTable(ctx, &i) require.Nil(t, err, "%+v", err) require.Equal(t, 1, len(rep.Infos)) require.Equal(t, int64(1), rep.Total) } else { i.SourceId = SourceId i.Limit = 100 i.Offset = 0 rep, err = client.ListTable(ctx, &i) require.Nil(t, err, "%+v", err) return rep } return nil } func Test_ListTable(t *testing.T) { mainInit(t) tablesLists(t, "") } func managerDelete(t *testing.T, id string, iserror bool) { var i request.DeleteSource var err error if id == "" { if iserror == false { i.SourceIds = []string{MysqlManager.SourceId} _, err = client.Delete(ctx, &i) require.Nil(t, err, "%+v", err) Clean(t) } else { i.SourceIds = []string{MysqlManager.SourceId} _, err = client.Delete(ctx, &i) require.NotNil(t, err, "%+v", err) require.Equal(t, qerror.ResourceIsUsing.Code(), errorCode(err)) } } else { i.SourceIds = []string{id} _, err = client.Delete(ctx, &i) require.Nil(t, err, "%+v", err) } } func Clean(t *testing.T) { var ( d request.DeleteWorkspaces ) d.SpaceIds = []string{spaceid, newspaceid} _, err := client.DeleteAll(ctx, &d) require.Nil(t, err, "%+v", err) } func Test_SourceTables(t *testing.T) { var v request.SourceTables var err error 
mainInit(t) v.SourceId = MysqlManager.SourceId _, err = client.SourceTables(ctx, &v) require.Nil(t, err, "%+v", err) //v.SourceId = ClickHouseManager.SourceId //_, err = client.SourceTables(ctx, &v) //require.Nil(t, err, "%+v", err) //v.SourceId = PGManager.SourceId //_, err = client.SourceTables(ctx, &v) //require.Nil(t, err, "%+v", err) } func Test_TableColumns(t *testing.T) { var v request.TableColumns var err error mainInit(t) v.SourceId = MysqlManager.SourceId v.TableName = "sourcemanager" _, err = client.TableColumns(ctx, &v) require.Nil(t, err, "%+v", err) //v.SourceId = ClickHouseManager.SourceId //v.TableName = "zz" //_, err = client.TableColumns(ctx, &v) //require.Nil(t, err, "%+v", err) //v.SourceId = PGManager.SourceId //v.TableName = "zz" //_, err = client.TableColumns(ctx, &v) //require.Nil(t, err, "%+v", err) } func Test_DeleteTable(t *testing.T) { mainInit(t) tablesDelete(t, "") } func Test_DeleteSource(t *testing.T) { mainInit(t) managerDelete(t, "", false) } func Test_Clean(t *testing.T) { mainInit(t) Clean(t) } func Test_CreateTest(t *testing.T) { mainInit(t) Clean(t) // Test_CreateSource(t) // Test_CreateTable(t) }
1.21875
1
broker.go
vChrisR/cf-broker-skeleton
0
397
package main import ( "context" "errors" "code.cloudfoundry.org/lager" "github.com/pivotal-cf/brokerapi" ) type broker struct { services []brokerapi.Service logger lager.Logger env BrokerConfig } func (b *broker) Services(context context.Context) ([]brokerapi.Service, error) { //fmt.Println(b.services) return b.services, nil } func (b *broker) Provision(context context.Context, instanceID string, details brokerapi.ProvisionDetails, asyncAllowed bool) (brokerapi.ProvisionedServiceSpec, error) { return brokerapi.ProvisionedServiceSpec{}, nil } func (b *broker) Deprovision(context context.Context, instanceID string, details brokerapi.DeprovisionDetails, asyncAllowed bool) (brokerapi.DeprovisionServiceSpec, error) { return brokerapi.DeprovisionServiceSpec{}, nil } func (b *broker) GetInstance(context context.Context, instanceID string) (brokerapi.GetInstanceDetailsSpec, error) { return brokerapi.GetInstanceDetailsSpec{}, nil } func (b *broker) Bind(context context.Context, instanceID, bindingID string, details brokerapi.BindDetails, asyncAllowed bool ) (brokerapi.Binding, error) { return brokerapi.Binding{}, errors.New("service does not support bind") } func (b *broker) GetBinding(context context.Context, instanceID string, bindingID string) (brokerapi.GetBindingSpec, error) { return brokerapi.GetBindingSpec{}, nil } func (b *broker) Unbind(context context.Context, instanceID, bindingID string, details brokerapi.UnbindDetails, asyncAllowed bool) (brokerapi.UnbindSpec, error) { return brokerapi.UnbindSpec{}, errors.New("service does not support bind") } func (b *broker) Update(context context.Context, instanceID string, details brokerapi.UpdateDetails, asyncAllowed bool) (brokerapi.UpdateServiceSpec, error) { return brokerapi.UpdateServiceSpec{}, nil } func (b *broker) LastOperation(context context.Context, instanceID string, details brokerapi.PollDetails) (brokerapi.LastOperation, error) { return brokerapi.LastOperation{}, nil } func (b *broker) 
LastBindingOperation(context context.Context, instanceID, bindingID string, details brokerapi.PollDetails) (brokerapi.LastOperation, error) { return brokerapi.LastOperation{}, nil }
1.289063
1
mock_mgmtProvider_test.go
emrearslan/gocb
0
405
// Code generated by mockery v1.0.0. DO NOT EDIT. package gocb import mock "github.com/stretchr/testify/mock" // mockMgmtProvider is an autogenerated mock type for the mgmtProvider type type mockMgmtProvider struct { mock.Mock } // executeMgmtRequest provides a mock function with given fields: req func (_m *mockMgmtProvider) executeMgmtRequest(req mgmtRequest) (*mgmtResponse, error) { ret := _m.Called(req) var r0 *mgmtResponse if rf, ok := ret.Get(0).(func(mgmtRequest) *mgmtResponse); ok { r0 = rf(req) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*mgmtResponse) } } var r1 error if rf, ok := ret.Get(1).(func(mgmtRequest) error); ok { r1 = rf(req) } else { r1 = ret.Error(1) } return r0, r1 }
1.304688
1
demodcore/demodcore.go
N5FPP/segdsp
40
413
package demodcore import "github.com/racerxdl/segdsp/eventmanager" type DemodCore interface { Work(data []complex64) interface{} GetDemodParams() interface{} SetEventManager(ev *eventmanager.EventManager) GetLevel() float32 IsMuted() bool }
0.808594
1
core/services/job/spawner.go
vordev/VOR
0
421
package job import ( "context" "strconv" "sync" "time" "github.com/pkg/errors" "github.com/vordev/VOR/core/logger" "github.com/vordev/VOR/core/services/postgres" "github.com/vordev/VOR/core/store/models" "github.com/vordev/VOR/core/utils" ) //go:generate mockery --name Spawner --output ./mocks/ --case=underscore //go:generate mockery --name Delegate --output ./mocks/ --case=underscore type ( // The job spawner manages the spinning up and spinning down of the long-running // services that perform the work described by job specs. Each active job spec // has 1 or more of these services associated with it. // // At present, Flux Monitor and Offchain Reporting jobs can only have a single // "initiator", meaning that they only require a single service. But the older // "direct request" model allows for multiple initiators, which imply multiple // services. Spawner interface { Start() Stop() CreateJob(ctx context.Context, spec Spec) (int32, error) DeleteJob(ctx context.Context, jobID int32) error RegisterDelegate(delegate Delegate) } spawner struct { orm ORM config Config jobTypeDelegates map[Type]Delegate jobTypeDelegatesMu sync.RWMutex startUnclaimedServicesWorker utils.SleeperTask services map[int32][]Service chStopJob chan int32 utils.StartStopOnce chStop chan struct{} chDone chan struct{} } // TODO(spook): I can't wait for Go generics Delegate interface { JobType() Type ToDBRow(spec Spec) models.JobSpecV2 FromDBRow(spec models.JobSpecV2) Spec ServicesForSpec(spec Spec) ([]Service, error) } ) const checkForDeletedJobsPollInterval = 5 * time.Minute var _ Spawner = (*spawner)(nil) func NewSpawner(orm ORM, config Config) *spawner { s := &spawner{ orm: orm, config: config, jobTypeDelegates: make(map[Type]Delegate), services: make(map[int32][]Service), chStopJob: make(chan int32), chStop: make(chan struct{}), chDone: make(chan struct{}), } s.startUnclaimedServicesWorker = utils.NewSleeperTask( utils.SleeperTaskFuncWorker(s.startUnclaimedServices), ) return s } func (js 
*spawner) Start() { if !js.OkayToStart() { logger.Error("Job spawner has already been started") return } go js.runLoop() } func (js *spawner) Stop() { if !js.OkayToStop() { logger.Error("Job spawner has already been stopped") return } close(js.chStop) <-js.chDone } func (js *spawner) destroy() { js.stopAllServices() err := js.startUnclaimedServicesWorker.Stop() if err != nil { logger.Error(err) } } func (js *spawner) RegisterDelegate(delegate Delegate) { js.jobTypeDelegatesMu.Lock() defer js.jobTypeDelegatesMu.Unlock() if _, exists := js.jobTypeDelegates[delegate.JobType()]; exists { panic("registered job type " + string(delegate.JobType()) + " more than once") } logger.Infof("Registered job type '%v'", delegate.JobType()) js.jobTypeDelegates[delegate.JobType()] = delegate } func (js *spawner) runLoop() { defer close(js.chDone) defer js.destroy() // Initialize the Postgres event listener for created and deleted jobs var newJobEvents <-chan postgres.Event newJobs, err := js.orm.ListenForNewJobs() if err != nil { logger.Warn("Job spawner could not subscribe to new job events, falling back to polling") } else { defer newJobs.Close() newJobEvents = newJobs.Events() } var pgDeletedJobEvents <-chan postgres.Event deletedJobs, err := js.orm.ListenForDeletedJobs() if err != nil { logger.Warn("Job spawner could not subscribe to deleted job events") } else { defer deletedJobs.Close() pgDeletedJobEvents = deletedJobs.Events() } // Initialize the DB poll ticker dbPollTicker := time.NewTicker(js.config.JobPipelineDBPollInterval()) defer dbPollTicker.Stop() // Initialize the poll that checks for deleted jobs and removes them // This is only necessary as a fallback in case the event doesn't fire for some reason // It doesn't need to run very often deletedPollTicker := time.NewTicker(checkForDeletedJobsPollInterval) defer deletedPollTicker.Stop() ctx, cancel := utils.CombinedContext(js.chStop) defer cancel() js.startUnclaimedServicesWorker.WakeUp() for { select { case 
<-newJobEvents: js.startUnclaimedServicesWorker.WakeUp() case <-dbPollTicker.C: js.startUnclaimedServicesWorker.WakeUp() case jobID := <-js.chStopJob: js.stopService(jobID) case <-deletedPollTicker.C: js.checkForDeletedJobs(ctx) case deleteJobEvent := <-pgDeletedJobEvents: js.handlePGDeleteEvent(ctx, deleteJobEvent) case <-js.chStop: return } } } func (js *spawner) startUnclaimedServices() { ctx, cancel := utils.CombinedContext(js.chStop, 5*time.Second) defer cancel() specDBRows, err := js.orm.ClaimUnclaimedJobs(ctx) if err != nil { logger.Errorf("Couldn't fetch unclaimed jobs: %v", err) return } js.jobTypeDelegatesMu.RLock() defer js.jobTypeDelegatesMu.RUnlock() for _, specDBRow := range specDBRows { if _, exists := js.services[specDBRow.ID]; exists { logger.Warnw("Job spawner ORM attempted to claim locally-claimed job, skipping", "jobID", specDBRow.ID) continue } var services []Service for _, delegate := range js.jobTypeDelegates { spec := delegate.FromDBRow(specDBRow) if spec == nil { // This spec isn't owned by this delegate continue } moreServices, err := delegate.ServicesForSpec(spec) if err != nil { logger.Errorw("Error creating services for job", "jobID", specDBRow.ID, "error", err) continue } services = append(services, moreServices...) 
} logger.Infow("Starting services for job", "jobID", specDBRow.ID, "count", len(services)) for _, service := range services { err := service.Start() if err != nil { logger.Errorw("Error creating service for job", "jobID", specDBRow.ID, "error", err) continue } js.services[specDBRow.ID] = append(js.services[specDBRow.ID], service) } } } func (js *spawner) stopAllServices() { for jobID := range js.services { js.stopService(jobID) } } func (js *spawner) stopService(jobID int32) { for _, service := range js.services[jobID] { err := service.Close() if err != nil { logger.Errorw("Error stopping job service", "jobID", jobID, "error", err) } else { logger.Infow("Stopped job service", "jobID", jobID) } } delete(js.services, jobID) } func (js *spawner) checkForDeletedJobs(ctx context.Context) { jobIDs, err := js.orm.CheckForDeletedJobs(ctx) if err != nil { logger.Errorw("failed to CheckForDeletedJobs", "err", err) return } for _, jobID := range jobIDs { js.unloadDeletedJob(ctx, jobID) } } func (js *spawner) unloadDeletedJob(ctx context.Context, jobID int32) { logger.Infow("Unloading deleted job", "jobID", jobID) js.stopService(jobID) ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() if err := js.orm.UnclaimJob(ctx, jobID); err != nil { logger.Errorw("Unexpected error unclaiming job", "jobID", jobID) } } func (js *spawner) handlePGDeleteEvent(ctx context.Context, ev postgres.Event) { jobIDString := ev.Payload jobID64, err := strconv.ParseInt(jobIDString, 10, 32) if err != nil { logger.Errorw("Unexpected error decoding deleted job event payload, expected 32-bit integer", "payload", jobIDString, "channel", ev.Channel) } jobID := int32(jobID64) js.unloadDeletedJob(ctx, jobID) } func (js *spawner) CreateJob(ctx context.Context, spec Spec) (int32, error) { js.jobTypeDelegatesMu.Lock() defer js.jobTypeDelegatesMu.Unlock() delegate, exists := js.jobTypeDelegates[spec.JobType()] if !exists { logger.Errorf("job type '%s' has not been registered with the 
job.Spawner", spec.JobType()) return 0, errors.Errorf("job type '%s' has not been registered with the job.Spawner", spec.JobType()) } ctx, cancel := utils.CombinedContext(js.chStop, ctx) defer cancel() specDBRow := delegate.ToDBRow(spec) err := js.orm.CreateJob(ctx, &specDBRow, spec.TaskDAG()) if err != nil { logger.Errorw("Error creating job", "type", spec.JobType(), "error", err) return 0, err } logger.Infow("Created job", "type", spec.JobType(), "jobID", specDBRow.ID) return specDBRow.ID, err } func (js *spawner) DeleteJob(ctx context.Context, jobID int32) error { if jobID == 0 { return errors.New("will not delete job with 0 ID") } ctx, cancel := utils.CombinedContext(js.chStop, ctx) defer cancel() err := js.orm.DeleteJob(ctx, jobID) if err != nil { logger.Errorw("Error deleting job", "jobID", jobID, "error", err) return err } logger.Infow("Deleted job", "jobID", jobID) select { case <-js.chStop: case js.chStopJob <- jobID: } return nil }
1.554688
2
parser.go
letitbeat/bpf-parser
0
429
package bpf import ( "github.com/alecthomas/participle" "github.com/alecthomas/participle/lexer" "github.com/alecthomas/participle/lexer/ebnf" "log" ) // Filter expressions wrapper type Filter struct { Primitives *Expression ` @@ ` } // Expression consists of one or more Primitives type Expression struct { Primitive *Primitive ` @@ ` Op string `[ @( "and" | "or" ) ` Next *Expression ` @@ ]` } // Qualifiers returns a map containing the qualifiers // of the expression where the key is the qualifier and the // values are the Id's from the primitive func (e *Expression) Qualifiers() map[string][]string { m := make(map[string][]string) for _, q := range e.Primitive.Qualifiers { m[q.String()] = append(m[q.String()], e.Primitive.Id) } next := e.Next for next != nil { for _, q := range next.Primitive.Qualifiers { m[q.String()] = append(m[q.String()], next.Primitive.Id) } next = next.Next } return m } // Primitive consist of an id (name or number) preceded by one or more qualifiers type Primitive struct { Qualifiers []*Qualifier `@@ { @@ }` Id string `(@Mac | @Host | @Number)` } // Qualifier there are tree types of qualifiers in a BPF // expression type, dir, proto type Qualifier struct { Type *Type ` @@` Dir *Direction `| @@` Proto *Protocol `| @@` } func (q *Qualifier) String() string { k := "" if q.Type != nil { switch *q.Type { case QType.Host: k = "host" case QType.Net: k = "net" case QType.Port: k = "port" } } else if q.Proto != nil { switch *q.Proto { case QProtocol.TCP: k = "tcp" case QProtocol.UDP: k = "udp" } } else if q.Dir != nil { switch *q.Dir { case QDirection.Dst: k = "dst" case QDirection.Src: k = "src" } } return k } // Type kind of thing the id name or number refers to. // Possible types are host, net , port type Type struct { Host bool ` @ "host"` Net bool `| @ "net"` Port bool `| @ "port"` } // Direction specify a particular transfer direction to and/or from id. 
// Possible directions are src, dst type Direction struct { Src bool ` @ "src"` Dst bool `| @ "dst"` } // Protocol restricts the match to a particular protocol. // Possible protos are: ether, tcp and udp. E.g., 'ether src foo' 'tcp port 21' type Protocol struct { TCP bool ` @ "tcp"` UDP bool `| @ "udp"` Ether bool `| @ "ether"` } type typeList struct { Host Type Net Type Port Type } var QType = &typeList{ Host: Type{Host: true}, Net: Type{Net: true}, Port: Type{Port: true}, } type directionList struct { Src Direction Dst Direction } var QDirection = &directionList{ Src: Direction{Src: true}, Dst: Direction{Dst: true}, } type protocolList struct { TCP Protocol UDP Protocol Ether Protocol } var QProtocol = &protocolList{ TCP: Protocol{TCP: true}, UDP: Protocol{UDP: true}, Ether: Protocol{Ether: true}, } // Compare compares the qualifier value func (q *Qualifier) Compare(t *Qualifier) bool { if &q == &t { return true } if q.Type != nil && t.Type != nil && *q.Type != *t.Type { return false } if q.Dir != nil && t.Dir != nil && *q.Dir != *t.Dir { return false } if q.Proto != nil && t.Proto != nil && *q.Proto != *t.Proto { return false } return true } var ( bpfLexer = lexer.Must(ebnf.New(` Ident = (alpha | "_") { "_" | alpha | digit } . String = "\"" { "\u0000"…"\uffff"-"\""-"\\" | "\\" any } "\"" . Number = [ "-" | "+" ] ("." | digit) {"." | digit} . Punct = "!"…"/" | ":"…"@" | "["…` + "\"`\"" + ` | "{"…"~" . Whitespace = " " | "\t" | "\n" | "\r" . Mac = ("af:") . Host = (IPv4_1 IPv4_1 IPv4_1 "." IPv4_1) . IPv4_1 = ([ digit ] | [ digit1_9 ][ digit ] | "1"[ digit ]"1"[ digit ] | "2"["0"…"4"][ digit ] | "25"["0"…"5"]) . alpha = "a"…"z" | "A"…"Z" . digit = "0"…"9" . digit1_9 = "1"…"9" . mac_1 = "a"…"f" | "A"…"F" | digit . any = "\u0000"…"\uffff" . 
`)) bpfParser = participle.MustBuild( &Filter{}, participle.Lexer(bpfLexer), participle.Unquote("String"), participle.CaseInsensitive("Ident"), participle.Elide("Whitespace"), // Need to solve left recursion detection first, if possible. // participle.UseLookahead(), ) ) // Parse receives a BPF expression and returns an Filter object // from the parsed expression func Parse(s string) (*Filter, error) { result := &Filter{} error := bpfParser.ParseString(s, result) if error != nil { log.Fatal(error) return nil, error } return result, nil }
2.3125
2
error/gen/grpc/calc/client/client.go
goadesign/examples
164
437
// Code generated by goa v3.5.3, DO NOT EDIT. // // calc gRPC client // // Command: // $ goa gen goa.design/examples/error/design -o // $(GOPATH)/src/goa.design/examples/error package client import ( "context" calcpb "goa.design/examples/error/gen/grpc/calc/pb" goagrpc "goa.design/goa/v3/grpc" goapb "goa.design/goa/v3/grpc/pb" goa "goa.design/goa/v3/pkg" "google.golang.org/grpc" ) // Client lists the service endpoint gRPC clients. type Client struct { grpccli calcpb.CalcClient opts []grpc.CallOption } // NewClient instantiates gRPC client for all the calc service servers. func NewClient(cc *grpc.ClientConn, opts ...grpc.CallOption) *Client { return &Client{ grpccli: calcpb.NewCalcClient(cc), opts: opts, } } // Divide calls the "Divide" function in calcpb.CalcClient interface. func (c *Client) Divide() goa.Endpoint { return func(ctx context.Context, v interface{}) (interface{}, error) { inv := goagrpc.NewInvoker( BuildDivideFunc(c.grpccli, c.opts...), EncodeDivideRequest, DecodeDivideResponse) res, err := inv.Invoke(ctx, v) if err != nil { resp := goagrpc.DecodeError(err) switch message := resp.(type) { case *calcpb.DivideDivByZeroError: return nil, NewDivideDivByZeroError(message) case *goapb.ErrorResponse: return nil, goagrpc.NewServiceError(message) default: return nil, goa.Fault(err.Error()) } } return res, nil } }
1.289063
1
server/models/ScriptParameter.go
Mallekoppie/script-workflow-engine
0
445
package models type ScriptParameter struct { Name string `json:"name"` Description string `json:"description"` DataType string `json:"data_type"` }
0.347656
0
api/server/handlers/gitinstallation/get_accounts.go
lsnow99/porter
2
453
package gitinstallation import ( "context" "net/http" "sort" "time" "github.com/google/go-github/v41/github" "github.com/porter-dev/porter/api/server/authz" "github.com/porter-dev/porter/api/server/handlers" "github.com/porter-dev/porter/api/server/shared" "github.com/porter-dev/porter/api/server/shared/apierrors" "github.com/porter-dev/porter/api/server/shared/config" "github.com/porter-dev/porter/api/types" "golang.org/x/oauth2" "gorm.io/gorm" ) type GetGithubAppAccountsHandler struct { handlers.PorterHandlerReadWriter authz.KubernetesAgentGetter } func NewGetGithubAppAccountsHandler( config *config.Config, decoderValidator shared.RequestDecoderValidator, writer shared.ResultWriter, ) *GetGithubAppAccountsHandler { return &GetGithubAppAccountsHandler{ PorterHandlerReadWriter: handlers.NewDefaultPorterHandler(config, decoderValidator, writer), } } func (c *GetGithubAppAccountsHandler) getOrgList(ctx context.Context, client *github.Client, orgsChan chan<- *github.Organization, errChan chan<- error) { defer close(orgsChan) defer close(errChan) page := 1 for { select { case <-ctx.Done(): return default: orgs, pages, err := client.Organizations.List(context.Background(), "", &github.ListOptions{ PerPage: 100, Page: page, }) if err != nil { errChan <- err return } for _, org := range orgs { orgsChan <- org } if pages.NextPage == 0 { return } else { page = pages.NextPage } } } } func (c *GetGithubAppAccountsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { tok, err := GetGithubAppOauthTokenFromRequest(c.Config(), r) if err != nil { c.HandleAPIError(w, r, apierrors.NewErrForbidden(err)) return } client := github.NewClient(c.Config().GithubAppConf.Client(oauth2.NoContext, tok)) res := &types.GetGithubAppAccountsResponse{} resultChannel := make(chan *github.Organization, 10) errChan := make(chan error) ctx, cancel := context.WithTimeout(r.Context(), 3*time.Second) defer cancel() go c.getOrgList(ctx, client, resultChannel, errChan) resultOrErrorReader: for { 
select { case result, ok := <-resultChannel: if ok { res.Accounts = append(res.Accounts, *result.Login) } else { // channel has been closed now break resultOrErrorReader } case err, ok := <-errChan: if ok { c.HandleAPIError(w, r, apierrors.NewErrInternal(err)) return } else { // nothing in error, must be a close event break resultOrErrorReader } } } authUser, _, err := client.Users.Get(r.Context(), "") if err != nil { c.HandleAPIError(w, r, apierrors.NewErrInternal(err)) return } res.Username = *authUser.Login // check if user has app installed in their account installation, err := c.Repo().GithubAppInstallation().ReadGithubAppInstallationByAccountID(*authUser.ID) if err != nil && err != gorm.ErrRecordNotFound { c.HandleAPIError(w, r, apierrors.NewErrInternal(err)) return } if installation != nil { res.Accounts = append(res.Accounts, *authUser.Login) } sort.Strings(res.Accounts) c.WriteResult(w, r, res) }
1.351563
1
config/source/file/options.go
ijayer/go-micro
37
461
package file import ( "context" "github.com/micro/go-micro/v2/config/source" ) type filePathKey struct{} // WithPath sets the path to file func WithPath(p string) source.Option { return func(o *source.Options) { if o.Context == nil { o.Context = context.Background() } o.Context = context.WithValue(o.Context, filePathKey{}, p) } }
1.0625
1
pkg/porter/plugins_test.go
DARK-art108/porter
477
469
package porter import ( "testing" "get.porter.sh/porter/pkg/config" "get.porter.sh/porter/pkg/pkgmgmt" "get.porter.sh/porter/pkg/plugins" "get.porter.sh/porter/pkg/printer" "get.porter.sh/porter/pkg/storage/crudstore" "get.porter.sh/porter/pkg/storage/filesystem" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestRunInternalPluginOpts_Validate(t *testing.T) { cfg := config.NewTestConfig(t) var opts RunInternalPluginOpts t.Run("no key", func(t *testing.T) { err := opts.Validate(nil, cfg.Config) require.Error(t, err) assert.Equal(t, err.Error(), "The positional argument KEY was not specified") }) t.Run("too many keys", func(t *testing.T) { err := opts.Validate([]string{"foo", "bar"}, cfg.Config) require.Error(t, err) assert.Equal(t, err.Error(), "Multiple positional arguments were specified but only one, KEY is expected") }) t.Run("valid key", func(t *testing.T) { err := opts.Validate([]string{filesystem.PluginKey}, cfg.Config) require.NoError(t, err) assert.Equal(t, opts.selectedInterface, crudstore.PluginInterface) assert.NotNil(t, opts.selectedPlugin) }) t.Run("invalid key", func(t *testing.T) { err := opts.Validate([]string{"foo"}, cfg.Config) require.Error(t, err) assert.Equal(t, err.Error(), `invalid plugin key specified: "foo"`) }) } func TestPorter_PrintPlugins(t *testing.T) { t.Run("table", func(t *testing.T) { p := NewTestPorter(t) opts := PrintPluginsOptions{ PrintOptions: printer.PrintOptions{ Format: printer.FormatTable, }, } err := p.PrintPlugins(opts) require.Nil(t, err) expected := `Name Version Author plugin1 v1.0 Porter Authors plugin2 v1.0 Porter Authors unknown v1.0 Porter Authors ` actual := p.TestConfig.TestContext.GetOutput() assert.Equal(t, expected, actual) }) t.Run("yaml", func(t *testing.T) { p := NewTestPorter(t) opts := PrintPluginsOptions{ PrintOptions: printer.PrintOptions{ Format: printer.FormatYaml, }, } err := p.PrintPlugins(opts) require.Nil(t, err) expected := `- name: plugin1 versioninfo: 
version: v1.0 commit: abc123 author: Porter Authors implementations: - type: storage name: blob - type: storage name: mongo - name: plugin2 versioninfo: version: v1.0 commit: abc123 author: Porter Authors implementations: - type: storage name: blob - type: storage name: mongo - name: unknown versioninfo: version: v1.0 commit: abc123 author: Porter Authors implementations: [] ` actual := p.TestConfig.TestContext.GetOutput() assert.Equal(t, expected, actual) }) t.Run("json", func(t *testing.T) { p := NewTestPorter(t) opts := PrintPluginsOptions{ PrintOptions: printer.PrintOptions{ Format: printer.FormatJson, }, } err := p.PrintPlugins(opts) require.Nil(t, err) expected := `[ { "name": "plugin1", "version": "v1.0", "commit": "abc123", "author": "<NAME>", "implementations": [ { "type": "storage", "implementation": "blob" }, { "type": "storage", "implementation": "mongo" } ] }, { "name": "plugin2", "version": "v1.0", "commit": "abc<PASSWORD>", "author": "<NAME>", "implementations": [ { "type": "storage", "implementation": "blob" }, { "type": "storage", "implementation": "mongo" } ] }, { "name": "unknown", "version": "v1.0", "commit": "abc<PASSWORD>", "author": "<NAME>", "implementations": null } ] ` actual := p.TestConfig.TestContext.GetOutput() assert.Equal(t, expected, actual) }) } func TestPorter_ShowPlugin(t *testing.T) { t.Run("table", func(t *testing.T) { p := NewTestPorter(t) opts := ShowPluginOptions{Name: "plugin1"} opts.Format = printer.FormatTable err := p.ShowPlugin(opts) require.NoError(t, err, "ShowPlugin failed") expected := `Name: plugin1 Version: v1.0 Commit: abc123 Author: <NAME> --------------------------- Type Implementation --------------------------- storage blob storage mongo ` actual := p.TestConfig.TestContext.GetOutput() assert.Equal(t, expected, actual) }) t.Run("yaml", func(t *testing.T) { p := NewTestPorter(t) opts := ShowPluginOptions{Name: "plugin1"} opts.Format = printer.FormatYaml err := p.ShowPlugin(opts) require.NoError(t, err, 
"ShowPlugin failed") expected := `name: plugin1 versioninfo: version: v1.0 commit: abc123 author: <NAME> implementations: - type: storage name: blob - type: storage name: mongo ` actual := p.TestConfig.TestContext.GetOutput() assert.Equal(t, expected, actual) }) t.Run("json", func(t *testing.T) { p := NewTestPorter(t) opts := ShowPluginOptions{Name: "plugin1"} opts.Format = printer.FormatJson err := p.ShowPlugin(opts) require.NoError(t, err, "ShowPlugin failed") expected := `{ "name": "plugin1", "version": "v1.0", "commit": "abc123", "author": "<NAME>", "implementations": [ { "type": "storage", "implementation": "blob" }, { "type": "storage", "implementation": "mongo" } ] } ` actual := p.TestConfig.TestContext.GetOutput() assert.Equal(t, expected, actual) }) } func TestPorter_InstallPlugin(t *testing.T) { p := NewTestPorter(t) opts := plugins.InstallOptions{} opts.URL = "https://example.com" err := opts.Validate([]string{"plugin1"}) require.NoError(t, err, "Validate failed") err = p.InstallPlugin(opts) require.NoError(t, err, "InstallPlugin failed") wantOutput := "installed plugin1 plugin v1.0 (abc123)\n" gotOutput := p.TestConfig.TestContext.GetOutput() assert.Contains(t, wantOutput, gotOutput) } func TestPorter_UninstallPlugin(t *testing.T) { p := NewTestPorter(t) opts := pkgmgmt.UninstallOptions{} err := opts.Validate([]string{"plugin1"}) require.NoError(t, err, "Validate failed") err = p.UninstallPlugin(opts) require.NoError(t, err, "UninstallPlugin failed") wantOutput := "Uninstalled plugin1 plugin" gotoutput := p.TestConfig.TestContext.GetOutput() assert.Contains(t, wantOutput, gotoutput) }
1.609375
2
countries.go
Onhil/Openskytemp
1
477
package main //Countries //Due to the sheer size of the array, it has been decided to just keep it in it's own file //Country keeps the names for all the countries in database in an array for time reasons var Country = []string{ "Papua_New_Guinea", "Greenland", "Iceland", "Canada", "Algeria", "Benin", "Burkina_Faso", "Ghana", "Cote_d'Ivoire", "Nigeria", "Niger", "Tunisia", "Togo", "Belgium", "Germany", "Estonia", "Finland", "United_Kingdom", "Falkland_Islands", "Netherlands", "Ireland", "Denmark", "Luxemburg", "Norway", "Poland", "Sweden", "South_Africa", "Botswana", "Congo_(Brazzaville)", "Congo_(Kinshasa)", "Swaziland", "Central_African_Republic", "Equatorial Guinea", "Saint Helena", "Mauritius", "British_Indian_Ocean_Territory", "Cameroon", "Zambia", "Comoros", "Mayotte", "Reunion", "Madagascar", "Angola", "Gabon", "Sao Tome and Principe", "Mozambique", "Seychelles", "Chad", "Zimbabwe", "Malawi", "Lesotho", "Mali", "Gambia", "Spain", "Sierra_Leone", "Guinea-Bissau", "Liberia", "Morocco", "Senegal", "Mauritania", "Guinea", "Cape_Verde", "Ethiopia", "Burundi", "Somalia", "Egypt", "Kenya", "Libya", "Rwanda", "Sudan", "South_Sudan", "Tanzania", "Uganda", "Albania", "Bulgaria", "Cyprus", "Croatia", "France", "Greece", "Hungary", "Italy", "Slovenia", "Czech_Republic", "Israel", "Malta", "Austria", "Portugal", "Bosnia_and_Herzegovina", "Romania", "Switzerland", "Turkey", "Moldova", "Macedonia", "Gibraltar", "Serbia", "Slovakia", "Turks_and_Caicos_Islands", "Dominican_Republic", "Guatemala", "Honduras", "Jamaica", "Mexico", "Nicaragua", "Panama", "Costa_Rica", "El_Salvador", "Haiti", "Cuba", "Cayman_Islands", "Bahamas", "Belize", "Cook_Islands", "Fiji", "Tonga", "Kiribati", "Wallis_and_Futuna", "Samoa", "French_Polunesia", "Vanuatu", "New_Caledonia", "New_Zealand", "Afghanistan", "Bahrain", "Saudi_Arabia", "Iran", "Jordan", "West_Bank", "Kuwait", "Lebanon", "United_Arab_Emirates", "Oman", "Pakistan", "Iraq", "Syria", "Qatar", "Northern_Mariana_Islands", "Guam", 
"MArshall_Islands", "Midway_Islands", "Micronesia", "Palau", "Taiwan", "Japan", "South_Korea", "Philippines", "Argentina", "Brazil", "Chile", "Ecuador", "Paraguay", "Colombia", "Bolivia", "Suriname", "French_Guiana", "Peru", "Uruguay", "Venezuela", "Guyana", "Antigua_and_Barbuda", "Barbados", "Dominica", "Martinique", "Guadeloupe", "Grenada", "Virgin_Islands", "Puerto_Rico", "Saint_Kitts_and_Nevis", "Saint_Lucia", "Aruba", "Netherlands_Antilles", "Anguilla", "Trinidad_and_Tobago", "British_Virgin_Islands", "Saint_Vincent_and_the_Grenadines", "Kazakhstan", "Azerbaijan", "Russia", "Ukraine", "Belarus", "Turkmenistan", "Tajikistan", "Uzbekistan", "India", "Sri_Lanka", "Cambodia", "Bangladesh", "Hong_Kong", "Laos", "Macau", "Nepal", "Bhutan", "Maldives", "Thailand", "Vietnam", "Burma", "Indonesia", "Malaysia", "Brunei", "East_Timor", "Singapore", "Australia", "China", "United_States", "Lativa", "Lithuania", "Armenia", "Eritrea", "Palestine", "Georgia", "Yemen", "Bermuda", "Solomon_Islands", "Narau", "Tuvalu", "Namibia", "Djibouti", "Montserrat", "Johnston_Atoll", "Western_Sahara", "Niue", "Cocos_(Keeling)_Islands", "Mongolia", "Myanmar", "Svalbard", "Antartica", "American_Samoa", "Wake_Island", "Kyrgyzstan", "Faroe_Islands", "North_Korea", }
1.289063
1
fsm/fsm_test.go
ojkelly/go
1
485
package fsm_test import ( "testing" ) // func Test_Counter(t *testing.T) { // // States a machine can be in ------------------------------------------------ // const ( // // the first value (your zero-value) should be the default // Inactive fsm.State = iota // Active // ) // stateNames := fsm.StateNames{ // Inactive: "Inactive", // Active: "Active", // } // // Events that can change state ---------------------------------------------- // const ( // Activate fsm.Event = iota // Deactivate // Increment // Decrement // ) // eventNames := fsm.EventNames{ // Activate: "Activate", // Deactivate: "Deactivate", // Increment: "Increment", // Decrement: "Decrement", // } // // ContextKeys for storing extra state --------------------------------------- // const ( // KeyCounter fsm.ContextKey = iota // KeyIsReady // ) // contextKeyNames := fsm.ContextKeyNames{ // KeyCounter: "Counter", // KeyIsReady: "IsReady", // } // // Event Handlers ------------------------------------------------------------ // errorHandler := func(m *fsm.Machine, current fsm.State, next fsm.State, machineError fsm.MachineError) { // fmt.Println("Error: Left", m.GetNameForState(current), "entered", m.GetNameForState(next), machineError) // m.SendEvent(Deactivate) // } // successHandler := func(m *fsm.Machine, current fsm.State, next fsm.State, event fsm.TransitionEvent) { // fmt.Println("Success: Left", m.GetNameForState(current), "entered", m.GetNameForState(next)) // } // logEvent := func(m *fsm.Machine, current fsm.State, next fsm.State, event fsm.TransitionEvent) { // fmt.Println("Left", m.GetNameForState(current), "entered", m.GetNameForState(next), event) // } // guardActive := func(m *fsm.Machine, current fsm.State, next fsm.State) bool { // if v := m.GetContext(KeyIsReady); v != nil { // ready := v.(bool) // return ready // } // return false // } // // Machine Creator ----------------------------------------------------------- // machine := fsm.New( // // machine ID // "counterExample", // 1, // 
// initial state // Inactive, // // Context Keys // fsm.Context{ // KeyIsReady: fsm.ContextMeta{ // Protected: false, // Inital: false, // }, // KeyCounter: fsm.ContextMeta{ // Protected: false, // this can only be changed by events // Inital: 0, // }, // }, // // Possible events // []fsm.Event{Activate, Deactivate, Increment, Decrement}, // // State Map // fsm.States{ // // Inactive state // Inactive: fsm.StateNode{ // // Events that Inactive will transition on // Events: fsm.EventToTransition{ // // On Activate event tranisition to Active // Activate: fsm.Transition{ // State: Active, // Guard: guardActive, // Entry: logEvent, // Exit: func(m *fsm.Machine, current fsm.State, next fsm.State, event fsm.TransitionEvent) { // m.SetContext(KeyIsReady, false) // }, // }, // }, // }, // // Active state // Active: fsm.StateNode{ // Error: errorHandler, // Success: successHandler, // Events: fsm.EventToTransition{ // Increment: fsm.Transition{ // State: Active, // UpdateContext: func( // m *fsm.Machine, // current fsm.State, // next fsm.State, // event fsm.TransitionEvent, // ) ( // update fsm.UpdateContext, // err error, // ) { // update = fsm.UpdateContext{} // if v := m.GetContext(KeyCounter); v != nil { // update[KeyCounter] = v.(int) + 1 // } else { // err = fmt.Errorf("Unable to update KeyCounter") // } // return // }, // }, // Deactivate: fsm.Transition{ // State: Inactive, // }, // }, // }, // }, // // Machine level handlers // errorHandler, // ) // // This is optional, but useful if you want to enhance your logging, or // // you have a large number of states // machine.AddStateNames(stateNames) // machine.AddEventNames(eventNames) // machine.AddContextKeyNames(contextKeyNames) // assert.Equal(t, machine.State(), Inactive, "initial state should be Inactive") // assert.Equal( // t, // machine.GetNameForState(machine.State()), // stateNames[Inactive], // "our state names were set correctly", // ) // // Try to increment - nothing should happen as we're Inactive at 
the moment // machine.SendEvent(Increment) // counter := machine.GetContext(KeyCounter).(int) // assert.Equal(t, counter, 0, "our counter should still be 0") // isReady := machine.GetContext(KeyIsReady).(bool) // assert.Equal(t, isReady, false, "The machine shouldn't be ready yet") // machine.SetContext(KeyIsReady, true) // isReady = machine.GetContext(KeyIsReady).(bool) // assert.Equal(t, isReady, true, "context isReady should be true now") // machine.SendEvent(Activate) // assert.Equal(t, machine.State(), Active, "machine state should be Active") // // Increment 3 times // machine.SendEvent(Increment) // machine.SendEvent(Increment) // machine.SendEvent(Increment) // counter = machine.GetContext(KeyCounter).(int) // assert.Equal(t, counter, 3, "our counter should now be 3") // machine.SendEvent(Deactivate) // assert.Equal(t, machine.State(), Inactive, "machine state should now be Inactive") // } func Test_TCPMachine(t *testing.T) { // All the possible states for this FSM // const ( // NoConnection fsm.State = "NoConnection" // ConnectionEstablished fsm.State = "ConnectionEstablished" // // A mock TCP handshake // SendACK fsm.State = "SendACK" // RecieveSYN fsm.State = "RecieveSYN" // SendSYNACK fsm.State = "SendSYNACK" // ) // const ( // RemoteIp fsm.ContextKey = "RemoteIp" // ) // machine := fsm.New(NoConnection, // fsm.NewContextKeys( // RemoteIp, // )) // machine.Set("remoteIp", "0.0.0.0") // fmt.Printf("Current State: %v \n", machine.State()) // fmt.Printf("end of test %v\n", machine.Get("remoteIp").(string)) // t.Fail() }
2.21875
2
src/control/cmd/daos_server/network.go
daos-stack/daos-core
0
493
// // (C) Copyright 2019-2022 Intel Corporation. // // SPDX-License-Identifier: BSD-2-Clause-Patent // package main import ( "context" "strings" "github.com/daos-stack/daos/src/control/cmd/dmg/pretty" "github.com/daos-stack/daos/src/control/common" "github.com/daos-stack/daos/src/control/common/cmdutil" "github.com/daos-stack/daos/src/control/lib/control" "github.com/daos-stack/daos/src/control/lib/hardware" "github.com/daos-stack/daos/src/control/lib/hardware/hwprov" ) type networkCmd struct { Scan networkScanCmd `command:"scan" description:"Scan for network interface devices on local server"` } // networkScanCmd is the struct representing the command to scan the machine for network interface devices // that match the given fabric provider. type networkScanCmd struct { cfgCmd cmdutil.LogCmd FabricProvider string `short:"p" long:"provider" description:"Filter device list to those that support the given OFI provider or 'all' for all available (default is the provider specified in daos_server.yml)"` } func (cmd *networkScanCmd) Execute(_ []string) error { fabricScanner := hwprov.DefaultFabricScanner(cmd.Logger) results, err := fabricScanner.Scan(context.Background()) if err != nil { return nil } if cmd.FabricProvider == "" { cmd.FabricProvider = cmd.config.Fabric.Provider } hf := fabricInterfaceSetToHostFabric(results, cmd.FabricProvider) hfm := make(control.HostFabricMap) if err := hfm.Add("localhost", hf); err != nil { return err } var bld strings.Builder if err := pretty.PrintHostFabricMap(hfm, &bld); err != nil { return err } cmd.Info(bld.String()) return nil } func fabricInterfaceSetToHostFabric(fis *hardware.FabricInterfaceSet, filterProvider string) *control.HostFabric { hf := &control.HostFabric{} for _, fiName := range fis.Names() { fi, err := fis.GetInterface(fiName) if err != nil { continue } if fi.DeviceClass == hardware.Loopback { // Ignore loopback continue } netIFs := common.NewStringSet(fi.NetInterfaces.ToSlice()...) 
if len(fi.NetInterfaces) == 0 { netIFs.Add(fi.Name) } for _, name := range netIFs.ToSlice() { for _, provider := range fi.Providers.ToSlice() { if filterProvider == "all" || strings.HasPrefix(provider, filterProvider) { hf.AddInterface(&control.HostFabricInterface{ Provider: provider, Device: name, NumaNode: uint32(fi.NUMANode), NetDevClass: fi.DeviceClass, }) } } } } return hf }
1.53125
2
chatlog.go
dqn/chatlog
9
501
package chatlog import ( "encoding/json" "fmt" "io/ioutil" "net/http" "net/url" "strings" ) const ( baseURL = "https://www.youtube.com" userAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36" ) type Chatlog struct { videoID string client *http.Client } type chatsResult struct { Action []ContinuationAction Continuation string } func New(videoID string) *Chatlog { return &Chatlog{videoID, &http.Client{}} } func (c *Chatlog) HandleChat(handler func(renderer ChatRenderer) error) error { cont, err := c.getInitialContinuation() if err != nil { return err } for cont != "" { result, err := c.fecthChats(cont) if err != nil { return err } cont = result.Continuation for _, continuationAction := range result.Action { for _, chatAction := range continuationAction.ReplayChatItemAction.Actions { r := selectChatRenderer(&chatAction.AddChatItemAction.Item) if r == nil { continue } if err = handler(r); err != nil { return err } } } } return nil } func selectChatRenderer(chatItem *ChatItem) ChatRenderer { switch { case chatItem.LiveChatViewerEngagementMessageRenderer.ID != "": return &chatItem.LiveChatViewerEngagementMessageRenderer case chatItem.LiveChatTextMessageRenderer.ID != "": return &chatItem.LiveChatTextMessageRenderer case chatItem.LiveChatMembershipItemRenderer.ID != "": return &chatItem.LiveChatMembershipItemRenderer case chatItem.LiveChatPaidMessageRenderer.ID != "": return &chatItem.LiveChatPaidMessageRenderer case chatItem.LiveChatPlaceholderItemRenderer.ID != "": return &chatItem.LiveChatPlaceholderItemRenderer default: return nil } } func (c *Chatlog) fetch(path string, values *url.Values) ([]byte, error) { req, err := http.NewRequest("GET", baseURL, nil) if err != nil { return nil, err } req.Header.Set("User-Agent", userAgent) req.URL.Path = path req.URL.RawQuery = values.Encode() resp, err := c.client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() return 
ioutil.ReadAll(resp.Body) } func retrieveContinuation(body []byte) (string, error) { s := string(body) query := `"continuation":"` index := strings.LastIndex(s, query) if index == -1 { return "", fmt.Errorf("cannot find continuation") } b := make([]byte, 256) for i := index + len(query); s[i] != '"'; i++ { b = append(b, s[i]) } return string(b), nil } func (c *Chatlog) getInitialContinuation() (string, error) { v := url.Values{"v": {c.videoID}} body, err := c.fetch("/watch", &v) if err != nil { return "", err } cont, err := retrieveContinuation(body) if err != nil { return "", err } return cont, nil } func (c *Chatlog) fecthChats(continuation string) (*chatsResult, error) { v := &url.Values{ "pbj": {"1"}, "continuation": {continuation}, } body, err := c.fetch("/live_chat_replay/get_live_chat_replay", v) if err != nil { return nil, err } var chat ChatResponse if err := json.Unmarshal(body, &chat); err != nil { return nil, err } if errors := chat.Response.ResponseContext.Errors.Error; errors != nil { err = fmt.Errorf(errors[0].ExternalErrorMessage) return nil, err } cont := chat.Response.ContinuationContents.LiveChatContinuation r := chatsResult{ Action: cont.Actions, Continuation: cont.Continuations[0].LiveChatReplayContinuationData.Continuation, } return &r, nil }
1.703125
2
test/e2e/happypath_test.go
adesaegher/gloo
0
509
package e2e_test import ( "context" "fmt" "net" "os" "strings" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/solo-io/gloo/test/services" "github.com/solo-io/go-utils/kubeutils" "github.com/solo-io/solo-kit/pkg/api/v1/clients" "github.com/solo-io/solo-kit/test/helpers" "github.com/solo-io/solo-kit/test/setup" gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1" static_plugin_gloo "github.com/solo-io/gloo/projects/gloo/pkg/api/v1/plugins/static" "github.com/solo-io/gloo/projects/gloo/pkg/defaults" gloohelpers "github.com/solo-io/gloo/test/helpers" "github.com/solo-io/gloo/test/v1helpers" "github.com/solo-io/solo-kit/pkg/api/v1/resources/core" kubev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" kubecore "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/validation" ) var _ = Describe("Happypath", func() { var ( ctx context.Context cancel context.CancelFunc testClients services.TestClients envoyInstance *services.EnvoyInstance tu *v1helpers.TestUpstream envoyPort uint32 ) BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) var err error envoyInstance, err = envoyFactory.NewEnvoyInstance() Expect(err).NotTo(HaveOccurred()) tu = v1helpers.NewTestHttpUpstream(ctx, envoyInstance.LocalAddr()) envoyPort = services.NextBindPort() }) AfterEach(func() { if envoyInstance != nil { envoyInstance.Clean() } cancel() }) TestUpstremReachable := func() { v1helpers.TestUpstremReachable(envoyPort, tu, nil) } Describe("in memory", func() { var up *gloov1.Upstream BeforeEach(func() { ns := defaults.GlooSystem ro := &services.RunOptions{ NsToWrite: ns, NsToWatch: []string{"default", ns}, WhatToRun: services.What{ DisableGateway: true, DisableUds: true, DisableFds: true, }, } testClients = services.RunGlooGatewayUdsFds(ctx, ro) err := envoyInstance.Run(testClients.GlooPort) Expect(err).NotTo(HaveOccurred()) up = tu.Upstream _, err = 
testClients.UpstreamClient.Write(up, clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) }) It("should not crash", func() { proxycli := testClients.ProxyClient proxy := getTrivialProxyForUpstream("default", envoyPort, up.Metadata.Ref()) _, err := proxycli.Write(proxy, clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) TestUpstremReachable() }) Context("ssl", func() { var upSsl *gloov1.Upstream BeforeEach(func() { sslSecret := &gloov1.Secret{ Metadata: core.Metadata{ Name: "secret", Namespace: "default", }, Kind: &gloov1.Secret_Tls{ Tls: &gloov1.TlsSecret{ RootCa: gloohelpers.Certificate(), }, }, } _, err := testClients.SecretClient.Write(sslSecret, clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) // create ssl proxy copyUp := *tu.Upstream copyUp.Metadata.Name = copyUp.Metadata.Name + "-ssl" port := tu.Upstream.UpstreamSpec.UpstreamType.(*gloov1.UpstreamSpec_Static).Static.Hosts[0].Port addr := tu.Upstream.UpstreamSpec.UpstreamType.(*gloov1.UpstreamSpec_Static).Static.Hosts[0].Addr sslport := v1helpers.StartSslProxy(ctx, port) ref := sslSecret.Metadata.Ref() copyUp.UpstreamSpec = &gloov1.UpstreamSpec{ UpstreamType: &gloov1.UpstreamSpec_Static{ Static: &static_plugin_gloo.UpstreamSpec{ Hosts: []*static_plugin_gloo.Host{{ Addr: addr, Port: sslport, }}, }, }, } copyUp.UpstreamSpec.SslConfig = &gloov1.UpstreamSslConfig{ SslSecrets: &gloov1.UpstreamSslConfig_SecretRef{ SecretRef: &ref, }, } upSsl = &copyUp _, err = testClients.UpstreamClient.Write(upSsl, clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) }) It("should work with ssl", func() { proxycli := testClients.ProxyClient proxy := getTrivialProxyForUpstream("default", envoyPort, upSsl.Metadata.Ref()) _, err := proxycli.Write(proxy, clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) TestUpstremReachable() }) }) Context("sad path", func() { It("should error the proxy with two listeners with the same bind address", func() { proxycli := testClients.ProxyClient proxy := 
getTrivialProxyForUpstream("default", envoyPort, up.Metadata.Ref()) // add two identical listeners two see errors come up proxy.Listeners = append(proxy.Listeners, proxy.Listeners[0]) _, err := proxycli.Write(proxy, clients.WriteOpts{}) Expect(err).NotTo(HaveOccurred()) getStatus := func() (core.Status_State, error) { updatedProxy, err := proxycli.Read(proxy.Metadata.Namespace, proxy.Metadata.Name, clients.ReadOpts{}) if err != nil { return 0, err } return updatedProxy.Status.State, nil } Eventually(getStatus, "10s").ShouldNot(Equal(core.Status_Pending)) st, err := getStatus() Expect(err).NotTo(HaveOccurred()) Expect(st).To(Equal(core.Status_Rejected)) }) }) }) Describe("kubernetes happy path", func() { BeforeEach(func() { if os.Getenv("RUN_KUBE_TESTS") != "1" { Skip("This test creates kubernetes resources and is disabled by default. To enable, set RUN_KUBE_TESTS=1 in your env.") } }) var ( namespace string writeNamespace string cfg *rest.Config kubeClient kubernetes.Interface svc *kubev1.Service ) BeforeEach(func() { namespace = "" writeNamespace = "" var err error svc = nil cfg, err = kubeutils.GetConfig("", "") Expect(err).NotTo(HaveOccurred()) kubeClient, err = kubernetes.NewForConfig(cfg) Expect(err).NotTo(HaveOccurred()) }) prepNamespace := func() { if namespace == "" { namespace = "gloo-e2e-" + helpers.RandString(8) } err := setup.SetupKubeForTest(namespace) Expect(err).NotTo(HaveOccurred()) svc, err = kubeClient.CoreV1().Services(namespace).Create(&kubev1.Service{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: "headlessservice", }, Spec: kubev1.ServiceSpec{ Ports: []kubev1.ServicePort{ { Name: "foo", Port: int32(tu.Port), }, }, }, }) Expect(err).NotTo(HaveOccurred()) _, err = kubeClient.CoreV1().Endpoints(namespace).Create(&kubev1.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: svc.Name, }, Subsets: []kubev1.EndpointSubset{{ Addresses: []kubev1.EndpointAddress{{ IP: getIpThatsNotLocalhost(), Hostname: "localhost", }}, 
Ports: []kubev1.EndpointPort{{ Port: int32(tu.Port), }}, }}, }) Expect(err).NotTo(HaveOccurred()) } AfterEach(func() { if namespace != "" { setup.TeardownKube(namespace) } }) getUpstream := func() (*gloov1.Upstream, error) { l, err := testClients.UpstreamClient.List(writeNamespace, clients.ListOpts{}) if err != nil { return nil, err } for _, u := range l { if strings.Contains(u.Metadata.Name, svc.Name) && strings.Contains(u.Metadata.Name, svc.Namespace) { return u, nil } } return nil, fmt.Errorf("not found") } getStatus := func() (core.Status_State, error) { u, err := getUpstream() if err != nil { return core.Status_Pending, err } return u.Status.State, nil } Context("specific namespace", func() { BeforeEach(func() { prepNamespace() writeNamespace = namespace ro := &services.RunOptions{ NsToWrite: writeNamespace, NsToWatch: []string{"default", namespace}, WhatToRun: services.What{ DisableGateway: true, }, KubeClient: kubeClient, } testClients = services.RunGlooGatewayUdsFds(ctx, ro) role := namespace + "~proxy" err := envoyInstance.RunWithRole(role, testClients.GlooPort) Expect(err).NotTo(HaveOccurred()) Eventually(getStatus, "20s", "0.5s").Should(Equal(core.Status_Accepted)) }) It("should discover service", func() { up, err := getUpstream() Expect(err).NotTo(HaveOccurred()) proxycli := testClients.ProxyClient proxy := getTrivialProxyForUpstream(namespace, envoyPort, up.Metadata.Ref()) var opts clients.WriteOpts _, err = proxycli.Write(proxy, opts) Expect(err).NotTo(HaveOccurred()) TestUpstremReachable() }) }) Context("all namespaces", func() { BeforeEach(func() { namespace = "gloo-e2e-" + helpers.RandString(8) writeNamespace = defaults.GlooSystem ro := &services.RunOptions{ NsToWrite: writeNamespace, NsToWatch: []string{}, WhatToRun: services.What{ DisableGateway: true, }, KubeClient: kubeClient, } testClients = services.RunGlooGatewayUdsFds(ctx, ro) role := namespace + "~proxy" err := envoyInstance.RunWithRole(role, testClients.GlooPort) 
Expect(err).NotTo(HaveOccurred()) prepNamespace() }) It("watch all namespaces", func() { Eventually(getStatus, "20s", "0.5s").Should(Equal(core.Status_Accepted)) up, err := getUpstream() Expect(err).NotTo(HaveOccurred()) proxycli := testClients.ProxyClient proxy := getTrivialProxyForUpstream(namespace, envoyPort, up.Metadata.Ref()) var opts clients.WriteOpts _, err = proxycli.Write(proxy, opts) Expect(err).NotTo(HaveOccurred()) TestUpstremReachable() }) }) }) }) func getTrivialProxyForUpstream(ns string, bindport uint32, upstream core.ResourceRef) *gloov1.Proxy { return &gloov1.Proxy{ Metadata: core.Metadata{ Name: "proxy", Namespace: ns, }, Listeners: []*gloov1.Listener{{ Name: "listener", BindAddress: "127.0.0.1", BindPort: bindport, ListenerType: &gloov1.Listener_HttpListener{ HttpListener: &gloov1.HttpListener{ VirtualHosts: []*gloov1.VirtualHost{{ Name: "virt1", Domains: []string{"*"}, Routes: []*gloov1.Route{{ Matcher: &gloov1.Matcher{ PathSpecifier: &gloov1.Matcher_Prefix{ Prefix: "/", }, }, Action: &gloov1.Route_RouteAction{ RouteAction: &gloov1.RouteAction{ Destination: &gloov1.RouteAction_Single{ Single: &gloov1.Destination{ DestinationType: &gloov1.Destination_Upstream{ Upstream: &upstream, }, }, }, }, }, }}, }}, }, }, }}, } } func getIpThatsNotLocalhost() string { // kubernetes endpoints doesn't like localhost, so we just give it some other local address // from: k8s.io/kubernetes/pkg/apis/core/validation/validation.go /* func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList { // We disallow some IPs as endpoints or external-ips. Specifically, // unspecified and loopback addresses are nonsensical and link-local // addresses tend to be used for node-centric purposes (e.g. metadata // service). 
*/ ifaces, err := net.Interfaces() ExpectWithOffset(1, err).NotTo(HaveOccurred()) for _, iface := range ifaces { if iface.Flags&net.FlagLoopback != 0 { continue } addrs, err := iface.Addrs() if err != nil { continue } for _, addr := range addrs { var ip net.IP switch v := addr.(type) { case *net.IPNet: ip = v.IP case *net.IPAddr: ip = v.IP default: continue } // make sure that kubernetes like this endpoint: endpoints := &kubecore.Endpoints{ ObjectMeta: metav1.ObjectMeta{ Namespace: "validate", Name: "validate", }, Subsets: []kubecore.EndpointSubset{{ Addresses: []kubecore.EndpointAddress{{ IP: ip.String(), Hostname: "localhost", }}, Ports: []kubecore.EndpointPort{{ Port: int32(5555), Protocol: kubecore.ProtocolTCP, }}, }}, } errs := validation.ValidateEndpoints(endpoints) if len(errs) != 0 { continue } return ip.String() } } Fail("no ip address available", 1) return "" }
1.265625
1
internal/utils/pinger.go
ivkos/luxaudio
1
517
package utils import ( "github.com/ivkos/luxaudio/internal/led" "log" "net" "time" ) type Pinger struct { conn *net.UDPConn interval time.Duration timeout time.Duration verbose bool IsReachable bool } func NewPinger(conn *net.UDPConn, interval time.Duration, verbose bool) *Pinger { pinger := &Pinger{ conn: conn, interval: interval, timeout: 1 * time.Second, verbose: verbose, IsReachable: true, } go pinger.start() return pinger } func (pinger *Pinger) start() { timer := time.NewTimer(0) pingPayload := led.MakePingPayload() for { timer.Reset(pinger.interval) <-timer.C _, err := pinger.conn.Write(pingPayload) if err != nil { pinger.setReachable(false) pinger.logVerbose("WARN: Could not write ping payload: %v", err) continue } err = pinger.conn.SetReadDeadline(time.Now().Add(pinger.timeout)) if err != nil { pinger.setReachable(false) pinger.logVerbose("WARN: Could not set ping deadline: %v", err) continue } result := make([]byte, 1) n, err := pinger.conn.Read(result) if err != nil { pinger.logVerbose("WARN: Could not read ping response: %v", err) pinger.setReachable(false) continue } if n != 1 { pinger.logVerbose("WARN: Ping response has unexpected length %d", n) pinger.setReachable(false) continue } if result[0] != '1' { pinger.logVerbose("WARN: Ping response is unexpected: %x", result[0]) pinger.setReachable(false) continue } pinger.setReachable(true) } } func (pinger *Pinger) setReachable(reachable bool) { if reachable != pinger.IsReachable { log.Printf("Reachable = %t", reachable) } pinger.IsReachable = reachable } func (pinger *Pinger) logVerbose(format string, v ...interface{}) { if pinger.verbose { log.Printf(format, v...) } }
1.914063
2
vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go
joestringer/hubble
6
525
// Code generated by go-swagger; DO NOT EDIT. package policy // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "context" "net/http" "time" "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" strfmt "github.com/go-openapi/strfmt" ) // NewGetFqdnCacheIDParams creates a new GetFqdnCacheIDParams object // with the default values initialized. func NewGetFqdnCacheIDParams() *GetFqdnCacheIDParams { var () return &GetFqdnCacheIDParams{ timeout: cr.DefaultTimeout, } } // NewGetFqdnCacheIDParamsWithTimeout creates a new GetFqdnCacheIDParams object // with the default values initialized, and the ability to set a timeout on a request func NewGetFqdnCacheIDParamsWithTimeout(timeout time.Duration) *GetFqdnCacheIDParams { var () return &GetFqdnCacheIDParams{ timeout: timeout, } } // NewGetFqdnCacheIDParamsWithContext creates a new GetFqdnCacheIDParams object // with the default values initialized, and the ability to set a context for a request func NewGetFqdnCacheIDParamsWithContext(ctx context.Context) *GetFqdnCacheIDParams { var () return &GetFqdnCacheIDParams{ Context: ctx, } } // NewGetFqdnCacheIDParamsWithHTTPClient creates a new GetFqdnCacheIDParams object // with the default values initialized, and the ability to set a custom HTTPClient for a request func NewGetFqdnCacheIDParamsWithHTTPClient(client *http.Client) *GetFqdnCacheIDParams { var () return &GetFqdnCacheIDParams{ HTTPClient: client, } } /*GetFqdnCacheIDParams contains all the parameters to send to the API endpoint for the get fqdn cache ID operation typically these are written to a http.Request */ type GetFqdnCacheIDParams struct { /*Cidr A CIDR range of IPs */ Cidr *string /*ID String describing an endpoint with the format ``[prefix:]id``. If no prefix is specified, a prefix of ``cilium-local:`` is assumed. 
Not all endpoints will be addressable by all endpoint ID prefixes with the exception of the local Cilium UUID which is assigned to all endpoints. Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - container-id: Container runtime ID, e.g. container-id:22222 - container-name: Container name, e.g. container-name:foobar - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ ID string /*Matchpattern A toFQDNs compatible matchPattern expression */ Matchpattern *string timeout time.Duration Context context.Context HTTPClient *http.Client } // WithTimeout adds the timeout to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) WithTimeout(timeout time.Duration) *GetFqdnCacheIDParams { o.SetTimeout(timeout) return o } // SetTimeout adds the timeout to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) SetTimeout(timeout time.Duration) { o.timeout = timeout } // WithContext adds the context to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) WithContext(ctx context.Context) *GetFqdnCacheIDParams { o.SetContext(ctx) return o } // SetContext adds the context to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) SetContext(ctx context.Context) { o.Context = ctx } // WithHTTPClient adds the HTTPClient to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) WithHTTPClient(client *http.Client) *GetFqdnCacheIDParams { o.SetHTTPClient(client) return o } // SetHTTPClient adds the HTTPClient to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client } // WithCidr adds the cidr to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) WithCidr(cidr *string) *GetFqdnCacheIDParams { o.SetCidr(cidr) return o } // SetCidr adds the 
cidr to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) SetCidr(cidr *string) { o.Cidr = cidr } // WithID adds the id to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) WithID(id string) *GetFqdnCacheIDParams { o.SetID(id) return o } // SetID adds the id to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) SetID(id string) { o.ID = id } // WithMatchpattern adds the matchpattern to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) WithMatchpattern(matchpattern *string) *GetFqdnCacheIDParams { o.SetMatchpattern(matchpattern) return o } // SetMatchpattern adds the matchpattern to the get fqdn cache ID params func (o *GetFqdnCacheIDParams) SetMatchpattern(matchpattern *string) { o.Matchpattern = matchpattern } // WriteToRequest writes these params to a swagger request func (o *GetFqdnCacheIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { if err := r.SetTimeout(o.timeout); err != nil { return err } var res []error if o.Cidr != nil { // query param cidr var qrCidr string if o.Cidr != nil { qrCidr = *o.Cidr } qCidr := qrCidr if qCidr != "" { if err := r.SetQueryParam("cidr", qCidr); err != nil { return err } } } // path param id if err := r.SetPathParam("id", o.ID); err != nil { return err } if o.Matchpattern != nil { // query param matchpattern var qrMatchpattern string if o.Matchpattern != nil { qrMatchpattern = *o.Matchpattern } qMatchpattern := qrMatchpattern if qMatchpattern != "" { if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil { return err } } } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
1.101563
1
controlplane/pkg/monitor/crossconnect/crossconnect_event.go
stanislav-chlebec/networkservicemesh
1
533
package crossconnect import ( "fmt" "github.com/networkservicemesh/networkservicemesh/controlplane/pkg/apis/crossconnect" "github.com/networkservicemesh/networkservicemesh/controlplane/pkg/monitor" ) // Event is a monitor.Event for crossconnect GRPC API type Event struct { monitor.BaseEvent Statistics map[string]*crossconnect.Metrics } // Message converts Event to CrossConnectEvent func (e *Event) Message() (interface{}, error) { eventType, err := eventTypeToXconEventType(e.EventType()) if err != nil { return nil, err } xcons, err := xconsFromEntities(e.Entities()) if err != nil { return nil, err } return &crossconnect.CrossConnectEvent{ Type: eventType, CrossConnects: xcons, Metrics: e.Statistics, }, nil } type eventFactory struct { } func (m *eventFactory) NewEvent(eventType monitor.EventType, entities map[string]monitor.Entity) monitor.Event { return &Event{ BaseEvent: monitor.NewBaseEvent(eventType, entities), Statistics: map[string]*crossconnect.Metrics{}, } } func (m *eventFactory) EventFromMessage(message interface{}) (monitor.Event, error) { xconEvent, ok := message.(*crossconnect.CrossConnectEvent) if !ok { return nil, fmt.Errorf("unable to cast %v to CrossConnectEvent", message) } eventType, err := xconEventTypeToEventType(xconEvent.GetType()) if err != nil { return nil, err } entities := entitiesFromXcons(xconEvent.CrossConnects) return &Event{ BaseEvent: monitor.NewBaseEvent(eventType, entities), Statistics: xconEvent.Metrics, }, nil } func eventTypeToXconEventType(eventType monitor.EventType) (crossconnect.CrossConnectEventType, error) { switch eventType { case monitor.EventTypeInitialStateTransfer: return crossconnect.CrossConnectEventType_INITIAL_STATE_TRANSFER, nil case monitor.EventTypeUpdate: return crossconnect.CrossConnectEventType_UPDATE, nil case monitor.EventTypeDelete: return crossconnect.CrossConnectEventType_DELETE, nil default: return 0, fmt.Errorf("unable to cast %v to CrossConnectEventType", eventType) } } func 
xconEventTypeToEventType(connectionEventType crossconnect.CrossConnectEventType) (monitor.EventType, error) { switch connectionEventType { case crossconnect.CrossConnectEventType_INITIAL_STATE_TRANSFER: return monitor.EventTypeInitialStateTransfer, nil case crossconnect.CrossConnectEventType_UPDATE: return monitor.EventTypeUpdate, nil case crossconnect.CrossConnectEventType_DELETE: return monitor.EventTypeDelete, nil default: return "", fmt.Errorf("unable to cast %v to monitor.EventType", connectionEventType) } } func xconsFromEntities(entities map[string]monitor.Entity) (map[string]*crossconnect.CrossConnect, error) { xcons := map[string]*crossconnect.CrossConnect{} for k, v := range entities { if conn, ok := v.(*crossconnect.CrossConnect); ok { xcons[k] = conn } else { return nil, fmt.Errorf("unable to cast Entity to CrossConnect") } } return xcons, nil } func entitiesFromXcons(xcons map[string]*crossconnect.CrossConnect) map[string]monitor.Entity { entities := map[string]monitor.Entity{} for k, v := range xcons { entities[k] = v } return entities }
1.335938
1
geom/circle/copy_from.go
gravestench/pho
0
541
package circle // Copies the `x`, `y` and `radius` properties from the `source` Circle // into the given `dest` Circle, then returns the `dest` Circle. func CopyFrom(source, dest *Circle) *Circle { return dest.SetTo(source.X, source.Y, source.radius) }
1.28125
1
main.go
Akecel/api-goilerplate
1
549
// API Boilerplate in Go using Echo and Viper package main import ( "gechoplate/config" "gechoplate/database" "gechoplate/router" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" ) // main launch all part of the project func main() { config.InitConfig() database.Connect() database.Migrate() database.Seed() e := echo.New() router.InitRoutes(e) e.Use(middleware.CORSWithConfig(middleware.CORSConfig{ AllowOrigins: []string{"http://localhost:3000"}, AllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept}, })) e.Use(middleware.RequestID()) e.Pre(middleware.RemoveTrailingSlash()) e.Use(middleware.Recover()) e.Logger.Fatal(e.Start(":80")) }
1.023438
1
chain/vm/vm.go
HuangHuai1/lotus
0
557
package vm import ( "bytes" "context" "fmt" "reflect" "sync/atomic" "time" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/metrics" block "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" mh "github.com/multiformats/go-multihash" cbg "github.com/whyrusleeping/cbor-gen" "go.opencensus.io/stats" "go.opencensus.io/trace" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/aerrors" "github.com/filecoin-project/lotus/chain/actors/builtin/account" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/blockstore" bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/bufbstore" ) const MaxCallDepth = 4096 var log = logging.Logger("vm") var actorLog = logging.Logger("actors") var gasOnActorExec = newGasCharge("OnActorExec", 0, 0) // stat counters var ( StatSends uint64 StatApplied uint64 ) // ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`. 
func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, error) { if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 { return addr, nil } act, err := state.GetActor(addr) if err != nil { return address.Undef, xerrors.Errorf("failed to find actor: %s", addr) } aast, err := account.Load(adt.WrapStore(context.TODO(), cst), act) if err != nil { return address.Undef, xerrors.Errorf("failed to get account actor state for %s: %w", addr, err) } return aast.PubkeyAddress() } var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) var _ blockstore.Viewer = (*gasChargingBlocks)(nil) type gasChargingBlocks struct { chargeGas func(GasCharge) pricelist Pricelist under cbor.IpldBlockstore } func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { if v, ok := bs.under.(blockstore.Viewer); ok { bs.chargeGas(bs.pricelist.OnIpldGet()) return v.View(c, func(b []byte) error { // we have successfully retrieved the value; charge for it, even if the user-provided function fails. bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b))) bs.chargeGas(gasOnActorExec) return cb(b) }) } // the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour. 
blk, err := bs.Get(c) if err == nil && blk != nil { return cb(blk.RawData()) } return err } func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { bs.chargeGas(bs.pricelist.OnIpldGet()) blk, err := bs.under.Get(c) if err != nil { return nil, aerrors.Escalate(err, "failed to get block from blockstore") } bs.chargeGas(newGasCharge("OnIpldGetEnd", 0, 0).WithExtra(len(blk.RawData()))) bs.chargeGas(gasOnActorExec) return blk, nil } func (bs *gasChargingBlocks) Put(blk block.Block) error { bs.chargeGas(bs.pricelist.OnIpldPut(len(blk.RawData()))) if err := bs.under.Put(blk); err != nil { return aerrors.Escalate(err, "failed to write data to disk") } bs.chargeGas(gasOnActorExec) return nil } func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runtime) *Runtime { rt := &Runtime{ ctx: ctx, vm: vm, state: vm.cstate, origin: msg.From, originNonce: msg.Nonce, height: vm.blockHeight, gasUsed: 0, gasAvailable: msg.GasLimit, depth: 0, numActorsCreated: 0, pricelist: PricelistByEpoch(vm.blockHeight), allowInternal: true, callerValidated: false, executionTrace: types.ExecutionTrace{Msg: msg}, } if parent != nil { // TODO: The version check here should be unnecessary, but we can wait to take it out if !parent.allowInternal && rt.NetworkVersion() >= network.Version8 { rt.Abortf(exitcode.SysErrForbidden, "internal calls currently disabled") } rt.gasUsed = parent.gasUsed rt.origin = parent.origin rt.originNonce = parent.originNonce rt.numActorsCreated = parent.numActorsCreated rt.depth = parent.depth + 1 } if rt.depth > MaxCallDepth && rt.NetworkVersion() >= network.Version7 { rt.Abortf(exitcode.SysErrForbidden, "message execution exceeds call depth") } cbb := &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks} cst := cbor.NewCborStore(cbb) cst.Atlas = vm.cst.Atlas // associate the atlas. 
rt.cst = cst vmm := *msg resF, ok := rt.ResolveAddress(msg.From) if !ok { rt.Abortf(exitcode.SysErrInvalidReceiver, "resolve msg.From address failed") } vmm.From = resF if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version4 { rt.Message = &vmm } else { resT, _ := rt.ResolveAddress(msg.To) // may be set to undef if recipient doesn't exist yet vmm.To = resT rt.Message = &Message{msg: vmm} } rt.Syscalls = pricedSyscalls{ under: vm.Syscalls(ctx, rt), chargeGas: rt.chargeGasFunc(1), pl: rt.pricelist, } return rt } type UnsafeVM struct { VM *VM } func (vm *UnsafeVM) MakeRuntime(ctx context.Context, msg *types.Message) *Runtime { return vm.VM.makeRuntime(ctx, msg, nil) } type CircSupplyCalculator func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) type NtwkVersionGetter func(context.Context, abi.ChainEpoch) network.Version type LookbackStateGetter func(context.Context, abi.ChainEpoch) (*state.StateTree, error) type VM struct { cstate *state.StateTree base cid.Cid cst *cbor.BasicIpldStore buf *bufbstore.BufferedBS blockHeight abi.ChainEpoch areg *ActorRegistry rand Rand circSupplyCalc CircSupplyCalculator ntwkVersion NtwkVersionGetter baseFee abi.TokenAmount lbStateGet LookbackStateGetter Syscalls SyscallBuilder } type VMOpts struct { StateBase cid.Cid Epoch abi.ChainEpoch Rand Rand Bstore bstore.Blockstore Syscalls SyscallBuilder CircSupplyCalc CircSupplyCalculator NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? 
It seems like even when creating new networks we want to use the 'global'/build-default version getter BaseFee abi.TokenAmount LookbackState LookbackStateGetter } func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { buf := bufbstore.NewBufferedBstore(opts.Bstore) cst := cbor.NewCborStore(buf) state, err := state.LoadStateTree(cst, opts.StateBase) if err != nil { return nil, err } return &VM{ cstate: state, base: opts.StateBase, cst: cst, buf: buf, blockHeight: opts.Epoch, areg: NewActorRegistry(), rand: opts.Rand, // TODO: Probably should be a syscall circSupplyCalc: opts.CircSupplyCalc, ntwkVersion: opts.NtwkVersion, Syscalls: opts.Syscalls, baseFee: opts.BaseFee, lbStateGet: opts.LookbackState, }, nil } type Rand interface { GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) } type ApplyRet struct { types.MessageReceipt ActorErr aerrors.ActorError ExecutionTrace types.ExecutionTrace Duration time.Duration GasCosts *GasOutputs } func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) { defer atomic.AddUint64(&StatSends, 1) st := vm.cstate rt := vm.makeRuntime(ctx, msg, parent) if EnableGasTracing { rt.lastGasChargeTime = start if parent != nil { rt.lastGasChargeTime = parent.lastGasChargeTime rt.lastGasCharge = parent.lastGasCharge defer func() { parent.lastGasChargeTime = rt.lastGasChargeTime parent.lastGasCharge = rt.lastGasCharge }() } } if parent != nil { defer func() { parent.gasUsed = rt.gasUsed }() } if gasCharge != nil { if err := rt.chargeGasSafe(*gasCharge); err != nil { // this should never happen return nil, aerrors.Wrap(err, "not enough gas for initial message charge, this should not happen"), rt } } ret, err := func() ([]byte, 
aerrors.ActorError) { _ = rt.chargeGasSafe(newGasCharge("OnGetActor", 0, 0)) toActor, err := st.GetActor(msg.To) if err != nil { if xerrors.Is(err, types.ErrActorNotFound) { a, aid, err := TryCreateAccountActor(rt, msg.To) if err != nil { return nil, aerrors.Wrapf(err, "could not create account") } toActor = a if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version4 { // Leave the rt.Message as is } else { nmsg := Message{ msg: types.Message{ To: aid, From: rt.Message.Caller(), Value: rt.Message.ValueReceived(), }, } rt.Message = &nmsg } } else { return nil, aerrors.Escalate(err, "getting actor") } } if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil { return nil, aerrors.Wrap(aerr, "not enough gas for method invocation") } // not charging any gas, just logging //nolint:errcheck defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0)) if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil { return nil, aerrors.Wrap(err, "failed to transfer funds") } } if msg.Method != 0 { var ret []byte _ = rt.chargeGasSafe(gasOnActorExec) ret, err := vm.Invoke(toActor, rt, msg.Method, msg.Params) return ret, err } return nil, nil }() mr := types.MessageReceipt{ ExitCode: aerrors.RetCode(err), Return: ret, GasUsed: rt.gasUsed, } rt.executionTrace.MsgRct = &mr rt.executionTrace.Duration = time.Since(start) if err != nil { rt.executionTrace.Error = err.Error() } return ret, err, rt } func checkMessage(msg *types.Message) error { if msg.GasLimit == 0 { return xerrors.Errorf("message has no gas limit set") } if msg.GasLimit < 0 { return xerrors.Errorf("message has negative gas limit") } if msg.GasFeeCap == types.EmptyInt { return xerrors.Errorf("message fee cap not set") } if msg.GasPremium == types.EmptyInt { return xerrors.Errorf("message gas premium not set") } if msg.Value == types.EmptyInt { return xerrors.Errorf("message no value set") } return nil } func (vm 
*VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { start := build.Clock.Now() defer atomic.AddUint64(&StatApplied, 1) ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start) rt.finilizeGasTracing() return &ApplyRet{ MessageReceipt: types.MessageReceipt{ ExitCode: aerrors.RetCode(actorErr), Return: ret, GasUsed: 0, Refund: types.NewInt(0), }, ActorErr: actorErr, ExecutionTrace: rt.executionTrace, GasCosts: nil, Duration: time.Since(start), }, actorErr } func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) { start := build.Clock.Now() ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage") defer span.End() defer atomic.AddUint64(&StatApplied, 1) msg := cmsg.VMMessage() if span.IsRecordingEvents() { span.AddAttributes( trace.StringAttribute("to", msg.To.String()), trace.Int64Attribute("method", int64(msg.Method)), trace.StringAttribute("value", msg.Value.String()), ) } if err := checkMessage(msg); err != nil { return nil, err } pl := PricelistByEpoch(vm.blockHeight) msgGas := pl.OnChainMessage(cmsg.ChainLength()) msgGasCost := msgGas.Total() // this should never happen, but is currently still exercised by some tests if msgGasCost > msg.GasLimit { gasOutputs := ZeroGasOutputs() gasOutputs.MinerPenalty = types.BigMul(vm.baseFee, abi.NewTokenAmount(msgGasCost)) return &ApplyRet{ MessageReceipt: types.MessageReceipt{ ExitCode: exitcode.SysErrOutOfGas, GasUsed: 0, Refund: types.NewInt(0), }, GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } st := vm.cstate minerPenaltyAmount := types.BigMul(vm.baseFee, abi.NewTokenAmount(msg.GasLimit)) fromActor, err := st.GetActor(msg.From) // this should never happen, but is currently still exercised by some tests if err != nil { if xerrors.Is(err, types.ErrActorNotFound) { gasOutputs := ZeroGasOutputs() gasOutputs.MinerPenalty = minerPenaltyAmount return &ApplyRet{ MessageReceipt: types.MessageReceipt{ ExitCode: exitcode.SysErrSenderInvalid, GasUsed: 0, 
Refund: types.NewInt(0), }, ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "actor not found: %s", msg.From), GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } return nil, xerrors.Errorf("failed to look up from actor: %w", err) } // this should never happen, but is currently still exercised by some tests if !builtin.IsAccountActor(fromActor.Code) { gasOutputs := ZeroGasOutputs() gasOutputs.MinerPenalty = minerPenaltyAmount return &ApplyRet{ MessageReceipt: types.MessageReceipt{ ExitCode: exitcode.SysErrSenderInvalid, GasUsed: 0, Refund: types.NewInt(0), }, ActorErr: aerrors.Newf(exitcode.SysErrSenderInvalid, "send from not account actor: %s", fromActor.Code), GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } if msg.Nonce != fromActor.Nonce { gasOutputs := ZeroGasOutputs() gasOutputs.MinerPenalty = minerPenaltyAmount return &ApplyRet{ MessageReceipt: types.MessageReceipt{ ExitCode: exitcode.SysErrSenderStateInvalid, GasUsed: 0, Refund: types.NewInt(0), }, ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid, "actor nonce invalid: msg:%d != state:%d", msg.Nonce, fromActor.Nonce), GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } gascost := types.BigMul(types.NewInt(uint64(msg.GasLimit)), msg.GasFeeCap) if fromActor.Balance.LessThan(gascost) { gasOutputs := ZeroGasOutputs() gasOutputs.MinerPenalty = minerPenaltyAmount return &ApplyRet{ MessageReceipt: types.MessageReceipt{ ExitCode: exitcode.SysErrSenderStateInvalid, GasUsed: 0, Refund: types.NewInt(0), }, ActorErr: aerrors.Newf(exitcode.SysErrSenderStateInvalid, "actor balance less than needed: %s < %s", types.FIL(fromActor.Balance), types.FIL(gascost)), GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } gasHolder := &types.Actor{Balance: types.NewInt(0)} if err := vm.transferToGasHolder(msg.From, gasHolder, gascost); err != nil { return nil, xerrors.Errorf("failed to withdraw gas funds: %w", err) } if err := vm.incrementNonce(msg.From); err != nil { return nil, err 
} if err := st.Snapshot(ctx); err != nil { return nil, xerrors.Errorf("snapshot failed: %w", err) } defer st.ClearSnapshot() ret, actorErr, rt := vm.send(ctx, msg, nil, &msgGas, start) if aerrors.IsFatal(actorErr) { return nil, xerrors.Errorf("[from=%s,to=%s,n=%d,m=%d,h=%d] fatal error: %w", msg.From, msg.To, msg.Nonce, msg.Method, vm.blockHeight, actorErr) } if actorErr != nil { log.Warnw("Send actor error", "from", msg.From, "to", msg.To, "nonce", msg.Nonce, "method", msg.Method, "height", vm.blockHeight, "error", fmt.Sprintf("%+v", actorErr)) } if actorErr != nil && len(ret) != 0 { // This should not happen, something is wonky return nil, xerrors.Errorf("message invocation errored, but had a return value anyway: %w", actorErr) } if rt == nil { return nil, xerrors.Errorf("send returned nil runtime, send error was: %s", actorErr) } if len(ret) != 0 { // safely override actorErr since it must be nil actorErr = rt.chargeGasSafe(rt.Pricelist().OnChainReturnValue(len(ret))) if actorErr != nil { ret = nil } } var errcode exitcode.ExitCode var gasUsed int64 if errcode = aerrors.RetCode(actorErr); errcode != 0 { // revert all state changes since snapshot if err := st.Revert(); err != nil { return nil, xerrors.Errorf("revert state failed: %w", err) } } rt.finilizeGasTracing() gasUsed = rt.gasUsed if gasUsed < 0 { gasUsed = 0 } gasOutputs := ComputeGasOutputs(gasUsed, msg.GasLimit, vm.baseFee, msg.GasFeeCap, msg.GasPremium) if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, gasOutputs.BaseFeeBurn); err != nil { return nil, xerrors.Errorf("failed to burn base fee: %w", err) } if err := vm.transferFromGasHolder(reward.Address, gasHolder, gasOutputs.MinerTip); err != nil { return nil, xerrors.Errorf("failed to give miner gas reward: %w", err) } if err := vm.transferFromGasHolder(builtin.BurntFundsActorAddr, gasHolder, gasOutputs.OverEstimationBurn); err != nil { return nil, xerrors.Errorf("failed to burn overestimation fee: %w", err) } // refund unused 
gas if err := vm.transferFromGasHolder(msg.From, gasHolder, gasOutputs.Refund); err != nil { return nil, xerrors.Errorf("failed to refund gas: %w", err) } if types.BigCmp(types.NewInt(0), gasHolder.Balance) != 0 { return nil, xerrors.Errorf("gas handling math is wrong") } return &ApplyRet{ MessageReceipt: types.MessageReceipt{ ExitCode: errcode, Return: ret, GasUsed: gasUsed, Refund: gasOutputs.Refund, }, ActorErr: actorErr, ExecutionTrace: rt.executionTrace, GasCosts: &gasOutputs, Duration: time.Since(start), }, nil } func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorError) { act, err := vm.cstate.GetActor(addr) if err != nil { return types.EmptyInt, aerrors.Absorb(err, 1, "failed to find actor") } return act.Balance, nil } type vmFlushKey struct{} func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { _, span := trace.StartSpan(ctx, "vm.Flush") defer span.End() from := vm.buf to := vm.buf.Read() root, err := vm.cstate.Flush(ctx) if err != nil { return cid.Undef, xerrors.Errorf("flushing vm: %w", err) } if err := Copy(context.WithValue(ctx, vmFlushKey{}, true), from, to, root); err != nil { return cid.Undef, xerrors.Errorf("copying tree: %w", err) } return root, nil } // MutateState usage: MutateState(ctx, idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...}) func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error { act, err := vm.cstate.GetActor(addr) if err != nil { return xerrors.Errorf("actor not found: %w", err) } st := reflect.New(reflect.TypeOf(fn).In(1).Elem()) if err := vm.cst.Get(ctx, act.Head, st.Interface()); err != nil { return xerrors.Errorf("read actor head: %w", err) } out := reflect.ValueOf(fn).Call([]reflect.Value{reflect.ValueOf(vm.cst), st}) if !out[0].IsNil() && out[0].Interface().(error) != nil { return out[0].Interface().(error) } head, err := vm.cst.Put(ctx, st.Interface()) if err != nil { return xerrors.Errorf("put new actor head: %w", err) } act.Head = head if 
err := vm.cstate.SetActor(addr, act); err != nil { return xerrors.Errorf("set actor: %w", err) } return nil } func linksForObj(blk block.Block, cb func(cid.Cid)) error { switch blk.Cid().Prefix().Codec { case cid.DagCBOR: err := cbg.ScanForLinks(bytes.NewReader(blk.RawData()), cb) if err != nil { return xerrors.Errorf("cbg.ScanForLinks: %w", err) } return nil case cid.Raw: // We implicitly have all children of raw blocks. return nil default: return xerrors.Errorf("vm flush copy method only supports dag cbor") } } func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) error { ctx, span := trace.StartSpan(ctx, "vm.Copy") // nolint defer span.End() start := time.Now() var numBlocks int var totalCopySize int const batchSize = 128 const bufCount = 3 freeBufs := make(chan []block.Block, bufCount) toFlush := make(chan []block.Block, bufCount) for i := 0; i < bufCount; i++ { freeBufs <- make([]block.Block, 0, batchSize) } errFlushChan := make(chan error) go func() { for b := range toFlush { if err := to.PutMany(b); err != nil { close(freeBufs) errFlushChan <- xerrors.Errorf("batch put in copy: %w", err) return } freeBufs <- b[:0] } close(errFlushChan) close(freeBufs) }() var batch = <-freeBufs batchCp := func(blk block.Block) error { numBlocks++ totalCopySize += len(blk.RawData()) batch = append(batch, blk) if len(batch) >= batchSize { toFlush <- batch var ok bool batch, ok = <-freeBufs if !ok { return <-errFlushChan } } return nil } if err := copyRec(from, to, root, batchCp); err != nil { return xerrors.Errorf("copyRec: %w", err) } if len(batch) > 0 { toFlush <- batch } close(toFlush) // close the toFlush triggering the loop to end err := <-errFlushChan // get error out or get nil if it was closed if err != nil { return err } span.AddAttributes( trace.Int64Attribute("numBlocks", int64(numBlocks)), trace.Int64Attribute("copySize", int64(totalCopySize)), ) if yes, ok := ctx.Value(vmFlushKey{}).(bool); yes && ok { took := 
metrics.SinceInMilliseconds(start) stats.Record(ctx, metrics.VMFlushCopyCount.M(int64(numBlocks)), metrics.VMFlushCopyDuration.M(took)) } return nil } func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { if root.Prefix().MhType == 0 { // identity cid, skip return nil } blk, err := from.Get(root) if err != nil { return xerrors.Errorf("get %s failed: %w", root, err) } var lerr error err = linksForObj(blk, func(link cid.Cid) { if lerr != nil { // Theres no erorr return on linksForObj callback :( return } prefix := link.Prefix() if prefix.Codec == cid.FilCommitmentSealed || prefix.Codec == cid.FilCommitmentUnsealed { return } // We always have blocks inlined into CIDs, but we may not have their children. if prefix.MhType == mh.IDENTITY { // Unless the inlined block has no children. if prefix.Codec == cid.Raw { return } } else { // If we have an object, we already have its children, skip the object. has, err := to.Has(link) if err != nil { lerr = xerrors.Errorf("has: %w", err) return } if has { return } } if err := copyRec(from, to, link, cp); err != nil { lerr = err return } }) if err != nil { return xerrors.Errorf("linksForObj (%x): %w", blk.RawData(), err) } if lerr != nil { return lerr } if err := cp(blk); err != nil { return xerrors.Errorf("copy: %w", err) } return nil } func (vm *VM) StateTree() types.StateTree { return vm.cstate } func (vm *VM) SetBlockHeight(h abi.ChainEpoch) { vm.blockHeight = h } func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke") defer span.End() if span.IsRecordingEvents() { span.AddAttributes( trace.StringAttribute("to", rt.Receiver().String()), trace.Int64Attribute("method", int64(method)), trace.StringAttribute("value", rt.ValueReceived().String()), ) } var oldCtx context.Context oldCtx, rt.ctx = rt.ctx, ctx defer func() { rt.ctx = oldCtx }() ret, err := vm.areg.Invoke(act.Code, 
rt, method, params) if err != nil { return nil, err } return ret, nil } func (vm *VM) SetInvoker(i *ActorRegistry) { vm.areg = i } func (vm *VM) GetNtwkVersion(ctx context.Context, ce abi.ChainEpoch) network.Version { return vm.ntwkVersion(ctx, ce) } func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) } func (vm *VM) incrementNonce(addr address.Address) error { return vm.cstate.MutateActor(addr, func(a *types.Actor) error { a.Nonce++ return nil }) } func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.ActorError { if from == to { return nil } fromID, err := vm.cstate.LookupID(from) if err != nil { return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) } toID, err := vm.cstate.LookupID(to) if err != nil { return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) } if fromID == toID { return nil } if amt.LessThan(types.NewInt(0)) { return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) } f, err := vm.cstate.GetActor(fromID) if err != nil { return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) } t, err := vm.cstate.GetActor(toID) if err != nil { return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err) } if err := deductFunds(f, amt); err != nil { return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds (%s): %s", types.FIL(amt), err) } depositFunds(t, amt) if err := vm.cstate.SetActor(fromID, f); err != nil { return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) } if err := vm.cstate.SetActor(toID, t); err != nil { return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) } return nil } func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { if amt.LessThan(types.NewInt(0)) { return 
xerrors.Errorf("attempted to transfer negative value to gas holder") } return vm.cstate.MutateActor(addr, func(a *types.Actor) error { if err := deductFunds(a, amt); err != nil { return err } depositFunds(gasHolder, amt) return nil }) } func (vm *VM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { if amt.LessThan(types.NewInt(0)) { return xerrors.Errorf("attempted to transfer negative value from gas holder") } if amt.Equals(big.NewInt(0)) { return nil } return vm.cstate.MutateActor(addr, func(a *types.Actor) error { if err := deductFunds(gasHolder, amt); err != nil { return err } depositFunds(a, amt) return nil }) } func deductFunds(act *types.Actor, amt types.BigInt) error { if act.Balance.LessThan(amt) { return fmt.Errorf("not enough funds") } act.Balance = types.BigSub(act.Balance, amt) return nil } func depositFunds(act *types.Actor, amt types.BigInt) { act.Balance = types.BigAdd(act.Balance, amt) }
1.257813
1
pkg/apiserver/git/git.go
zehuaiWANG/kubesphere
0
565
/* Copyright 2019 The KubeSphere Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package git import ( "net/http" "github.com/emicklei/go-restful" "kubesphere.io/kubesphere/pkg/models/git" "kubesphere.io/kubesphere/pkg/server/errors" ) func GitReadVerify(request *restful.Request, response *restful.Response) { authInfo := git.AuthInfo{} err := request.ReadEntity(&authInfo) ns := request.PathParameter("namespace") if err != nil { response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } err = git.GitReadVerify(ns, authInfo) if err != nil { response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } response.WriteAsJson(errors.None) }
1.132813
1
routers/api/auth.go
bylh/go-eth
0
573
package api import ( "fmt" "net/http" "github.com/astaxie/beego/validation" "github.com/gin-gonic/gin" "go-eth/pkg/app" "go-eth/pkg/e" "go-eth/pkg/util" "go-eth/service/auth_service" ) type auth struct { Username string `valid:"Required; MaxSize(50)"` Password string `valid:"Required; MaxSize(50)"` } // @Summary Get Auth // @Produce json // @Param username query string true "userName" // @Param password query string true "password" // @Success 200 {object} app.Response // @Failure 500 {object} app.Response // @Router /auth [get] func Login(c *gin.Context) { appG := app.Gin{C: c} valid := validation.Validation{} username := c.PostForm("username") password := c.PostForm("password") fmt.Println("post username", username) fmt.Println("post passward", password) //username := c.Query("username") //password := c.Query("password") a := auth{Username: username, Password: password} ok, _ := valid.Valid(&a) if !ok { app.MarkErrors(valid.Errors) appG.Response(http.StatusBadRequest, e.INVALID_PARAMS, nil) return } authService := auth_service.Auth{Username: username, Password: password} isExist, err := authService.Check() if err != nil { appG.Response(http.StatusInternalServerError, e.ERROR_AUTH_CHECK_TOKEN_FAIL, nil) return } if !isExist { appG.Response(http.StatusUnauthorized, e.ERROR_AUTH, nil) return } token, err := util.GenerateToken(username, password) if err != nil { appG.Response(http.StatusInternalServerError, e.ERROR_AUTH_TOKEN, nil) return } appG.Response(http.StatusOK, e.SUCCESS, map[string]string{ "token": token, }) }
1.523438
2
pkg/logentry/stages/drop_test.go
UniqueTokens/loki
1
581
package stages import ( "errors" "fmt" "testing" "time" "github.com/cortexproject/cortex/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ww "github.com/weaveworks/common/server" ) // Not all these are tested but are here to make sure the different types marshal without error var testDropYaml = ` pipeline_stages: - json: expressions: app: msg: - drop: source: src expression: ".*test.*" older_than: 24h longer_than: 8kb - drop: expression: ".*app1.*" - drop: source: app value: loki - drop: longer_than: 10000 ` func Test_dropStage_Process(t *testing.T) { // Enable debug logging cfg := &ww.Config{} cfg.LogLevel.Set("debug") util.InitLogger(cfg) Debug = true tests := []struct { name string config *DropConfig labels model.LabelSet extracted map[string]interface{} t *time.Time entry *string shouldDrop bool }{ { name: "Longer Than Should Drop", config: &DropConfig{ LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{}, t: nil, entry: ptrFromString("12345678901"), shouldDrop: true, }, { name: "Longer Than Should Not Drop When Equal", config: &DropConfig{ LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{}, t: nil, entry: ptrFromString("1234567890"), shouldDrop: false, }, { name: "Longer Than Should Not Drop When Less", config: &DropConfig{ LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{}, t: nil, entry: ptrFromString("123456789"), shouldDrop: false, }, { name: "Older than Should Drop", config: &DropConfig{ OlderThan: ptrFromString("1h"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{}, t: ptrFromTime(time.Now().Add(-2 * time.Hour)), entry: nil, shouldDrop: true, }, { name: "Older than Should Not Drop", config: &DropConfig{ OlderThan: ptrFromString("1h"), }, labels: model.LabelSet{}, extracted: 
map[string]interface{}{}, t: ptrFromTime(time.Now().Add(-5 * time.Minute)), entry: nil, shouldDrop: false, }, { name: "Matched Source", config: &DropConfig{ Source: ptrFromString("key"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "", }, shouldDrop: true, }, { name: "Did not match Source", config: &DropConfig{ Source: ptrFromString("key1"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "val1", }, shouldDrop: false, }, { name: "Matched Source and Value", config: &DropConfig{ Source: ptrFromString("key"), Value: ptrFromString("val1"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "val1", }, shouldDrop: true, }, { name: "Did not match Source and Value", config: &DropConfig{ Source: ptrFromString("key"), Value: ptrFromString("val1"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "VALRUE1", }, shouldDrop: false, }, { name: "Regex Matched Source and Value", config: &DropConfig{ Source: ptrFromString("key"), Expression: ptrFromString(".*val.*"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "val1", }, shouldDrop: true, }, { name: "Regex Did not match Source and Value", config: &DropConfig{ Source: ptrFromString("key"), Expression: ptrFromString(".*val.*"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "pal1", }, shouldDrop: false, }, { name: "Regex No Matching Source", config: &DropConfig{ Source: ptrFromString("key"), Expression: ptrFromString(".*val.*"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "pokey": "pal1", }, shouldDrop: false, }, { name: "Regex Did Not Match Line", config: &DropConfig{ Expression: ptrFromString(".*val.*"), }, labels: model.LabelSet{}, entry: ptrFromString("this is a line which does not match the regex"), extracted: map[string]interface{}{}, shouldDrop: false, }, { name: "Regex Matched Line", config: &DropConfig{ Expression: ptrFromString(".*val.*"), }, labels: 
model.LabelSet{}, entry: ptrFromString("this is a line with the word value in it"), extracted: map[string]interface{}{}, shouldDrop: true, }, { name: "Match Source and Length Both Match", config: &DropConfig{ Source: ptrFromString("key"), LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "pal1", }, t: nil, entry: ptrFromString("12345678901"), shouldDrop: true, }, { name: "Match Source and Length Only First Matches", config: &DropConfig{ Source: ptrFromString("key"), LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "pal1", }, t: nil, entry: ptrFromString("123456789"), shouldDrop: false, }, { name: "Match Source and Length Only Second Matches", config: &DropConfig{ Source: ptrFromString("key"), LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "WOOOOOOOOOOOOOO": "pal1", }, t: nil, entry: ptrFromString("123456789012"), shouldDrop: false, }, { name: "Everything Must Match", config: &DropConfig{ Source: ptrFromString("key"), Expression: ptrFromString(".*val.*"), OlderThan: ptrFromString("1h"), LongerThan: ptrFromString("10b"), }, labels: model.LabelSet{}, extracted: map[string]interface{}{ "key": "must contain value to match", }, t: ptrFromTime(time.Now().Add(-2 * time.Hour)), entry: ptrFromString("12345678901"), shouldDrop: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { err := validateDropConfig(tt.config) if err != nil { t.Error(err) } m := &dropStage{ cfg: tt.config, logger: util.Logger, } m.Process(tt.labels, tt.extracted, tt.t, tt.entry) if tt.shouldDrop { assert.Contains(t, tt.labels.String(), dropLabel) } else { assert.NotContains(t, tt.labels.String(), dropLabel) } }) } } func ptrFromString(str string) *string { return &str } func ptrFromTime(t time.Time) *time.Time { return &t } // TestDropPipeline is used to verify we properly parse the yaml config and create a working pipeline 
func TestDropPipeline(t *testing.T) { registry := prometheus.NewRegistry() plName := "test_pipeline" pl, err := NewPipeline(util.Logger, loadConfig(testDropYaml), &plName, registry) require.NoError(t, err) lbls := model.LabelSet{} ts := time.Now() // Process the first log line which should be dropped entry := testMatchLogLineApp1 extracted := map[string]interface{}{} pl.Process(lbls, extracted, &ts, &entry) assert.Contains(t, lbls.String(), dropLabel) // Process the second line which should not be dropped. entry = testMatchLogLineApp2 extracted = map[string]interface{}{} lbls = model.LabelSet{} pl.Process(lbls, extracted, &ts, &entry) assert.NotContains(t, lbls.String(), dropLabel) } var ( dropInvalidDur = "10y" dropVal = "msg" dropRegex = ".*blah" dropInvalidRegex = "(?P<ts[0-9]+).*" dropInvalidByteSize = "23QB" ) func Test_validateDropConfig(t *testing.T) { tests := []struct { name string config *DropConfig wantErr error }{ { name: "ErrEmpty", config: &DropConfig{}, wantErr: errors.New(ErrDropStageEmptyConfig), }, { name: "Invalid Duration", config: &DropConfig{ OlderThan: &dropInvalidDur, }, wantErr: fmt.Errorf(ErrDropStageInvalidDuration, dropInvalidDur, "time: unknown unit y in duration 10y"), }, { name: "Invalid Config", config: &DropConfig{ Value: &dropVal, Expression: &dropRegex, }, wantErr: errors.New(ErrDropStageInvalidConfig), }, { name: "Invalid Regex", config: &DropConfig{ Expression: &dropInvalidRegex, }, wantErr: fmt.Errorf(ErrDropStageInvalidRegex, "error parsing regexp: invalid named capture: `(?P<ts[0-9]+).*`"), }, { name: "Invalid Bytesize", config: &DropConfig{ LongerThan: &dropInvalidByteSize, }, wantErr: fmt.Errorf(ErrDropStageInvalidByteSize, "strconv.UnmarshalText: parsing \"23QB\": invalid syntax"), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if err := validateDropConfig(tt.config); ((err != nil) && (err.Error() != tt.wantErr.Error())) || (err == nil && tt.wantErr != nil) { t.Errorf("validateDropConfig() error = %v, 
wantErr = %v", err, tt.wantErr) } }) } }
1.390625
1
pkg/services/notifications/notifications.go
bkzy-wangjp/grafana
0
589
package notifications

import (
	"context"
	"errors"
	"fmt"
	"html/template"
	"net/url"
	"path/filepath"
	"strings"

	"github.com/grafana/grafana/pkg/bus"
	"github.com/grafana/grafana/pkg/events"
	"github.com/grafana/grafana/pkg/infra/log"
	"github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/setting"
	"github.com/grafana/grafana/pkg/util"
)

// mailTemplates holds every parsed email template; populated once in ProvideService.
var mailTemplates *template.Template

// Names of the email templates rendered by the handlers below.
// NOTE(review): tmplResetPassword was scrubbed to "<PASSWORD>" in this copy of
// the file; restored to the conventional template name — confirm against the
// template files on disk.
var tmplResetPassword = "reset_password"
var tmplSignUpStarted = "signup_started"
var tmplWelcomeOnSignUp = "welcome_on_signup"

// ProvideService builds the notification service: it registers all bus
// handlers and event listeners, parses the configured mail-template globs and
// validates the SMTP from_address. It returns an error if template parsing
// fails or the from_address is not a valid email address.
func ProvideService(bus bus.Bus, cfg *setting.Cfg) (*NotificationService, error) {
	ns := &NotificationService{
		Bus:          bus,
		Cfg:          cfg,
		log:          log.New("notifications"),
		mailQueue:    make(chan *Message, 10),
		webhookQueue: make(chan *Webhook, 10),
	}

	ns.Bus.AddHandlerCtx(ns.sendResetPasswordEmail)
	ns.Bus.AddHandlerCtx(ns.validateResetPasswordCode)
	ns.Bus.AddHandlerCtx(ns.sendEmailCommandHandler)
	ns.Bus.AddHandlerCtx(ns.sendEmailCommandHandlerSync)
	ns.Bus.AddHandlerCtx(ns.SendWebhookSync)

	ns.Bus.AddEventListenerCtx(ns.signUpStartedHandler)
	ns.Bus.AddEventListenerCtx(ns.signUpCompletedHandler)

	mailTemplates = template.New("name")
	mailTemplates.Funcs(template.FuncMap{
		"Subject": subjectTemplateFunc,
	})

	for _, pattern := range ns.Cfg.Smtp.TemplatesPatterns {
		templatePattern := filepath.Join(ns.Cfg.StaticRootPath, pattern)
		if _, err := mailTemplates.ParseGlob(templatePattern); err != nil {
			return nil, err
		}
	}

	if !util.IsEmail(ns.Cfg.Smtp.FromAddress) {
		return nil, errors.New("invalid email address for SMTP from_address config")
	}

	// Default the email-code validity window when it is unset.
	if cfg.EmailCodeValidMinutes == 0 {
		cfg.EmailCodeValidMinutes = 120
	}

	return ns, nil
}

// NotificationService delivers emails and webhooks, either synchronously or
// via the queues drained by Run.
type NotificationService struct {
	Bus bus.Bus
	Cfg *setting.Cfg

	mailQueue    chan *Message
	webhookQueue chan *Webhook
	log          log.Logger
}

// Run drains the webhook and mail queues until ctx is canceled; it returns
// the context's error on shutdown.
func (ns *NotificationService) Run(ctx context.Context) error {
	for {
		select {
		case webhook := <-ns.webhookQueue:
			err := ns.sendWebRequestSync(context.Background(), webhook)
			if err != nil {
				ns.log.Error("Failed to send webrequest ", "error", err)
			}
		case msg := <-ns.mailQueue:
			num, err := ns.Send(msg)
			tos := strings.Join(msg.To, "; ")
			// BUG FIX: build the optional info suffix before branching so both
			// the error and the success log lines carry it; previously it was
			// computed only on the error path, so the success line always
			// formatted an empty suffix.
			info := ""
			if len(msg.Info) > 0 {
				info = ", info: " + msg.Info
			}
			if err != nil {
				ns.log.Error(fmt.Sprintf("Async sent email %d succeed, not send emails: %s%s err: %s", num, tos, info, err))
			} else {
				ns.log.Debug(fmt.Sprintf("Async sent email %d succeed, sent emails: %s%s", num, tos, info))
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

// SendWebhookSync sends a webhook synchronously, bypassing the queue.
func (ns *NotificationService) SendWebhookSync(ctx context.Context, cmd *models.SendWebhookSync) error {
	return ns.sendWebRequestSync(ctx, &Webhook{
		Url:  cmd.Url,
		User: cmd.User,
		// NOTE(review): this field was scrubbed to "<PASSWORD>" in this copy
		// (invalid Go); restored to the obvious source field — confirm against
		// upstream.
		Password:    cmd.Password,
		Body:        cmd.Body,
		HttpMethod:  cmd.HttpMethod,
		HttpHeader:  cmd.HttpHeader,
		ContentType: cmd.ContentType,
	})
}

// subjectTemplateFunc stores value under obj["value"] so templates can set the
// email subject as a side effect; it always renders as the empty string.
func subjectTemplateFunc(obj map[string]interface{}, value string) string {
	obj["value"] = value
	return ""
}

// sendEmailCommandHandlerSync builds the message and sends it immediately,
// without going through the mail queue.
func (ns *NotificationService) sendEmailCommandHandlerSync(ctx context.Context, cmd *models.SendEmailCommandSync) error {
	message, err := ns.buildEmailMessage(&models.SendEmailCommand{
		Data:          cmd.Data,
		Info:          cmd.Info,
		Template:      cmd.Template,
		To:            cmd.To,
		SingleEmail:   cmd.SingleEmail,
		EmbeddedFiles: cmd.EmbeddedFiles,
		Subject:       cmd.Subject,
		ReplyTo:       cmd.ReplyTo,
	})
	if err != nil {
		return err
	}

	_, err = ns.Send(message)
	return err
}

// sendEmailCommandHandler builds the message and enqueues it for async
// delivery by Run.
func (ns *NotificationService) sendEmailCommandHandler(ctx context.Context, cmd *models.SendEmailCommand) error {
	message, err := ns.buildEmailMessage(cmd)
	if err != nil {
		return err
	}

	ns.mailQueue <- message
	return nil
}

// sendResetPasswordEmail emails the user a one-time reset code rendered with
// the reset-password template.
func (ns *NotificationService) sendResetPasswordEmail(ctx context.Context, cmd *models.SendResetPasswordEmailCommand) error {
	code, err := createUserEmailCode(ns.Cfg, cmd.User, nil)
	if err != nil {
		return err
	}
	return ns.sendEmailCommandHandler(ctx, &models.SendEmailCommand{
		To:       []string{cmd.User.Email},
		Template: tmplResetPassword,
		Data: map[string]interface{}{
			"Code": code,
			"Name": cmd.User.NameOrFallback(),
		},
	})
}

// validateResetPasswordCode resolves the user embedded in the reset code and
// verifies the code; on success it stores the user in query.Result.
func (ns *NotificationService) validateResetPasswordCode(ctx context.Context, query *models.ValidateResetPasswordCodeQuery) error {
	login := getLoginForEmailCode(query.Code)
	if login == "" {
		return models.ErrInvalidEmailCode
	}

	userQuery := models.GetUserByLoginQuery{LoginOrEmail: login}
	if err := bus.DispatchCtx(ctx, &userQuery); err != nil {
		return err
	}

	validEmailCode, err := validateUserEmailCode(ns.Cfg, userQuery.Result, query.Code)
	if err != nil {
		return err
	}
	if !validEmailCode {
		return models.ErrInvalidEmailCode
	}

	query.Result = userQuery.Result
	return nil
}

// signUpStartedHandler emails a verification link when email verification is
// enabled, then records that the email was sent for the temp user.
func (ns *NotificationService) signUpStartedHandler(ctx context.Context, evt *events.SignUpStarted) error {
	if !setting.VerifyEmailEnabled {
		return nil
	}

	ns.log.Info("User signup started", "email", evt.Email)

	if evt.Email == "" {
		return nil
	}

	err := ns.sendEmailCommandHandler(ctx, &models.SendEmailCommand{
		To:       []string{evt.Email},
		Template: tmplSignUpStarted,
		Data: map[string]interface{}{
			"Email":     evt.Email,
			"Code":      evt.Code,
			"SignUpUrl": setting.ToAbsUrl(fmt.Sprintf("signup/?email=%s&code=%s", url.QueryEscape(evt.Email), url.QueryEscape(evt.Code))),
		},
	})
	if err != nil {
		return err
	}

	emailSentCmd := models.UpdateTempUserWithEmailSentCommand{Code: evt.Code}
	return bus.DispatchCtx(ctx, &emailSentCmd)
}

// signUpCompletedHandler sends the welcome email when enabled in SMTP config.
func (ns *NotificationService) signUpCompletedHandler(ctx context.Context, evt *events.SignUpCompleted) error {
	if evt.Email == "" || !ns.Cfg.Smtp.SendWelcomeEmailOnSignUp {
		return nil
	}

	return ns.sendEmailCommandHandler(ctx, &models.SendEmailCommand{
		To:       []string{evt.Email},
		Template: tmplWelcomeOnSignUp,
		Data: map[string]interface{}{
			"Name": evt.Name,
		},
	})
}
1.234375
1
blog/posts/handler/posts.go
kzmake/services
4
597
// Package handler implements the posts service: CRUD over a key/value store
// with secondary indexes by slug and by inverted creation timestamp, plus tag
// usage counts maintained in the tags service.
package handler

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"math"
	"time"

	tagProto "github.com/micro/examples/blog/tags/proto/tags"

	"github.com/gosimple/slug"
	"github.com/micro/go-micro/v2/client"
	log "github.com/micro/go-micro/v2/logger"
	"github.com/micro/go-micro/v2/store"

	posts "github.com/micro/examples/blog/posts/proto/posts"
)

const (
	tagType         = "post-tag"
	slugPrefix      = "slug"
	idPrefix        = "id"
	timeStampPrefix = "timestamp"
)

// tagServiceName is the registry name used for every call to the tags
// service. BUG FIX: savePost used "go.micro.service.tags" while diffTags used
// "go.micro.service.tag", so tag-count decrements targeted a nonexistent
// service; they are now unified.
const tagServiceName = "go.micro.service.tags"

// Post is the stored representation of a blog post.
type Post struct {
	ID              string   `json:"id"`
	Title           string   `json:"title"`
	Slug            string   `json:"slug"`
	Content         string   `json:"content"`
	CreateTimestamp int64    `json:"create_timestamp"`
	UpdateTimestamp int64    `json:"update_timestamp"`
	TagNames        []string `json:"tagNames"`
}

// Posts is the service handler.
type Posts struct {
	Store  store.Store
	Client client.Client
}

// Post creates or updates a post. It rejects requests missing ID, title or
// content, and rejects updates whose derived slug collides with a different
// existing post.
func (t *Posts) Post(ctx context.Context, req *posts.PostRequest, rsp *posts.PostResponse) error {
	if len(req.Post.Id) == 0 || len(req.Post.Title) == 0 || len(req.Post.Content) == 0 {
		return errors.New("ID, title or content is missing")
	}

	// Look the post up by ID to decide between create and update.
	records, err := t.Store.Read(fmt.Sprintf("%v:%v", idPrefix, req.Post.Id))
	if err != nil && err != store.ErrNotFound {
		return err
	}

	postSlug := slug.Make(req.Post.Title)

	// If no existing record is found, create a new one.
	if len(records) == 0 {
		post := &Post{
			ID:              req.Post.Id,
			Title:           req.Post.Title,
			Content:         req.Post.Content,
			TagNames:        req.Post.TagNames,
			Slug:            postSlug,
			CreateTimestamp: time.Now().Unix(),
		}
		return t.savePost(ctx, nil, post)
	}

	record := records[0]
	oldPost := &Post{}
	if err := json.Unmarshal(record.Value, oldPost); err != nil {
		return err
	}

	post := &Post{
		ID:              req.Post.Id,
		Title:           req.Post.Title,
		Content:         req.Post.Content,
		Slug:            postSlug,
		TagNames:        req.Post.TagNames,
		CreateTimestamp: oldPost.CreateTimestamp,
		UpdateTimestamp: time.Now().Unix(),
	}

	// Check whether the new slug is already taken by a different post.
	recordsBySlug, err := t.Store.Read(fmt.Sprintf("%v:%v", slugPrefix, postSlug))
	if err != nil && err != store.ErrNotFound {
		return err
	}
	if len(recordsBySlug) > 0 {
		otherSlugPost := &Post{}
		// BUG FIX: previously this unmarshalled record.Value (the record found
		// by ID), so the conflict check compared the post with itself and
		// never fired. It must inspect the record found by slug.
		if err := json.Unmarshal(recordsBySlug[0].Value, otherSlugPost); err != nil {
			return err
		}
		if oldPost.ID != otherSlugPost.ID {
			return errors.New("An other post with this slug already exists")
		}
	}

	return t.savePost(ctx, oldPost, post)
}

// savePost writes the post under its three index keys (id, slug, inverted
// timestamp) and keeps tag counts in sync. oldPost is nil on create.
func (t *Posts) savePost(ctx context.Context, oldPost, post *Post) error {
	bytes, err := json.Marshal(post)
	if err != nil {
		return err
	}

	err = t.Store.Write(&store.Record{
		Key:   fmt.Sprintf("%v:%v", idPrefix, post.ID),
		Value: bytes,
	})
	if err != nil {
		return err
	}

	// Delete the old slug index if the slug has changed.
	// BUG FIX: previously this deleted the NEW slug key (post.Slug), which is
	// rewritten immediately below, so the stale oldPost.Slug entry was never
	// removed.
	if oldPost != nil && oldPost.Slug != post.Slug {
		if err := t.Store.Delete(fmt.Sprintf("%v:%v", slugPrefix, oldPost.Slug)); err != nil {
			return err
		}
	}

	err = t.Store.Write(&store.Record{
		Key:   fmt.Sprintf("%v:%v", slugPrefix, post.Slug),
		Value: bytes,
	})
	if err != nil {
		return err
	}

	// Inverted timestamp key so a prefix scan returns newest posts first.
	err = t.Store.Write(&store.Record{
		Key:   fmt.Sprintf("%v:%v", timeStampPrefix, math.MaxInt64-post.CreateTimestamp),
		Value: bytes,
	})
	if err != nil {
		return err
	}

	if oldPost == nil {
		// New post: every tag gains one use.
		tagClient := tagProto.NewTagsService(tagServiceName, t.Client)
		for _, tagName := range post.TagNames {
			_, err := tagClient.IncreaseCount(ctx, &tagProto.IncreaseCountRequest{
				ParentID: post.ID,
				Type:     tagType,
				Title:    tagName,
			})
			if err != nil {
				return err
			}
		}
		return nil
	}

	return t.diffTags(ctx, post.ID, oldPost.TagNames, post.TagNames)
}

// diffTags decrements counts for tags removed from the post and increments
// counts for tags added to it. Count-update failures are logged, not fatal.
func (t *Posts) diffTags(ctx context.Context, parentID string, oldTagNames, newTagNames []string) error {
	oldTags := map[string]struct{}{}
	for _, v := range oldTagNames {
		oldTags[v] = struct{}{}
	}

	newTags := map[string]struct{}{}
	for _, v := range newTagNames {
		newTags[v] = struct{}{}
	}

	tagClient := tagProto.NewTagsService(tagServiceName, t.Client)

	for i := range oldTags {
		_, stillThere := newTags[i]
		if !stillThere {
			_, err := tagClient.DecreaseCount(ctx, &tagProto.DecreaseCountRequest{
				ParentID: parentID,
				Type:     tagType,
				Title:    i,
			})
			if err != nil {
				log.Errorf("Error decreasing count for tag '%v' with type '%v' for parent '%v'", i, tagType, parentID)
			}
		}
	}

	for i := range newTags {
		_, newlyAdded := oldTags[i]
		if newlyAdded {
			_, err := tagClient.IncreaseCount(ctx, &tagProto.IncreaseCountRequest{
				ParentID: parentID,
				Type:     tagType,
				Title:    i,
			})
			if err != nil {
				log.Errorf("Error increasing count for tag '%v' with type '%v' for parent '%v'", i, tagType, parentID)
			}
		}
	}

	return nil
}

// Query returns posts either by slug (when req.Slug is set) or newest-first
// via the inverted-timestamp index, honoring offset and limit (default 20).
func (t *Posts) Query(ctx context.Context, req *posts.QueryRequest, rsp *posts.QueryResponse) error {
	var records []*store.Record
	var err error

	if len(req.Slug) > 0 {
		key := fmt.Sprintf("%v:%v", slugPrefix, req.Slug)
		log.Infof("Reading post by slug: %v", req.Slug)
		records, err = t.Store.Read(key, store.ReadPrefix())
	} else {
		key := fmt.Sprintf("%v:", timeStampPrefix)
		var limit uint
		limit = 20
		if req.Limit > 0 {
			limit = uint(req.Limit)
		}
		log.Infof("Listing posts, offset: %v, limit: %v", req.Offset, limit)
		records, err = t.Store.Read(key, store.ReadPrefix(), store.ReadOffset(uint(req.Offset)), store.ReadLimit(limit))
	}
	if err != nil {
		return err
	}

	rsp.Posts = make([]*posts.Post, len(records))
	for i, record := range records {
		postRecord := &Post{}
		if err := json.Unmarshal(record.Value, postRecord); err != nil {
			return err
		}
		rsp.Posts[i] = &posts.Post{
			Id:       postRecord.ID,
			Title:    postRecord.Title,
			Slug:     postRecord.Slug,
			Content:  postRecord.Content,
			TagNames: postRecord.TagNames,
		}
	}

	return nil
}

// Delete removes a post and all three of its index entries.
func (t *Posts) Delete(ctx context.Context, req *posts.DeleteRequest, rsp *posts.DeleteResponse) error {
	log.Info("Received Post.Delete request")

	records, err := t.Store.Read(fmt.Sprintf("%v:%v", idPrefix, req.Id))
	if err != nil && err != store.ErrNotFound {
		return err
	}
	if len(records) == 0 {
		return fmt.Errorf("Post with ID %v not found", req.Id)
	}

	post := &Post{}
	if err := json.Unmarshal(records[0].Value, post); err != nil {
		return err
	}

	// Delete by ID.
	if err := t.Store.Delete(fmt.Sprintf("%v:%v", idPrefix, post.ID)); err != nil {
		return err
	}

	// Delete by slug.
	if err := t.Store.Delete(fmt.Sprintf("%v:%v", slugPrefix, post.Slug)); err != nil {
		return err
	}

	// Delete by timestamp.
	// BUG FIX: savePost writes this key as math.MaxInt64-CreateTimestamp, but
	// the original deleted the raw CreateTimestamp key, so the timestamp
	// index entry was never removed.
	return t.Store.Delete(fmt.Sprintf("%v:%v", timeStampPrefix, math.MaxInt64-post.CreateTimestamp))
}
1.640625
2
database/elasticsearch/elasticsearch.go
rayl15/pkgs
0
605
package elasticsearch

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	log "github.com/sirupsen/logrus"

	"github.com/rayl15/pkgs/database"
	"github.com/rayl15/pkgs/utils"

	"github.com/olivere/elastic/v7"
	"github.com/pkg/errors"
)

// Database is the elasticsearch malice database object.
type Database struct {
	Host     string                 `json:"host,omitempty"`
	Port     string                 `json:"port,omitempty"`
	URL      string                 `json:"url,omitempty"`
	Username string                 `json:"username,omitempty"`
	Password string                 `json:"password,omitempty"`
	Index    string                 `json:"index,omitempty"`
	Type     string                 `json:"type,omitempty"`
	Plugins  map[string]interface{} `json:"plugins,omitempty"`
}

// passwordEnvVar is the environment variable consulted for the password.
// NOTE(review): in this copy most call sites had the name scrubbed to
// "<PASSWORD>"; one occurrence kept the suffix "ELASTICSEARCH_PASSWORD", so it
// is restored to MALICE_ELASTICSEARCH_PASSWORD everywhere, matching the other
// MALICE_ELASTICSEARCH_* variables — confirm against upstream.
const passwordEnvVar = "MALICE_ELASTICSEARCH_PASSWORD"

var (
	defaultIndex string
	defaultType  string
	defaultHost  string
	defaultPort  string
	defaultURL   string
)

func init() {
	defaultIndex = utils.Getopt("MALICE_ELASTICSEARCH_INDEX", "malice")
	defaultType = utils.Getopt("MALICE_ELASTICSEARCH_TYPE", "samples")
	defaultHost = utils.Getopt("MALICE_ELASTICSEARCH_HOST", "localhost")
	defaultPort = utils.Getopt("MALICE_ELASTICSEARCH_PORT", "9200")
}

// getURL resolves the connection URL with the following order of precedence:
// user input (cli), user ENV, sane defaults. Running inside Docker prefers
// the `elasticsearch` hostname.
func (db *Database) getURL() {
	// Fall back to defaults for anything unset.
	if len(strings.TrimSpace(db.Index)) == 0 {
		db.Index = defaultIndex
	}
	if len(strings.TrimSpace(db.Type)) == 0 {
		db.Type = defaultType
	}
	if len(strings.TrimSpace(db.Host)) == 0 {
		db.Host = defaultHost
	}
	if len(strings.TrimSpace(db.Port)) == 0 {
		db.Port = defaultPort
	}

	// If the user supplied a URL, keep it.
	if len(strings.TrimSpace(db.URL)) == 0 {
		// When running in Docker use the `elasticsearch` hostname.
		if _, exists := os.LookupEnv("MALICE_IN_DOCKER"); exists {
			db.URL = utils.Getopt("MALICE_ELASTICSEARCH_URL", fmt.Sprintf("%s:%s", "elasticsearch", db.Port))
			log.WithField("elasticsearch_url", db.URL).Debug("running malice in docker")
			return
		}
		db.URL = utils.Getopt("MALICE_ELASTICSEARCH_URL", fmt.Sprintf("%s:%s", db.Host, db.Port))
	}
}

// newClient builds a simple elasticsearch client for db.URL with basic auth.
// Extracted because the identical construction was previously duplicated in
// five methods.
func (db *Database) newClient() (*elastic.Client, error) {
	return elastic.NewSimpleClient(
		elastic.SetURL(db.URL),
		elastic.SetBasicAuth(
			utils.Getopts(db.Username, "MALICE_ELASTICSEARCH_USERNAME", ""),
			utils.Getopts(db.Password, passwordEnvVar, ""),
		),
	)
}

// Init initializes ElasticSearch for use with malice: it verifies
// connectivity and creates the index (with the package-level mapping) if it
// does not already exist.
func (db *Database) Init() error {
	// Create URL from host/port.
	db.getURL()

	// Test connection to ElasticSearch.
	if err := db.TestConnection(); err != nil {
		return errors.Wrap(err, "failed to connect to database")
	}

	client, err := db.newClient()
	if err != nil {
		return errors.Wrap(err, "failed to create elasticsearch simple client")
	}

	exists, err := client.IndexExists(db.Index).Do(context.Background())
	if err != nil {
		return errors.Wrap(err, "failed to check if index exists")
	}

	if !exists {
		// Index does not exist yet.
		createIndex, err := client.CreateIndex(db.Index).BodyString(mapping).Do(context.Background())
		if err != nil {
			return errors.Wrapf(err, "failed to create index: %s", db.Index)
		}
		if !createIndex.Acknowledged {
			log.Error("index creation not acknowledged")
		} else {
			log.Debugf("created index %s", db.Index)
		}
	} else {
		log.Debugf("index %s already exists", db.Index)
	}

	return nil
}

// TestConnection pings the ElasticSearch server to verify it is reachable.
func (db *Database) TestConnection() error {
	// Create URL from host/port.
	db.getURL()

	client, err := db.newClient()
	if err != nil {
		return errors.Wrap(err, "failed to create elasticsearch simple client")
	}

	// Ping the Elasticsearch server to get e.g. the version number.
	log.Debugf("attempting to PING to: %s", db.URL)
	info, code, err := client.Ping(db.URL).Do(context.Background())
	if err != nil {
		return errors.Wrap(err, "failed to ping elasticsearch")
	}

	log.WithFields(log.Fields{
		"code":    code,
		"cluster": info.ClusterName,
		"version": info.Version.Number,
		"url":     db.URL,
	}).Debug("elasticSearch connection successful")

	return nil
}

// WaitForConnection retries TestConnection once per second until it succeeds
// or timeout (seconds) elapses.
func (db *Database) WaitForConnection(ctx context.Context, timeout int) error {
	var err error
	secondsWaited := 0

	connCtx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
	defer cancel()

	log.Debug("===> trying to connect to elasticsearch")
	for {
		select {
		case <-connCtx.Done():
			return errors.Wrapf(err, "connecting to elasticsearch timed out after %d seconds", secondsWaited)
		default:
			err = db.TestConnection()
			if err == nil {
				log.Debugf("elasticsearch came online after %d seconds", secondsWaited)
				return nil
			}
			// Not ready yet.
			secondsWaited++
			log.Debug(" * could not connect to elasticsearch (sleeping for 1 second)")
			time.Sleep(1 * time.Second)
		}
	}
}

// StoreFileInfo inserts initial sample info into the database, creating a
// placeholder document that StorePluginResults later updates.
func (db *Database) StoreFileInfo(sample map[string]interface{}) (elastic.IndexResponse, error) {
	if len(db.Plugins) == 0 {
		return elastic.IndexResponse{}, errors.New("Database.Plugins is empty (you must set this field to use this function)")
	}

	// Test connection to ElasticSearch.
	if err := db.TestConnection(); err != nil {
		return elastic.IndexResponse{}, errors.Wrap(err, "failed to connect to database")
	}

	client, err := db.newClient()
	if err != nil {
		return elastic.IndexResponse{}, errors.Wrap(err, "failed to create elasticsearch simple client")
	}

	// NOTE: no explicit ID is set so files can be re-scanned with updated
	// signatures in the future.
	fInfo := map[string]interface{}{
		"file":      sample,
		"plugins":   db.Plugins,
		"scan_date": time.Now().Format(time.RFC3339Nano),
	}

	newScan, err := client.Index().
		Index(db.Index).
		Type(db.Type).
		OpType("index").
		BodyJson(fInfo).
		Do(context.Background())
	if err != nil {
		return elastic.IndexResponse{}, errors.Wrap(err, "failed to index file info")
	}

	log.WithFields(log.Fields{
		"id":    newScan.Id,
		"index": newScan.Index,
		"type":  newScan.Type,
	}).Debug("indexed sample")

	return *newScan, nil
}

// StoreHash stores a hash into the database that has been queried via
// intel-plugins. The hash type (md5/sha1/sha256/...) is auto-detected.
func (db *Database) StoreHash(hash string) (elastic.IndexResponse, error) {
	if len(db.Plugins) == 0 {
		return elastic.IndexResponse{}, errors.New("Database.Plugins is empty (you must set this field to use this function)")
	}

	hashType, err := utils.GetHashType(hash)
	if err != nil {
		return elastic.IndexResponse{}, errors.Wrapf(err, "unable to detect hash type: %s", hash)
	}

	// Test connection to ElasticSearch.
	if err := db.TestConnection(); err != nil {
		return elastic.IndexResponse{}, errors.Wrap(err, "failed to connect to database")
	}

	client, err := db.newClient()
	if err != nil {
		return elastic.IndexResponse{}, errors.Wrap(err, "failed to create elasticsearch simple client")
	}

	scan := map[string]interface{}{
		"file": map[string]interface{}{
			hashType: hash,
		},
		"plugins":   db.Plugins,
		"scan_date": time.Now().Format(time.RFC3339Nano),
	}

	newScan, err := client.Index().
		Index(db.Index).
		Type(db.Type).
		OpType("create").
		BodyJson(scan).
		Do(context.Background())
	if err != nil {
		return elastic.IndexResponse{}, errors.Wrapf(err, "unable to index hash: %s", hash)
	}

	log.WithFields(log.Fields{
		"id":    newScan.Id,
		"index": newScan.Index,
		"type":  newScan.Type,
	}).Debug("indexed sample")

	return *newScan, nil
}

// StorePluginResults stores a plugin's results by updating the placeholder
// created by StoreFileInfo, or creates a fresh document if none exists.
func (db *Database) StorePluginResults(results database.PluginResults) error {
	// Test connection to ElasticSearch.
	if err := db.TestConnection(); err != nil {
		return errors.Wrap(err, "failed to connect to database")
	}

	client, err := db.newClient()
	if err != nil {
		return errors.Wrap(err, "failed to create elasticsearch simple client")
	}

	// Get the sample db record; a 404 is not fatal (handled below).
	getSample, err := client.Get().
		Index(db.Index).
		Type(db.Type).
		Id(results.ID).
		Do(context.Background())
	if err != nil && !elastic.IsNotFound(err) {
		return errors.Wrapf(err, "failed to get sample with id: %s", results.ID)
	}

	if getSample != nil && getSample.Found {
		log.Debugf("got document %s in version %d from index %s, type %s\n", getSample.Id, getSample.Version, getSample.Index, getSample.Type)

		updateScan := map[string]interface{}{
			"scan_date": time.Now().Format(time.RFC3339Nano),
			"plugins": map[string]interface{}{
				results.Category: map[string]interface{}{
					results.Name: results.Data,
				},
			},
		}

		update, err := client.Update().
			Index(db.Index).
			Type(db.Type).
			Id(getSample.Id).
			Doc(updateScan).
			RetryOnConflict(3).
			Refresh("wait_for").
			Do(context.Background())
		if err != nil {
			return errors.Wrapf(err, "failed to update sample with id: %s", results.ID)
		}

		log.Debugf("updated version of sample %q is now %d\n", update.Id, update.Version)
	} else {
		// ID not found, so create a new document with the `index` op type.
		scan := map[string]interface{}{
			"plugins": map[string]interface{}{
				results.Category: map[string]interface{}{
					results.Name: results.Data,
				},
			},
			"scan_date": time.Now().Format(time.RFC3339Nano),
		}

		newScan, err := client.Index().
			Index(db.Index).
			Type(db.Type).
			OpType("index").
			BodyJson(scan).
			Do(context.Background())
		if err != nil {
			return errors.Wrapf(err, "failed to create new sample plugin doc with id: %s", results.ID)
		}

		log.WithFields(log.Fields{
			"id":    newScan.Id,
			"index": newScan.Index,
			"type":  newScan.Type,
		}).Debug("indexed sample")
	}

	return nil
}
1.53125
2
src/drivers/github_test.go
blaggacao/git-town
0
613
package drivers_test import ( "encoding/json" "io/ioutil" "net/http" "testing" . "github.com/git-town/git-town/src/drivers" "github.com/stretchr/testify/assert" httpmock "gopkg.in/jarcoal/httpmock.v1" ) var pullRequestBaseURL = "https://api.github.com/repos/git-town/git-town/pulls" var currentPullRequestURL = pullRequestBaseURL + "?base=main&head=git-town%3Afeature&state=open" var childPullRequestsURL = pullRequestBaseURL + "?base=feature&state=open" var mergePullRequestURL = pullRequestBaseURL + "/1/merge" var updatePullRequestBaseURL1 = pullRequestBaseURL + "/2" var updatePullRequestBaseURL2 = pullRequestBaseURL + "/3" func setupDriver(t *testing.T, token string) (CodeHostingDriver, func()) { httpmock.Activate() driver := GetDriver(DriverOptions{OriginURL: "<EMAIL>:git-town/git-town.git"}) assert.NotNil(t, driver) if token != "" { driver.SetAPIToken(token) } return driver, func() { httpmock.DeactivateAndReset() } } func TestGitHubDriver_CanMergePullRequest(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, `[{"number": 1, "title": "my title" }]`)) canMerge, defaultCommintMessage, err := driver.CanMergePullRequest("feature", "main") assert.Nil(t, err) assert.True(t, canMerge) assert.Equal(t, "my title (#1)", defaultCommintMessage) } func TestGitHubDriver_CanMergePullRequest_EmptyGithubToken(t *testing.T) { driver, teardown := setupDriver(t, "") defer teardown() driver.SetAPIToken("") canMerge, _, err := driver.CanMergePullRequest("feature", "main") assert.Nil(t, err) assert.False(t, canMerge) } func TestGitHubDriver_CanMergePullRequest_GetPullRequestNumberFails(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(404, "")) _, _, err := driver.CanMergePullRequest("feature", "main") assert.Error(t, err) } func 
TestGitHubDriver_CanMergePullRequest_NoPullRequestForBranch(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, "[]")) canMerge, _, err := driver.CanMergePullRequest("feature", "main") assert.Nil(t, err) assert.False(t, canMerge) } func TestGitHubDriver_CanMergePullRequest_MultiplePullRequestsForBranch(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, `[{"number": 1}, {"number": 2}]`)) canMerge, _, err := driver.CanMergePullRequest("feature", "main") assert.Nil(t, err) assert.False(t, canMerge) } func TestGitHubDriver_MergePullRequest_GetPullRequestIdsFails(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() options := MergePullRequestOptions{ Branch: "feature", CommitMessage: "title\nextra detail1\nextra detail2", ParentBranch: "main", } httpmock.RegisterResponder("GET", childPullRequestsURL, httpmock.NewStringResponder(404, "")) _, err := driver.MergePullRequest(options) assert.Error(t, err) } func TestGitHubDriver_MergePullRequest_GetPullRequestToMergeFails(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() options := MergePullRequestOptions{ Branch: "feature", CommitMessage: "title\nextra detail1\nextra detail2", ParentBranch: "main", } httpmock.RegisterResponder("GET", childPullRequestsURL, httpmock.NewStringResponder(200, "[]")) httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(404, "")) _, err := driver.MergePullRequest(options) assert.Error(t, err) } func TestGitHubDriver_MergePullRequest_PullRequestNotFound(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() options := MergePullRequestOptions{ Branch: "feature", CommitMessage: "title\nextra detail1\nextra detail2", ParentBranch: "main", } 
httpmock.RegisterResponder("GET", childPullRequestsURL, httpmock.NewStringResponder(200, "[]")) httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, "[]")) _, err := driver.MergePullRequest(options) assert.Error(t, err) assert.Equal(t, "no pull request found", err.Error()) } func TestGitHubDriver_MergePullRequest_MultiplePullRequestsFound(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() options := MergePullRequestOptions{ Branch: "feature", CommitMessage: "title\nextra detail1\nextra detail2", ParentBranch: "main", } httpmock.RegisterResponder("GET", childPullRequestsURL, httpmock.NewStringResponder(200, "[]")) httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, `[{"number": 1}, {"number": 2}]`)) _, err := driver.MergePullRequest(options) assert.Error(t, err) assert.Equal(t, "multiple pull requests found: 1, 2", err.Error()) } func TestGitHubDriver_MergePullRequest(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() options := MergePullRequestOptions{ Branch: "feature", CommitMessage: "title\nextra detail1\nextra detail2", ParentBranch: "main", } var mergeRequest *http.Request httpmock.RegisterResponder("GET", childPullRequestsURL, httpmock.NewStringResponder(200, "[]")) httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, `[{"number": 1}]`)) httpmock.RegisterResponder("PUT", mergePullRequestURL, func(req *http.Request) (*http.Response, error) { mergeRequest = req return httpmock.NewStringResponse(200, `{"sha": "abc123"}`), nil }) sha, err := driver.MergePullRequest(options) assert.Nil(t, err) assert.Equal(t, "abc123", sha) mergeParameters := getRequestData(mergeRequest) assert.Equal(t, "title", mergeParameters["commit_title"]) assert.Equal(t, "extra detail1\nextra detail2", mergeParameters["commit_message"]) assert.Equal(t, "squash", mergeParameters["merge_method"]) } func 
TestGitHubDriver_MergePullRequest_MergeFails(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() options := MergePullRequestOptions{ Branch: "feature", CommitMessage: "title\nextra detail1\nextra detail2", ParentBranch: "main", } httpmock.RegisterResponder("GET", childPullRequestsURL, httpmock.NewStringResponder(200, "[]")) httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, `[{"number": 1}]`)) httpmock.RegisterResponder("PUT", mergePullRequestURL, httpmock.NewStringResponder(404, "")) _, err := driver.MergePullRequest(options) assert.Error(t, err) } func TestGitHubDriver_MergePullRequest_UpdateChildPRs(t *testing.T) { driver, teardown := setupDriver(t, "TOKEN") defer teardown() options := MergePullRequestOptions{ Branch: "feature", CommitMessage: "title\nextra detail1\nextra detail2", ParentBranch: "main", } var updateRequest1, updateRequest2 *http.Request httpmock.RegisterResponder("GET", childPullRequestsURL, httpmock.NewStringResponder(200, `[{"number": 2}, {"number": 3}]`)) httpmock.RegisterResponder("PATCH", updatePullRequestBaseURL1, func(req *http.Request) (*http.Response, error) { updateRequest1 = req return httpmock.NewStringResponse(200, ""), nil }) httpmock.RegisterResponder("PATCH", updatePullRequestBaseURL2, func(req *http.Request) (*http.Response, error) { updateRequest2 = req return httpmock.NewStringResponse(200, ""), nil }) httpmock.RegisterResponder("GET", currentPullRequestURL, httpmock.NewStringResponder(200, `[{"number": 1}]`)) httpmock.RegisterResponder("PUT", mergePullRequestURL, httpmock.NewStringResponder(200, `{"sha": "abc123"}`)) _, err := driver.MergePullRequest(options) assert.Nil(t, err) updateParameters1 := getRequestData(updateRequest1) assert.Equal(t, "main", updateParameters1["base"]) updateParameters2 := getRequestData(updateRequest2) assert.Equal(t, "main", updateParameters2["base"]) } func getRequestData(request *http.Request) map[string]interface{} { dataStr, err 
:= ioutil.ReadAll(request.Body) if err != nil { panic(err) } data := map[string]interface{}{} err = json.Unmarshal(dataStr, &data) if err != nil { panic(err) } return data }
1.492188
1
server/api/checker_test.go
srstack/pd
0
621
// Copyright 2021 TiKV Project Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package api import ( "encoding/json" "fmt" "time" . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/server" ) var _ = Suite(&testCheckerSuite{}) type testCheckerSuite struct { svr *server.Server cleanup cleanUpFunc urlPrefix string } func (s *testCheckerSuite) SetUpSuite(c *C) { s.svr, s.cleanup = mustNewServer(c) mustWaitLeader(c, []*server.Server{s.svr}) addr := s.svr.GetAddr() s.urlPrefix = fmt.Sprintf("%s%s/api/v1/checker", addr, apiPrefix) mustBootstrapCluster(c, s.svr) mustPutStore(c, s.svr, 1, metapb.StoreState_Up, metapb.NodeState_Serving, nil) mustPutStore(c, s.svr, 2, metapb.StoreState_Up, metapb.NodeState_Serving, nil) } func (s *testCheckerSuite) TearDownSuite(c *C) { s.cleanup() } func (s *testCheckerSuite) TestAPI(c *C) { s.testErrCases(c) cases := []struct { name string }{ {name: "learner"}, {name: "replica"}, {name: "rule"}, {name: "split"}, {name: "merge"}, {name: "joint-state"}, } for _, ca := range cases { s.testGetStatus(ca.name, c) s.testPauseOrResume(ca.name, c) } } func (s *testCheckerSuite) testErrCases(c *C) { // missing args input := make(map[string]interface{}) pauseArgs, err := json.Marshal(input) c.Assert(err, IsNil) err = postJSON(testDialClient, s.urlPrefix+"/merge", pauseArgs) c.Assert(err, NotNil) // negative delay input["delay"] = -10 pauseArgs, err = json.Marshal(input) c.Assert(err, IsNil) err = 
postJSON(testDialClient, s.urlPrefix+"/merge", pauseArgs) c.Assert(err, NotNil) // wrong name name := "dummy" input["delay"] = 30 pauseArgs, err = json.Marshal(input) c.Assert(err, IsNil) err = postJSON(testDialClient, s.urlPrefix+"/"+name, pauseArgs) c.Assert(err, NotNil) input["delay"] = 0 pauseArgs, err = json.Marshal(input) c.Assert(err, IsNil) err = postJSON(testDialClient, s.urlPrefix+"/"+name, pauseArgs) c.Assert(err, NotNil) } func (s *testCheckerSuite) testGetStatus(name string, c *C) { handler := s.svr.GetHandler() // normal run resp := make(map[string]interface{}) err := readJSON(testDialClient, fmt.Sprintf("%s/%s", s.urlPrefix, name), &resp) c.Assert(err, IsNil) c.Assert(resp["paused"], IsFalse) // paused err = handler.PauseOrResumeChecker(name, 30) c.Assert(err, IsNil) resp = make(map[string]interface{}) err = readJSON(testDialClient, fmt.Sprintf("%s/%s", s.urlPrefix, name), &resp) c.Assert(err, IsNil) c.Assert(resp["paused"], IsTrue) // resumed err = handler.PauseOrResumeChecker(name, 1) c.Assert(err, IsNil) time.Sleep(time.Second) resp = make(map[string]interface{}) err = readJSON(testDialClient, fmt.Sprintf("%s/%s", s.urlPrefix, name), &resp) c.Assert(err, IsNil) c.Assert(resp["paused"], IsFalse) } func (s *testCheckerSuite) testPauseOrResume(name string, c *C) { handler := s.svr.GetHandler() input := make(map[string]interface{}) // test pause. input["delay"] = 30 pauseArgs, err := json.Marshal(input) c.Assert(err, IsNil) err = postJSON(testDialClient, s.urlPrefix+"/"+name, pauseArgs) c.Assert(err, IsNil) isPaused, err := handler.IsCheckerPaused(name) c.Assert(err, IsNil) c.Assert(isPaused, IsTrue) input["delay"] = 1 pauseArgs, err = json.Marshal(input) c.Assert(err, IsNil) err = postJSON(testDialClient, s.urlPrefix+"/"+name, pauseArgs) c.Assert(err, IsNil) time.Sleep(time.Second) isPaused, err = handler.IsCheckerPaused(name) c.Assert(err, IsNil) c.Assert(isPaused, IsFalse) // test resume. 
input = make(map[string]interface{}) input["delay"] = 30 pauseArgs, err = json.Marshal(input) c.Assert(err, IsNil) err = postJSON(testDialClient, s.urlPrefix+"/"+name, pauseArgs) c.Assert(err, IsNil) input["delay"] = 0 pauseArgs, err = json.Marshal(input) c.Assert(err, IsNil) err = postJSON(testDialClient, s.urlPrefix+"/"+name, pauseArgs) c.Assert(err, IsNil) isPaused, err = handler.IsCheckerPaused(name) c.Assert(err, IsNil) c.Assert(isPaused, IsFalse) }
1.71875
2
internal/provider/env_test.go
besharper/systemk
84
629
package provider import ( "testing" ) func TestProviderIPEnvironment(t *testing.T) { p := new(p) p.config = &Opts{ NodeInternalIP: []byte{192, 168, 1, 1}, NodeExternalIP: []byte{172, 16, 0, 1}, } env := p.defaultEnvironment() found := 0 for _, e := range env { if e == "SYSTEMK_NODE_INTERNAL_IP=192.168.1.1" { found++ } if e == "SYSTEMK_NODE_EXTERNAL_IP=172.16.0.1" { found++ } } if found != 2 { t.Errorf("failed to find SYSTEMK_NODE_INTERNAL_IP or SYSTEMK_NODE_EXTERNAL_IP") } }
1.023438
1
jwk/manager_test_helpers.go
coupa/hydra-sand
2
637
package jwk import ( "crypto/rand" "io" "testing" "github.com/ory/hydra/pkg" "github.com/pkg/errors" "github.com/square/go-jose" "github.com/stretchr/testify/assert" ) func RandomBytes(n int) ([]byte, error) { bytes := make([]byte, n) if _, err := io.ReadFull(rand.Reader, bytes); err != nil { return []byte{}, errors.WithStack(err) } return bytes, nil } func TestHelperManagerKey(m Manager, keys *jose.JSONWebKeySet) func(t *testing.T) { pub := keys.Key("public") priv := keys.Key("private") return func(t *testing.T) { _, err := m.GetKey("faz", "baz") assert.NotNil(t, err) err = m.AddKey("faz", First(priv)) assert.Nil(t, err) got, err := m.GetKey("faz", "private") assert.Nil(t, err) assert.Equal(t, priv, got.Keys) err = m.AddKey("faz", First(pub)) assert.Nil(t, err) got, err = m.GetKey("faz", "private") assert.Nil(t, err) assert.Equal(t, priv, got.Keys) got, err = m.GetKey("faz", "public") assert.Nil(t, err) assert.Equal(t, pub, got.Keys) err = m.DeleteKey("faz", "public") assert.Nil(t, err) _, err = m.GetKey("faz", "public") assert.NotNil(t, err) } } func TestHelperManagerKeySet(m Manager, keys *jose.JSONWebKeySet) func(t *testing.T) { return func(t *testing.T) { _, err := m.GetKeySet("foo") pkg.AssertError(t, true, err) err = m.AddKeySet("bar", keys) assert.Nil(t, err) got, err := m.GetKeySet("bar") assert.Nil(t, err) assert.Equal(t, keys.Key("public"), got.Key("public")) assert.Equal(t, keys.Key("private"), got.Key("private")) err = m.DeleteKeySet("bar") assert.Nil(t, err) _, err = m.GetKeySet("bar") assert.NotNil(t, err) } }
1.664063
2
go/src/infra/cmd/cros_test_platform/internal/cmd/skylab-execute.go
xswz8015/infra
0
645
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package cmd import ( "context" "fmt" "github.com/maruel/subcommands" "go.chromium.org/luci/common/cli" "infra/cmd/cros_test_platform/luciexe/execute" ) // SkylabExecute subcommand: Run a set of enumerated tests against skylab backend. var SkylabExecute = &subcommands.Command{ UsageLine: "skylab-execute -input_json /path/to/input.json -output_json /path/to/output.json", ShortDesc: "Run a set of enumerated tests against skylab backend.", LongDesc: `Run a set of enumerated tests against skylab backend.`, CommandRun: func() subcommands.CommandRun { c := &skylabExecuteRun{} c.Flags.StringVar(&c.inputPath, "input_json", "", "Path to JSON ExecuteRequests to read.") c.Flags.StringVar(&c.outputPath, "output_json", "", "Path to JSON ExecuteResponses to write.") return c }, } type skylabExecuteRun struct { subcommands.CommandRunBase inputPath string outputPath string } func (c *skylabExecuteRun) Run(a subcommands.Application, args []string, env subcommands.Env) int { if err := c.validateArgs(); err != nil { fmt.Fprintln(a.GetErr(), err.Error()) c.Flags.Usage() return exitCode(err) } ctx := cli.GetContext(a, c, env) ctx = setupLogging(ctx) err := c.innerRun(ctx, args, env) if err != nil { logApplicationError(ctx, a, err) } return exitCode(err) } func (c *skylabExecuteRun) validateArgs() error { if c.inputPath == "" { return fmt.Errorf("-input_json not specified") } if c.outputPath == "" { return fmt.Errorf("-output_json not specified") } return nil } func (c *skylabExecuteRun) innerRun(ctx context.Context, args []string, env subcommands.Env) error { return execute.Run(ctx, execute.Args{ InputPath: c.inputPath, OutputPath: c.outputPath, SwarmingTaskID: env["SWARMING_TASK_ID"].Value, }) }
1.375
1
service.go
vania-pooh/nckl
0
653
package main import ( "bytes" "context" "encoding/json" "errors" "fmt" "github.com/abbot/go-http-auth" "io/ioutil" "log" "math" "net/http" "net/http/httputil" "net/url" "strconv" "strings" "sync" "time" ) const ( wdHub = "/wd/hub/" statusPath = "/status" queuePath = wdHub badRequestPath = "/badRequest" pingPath = "/ping" badRequestMessage = "msg" slash = "/" ) var ( sessions = make(Sessions) timeoutCancels = make(map[string]chan bool) leases = make(map[string]Lease) sessionLock sync.RWMutex stateLock sync.Mutex updateLock sync.Mutex ) func badRequest(w http.ResponseWriter, r *http.Request) { msg := r.URL.Query().Get(badRequestMessage) if msg == "" { msg = "bad request" } http.Error(w, msg, http.StatusBadRequest) } type requestInfo struct { maxConnections int browser BrowserId browserState BrowserState processName string process *Process command string lease Lease error error } func getRequestInfo(r *http.Request) *requestInfo { quotaName, _, _ := r.BasicAuth() stateLock.Lock() defer stateLock.Unlock() if _, ok := state[quotaName]; !ok { state[quotaName] = &QuotaState{} } quotaState := *state[quotaName] err, browserName, version, processName, priority, command := parsePath(r.URL) if err != nil { return &requestInfo{0, BrowserId{}, nil, "", nil, "", 0, err} } browserId := BrowserId{Name: browserName, Version: version} if _, ok := quotaState[browserId]; !ok { quotaState[browserId] = &BrowserState{} } browserState := *quotaState[browserId] maxConnections := quota.MaxConnections(quotaName, browserName, version) process := getProcess(browserState, processName, priority, maxConnections) return &requestInfo{maxConnections, browserId, browserState, processName, process, command, 0, nil} } type transport struct { http.RoundTripper } func (t *transport) RoundTrip(r *http.Request) (*http.Response, error) { requestInfo := getRequestInfo(r) ctx, _ := context.WithTimeout(r.Context(), requestTimeout) r = r.WithContext(ctx) err := requestInfo.error if err != nil { 
log.Printf("[INVALID_REQUEST] [%v]\n", err) redirectToBadRequest(r, err.Error()) return t.RoundTripper.RoundTrip(r) } // Only new session requests should wait in queue command := requestInfo.command browserId := requestInfo.browser processName := requestInfo.processName process := requestInfo.process isNewSessionRequest := isNewSessionRequest(r.Method, command) if isNewSessionRequest { log.Printf("[CREATING] [%s %s] [%s] [%d]\n", browserId.Name, browserId.Version, processName, process.Priority) if process.CapacityQueue.Capacity() == 0 { refreshCapacities(requestInfo.maxConnections, requestInfo.browserState) if process.CapacityQueue.Capacity() == 0 { log.Printf("[NOT_ENOUGH_SESSIONS] [%s %s] [%s]\n", browserId.Name, browserId.Version, processName) redirectToBadRequest(r, "Not enough sessions for this process. Come back later.") return t.RoundTripper.RoundTrip(r) } } process.AwaitQueue <- struct{}{} lease, disconnected := process.CapacityQueue.Push(r.Context()) <-process.AwaitQueue if disconnected { log.Printf("[CLIENT_DISCONNECTED_FROM_QUEUE] [%s %s] [%s] [%d]\n", browserId.Name, browserId.Version, processName, process.Priority) return emptyResponse(), nil } requestInfo.lease = lease } //Here we change request url r.URL.Scheme = "http" r.URL.Host = destination r.URL.Path = fmt.Sprintf("%s%s", wdHub, command) resp, err := t.RoundTripper.RoundTrip(r) select { case <-r.Context().Done(): { log.Printf("[CLIENT_DISCONNECTED] [%s %s] [%s] [%d]\n", browserId.Name, browserId.Version, requestInfo.processName, process.Priority) cleanupQueue(isNewSessionRequest, requestInfo) return emptyResponse(), nil } default: { if err != nil { log.Printf("[REQUEST_ERROR] [%s %s] [%s] [%d] [%v]\n", browserId.Name, browserId.Version, requestInfo.processName, process.Priority, err) cleanupQueue(isNewSessionRequest, requestInfo) } else { processResponse(isNewSessionRequest, requestInfo, r, resp) } } } if r.Body != nil { r.Body.Close() } return resp, err } func emptyResponse() *http.Response { 
return &http.Response{ Body: ioutil.NopCloser(bytes.NewBufferString("")), StatusCode: http.StatusOK, } } func processResponse(isNewSessionRequest bool, requestInfo *requestInfo, r *http.Request, resp *http.Response) { browserId := requestInfo.browser processName := requestInfo.processName process := requestInfo.process if isNewSessionRequest { if resp.StatusCode == http.StatusOK { body, _ := ioutil.ReadAll(resp.Body) var reply map[string]interface{} if json.Unmarshal(body, &reply) != nil { log.Printf("[JSON_ERROR] [%s %s] [%s] [%d]\n", browserId.Name, browserId.Version, processName, process.Priority) cleanupQueue(isNewSessionRequest, requestInfo) return } rawSessionId := reply["sessionId"] switch rawSessionId.(type) { case string: { sessionId := rawSessionId.(string) cancelTimeout := make(chan bool) sessionLock.Lock() sessions[sessionId] = process timeoutCancels[sessionId] = cancelTimeout leases[sessionId] = requestInfo.lease sessionLock.Unlock() storage.AddSession(sessionId) go func() { select { case <-time.After(requestTimeout): { deleteSessionWithTimeout(sessionId, requestInfo, true) } case <-cancelTimeout: } }() storage.OnSessionDeleted(sessionId, func(id string) { deleteSession(id, requestInfo) }) resp.Body.Close() resp.Body = ioutil.NopCloser(bytes.NewReader(body)) log.Printf("[CREATED] [%s %s] [%s] [%d] [%s]\n", browserId.Name, browserId.Version, processName, process.Priority, sessionId) return } } } log.Printf("[NOT_CREATED] [%s %s] [%s] [%d]\n", browserId.Name, browserId.Version, processName, process.Priority) cleanupQueue(isNewSessionRequest, requestInfo) } if ok, sessionId := isDeleteSessionRequest(r.Method, requestInfo.command); ok { deleteSession(sessionId, requestInfo) } } func cleanupQueue(isNewSessionRequest bool, requestInfo *requestInfo) { if isNewSessionRequest { process := requestInfo.process process.CapacityQueue.Pop(requestInfo.lease) } } func deleteSession(sessionId string, requestInfo *requestInfo) { deleteSessionWithTimeout(sessionId, 
requestInfo, false) } func deleteSessionWithTimeout(sessionId string, requestInfo *requestInfo, timedOut bool) { browserId := requestInfo.browser processName := requestInfo.processName process := requestInfo.process sessionLock.RLock() process, ok := sessions[sessionId] sessionLock.RUnlock() if ok { if timedOut { log.Printf("[TIMED_OUT] [%s %s] [%s] [%d] [%s]\n", browserId.Name, browserId.Version, processName, process.Priority, sessionId) } log.Printf("[DELETING] [%s %s] [%s] [%d] [%s]\n", browserId.Name, browserId.Version, processName, process.Priority, sessionId) sessionLock.Lock() delete(sessions, sessionId) if cancel, ok := timeoutCancels[sessionId]; ok { delete(timeoutCancels, sessionId) close(cancel) } lease := leases[sessionId] delete(leases, sessionId) sessionLock.Unlock() process.CapacityQueue.Pop(lease) log.Printf("[DELETED] [%s %s] [%s] [%d] [%s]\n", browserId.Name, browserId.Version, processName, process.Priority, sessionId) } storage.DeleteSession(sessionId) } func isNewSessionRequest(httpMethod string, command string) bool { return httpMethod == http.MethodPost && command == "session" } func isDeleteSessionRequest(httpMethod string, command string) (bool, string) { if httpMethod == http.MethodDelete && strings.HasPrefix(command, "session") { pieces := strings.Split(command, slash) if len(pieces) == 2 { //Against DELETE window url return true, pieces[1] } } return false, "" } func redirectToBadRequest(r *http.Request, msg string) { r.URL.Scheme = "http" r.URL.Host = listen r.Method = "GET" r.URL.Path = badRequestPath values := r.URL.Query() values.Set(badRequestMessage, msg) r.URL.RawQuery = values.Encode() } func parsePath(url *url.URL) (error, string, string, string, int, string) { p := strings.Split(strings.TrimPrefix(url.Path, wdHub), slash) if len(p) < 5 { err := errors.New(fmt.Sprintf("invalid url [%s]: should have format /browserName/version/processName/priority/command", url)) return err, "", "", "", 0, "" } priority, err := strconv.Atoi(p[3]) 
if err != nil { priority = 1 } return nil, p[0], p[1], p[2], priority, strings.Join(p[4:], slash) } func getProcess(browserState BrowserState, name string, priority int, maxConnections int) *Process { updateLock.Lock() defer updateLock.Unlock() if _, ok := browserState[name]; !ok { currentPriorities := getActiveProcessesPriorities(browserState) currentPriorities[name] = priority newCapacities := calculateCapacities(browserState, currentPriorities, maxConnections) browserState[name] = createProcess(priority, newCapacities[name]) updateProcessCapacities(browserState, newCapacities) } process := browserState[name] process.Priority = priority process.LastActivity = time.Now() return process } func createProcess(priority int, capacity int) *Process { return &Process{ Priority: priority, AwaitQueue: make(chan struct{}, math.MaxUint32), CapacityQueue: CreateQueue(capacity), LastActivity: time.Now(), } } func getActiveProcessesPriorities(browserState BrowserState) ProcessMetrics { currentPriorities := make(ProcessMetrics) for name, process := range browserState { if isProcessActive(process) { currentPriorities[name] = process.Priority } } return currentPriorities } func isProcessActive(process *Process) bool { return len(process.AwaitQueue) > 0 || process.CapacityQueue.Size() > 0 || time.Now().Sub(process.LastActivity) < updateRate } func calculateCapacities(browserState BrowserState, activeProcessesPriorities ProcessMetrics, maxConnections int) ProcessMetrics { sumOfPriorities := 0 membersCount := storage.MembersCount() for _, priority := range activeProcessesPriorities { sumOfPriorities += priority } ret := ProcessMetrics{} for processName, priority := range activeProcessesPriorities { ret[processName] = round(float64(priority) / float64(sumOfPriorities) * float64(maxConnections) / float64(membersCount)) } for processName := range browserState { if _, ok := activeProcessesPriorities[processName]; !ok { ret[processName] = 0 } } return ret } func round(num float64) int { 
i, frac := math.Modf(num) if frac < 0.5 { return int(i) } else { return int(i + 1) } } func updateProcessCapacities(browserState BrowserState, newCapacities ProcessMetrics) { for processName, newCapacity := range newCapacities { process := browserState[processName] process.CapacityQueue.SetCapacity(newCapacity) } } func refreshCapacities(maxConnections int, browserState BrowserState) { updateLock.Lock() defer updateLock.Unlock() currentPriorities := getActiveProcessesPriorities(browserState) newCapacities := calculateCapacities(browserState, currentPriorities, maxConnections) updateProcessCapacities(browserState, newCapacities) } func status(w http.ResponseWriter, r *http.Request) { quotaName, _, _ := r.BasicAuth() status := []BrowserStatus{} if _, ok := state[quotaName]; ok { quotaState := state[quotaName] for browserId, browserState := range *quotaState { processes := make(map[string]ProcessStatus) for processName, process := range *browserState { processes[processName] = ProcessStatus{ Priority: process.Priority, Queued: len(process.AwaitQueue), Processing: process.CapacityQueue.Size(), Max: process.CapacityQueue.Capacity(), LastActivity: process.LastActivity.Format(time.UnixDate), } } status = append(status, BrowserStatus{ Name: browserId.String(), Processes: processes, }) } } json.NewEncoder(w).Encode(&status) } func ping(w http.ResponseWriter, r *http.Request) { w.Write([]byte("OK\n")) } func requireBasicAuth(authenticator *auth.BasicAuth, handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { return authenticator.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) { handler(w, &r.Request) }) } func withCloseNotifier(handler http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithCancel(r.Context()) go func() { handler(w, r.WithContext(ctx)) cancel() }() select { case <-w.(http.CloseNotifier).CloseNotify(): cancel() case <-ctx.Done(): } } } func 
mux() http.Handler { mux := http.NewServeMux() authenticator := auth.NewBasicAuthenticator( "Selenium Load Balancer", auth.HtpasswdFileProvider(usersFile), ) proxyFunc := (&httputil.ReverseProxy{ Director: func(*http.Request) {}, Transport: &transport{http.DefaultTransport}, }).ServeHTTP mux.HandleFunc(queuePath, requireBasicAuth(authenticator, withCloseNotifier(proxyFunc))) mux.HandleFunc(statusPath, requireBasicAuth(authenticator, status)) mux.HandleFunc(badRequestPath, badRequest) mux.HandleFunc(pingPath, ping) return mux }
1.53125
2
internal/operators/ocs/mock_validations.go
iranzo/assisted-service
0
661
// Code generated by MockGen. DO NOT EDIT. // Source: validations.go // Package ocs is a generated GoMock package. package ocs import ( gomock "github.com/golang/mock/gomock" models "github.com/openshift/assisted-service/models" reflect "reflect" ) // MockOcsValidator is a mock of OcsValidator interface type MockOcsValidator struct { ctrl *gomock.Controller recorder *MockOcsValidatorMockRecorder } // MockOcsValidatorMockRecorder is the mock recorder for MockOcsValidator type MockOcsValidatorMockRecorder struct { mock *MockOcsValidator } // NewMockOcsValidator creates a new mock instance func NewMockOcsValidator(ctrl *gomock.Controller) *MockOcsValidator { mock := &MockOcsValidator{ctrl: ctrl} mock.recorder = &MockOcsValidatorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use func (m *MockOcsValidator) EXPECT() *MockOcsValidatorMockRecorder { return m.recorder } // ValidateOCSRequirements mocks base method func (m *MockOcsValidator) ValidateOCSRequirements(cluster *models.Cluster) string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateOCSRequirements", cluster) ret0, _ := ret[0].(string) return ret0 } // ValidateOCSRequirements indicates an expected call of ValidateOCSRequirements func (mr *MockOcsValidatorMockRecorder) ValidateOCSRequirements(cluster interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateOCSRequirements", reflect.TypeOf((*MockOcsValidator)(nil).ValidateOCSRequirements), cluster) }
1.515625
2
compilerutil/cancelationhandle.go
serulian/compiler
7
669
// Copyright 2018 The Serulian Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package compilerutil // CancelFunction is a function that can be invoked to cancel the operation. type CancelFunction func() // CancelationHandle defines a handle for the cancelation of operations. type CancelationHandle interface { // Cancel marks the operation as canceled. Cancel() // WasCanceled returns whether the operation was canceled. WasCanceled() bool } // NewCancelationHandle returns a new handle for canceling an operation. func NewCancelationHandle() CancelationHandle { return &cancelationHandle{} } // NoopCancelationHandle returns a cancelation handle that cannot be canceled. func NoopCancelationHandle() CancelationHandle { return noopCancelationHandle{} } // GetCancelationHandle returns either the existing cancelation handle (if not nil) // or a new no-op handle. func GetCancelationHandle(existing CancelationHandle) CancelationHandle { if existing == nil { return noopCancelationHandle{} } return existing } type cancelationHandle struct { canceled bool } func (c *cancelationHandle) WasCanceled() bool { return c.canceled } func (c *cancelationHandle) Cancel() { c.canceled = true } type noopCancelationHandle struct{} func (noopCancelationHandle) WasCanceled() bool { return false } func (noopCancelationHandle) Cancel() { panic("Should never be called") }
2.671875
3
pkg/networkserver/downlink_internal_test.go
kurtmc/lorawan-stack
0
677
// Copyright © 2019 The Things Network Foundation, The Things Industries B.V. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package networkserver import ( "bytes" "context" "fmt" "math" "testing" "time" pbtypes "github.com/gogo/protobuf/types" "github.com/mohae/deepcopy" ulid "github.com/oklog/ulid/v2" "github.com/smartystreets/assertions" "go.thethings.network/lorawan-stack/pkg/band" "go.thethings.network/lorawan-stack/pkg/cluster" "go.thethings.network/lorawan-stack/pkg/component" "go.thethings.network/lorawan-stack/pkg/config" "go.thethings.network/lorawan-stack/pkg/crypto" "go.thethings.network/lorawan-stack/pkg/encoding/lorawan" "go.thethings.network/lorawan-stack/pkg/errors" "go.thethings.network/lorawan-stack/pkg/frequencyplans" "go.thethings.network/lorawan-stack/pkg/log" "go.thethings.network/lorawan-stack/pkg/ttnpb" "go.thethings.network/lorawan-stack/pkg/types" "go.thethings.network/lorawan-stack/pkg/util/test" "go.thethings.network/lorawan-stack/pkg/util/test/assertions/should" "google.golang.org/grpc" ) func TestProcessDownlinkTask(t *testing.T) { getPaths := []string{ "frequency_plan_id", "last_dev_status_received_at", "lorawan_phy_version", "mac_settings", "mac_state", "multicast", "pending_mac_state", "queued_application_downlinks", "recent_downlinks", "recent_uplinks", "session", } const appIDString = "process-downlink-test-app-id" appID := ttnpb.ApplicationIdentifiers{ApplicationID: appIDString} const devID = "process-downlink-test-dev-id" devAddr 
:= types.DevAddr{0x42, 0xff, 0xff, 0xff} fNwkSIntKey := types.AES128Key{0x42, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} nwkSEncKey := types.AES128Key{0x42, 0x42, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} sNwkSIntKey := types.AES128Key{0x42, 0x42, 0x42, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} sessionKeys := &ttnpb.SessionKeys{ FNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &fNwkSIntKey, }, NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, } rxMetadata := MakeRxMetadataSlice() eu868macParameters := &ttnpb.MACParameters{ Channels: MakeEU868Channels(&ttnpb.MACParameters_Channel{ UplinkFrequency: 430000000, DownlinkFrequency: 431000000, MinDataRateIndex: ttnpb.DATA_RATE_0, MaxDataRateIndex: ttnpb.DATA_RATE_3, }), Rx1Delay: ttnpb.RX_DELAY_3, Rx1DataRateOffset: 2, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, } assertGetRxMetadataGatewayPeers := func(ctx context.Context, getPeerCh <-chan test.ClusterGetPeerRequest, peer124, peer3 cluster.Peer) bool { t := test.MustTFromContext(ctx) t.Helper() a := assertions.New(t) return test.AssertClusterGetPeerRequestSequence(ctx, getPeerCh, []cluster.Peer{ nil, peer124, peer124, peer3, peer124, }, func(reqCtx context.Context, role ttnpb.ClusterRole, ids ttnpb.Identifiers) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(role, should.Equal, ttnpb.ClusterRole_GATEWAY_SERVER) && a.So(ids, should.Resemble, ttnpb.GatewayIdentifiers{ GatewayID: "gateway-test-0", }) }, func(reqCtx context.Context, role ttnpb.ClusterRole, ids ttnpb.Identifiers) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(role, should.Equal, ttnpb.ClusterRole_GATEWAY_SERVER) && a.So(ids, should.Resemble, ttnpb.GatewayIdentifiers{ GatewayID: "gateway-test-1", }) }, func(reqCtx context.Context, role ttnpb.ClusterRole, ids 
ttnpb.Identifiers) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(role, should.Equal, ttnpb.ClusterRole_GATEWAY_SERVER) && a.So(ids, should.Resemble, ttnpb.GatewayIdentifiers{ GatewayID: "gateway-test-2", }) }, func(reqCtx context.Context, role ttnpb.ClusterRole, ids ttnpb.Identifiers) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(role, should.Equal, ttnpb.ClusterRole_GATEWAY_SERVER) && a.So(ids, should.Resemble, ttnpb.GatewayIdentifiers{ GatewayID: "gateway-test-3", }) }, func(reqCtx context.Context, role ttnpb.ClusterRole, ids ttnpb.Identifiers) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(role, should.Equal, ttnpb.ClusterRole_GATEWAY_SERVER) && a.So(ids, should.Resemble, ttnpb.GatewayIdentifiers{ GatewayID: "gateway-test-4", }) }, ) } assertScheduleRxMetadataGateways := func(ctx context.Context, authCh <-chan test.ClusterAuthRequest, scheduleDownlink124Ch, scheduleDownlink3Ch <-chan NsGsScheduleDownlinkRequest, payload []byte, makeTxRequest func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest, resps ...NsGsScheduleDownlinkResponse) (*ttnpb.DownlinkMessage, bool) { if len(resps) < 1 || len(resps) > 3 { panic("invalid response count specified") } t := test.MustTFromContext(ctx) t.Helper() a := assertions.New(t) var lastDown *ttnpb.DownlinkMessage var correlationIDs []string if !a.So(AssertAuthNsGsScheduleDownlinkRequest(ctx, authCh, scheduleDownlink124Ch, func(ctx context.Context, msg *ttnpb.DownlinkMessage) bool { correlationIDs = msg.CorrelationIDs lastDown = &ttnpb.DownlinkMessage{ CorrelationIDs: correlationIDs, RawPayload: payload, Settings: &ttnpb.DownlinkMessage_Request{ Request: makeTxRequest( &ttnpb.DownlinkPath{ Path: &ttnpb.DownlinkPath_UplinkToken{ UplinkToken: []byte("<PASSWORD>"), }, }, &ttnpb.DownlinkPath{ Path: &ttnpb.DownlinkPath_UplinkToken{ UplinkToken: []byte("<PASSWORD>"), }, }, ), }, } return a.So(msg, should.Resemble, lastDown) }, grpc.EmptyCallOption{}, 
resps[0], ), should.BeTrue) { t.Error("Downlink assertion failed for gateways 1 and 2") return nil, false } t.Logf("Downlink correlation IDs: %v", correlationIDs) if len(resps) == 1 { return lastDown, true } lastDown = &ttnpb.DownlinkMessage{ CorrelationIDs: correlationIDs, RawPayload: payload, Settings: &ttnpb.DownlinkMessage_Request{ Request: makeTxRequest( &ttnpb.DownlinkPath{ Path: &ttnpb.DownlinkPath_UplinkToken{ UplinkToken: []byte("token-gtw-3"), }, }, ), }, } if !a.So(AssertAuthNsGsScheduleDownlinkRequest(ctx, authCh, scheduleDownlink3Ch, func(ctx context.Context, msg *ttnpb.DownlinkMessage) bool { return a.So(msg, should.Resemble, lastDown) }, grpc.EmptyCallOption{}, resps[1], ), should.BeTrue) { t.Error("Downlink assertion failed for gateway 3") return nil, false } if len(resps) == 2 { return lastDown, true } lastDown = &ttnpb.DownlinkMessage{ CorrelationIDs: correlationIDs, RawPayload: payload, Settings: &ttnpb.DownlinkMessage_Request{ Request: makeTxRequest( &ttnpb.DownlinkPath{ Path: &ttnpb.DownlinkPath_UplinkToken{ UplinkToken: []byte("<PASSWORD>"), }, }, ), }, } if !a.So(AssertAuthNsGsScheduleDownlinkRequest(ctx, authCh, scheduleDownlink124Ch, func(ctx context.Context, msg *ttnpb.DownlinkMessage) bool { return a.So(msg, should.Resemble, lastDown) }, grpc.EmptyCallOption{}, resps[2], ), should.BeTrue) { t.Error("Downlink assertion failed for gateway 4") return nil, false } return lastDown, true } for _, tc := range []struct { Name string DownlinkPriorities DownlinkPriorities Handler func(context.Context, TestEnvironment) bool ErrorAssertion func(*testing.T, error) bool }{ { Name: "no device", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan 
error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(nil) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.BeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{}: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "no MAC state", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: 
ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, 
err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows closed", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: 
time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for 
DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/no uplink", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) 
a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/no session", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, 
DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/Rx1,2 expired", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: 
ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Unix(0, 42), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, MACSettings: &ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(0), StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 0}, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while 
waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.HaveSameElementsDeep, []string{ "mac_state.rx_windows_available", }) if a.So(resp.Device, should.NotBeNil) && a.So(resp.Device.MACState, should.NotBeNil) { a.So(resp.Device.MACState.RxWindowsAvailable, should.BeFalse) } } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/Rx1,2/no downlink", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req 
:= <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, MACSettings: &ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(0), StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 0}, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, 
getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/Rx1,2/1.0.3/FCnt too low", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: 
&ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_0_3_REV_A, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_0_3, RxWindowsAvailable: true, }, MACSettings: &ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(0), StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 0}, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x22, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{0x11, 0x22, 0x33, 0x44}, }, { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x23, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: <KEY> }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) 
a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.HaveSameElementsDeep, []string{ "queued_application_downlinks", }) if a.So(resp.Device, should.NotBeNil) { a.So(resp.Device.QueuedApplicationDownlinks, should.BeEmpty) } } close(setFuncRespCh) setDevice := CopyEndDevice(getDevice) setDevice.QueuedApplicationDownlinks = nil select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/Rx1,2/payload too long", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) 
}() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_0_3_REV_A, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_0_3, RxWindowsAvailable: true, }, MACSettings: &ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(0), StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 0}, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: bytes.Repeat([]byte("x"), 256), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY> }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) 
a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.HaveSameElementsDeep, []string{ "queued_application_downlinks", }) if a.So(resp.Device, should.NotBeNil) { a.So(resp.Device.QueuedApplicationDownlinks, should.BeEmpty) } } close(setFuncRespCh) setDevice := CopyEndDevice(getDevice) setDevice.QueuedApplicationDownlinks = nil select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/Rx1,2/application downlink/FOpts present/EU868/1.1/scheduling fail", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ 
ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY>}, }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, 
ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) /* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) 
}(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_A, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx1Delay: ttnpb.RX_DELAY_3, Rx1DataRateIndex: ttnpb.DATA_RATE_0, Rx1Frequency: 431000000, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class A/windows open/Rx1,2/application downlink/FOpts present/EU868/1.1", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx 
context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) start := time.Now() var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY> }, }, RecentUplinks: []*ttnpb.UplinkMessage{ 
CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) 
/* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) }(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_A, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx1Delay: ttnpb.RX_DELAY_3, Rx1DataRateIndex: ttnpb.DATA_RATE_0, Rx1Frequency: 431000000, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Response: &ttnpb.ScheduleDownlinkResponse{ Delay: time.Second, }, }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } setDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, PendingRequests: []*ttnpb.MACCommand{ { CID: ttnpb.CID_DEV_STATUS, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, RecentDownlinks: []*ttnpb.DownlinkMessage{ lastDown, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID 
callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.Resemble, []string{ "mac_state", "queued_application_downlinks", "recent_downlinks", "session", }) if a.So(resp.Device, should.NotBeNil) && a.So(resp.Device.MACState, should.NotBeNil) && a.So(resp.Device.MACState.LastConfirmedDownlinkAt, should.NotBeNil) { a.So([]time.Time{start, *resp.Device.MACState.LastConfirmedDownlinkAt, time.Now().Add(time.Second)}, should.BeChronological) setDevice.MACState.LastConfirmedDownlinkAt = resp.Device.MACState.LastConfirmedDownlinkAt } a.So(resp.Device, should.Resemble, setDevice) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, // Adapted from https://github.com/TheThingsNetwork/lorawan-stack/issues/866#issue-461484955. 
{ Name: "Class A/windows open/Rx1, Rx2 does not fit/application downlink/FOpts present/EU868/1.0.2", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) start := time.Now() var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_6, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, 
QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x15, FRMPayload: []byte("<KEY>), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY> }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 
0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x15) /** FRMPayload **/ b = append(b, []byte("AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0+P0BBQkNERUU=")...) /* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) }(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_A, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx1Delay: ttnpb.RX_DELAY_3, Rx1DataRateIndex: ttnpb.DATA_RATE_4, Rx1Frequency: 431000000, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Response: &ttnpb.ScheduleDownlinkResponse{ Delay: time.Second, }, }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } setDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, PendingRequests: []*ttnpb.MACCommand{ { CID: ttnpb.CID_DEV_STATUS, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, RecentDownlinks: 
[]*ttnpb.DownlinkMessage{ lastDown, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.Resemble, []string{ "mac_state", "queued_application_downlinks", "recent_downlinks", "session", }) if a.So(resp.Device, should.NotBeNil) && a.So(resp.Device.MACState, should.NotBeNil) && a.So(resp.Device.MACState.LastConfirmedDownlinkAt, should.NotBeNil) { a.So([]time.Time{start, *resp.Device.MACState.LastConfirmedDownlinkAt, time.Now().Add(time.Second)}, should.BeChronological) setDevice.MACState.LastConfirmedDownlinkAt = resp.Device.MACState.LastConfirmedDownlinkAt } a.So(resp.Device, should.Resemble, setDevice) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class B/windows closed", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response 
a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_B, LoRaWANVersion: ttnpb.MAC_V1_1, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): 
// NOTE(review): the leading statements below are the tail of the preceding test case
// (draining the SetByID/Pop mock responses); the "Class C .../Rx1/..." case proper
// starts at the struct literal further along this line.
// Test case: Class C device, both RX windows available after an uplink, one queued
// application downlink with pending MAC responses (so FOpts are present). Expects the
// downlink to be scheduled in Rx1 (CLASS_A TxRequest) after two failed gateway paths.
t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeNil) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: getDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class C/windows open/Rx1/application downlink/FOpts present/EU868/1.1", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) start := time.Now() var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: 
// TxSettings of the most recent uplink; getDevice below is the registry fixture:
// Class C, LoRaWAN 1.1 / EU868, queued ResetConf+LinkCheckAns MAC responses and one
// queued application downlink.
// NOTE(review): `<KEY>` below is a redaction placeholder (invalid Go) — restore the
// original SessionKeyID bytes before building.
ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY>}, }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan 
// Mock Gateway Server peers: peer124 serves gateways 1, 2 and 4; peer3 serves
// gateway 3 (matching the rxMetadata fixture). The payload builder constructs the
// exact PHYPayload: FOpts (ResetConf, LinkCheckAns, DevStatusReq) encrypted with
// NwkSEncKey per LoRaWAN 1.1, FPort 1, "testPayload", then the downlink MIC.
NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) /* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) 
// Expected TxRequest: Rx1 slot (CLASS_A, RX_DELAY_3, DR0 @ 431 MHz). First two
// scheduling attempts fail with errors; the third succeeds with a 1s delay.
}(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_A, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx1Delay: ttnpb.RX_DELAY_3, Rx1DataRateIndex: ttnpb.DATA_RATE_0, Rx1Frequency: 431000000, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Response: &ttnpb.ScheduleDownlinkResponse{ Delay: time.Second, }, }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } setDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, PendingRequests: []*ttnpb.MACCommand{ { CID: ttnpb.CID_DEV_STATUS, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, RecentDownlinks: []*ttnpb.DownlinkMessage{ lastDown, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, 
// Verify the registry write: queue drained, downlink recorded, DevStatusReq pending,
// LastConfirmedDownlinkAt in (start, now+1s]; then expect a downlink task re-add at
// LastConfirmedDownlinkAt + ClassCTimeout (42s). The closing braces end this case;
// the struct literal that follows opens the "Rx2/no absolute time" case (continued in
// the lines after this block).
should.Resemble, []string{ "mac_state", "queued_application_downlinks", "recent_downlinks", "session", }) if a.So(resp.Device, should.NotBeNil) && a.So(resp.Device.MACState, should.NotBeNil) && a.So(resp.Device.MACState.LastConfirmedDownlinkAt, should.NotBeNil) { a.So([]time.Time{start, *resp.Device.MACState.LastConfirmedDownlinkAt, time.Now().Add(time.Second)}, should.BeChronological) setDevice.MACState.LastConfirmedDownlinkAt = resp.Device.MACState.LastConfirmedDownlinkAt } a.So(resp.Device, should.Resemble, setDevice) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } if !AssertDownlinkTaskAddRequest(ctx, env.DownlinkTasks.Add, func(reqCtx context.Context, ids ttnpb.EndDeviceIdentifiers, startAt time.Time, replace bool) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(ids, should.Resemble, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }) && a.So(replace, should.BeTrue) && a.So(startAt, should.Resemble, setDevice.MACState.LastConfirmedDownlinkAt.Add(42*time.Second)) }, nil, ) { t.Error("Downlink task add assertion failed") return false } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class C/windows open/Rx2/application downlink/no absolute time/no forced gateways/windows open/FOpts present/EU868/1.1", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := 
// Body of the "Class C/.../Rx2/application downlink/no absolute time/..." case
// (opened at the end of the previous line): identical device fixture to the Rx1
// case; here ALL Rx1 scheduling attempts fail, so the test expects a second
// scheduling round in Rx2 as a CLASS_C TxRequest.
// NOTE(review): `<KEY>` below is a redaction placeholder (invalid Go) — restore the
// original SessionKeyID bytes before building.
test.MustTFromContext(ctx) a := assertions.New(t) start := time.Now() var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY> }, }, RecentUplinks: 
// Registry SetByID interception and the two mock Gateway Server peers, as in the
// Rx1 case. The first assertScheduleRxMetadataGateways round discards the message
// (blank identifier) because every path errors.
[]*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } _, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) 
// Round 1 expects an Rx1 CLASS_A TxRequest; all three path responses are errors,
// so scheduling falls through to round 2 below (same payload, Rx2 parameters).
/* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) }(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_A, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx1Delay: ttnpb.RX_DELAY_3, Rx1DataRateIndex: ttnpb.DATA_RATE_0, Rx1Frequency: 431000000, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) /* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) 
// Round 2 expects a CLASS_C TxRequest in Rx2 (DR1 @ 420 MHz) and succeeds on the
// first path with a 1s delay; the stored device is then verified as in the Rx1 case.
}(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_C, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, } }, NsGsScheduleDownlinkResponse{ Response: &ttnpb.ScheduleDownlinkResponse{ Delay: time.Second, }, }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } setDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, PendingRequests: []*ttnpb.MACCommand{ { CID: ttnpb.CID_DEV_STATUS, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, RecentDownlinks: []*ttnpb.DownlinkMessage{ lastDown, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.Resemble, []string{ "mac_state", "queued_application_downlinks", "recent_downlinks", "session", }) if a.So(resp.Device, should.NotBeNil) && 
// Continuation: tail of the "Rx2/no absolute time" case (registry verification,
// SetByID response, downlink-task re-add at LastConfirmedDownlinkAt + 42s, Pop
// drain), followed by the "absolute time within window" case. That case adds a
// ClassBC.AbsoluteTime ~10s in the future to the queued downlink and expects the
// TxRequest to carry the same AbsoluteTime.
// NOTE(review): `<KEY>` below is a redaction placeholder (invalid Go) — restore the
// original SessionKeyID bytes before building.
case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } absTime := time.Now().Add(10 * time.Second).UTC() getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY>}, ClassBC: &ttnpb.ApplicationDownlink_ClassBC{ AbsoluteTime: deepcopy.Copy(&absTime).(*time.Time), }, }, }, 
// Registry SetByID interception, mock peers, and the exact PHYPayload builder —
// same structure as the preceding cases.
RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) 
// Expected TxRequest: CLASS_C in Rx2 with AbsoluteTime set; third path response
// succeeds. The verified setDevice mirrors the other Class C cases.
/* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) }(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_C, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, AbsoluteTime: &absTime, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Response: &ttnpb.ScheduleDownlinkResponse{ Delay: time.Second, }, }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } setDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, PendingRequests: []*ttnpb.MACCommand{ { CID: ttnpb.CID_DEV_STATUS, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, RecentDownlinks: []*ttnpb.DownlinkMessage{ lastDown, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } select { case <-ctx.Done(): t.Error("Timed out while waiting for 
// Final verification and cleanup for this case, then the struct literal opening the
// "non-retryable errors" variant (continued past this block).
DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.Resemble, []string{ "mac_state", "queued_application_downlinks", "recent_downlinks", "session", }) if a.So(resp.Device, should.NotBeNil) && a.So(resp.Device.MACState, should.NotBeNil) && a.So(resp.Device.MACState.LastConfirmedDownlinkAt, should.NotBeNil) { a.So([]time.Time{start, *resp.Device.MACState.LastConfirmedDownlinkAt, time.Now().Add(time.Second)}, should.BeChronological) setDevice.MACState.LastConfirmedDownlinkAt = resp.Device.MACState.LastConfirmedDownlinkAt } a.So(resp.Device, should.Resemble, setDevice) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } if !AssertDownlinkTaskAddRequest(ctx, env.DownlinkTasks.Add, func(reqCtx context.Context, ids ttnpb.EndDeviceIdentifiers, startAt time.Time, replace bool) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(ids, should.Resemble, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }) && a.So(replace, should.BeTrue) && a.So(startAt, should.Resemble, setDevice.MACState.LastConfirmedDownlinkAt.Add(42*time.Second)) }, nil, ) { t.Error("Downlink task add assertion failed") return false } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class C/windows open/Rx2/application downlink/absolute time within window/no forced gateways/windows open/FOpts present/EU868/1.1/non-retryable errors", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: 
// Body of the "non-retryable errors" variant: every gateway path fails with a
// non-retryable path error (aborted / resource exhausted / failed precondition), so
// the test expects the queued application downlink to be DROPPED (registry write
// touches only "queued_application_downlinks") and the task to be re-added after
// downlinkRetryInterval rather than after ClassCTimeout.
// NOTE(review): `<KEY>` below is a redaction placeholder (invalid Go) — restore the
// original SessionKeyID bytes before building.
ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) start := time.Now() var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } absTime := time.Now().Add(10 * time.Second).UTC() getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: 
// Queued downlink with absolute time; registry interception and mock peers as above.
[]string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY>}, ClassBC: &ttnpb.ApplicationDownlink_ClassBC{ AbsoluteTime: deepcopy.Copy(&absTime).(*time.Time), }, }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, 
// All three scheduling responses carry ScheduleDownlinkErrorDetails whose path
// errors are non-retryable, so no further scheduling round is attempted.
test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) /** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) /* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) }(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_C, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, AbsoluteTime: &absTime, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test").WithDetails(&ttnpb.ScheduleDownlinkErrorDetails{ PathErrors: []*ttnpb.ErrorDetails{ ttnpb.ErrorDetailsToProto(errors.DefineAborted(ulid.MustNew(0, test.Randy).String(), "aborted")), ttnpb.ErrorDetailsToProto(errors.DefineResourceExhausted(ulid.MustNew(0, test.Randy).String(), "resource exhausted")), }, }), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test").WithDetails(&ttnpb.ScheduleDownlinkErrorDetails{ PathErrors: []*ttnpb.ErrorDetails{ ttnpb.ErrorDetailsToProto(errors.DefineFailedPrecondition(ulid.MustNew(0, test.Randy).String(), "failed precondition")), }, }), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test").WithDetails(&ttnpb.ScheduleDownlinkErrorDetails{ PathErrors: []*ttnpb.ErrorDetails{ ttnpb.ErrorDetailsToProto(errors.DefineResourceExhausted(ulid.MustNew(0, test.Randy).String(), "resource exhausted")), }, }), }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } select { 
// Registry write drops the queued downlink; the task re-add start time must fall in
// (start, now + downlinkRetryInterval]. The trailing struct literal opens the
// "retryable error" variant (continued past this block).
case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.Resemble, []string{ "queued_application_downlinks", }) if a.So(resp.Device, should.NotBeNil) { a.So(resp.Device.QueuedApplicationDownlinks, should.BeEmpty) } } close(setFuncRespCh) setDevice := CopyEndDevice(getDevice) setDevice.QueuedApplicationDownlinks = nil select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } if !AssertDownlinkTaskAddRequest(ctx, env.DownlinkTasks.Add, func(reqCtx context.Context, ids ttnpb.EndDeviceIdentifiers, startAt time.Time, replace bool) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(ids, should.Resemble, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }) && a.So(replace, should.BeTrue) && a.So([]time.Time{start, startAt, time.Now().Add(downlinkRetryInterval)}, should.BeChronological) }, nil, ) { t.Error("Downlink task add assertion failed") return false } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class C/windows open/Rx2/application downlink/absolute time within window/no forced gateways/windows open/FOpts present/EU868/1.1/retryable error", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) start := time.Now() var 
// Body of the "retryable error" variant (truncated at the end of this chunk): same
// fixture as the non-retryable case, but one path error is a corruption error
// (marked "// retryable" in the fixture), so — presumably, the tail is outside this
// view — the downlink is expected to be kept queued for a retry rather than dropped.
// NOTE(review): `<KEY>` below is a redaction placeholder (invalid Go) — restore the
// original SessionKeyID bytes before building.
popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } absTime := time.Now().Add(10 * time.Second).UTC() getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY>}, ClassBC: &ttnpb.ApplicationDownlink_ClassBC{ 
// Registry interception, mock peers, and the PHYPayload builder — same structure
// as the sibling Class C cases above.
AbsoluteTime: deepcopy.Copy(&absTime).(*time.Time), }, }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, func() []byte { b := []byte{ /* MHDR */ 0x60, /* MACPayload */ /** FHDR **/ /*** DevAddr ***/ devAddr[3], devAddr[2], devAddr[1], devAddr[0], /*** FCtrl ***/ 0x86, /*** FCnt ***/ 0x42, 0x00, } /** FOpts **/ b = append(b, test.Must(crypto.EncryptDownlink( nwkSEncKey, devAddr, 0x24, []byte{ /* ResetConf */ 0x01, 0x01, /* LinkCheckAns */ 0x02, 0x02, 0x05, /* DevStatusReq */ 0x06, }, )).([]byte)...) 
// The second response's corruption path error is the retryable one; verification of
// the resulting registry write continues beyond this chunk.
/** FPort **/ b = append(b, 0x1) /** FRMPayload **/ b = append(b, []byte("testPayload")...) /* MIC */ mic := test.Must(crypto.ComputeDownlinkMIC( sNwkSIntKey, devAddr, 0, 0x42, b, )).([4]byte) return append(b, mic[:]...) }(), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_C, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGH, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, AbsoluteTime: &absTime, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test").WithDetails(&ttnpb.ScheduleDownlinkErrorDetails{ PathErrors: []*ttnpb.ErrorDetails{ ttnpb.ErrorDetailsToProto(errors.DefineAborted(ulid.MustNew(0, test.Randy).String(), "aborted")), ttnpb.ErrorDetailsToProto(errors.DefineResourceExhausted(ulid.MustNew(0, test.Randy).String(), "resource exhausted")), }, }), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test").WithDetails(&ttnpb.ScheduleDownlinkErrorDetails{ PathErrors: []*ttnpb.ErrorDetails{ ttnpb.ErrorDetailsToProto(errors.DefineCorruption(ulid.MustNew(0, test.Randy).String(), "corruption")), // retryable }, }), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test").WithDetails(&ttnpb.ScheduleDownlinkErrorDetails{ PathErrors: []*ttnpb.ErrorDetails{ ttnpb.ErrorDetailsToProto(errors.DefineResourceExhausted(ulid.MustNew(0, test.Randy).String(), "resource exhausted")), }, }), }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 5) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-app-down-2") } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, 
should.BeEmpty) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: CopyEndDevice(getDevice), }: } if !AssertDownlinkTaskAddRequest(ctx, env.DownlinkTasks.Add, func(reqCtx context.Context, ids ttnpb.EndDeviceIdentifiers, startAt time.Time, replace bool) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(ids, should.Resemble, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }) && a.So(replace, should.BeTrue) && a.So([]time.Time{start, startAt, time.Now().Add(downlinkRetryInterval)}, should.BeChronological) }, nil, ) { t.Error("Downlink task add assertion failed") return false } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class C/windows open/Rx2/application downlink/absolute time outside window", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := 
&ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } absTime := time.Now().Add(42 * time.Hour).UTC() getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedResponses: []*ttnpb.MACCommand{ (&ttnpb.MACCommand_ResetConf{ MinorVersion: 1, }).MACCommand(), (&ttnpb.MACCommand_LinkCheckAns{ Margin: 2, GatewayCount: 5, }).MACCommand(), }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY>}, ClassBC: &ttnpb.ApplicationDownlink_ClassBC{ AbsoluteTime: deepcopy.Copy(&absTime).(*time.Time), }, }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for 
DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.BeEmpty) a.So(resp.Device, should.NotBeNil) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{}: } if !AssertDownlinkTaskAddRequest(ctx, env.DownlinkTasks.Add, func(reqCtx context.Context, ids ttnpb.EndDeviceIdentifiers, startAt time.Time, replace bool) bool { return a.So(reqCtx, should.HaveParentContextOrEqual, ctx) && a.So(ids, should.Resemble, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }) && a.So(replace, should.BeTrue) && a.So(startAt, should.Resemble, absTime.Add(-gsScheduleWindow)) }, nil, ) { t.Error("Downlink task add assertion failed") return false } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "Class C/windows open/Rx2/expired application downlinks", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: 
ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, MACSettings: &ttnpb.MACSettings{ ClassCTimeout: DurationPtr(42 * time.Second), StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 0}, StatusTimePeriodicity: DurationPtr(0), }, MACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_C, LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []<KEY> ClassBC: 
&ttnpb.ApplicationDownlink_ClassBC{ AbsoluteTime: TimePtr(time.Now().Add(-2).UTC()), }, }, { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY> ClassBC: &ttnpb.ApplicationDownlink_ClassBC{ AbsoluteTime: TimePtr(time.Now().Add(-1).UTC()), }, }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 0x24, SessionKeys: *CopySessionKeys(sessionKeys), }, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.Resemble, []string{ "queued_application_downlinks", }) if resp.Device != nil { a.So(resp.Device.QueuedApplicationDownlinks, should.BeEmpty) } else { a.So(resp.Device, should.BeNil) } } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{}: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } 
close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, { Name: "join-accept/windows open/Rx2/no active MAC state/window open/EU868/1.1", DownlinkPriorities: DownlinkPriorities{ JoinAccept: ttnpb.TxSchedulePriority_HIGHEST, MACCommands: ttnpb.TxSchedulePriority_HIGH, MaxApplicationDownlink: ttnpb.TxSchedulePriority_NORMAL, }, Handler: func(ctx context.Context, env TestEnvironment) bool { t := test.MustTFromContext(ctx) a := assertions.New(t) var popRespCh chan<- error popFuncRespCh := make(chan error) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop to be called") return false case req := <-env.DownlinkTasks.Pop: popRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) go func() { popFuncRespCh <- req.Func(req.Context, ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, }, time.Now()) }() } lastUp := &ttnpb.UplinkMessage{ CorrelationIDs: []string{"correlation-up-1", "correlation-up-2"}, DeviceChannelIndex: 3, Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_JOIN_REQUEST, }, Payload: &ttnpb.Message_JoinRequestPayload{JoinRequestPayload: &ttnpb.JoinRequestPayload{ JoinEUI: types.EUI64{0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, DevEUI: types.EUI64{0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, DevNonce: types.DevNonce{0x00, 0x42}, }}, }, ReceivedAt: time.Now().Add(-time.Second), RxMetadata: deepcopy.Copy(rxMetadata).([]*ttnpb.RxMetadata), Settings: ttnpb.TxSettings{ DataRateIndex: ttnpb.DATA_RATE_0, Frequency: 430000000, }, } getDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, JoinEUI: &types.EUI64{0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, DevEUI: &types.EUI64{0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, }, FrequencyPlanID: test.EUFrequencyPlanID, 
LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, PendingMACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, QueuedJoinAccept: &ttnpb.MACState_JoinAccept{ Keys: *CopySessionKeys(sessionKeys), Payload: bytes.Repeat([]byte{0x42}, 33), Request: ttnpb.JoinRequest{ DevAddr: devAddr, }, }, RxWindowsAvailable: true, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY>}, }, }, RecentUplinks: []*ttnpb.UplinkMessage{ CopyUplinkMessage(lastUp), }, SupportsJoin: true, } var setRespCh chan<- DeviceRegistrySetByIDResponse setFuncRespCh := make(chan DeviceRegistrySetByIDRequestFuncResponse) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID to be called") return false case req := <-env.DeviceRegistry.SetByID: setRespCh = req.Response a.So(req.Context, should.HaveParentContextOrEqual, ctx) a.So(req.ApplicationIdentifiers, should.Resemble, appID) a.So(req.DeviceID, should.Resemble, devID) a.So(req.Paths, should.Resemble, getPaths) go func() { dev, sets, err := req.Func(CopyEndDevice(getDevice)) setFuncRespCh <- DeviceRegistrySetByIDRequestFuncResponse{ Device: dev, Paths: sets, Error: err, } }() } scheduleDownlink124Ch := make(chan NsGsScheduleDownlinkRequest) peer124 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink124Ch), }) scheduleDownlink3Ch := make(chan NsGsScheduleDownlinkRequest) peer3 := NewGSPeer(ctx, &MockNsGsServer{ ScheduleDownlinkFunc: MakeNsGsScheduleDownlinkChFunc(scheduleDownlink3Ch), }) if !a.So(assertGetRxMetadataGatewayPeers(ctx, env.Cluster.GetPeer, peer124, peer3), should.BeTrue) { return false } lastDown, ok := 
assertScheduleRxMetadataGateways( ctx, env.Cluster.Auth, scheduleDownlink124Ch, scheduleDownlink3Ch, bytes.Repeat([]byte{0x42}, 33), func(paths ...*ttnpb.DownlinkPath) *ttnpb.TxRequest { return &ttnpb.TxRequest{ Class: ttnpb.CLASS_A, DownlinkPaths: paths, Priority: ttnpb.TxSchedulePriority_HIGHEST, Rx1Delay: ttnpb.RX_DELAY_5, Rx1DataRateIndex: ttnpb.DATA_RATE_0, Rx1Frequency: 431000000, Rx2DataRateIndex: ttnpb.DATA_RATE_1, Rx2Frequency: 420000000, } }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Error: errors.New("test"), }, NsGsScheduleDownlinkResponse{ Response: &ttnpb.ScheduleDownlinkResponse{ Delay: time.Second, }, }, ) if !a.So(ok, should.BeTrue) { t.Error("Scheduling assertion failed") return false } if a.So(lastDown.CorrelationIDs, should.HaveLength, 3) { a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-1") a.So(lastDown.CorrelationIDs, should.Contain, "correlation-up-2") } setDevice := &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, JoinEUI: &types.EUI64{0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, DevEUI: &types.EUI64{0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, }, FrequencyPlanID: test.EUFrequencyPlanID, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, PendingMACState: &ttnpb.MACState{ CurrentParameters: *CopyMACParameters(eu868macParameters), DesiredParameters: *CopyMACParameters(eu868macParameters), DeviceClass: ttnpb.CLASS_A, LoRaWANVersion: ttnpb.MAC_V1_1, PendingJoinRequest: &ttnpb.JoinRequest{ DevAddr: devAddr, }, }, PendingSession: &ttnpb.Session{ DevAddr: devAddr, SessionKeys: *CopySessionKeys(sessionKeys), }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { CorrelationIDs: []string{"correlation-app-down-1", "correlation-app-down-2"}, FCnt: 0x42, FPort: 0x1, FRMPayload: []byte("testPayload"), Priority: ttnpb.TxSchedulePriority_HIGHEST, SessionKeyID: []byte{<KEY> }, }, RecentUplinks: []*ttnpb.UplinkMessage{ 
CopyUplinkMessage(lastUp), }, RecentDownlinks: []*ttnpb.DownlinkMessage{ lastDown, }, SupportsJoin: true, } select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID callback to return") case resp := <-setFuncRespCh: a.So(resp.Error, should.BeNil) a.So(resp.Paths, should.Resemble, []string{ "pending_mac_state.pending_join_request", "pending_mac_state.queued_join_accept", "pending_mac_state.rx_windows_available", "pending_session.dev_addr", "pending_session.keys", "recent_downlinks", }) a.So(resp.Device, should.Resemble, setDevice) } close(setFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DeviceRegistry.SetByID response to be processed") case setRespCh <- DeviceRegistrySetByIDResponse{ Device: setDevice, }: } select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop callback to return") case resp := <-popFuncRespCh: a.So(resp, should.BeNil) } close(popFuncRespCh) select { case <-ctx.Done(): t.Error("Timed out while waiting for DownlinkTasks.Pop response to be processed") case popRespCh <- nil: } return true }, }, } { t.Run(tc.Name, func(t *testing.T) { a := assertions.New(t) ns, ctx, env, stop := StartTest(t, Config{}, (1<<10)*test.Delay) defer stop() ns.downlinkPriorities = tc.DownlinkPriorities go func() { for ev := range env.Events { t.Logf("Event %s published with data %v", ev.Event.Name(), ev.Event.Data()) ev.Response <- struct{}{} } }() <-env.DownlinkTasks.Pop processDownlinkTaskErrCh := make(chan error) go func() { err := ns.processDownlinkTask(ctx) select { case <-ctx.Done(): t.Log("NetworkServer.processDownlinkTask took too long to return") return default: processDownlinkTaskErrCh <- err } }() res := tc.Handler(ctx, env) if !a.So(res, should.BeTrue) { t.Error("Test handler failed") return } select { case <-ctx.Done(): t.Error("Timed out while waiting for NetworkServer.processDownlinkTask to return") return case err := <-processDownlinkTaskErrCh: if tc.ErrorAssertion != nil 
{ a.So(tc.ErrorAssertion(t, err), should.BeTrue) } else { a.So(err, should.BeNil) } } close(processDownlinkTaskErrCh) }) } } func TestGenerateDownlink(t *testing.T) { phy := test.Must(test.Must(band.GetByID(band.EU_863_870)).(band.Band).Version(ttnpb.PHY_V1_1_REV_B)).(band.Band) const appIDString = "process-downlink-test-app-id" appID := ttnpb.ApplicationIdentifiers{ApplicationID: appIDString} const devID = "process-downlink-test-dev-id" devAddr := types.DevAddr{0x42, 0xff, 0xff, 0xff} fNwkSIntKey := types.AES128Key{0x42, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} nwkSEncKey := types.AES128Key{0x42, 0x42, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} sNwkSIntKey := types.AES128Key{0x42, 0x42, 0x42, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} encodeMessage := func(msg *ttnpb.Message, ver ttnpb.MACVersion, confFCnt uint32) []byte { msg = deepcopy.Copy(msg).(*ttnpb.Message) mac := msg.GetMACPayload() if len(mac.FRMPayload) > 0 && mac.FPort == 0 { var key types.AES128Key switch ver { case ttnpb.MAC_V1_0, ttnpb.MAC_V1_0_1, ttnpb.MAC_V1_0_2: key = fNwkSIntKey case ttnpb.MAC_V1_1: key = nwkSEncKey default: panic(fmt.Errorf("unknown version %s", ver)) } var err error mac.FRMPayload, err = crypto.EncryptDownlink(key, mac.DevAddr, mac.FCnt, mac.FRMPayload) if err != nil { t.Fatal("Failed to encrypt downlink FRMPayload") } } b, err := lorawan.MarshalMessage(*msg) if err != nil { t.Fatal("Failed to marshal downlink") } var key types.AES128Key switch ver { case ttnpb.MAC_V1_0, ttnpb.MAC_V1_0_1, ttnpb.MAC_V1_0_2: key = fNwkSIntKey case ttnpb.MAC_V1_1: key = sNwkSIntKey default: panic(fmt.Errorf("unknown version %s", ver)) } mic, err := crypto.ComputeDownlinkMIC(key, mac.DevAddr, confFCnt, mac.FCnt, b) if err != nil { t.Fatal("Failed to compute MIC") } return append(b, mic[:]...) 
} encodeMAC := func(phy band.Band, cmds ...*ttnpb.MACCommand) (b []byte) { for _, cmd := range cmds { b = test.Must(lorawan.DefaultMACCommands.AppendDownlink(phy, b, *cmd)).([]byte) } return } for _, tc := range []struct { Name string Device *ttnpb.EndDevice Bytes []byte ApplicationDownlinkAssertion func(t *testing.T, down *ttnpb.ApplicationDownlink) bool DeviceAssertion func(*testing.T, *ttnpb.EndDevice) bool Error error }{ { Name: "1.1/no app downlink/no MAC/no ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, }, Session: ttnpb.NewPopulatedSession(test.Randy, false), LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }, Error: errNoDownlink, }, { Name: "1.1/no app downlink/status after 1 downlink/no ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACSettings: &ttnpb.MACSettings{ StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 3}, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, LastDevStatusFCntUp: 2, }, Session: &ttnpb.Session{ LastFCntUp: 4, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, LastDevStatusReceivedAt: TimePtr(time.Unix(42, 0)), RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }, Error: errNoDownlink, }, { Name: "1.1/no app downlink/status after an hour/no ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: 
devID, DevAddr: &devAddr, }, MACSettings: &ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(24 * time.Hour), }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, LastDevStatusReceivedAt: TimePtr(time.Now()), Session: ttnpb.NewPopulatedSession(test.Randy, false), RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }, Error: errNoDownlink, }, { Name: "1.1/no app downlink/no MAC/ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 41, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ FCnt: 24, }, }, }, }, }}, }, Bytes: encodeMessage(&ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_DOWN, Major: ttnpb.Major_LORAWAN_R1, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ DevAddr: devAddr, FCtrl: ttnpb.FCtrl{ Ack: true, ADR: true, }, FCnt: 42, }, }, }, }, ttnpb.MAC_V1_1, 24), DeviceAssertion: func(t *testing.T, dev *ttnpb.EndDevice) bool { return assertions.New(t).So(dev, should.Resemble, &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: 
ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 42, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ FCnt: 24, }, }, }, }, }}, }) }, }, { Name: "1.1/unconfirmed app downlink/no MAC/no ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { Confirmed: false, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }, Bytes: encodeMessage(&ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_DOWN, Major: ttnpb.Major_LORAWAN_R1, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ DevAddr: devAddr, FCtrl: ttnpb.FCtrl{ Ack: false, ADR: true, }, FCnt: 42, }, FPort: 1, FRMPayload: []byte("test"), }, }, }, ttnpb.MAC_V1_1, 0), ApplicationDownlinkAssertion: func(t *testing.T, down *ttnpb.ApplicationDownlink) bool { return assertions.New(t).So(down, should.Resemble, &ttnpb.ApplicationDownlink{ Confirmed: false, FCnt: 
42, FPort: 1, FRMPayload: []byte("test"), }) }, DeviceAssertion: func(t *testing.T, dev *ttnpb.EndDevice) bool { return assertions.New(t).So(dev, should.Resemble, &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, }) }, }, { Name: "1.1/unconfirmed app downlink/no MAC/ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { Confirmed: false, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ FCnt: 24, }, }, }, }, }}, }, Bytes: encodeMessage(&ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_DOWN, Major: ttnpb.Major_LORAWAN_R1, }, Payload: &ttnpb.Message_MACPayload{ 
MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ DevAddr: devAddr, FCtrl: ttnpb.FCtrl{ Ack: true, ADR: true, }, FCnt: 42, }, FPort: 1, FRMPayload: []byte("test"), }, }, }, ttnpb.MAC_V1_1, 24), ApplicationDownlinkAssertion: func(t *testing.T, down *ttnpb.ApplicationDownlink) bool { return assertions.New(t).So(down, should.Resemble, &ttnpb.ApplicationDownlink{ Confirmed: false, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }) }, DeviceAssertion: func(t *testing.T, dev *ttnpb.EndDevice) bool { return assertions.New(t).So(dev, should.Resemble, &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ FCnt: 24, }, }, }, }, }}, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, }) }, }, { Name: "1.1/confirmed app downlink/no MAC/no ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, }, Session: &ttnpb.Session{ DevAddr: devAddr, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { Confirmed: true, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, 
RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }, Bytes: encodeMessage(&ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_DOWN, Major: ttnpb.Major_LORAWAN_R1, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ DevAddr: devAddr, FCtrl: ttnpb.FCtrl{ Ack: false, ADR: true, }, FCnt: 42, }, FPort: 1, FRMPayload: []byte("test"), }, }, }, ttnpb.MAC_V1_1, 0), ApplicationDownlinkAssertion: func(t *testing.T, down *ttnpb.ApplicationDownlink) bool { return assertions.New(t).So(down, should.Resemble, &ttnpb.ApplicationDownlink{ Confirmed: true, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }) }, DeviceAssertion: func(t *testing.T, dev *ttnpb.EndDevice) bool { a := assertions.New(t) if !a.So(dev.MACState, should.NotBeNil) { t.FailNow() } return a.So(dev, should.Resemble, &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, PendingApplicationDownlink: &ttnpb.ApplicationDownlink{ Confirmed: true, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastConfFCntDown: 42, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, }) }, }, { Name: "1.1/confirmed app downlink/no MAC/ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: 
appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, }, Session: &ttnpb.Session{ DevAddr: devAddr, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{ { Confirmed: true, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }, }, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ FCnt: 24, }, }, }, }, }}, }, Bytes: encodeMessage(&ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_DOWN, Major: ttnpb.Major_LORAWAN_R1, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ DevAddr: devAddr, FCtrl: ttnpb.FCtrl{ Ack: true, ADR: true, }, FCnt: 42, }, FPort: 1, FRMPayload: []byte("test"), }, }, }, ttnpb.MAC_V1_1, 24), ApplicationDownlinkAssertion: func(t *testing.T, down *ttnpb.ApplicationDownlink) bool { return assertions.New(t).So(down, should.Resemble, &ttnpb.ApplicationDownlink{ Confirmed: true, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }) }, DeviceAssertion: func(t *testing.T, dev *ttnpb.EndDevice) bool { a := assertions.New(t) if !a.So(dev.MACState, should.NotBeNil) { t.FailNow() } return a.So(dev, should.Resemble, &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, RxWindowsAvailable: true, PendingApplicationDownlink: &ttnpb.ApplicationDownlink{ Confirmed: true, FCnt: 42, FPort: 1, FRMPayload: []byte("test"), }, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastConfFCntDown: 42, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: 
&ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, QueuedApplicationDownlinks: []*ttnpb.ApplicationDownlink{}, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_CONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ FCnt: 24, }, }, }, }, }}, }) }, }, { Name: "1.1/no app downlink/status(count)/no ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACSettings: &ttnpb.MACSettings{ StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 3}, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, LastDevStatusFCntUp: 4, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastFCntUp: 99, LastNFCntDown: 41, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }, Bytes: encodeMessage(&ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_DOWN, Major: ttnpb.Major_LORAWAN_R1, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ DevAddr: devAddr, FCtrl: ttnpb.FCtrl{ Ack: false, ADR: true, }, FCnt: 42, }, FPort: 0, FRMPayload: encodeMAC( phy, ttnpb.CID_DEV_STATUS.MACCommand(), ), }, }, }, ttnpb.MAC_V1_1, 0), DeviceAssertion: func(t *testing.T, dev *ttnpb.EndDevice) bool { a := assertions.New(t) if !a.So(dev.MACState, should.NotBeNil) { t.FailNow() } return a.So(dev, should.Resemble, &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ 
ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACSettings: &ttnpb.MACSettings{ StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 3}, }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, LastDevStatusFCntUp: 4, PendingRequests: []*ttnpb.MACCommand{ ttnpb.CID_DEV_STATUS.MACCommand(), }, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastFCntUp: 99, LastNFCntDown: 42, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }) }, }, { Name: "1.1/no app downlink/status(time/zero time)/no ack", Device: &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACSettings: &ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(time.Nanosecond), }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 41, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }, Bytes: encodeMessage(&ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_DOWN, Major: ttnpb.Major_LORAWAN_R1, }, Payload: &ttnpb.Message_MACPayload{ MACPayload: &ttnpb.MACPayload{ FHDR: ttnpb.FHDR{ DevAddr: devAddr, FCtrl: ttnpb.FCtrl{ Ack: false, ADR: true, }, FCnt: 42, }, FPort: 0, FRMPayload: encodeMAC( phy, 
ttnpb.CID_DEV_STATUS.MACCommand(), ), }, }, }, ttnpb.MAC_V1_1, 0), DeviceAssertion: func(t *testing.T, dev *ttnpb.EndDevice) bool { a := assertions.New(t) if !a.So(dev.MACState, should.NotBeNil) { t.FailNow() } return a.So(dev, should.Resemble, &ttnpb.EndDevice{ EndDeviceIdentifiers: ttnpb.EndDeviceIdentifiers{ ApplicationIdentifiers: appID, DeviceID: devID, DevAddr: &devAddr, }, MACSettings: &ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(time.Nanosecond), }, MACState: &ttnpb.MACState{ LoRaWANVersion: ttnpb.MAC_V1_1, PendingRequests: []*ttnpb.MACCommand{ ttnpb.CID_DEV_STATUS.MACCommand(), }, }, Session: &ttnpb.Session{ DevAddr: devAddr, LastNFCntDown: 42, SessionKeys: ttnpb.SessionKeys{ NwkSEncKey: &ttnpb.KeyEnvelope{ Key: &nwkSEncKey, }, SNwkSIntKey: &ttnpb.KeyEnvelope{ Key: &sNwkSIntKey, }, }, }, LoRaWANPHYVersion: ttnpb.PHY_V1_1_REV_B, FrequencyPlanID: band.EU_863_870, RecentUplinks: []*ttnpb.UplinkMessage{{ Payload: &ttnpb.Message{ MHDR: ttnpb.MHDR{ MType: ttnpb.MType_UNCONFIRMED_UP, }, Payload: &ttnpb.Message_MACPayload{MACPayload: &ttnpb.MACPayload{}}, }, }}, }) }, }, } { t.Run(tc.Name, func(t *testing.T) { a := assertions.New(t) logger := test.GetLogger(t) ctx := test.ContextWithT(test.Context(), t) ctx = log.NewContext(ctx, logger) ctx, cancel := context.WithTimeout(ctx, (1<<7)*test.Delay) defer cancel() c := component.MustNew( log.Noop, &component.Config{}, component.WithClusterNew(func(context.Context, *config.Cluster, ...cluster.Option) (cluster.Cluster, error) { return &test.MockCluster{ JoinFunc: test.ClusterJoinNilFunc, }, nil }), ) c.FrequencyPlans = frequencyplans.NewStore(test.FrequencyPlansFetcher) err := c.Start() a.So(err, should.BeNil) ns := &NetworkServer{ Component: c, ctx: ctx, defaultMACSettings: ttnpb.MACSettings{ StatusTimePeriodicity: DurationPtr(0), StatusCountPeriodicity: &pbtypes.UInt32Value{Value: 0}, }, } dev := CopyEndDevice(tc.Device) _, phy, err := getDeviceBandVersion(dev, ns.FrequencyPlans) if !a.So(err, should.BeNil) { 
t.Fail() return } genDown, genState, err := ns.generateDownlink(ctx, dev, phy, math.MaxUint16, math.MaxUint16) if tc.Error != nil { a.So(err, should.EqualErrorOrDefinition, tc.Error) a.So(genDown, should.BeNil) return } // TODO: Assert AS uplinks generated(https://github.com/TheThingsNetwork/lorawan-stack/issues/631). if !a.So(err, should.BeNil) || !a.So(genDown, should.NotBeNil) { t.Fail() return } a.So(genDown.Payload, should.Resemble, tc.Bytes) if tc.ApplicationDownlinkAssertion != nil { a.So(tc.ApplicationDownlinkAssertion(t, genState.ApplicationDownlink), should.BeTrue) } else { a.So(genState.ApplicationDownlink, should.BeNil) } if tc.DeviceAssertion != nil { a.So(tc.DeviceAssertion(t, dev), should.BeTrue) } else { a.So(dev, should.Resemble, tc.Device) } }) } }
1.328125
1
protoc-gen-markdown/templates/pkg.go
appootb/grpc-gen
0
685
package templates import ( "text/template" "github.com/appootb/grpc-gen/protoc-gen-markdown/templates/markdown" "github.com/appootb/grpc-gen/protoc-gen-markdown/templates/readme" pgs "github.com/lyft/protoc-gen-star" pgsgo "github.com/lyft/protoc-gen-star/lang/go" ) type RegisterFn func(tpl *template.Template, params pgs.Parameters) type FilePathFn func(f pgs.File, ctx pgsgo.Context, tpl *template.Template) *pgs.FilePath func ProtoTemplate(params pgs.Parameters) []*template.Template { return []*template.Template{ makeTemplate("md", markdown.Register, params), } } func ReadMeTemplate(params pgs.Parameters) *template.Template { return makeTemplate("readme", readme.Register, params) } func FilePathFor(tpl *template.Template) FilePathFn { switch tpl.Name() { default: return func(f pgs.File, ctx pgsgo.Context, tpl *template.Template) *pgs.FilePath { out := ctx.OutputPath(f) out = out.SetExt("." + tpl.Name()) return &out } } } func makeTemplate(ext string, fn RegisterFn, params pgs.Parameters) *template.Template { tpl := template.New(ext) fn(tpl, params) return tpl }
1.164063
1
channeldb/payments_test.go
lightningbank/lightninglib
1
693
package channeldb import ( "bytes" "fmt" "math/rand" "reflect" "testing" "time" "github.com/davecgh/go-spew/spew" "github.com/breez/lightninglib/lnwire" ) func makeFakePayment() *OutgoingPayment { fakeInvoice := &Invoice{ // Use single second precision to avoid false positive test // failures due to the monotonic time component. CreationDate: time.Unix(time.Now().Unix(), 0), Memo: []byte("fake memo"), Receipt: []byte("fake receipt"), PaymentRequest: []byte(""), } copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:]) fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000) fakePath := make([][33]byte, 3) for i := 0; i < 3; i++ { copy(fakePath[i][:], bytes.Repeat([]byte{byte(i)}, 33)) } fakePayment := &OutgoingPayment{ Invoice: *fakeInvoice, Fee: 101, Path: fakePath, TimeLockLength: 1000, } copy(fakePayment.PaymentPreimage[:], rev[:]) return fakePayment } func makeFakePaymentHash() [32]byte { var paymentHash [32]byte rBytes, _ := randomBytes(0, 32) copy(paymentHash[:], rBytes) return paymentHash } // randomBytes creates random []byte with length in range [minLen, maxLen) func randomBytes(minLen, maxLen int) ([]byte, error) { randBuf := make([]byte, minLen+rand.Intn(maxLen-minLen)) if _, err := rand.Read(randBuf); err != nil { return nil, fmt.Errorf("Internal error. "+ "Cannot generate random string: %v", err) } return randBuf, nil } func makeRandomFakePayment() (*OutgoingPayment, error) { var err error fakeInvoice := &Invoice{ // Use single second precision to avoid false positive test // failures due to the monotonic time component. 
CreationDate: time.Unix(time.Now().Unix(), 0), } fakeInvoice.Memo, err = randomBytes(1, 50) if err != nil { return nil, err } fakeInvoice.Receipt, err = randomBytes(1, 50) if err != nil { return nil, err } fakeInvoice.PaymentRequest = []byte("") preImg, err := randomBytes(32, 33) if err != nil { return nil, err } copy(fakeInvoice.Terms.PaymentPreimage[:], preImg) fakeInvoice.Terms.Value = lnwire.MilliSatoshi(rand.Intn(10000)) fakePathLen := 1 + rand.Intn(5) fakePath := make([][33]byte, fakePathLen) for i := 0; i < fakePathLen; i++ { b, err := randomBytes(33, 34) if err != nil { return nil, err } copy(fakePath[i][:], b) } fakePayment := &OutgoingPayment{ Invoice: *fakeInvoice, Fee: lnwire.MilliSatoshi(rand.Intn(1001)), Path: fakePath, TimeLockLength: uint32(rand.Intn(10000)), } copy(fakePayment.PaymentPreimage[:], fakeInvoice.Terms.PaymentPreimage[:]) return fakePayment, nil } func TestOutgoingPaymentSerialization(t *testing.T) { t.Parallel() fakePayment := makeFakePayment() var b bytes.Buffer if err := serializeOutgoingPayment(&b, fakePayment); err != nil { t.Fatalf("unable to serialize outgoing payment: %v", err) } newPayment, err := deserializeOutgoingPayment(&b) if err != nil { t.Fatalf("unable to deserialize outgoing payment: %v", err) } if !reflect.DeepEqual(fakePayment, newPayment) { t.Fatalf("Payments do not match after "+ "serialization/deserialization %v vs %v", spew.Sdump(fakePayment), spew.Sdump(newPayment), ) } } func TestOutgoingPaymentWorkflow(t *testing.T) { t.Parallel() db, cleanUp, err := makeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) } fakePayment := makeFakePayment() if err = db.AddPayment(fakePayment); err != nil { t.Fatalf("unable to put payment in DB: %v", err) } payments, err := db.FetchAllPayments() if err != nil { t.Fatalf("unable to fetch payments from DB: %v", err) } expectedPayments := []*OutgoingPayment{fakePayment} if !reflect.DeepEqual(payments, expectedPayments) { t.Fatalf("Wrong payments 
after reading from DB."+ "Got %v, want %v", spew.Sdump(payments), spew.Sdump(expectedPayments), ) } // Make some random payments for i := 0; i < 5; i++ { randomPayment, err := makeRandomFakePayment() if err != nil { t.Fatalf("Internal error in tests: %v", err) } if err = db.AddPayment(randomPayment); err != nil { t.Fatalf("unable to put payment in DB: %v", err) } expectedPayments = append(expectedPayments, randomPayment) } payments, err = db.FetchAllPayments() if err != nil { t.Fatalf("Can't get payments from DB: %v", err) } if !reflect.DeepEqual(payments, expectedPayments) { t.Fatalf("Wrong payments after reading from DB."+ "Got %v, want %v", spew.Sdump(payments), spew.Sdump(expectedPayments), ) } // Delete all payments. if err = db.DeleteAllPayments(); err != nil { t.Fatalf("unable to delete payments from DB: %v", err) } // Check that there is no payments after deletion paymentsAfterDeletion, err := db.FetchAllPayments() if err != nil { t.Fatalf("Can't get payments after deletion: %v", err) } if len(paymentsAfterDeletion) != 0 { t.Fatalf("After deletion DB has %v payments, want %v", len(paymentsAfterDeletion), 0) } } func TestPaymentStatusWorkflow(t *testing.T) { t.Parallel() db, cleanUp, err := makeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) } testCases := []struct { paymentHash [32]byte status PaymentStatus }{ { paymentHash: makeFakePaymentHash(), status: StatusGrounded, }, { paymentHash: makeFakePaymentHash(), status: StatusInFlight, }, { paymentHash: makeFakePaymentHash(), status: StatusCompleted, }, } for _, testCase := range testCases { err := db.UpdatePaymentStatus(testCase.paymentHash, testCase.status) if err != nil { t.Fatalf("unable to put payment in DB: %v", err) } status, err := db.FetchPaymentStatus(testCase.paymentHash) if err != nil { t.Fatalf("unable to fetch payments from DB: %v", err) } if status != testCase.status { t.Fatalf("Wrong payments status after reading from DB."+ "Got %v, want %v", 
spew.Sdump(status), spew.Sdump(testCase.status), ) } } }
2.015625
2
swarm/network/syncdb.go
martincyprus/waltonchain-gwtc
1
701
// Copyright 2016 The go-wtc Authors // This file is part of the go-wtc library. // // The go-wtc library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-wtc library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-wtc library. If not, see <http://www.gnu.org/licenses/>. package network import ( "encoding/binary" "fmt" "github.com/wtc/go-wtc/log" "github.com/wtc/go-wtc/swarm/storage" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/iterator" ) const counterKeyPrefix = 0x01 /* syncDb is a queueing service for outgoing deliveries. One instance per priority queue for each peer a syncDb instance maintains an in-memory buffer (of capacity bufferSize) once its in-memory buffer is full it switches to persisting in db and dbRead iterator iterates through the items keeping their order once the db read catches up (there is no more items in the db) then it switches back to in-memory buffer. 
when syncdb is stopped all items in the buffer are saved to the db */ type syncDb struct { start []byte // this syncdb starting index in requestdb key storage.Key // remote peers address key counterKey []byte // db key to persist counter priority uint // priotity High|Medium|Low buffer chan interface{} // incoming request channel db *storage.LDBDatabase // underlying db (TODO should be interface) done chan bool // chan to signal goroutines finished quitting quit chan bool // chan to signal quitting to goroutines total, dbTotal int // counts for one session batch chan chan int // channel for batch requests dbBatchSize uint // number of items before batch is saved } // constructor needs a shared request db (leveldb) // priority is used in the index key // uses a buffer and a leveldb for persistent storage // bufferSize, dbBatchSize are config parameters func newSyncDb(db *storage.LDBDatabase, key storage.Key, priority uint, bufferSize, dbBatchSize uint, deliver func(interface{}, chan bool) bool) *syncDb { start := make([]byte, 42) start[1] = byte(priorities - priority) copy(start[2:34], key) counterKey := make([]byte, 34) counterKey[0] = counterKeyPrefix copy(counterKey[1:], start[1:34]) syncdb := &syncDb{ start: start, key: key, counterKey: counterKey, priority: priority, buffer: make(chan interface{}, bufferSize), db: db, done: make(chan bool), quit: make(chan bool), batch: make(chan chan int), dbBatchSize: dbBatchSize, } log.Trace(fmt.Sprintf("syncDb[peer: %v, priority: %v] - initialised", key.Log(), priority)) // starts the main forever loop reading from buffer go syncdb.bufferRead(deliver) return syncdb } /* bufferRead is a forever iterator loop that takes care of delivering outgoing store requests reads from incoming buffer its argument is the deliver function taking the item as first argument and a quit channel as second. 
Closing of this channel is supposed to abort all waiting for delivery (typically network write) The iteration switches between 2 modes, * buffer mode reads the in-memory buffer and delivers the items directly * db mode reads from the buffer and writes to the db, parallelly another routine is started that reads from the db and delivers items If there is buffer contention in buffer mode (slow network, high upload volume) syncdb switches to db mode and starts dbRead Once db backlog is delivered, it reverts back to in-memory buffer It is automatically started when syncdb is initialised. It saves the buffer to db upon receiving quit signal. syncDb#stop() */ func (self *syncDb) bufferRead(deliver func(interface{}, chan bool) bool) { var buffer, db chan interface{} // channels representing the two read modes var more bool var req interface{} var entry *syncDbEntry var inBatch, inDb int batch := new(leveldb.Batch) var dbSize chan int quit := self.quit counterValue := make([]byte, 8) // counter is used for keeping the items in order, persisted to db // start counter where db was at, 0 if not found data, err := self.db.Get(self.counterKey) var counter uint64 if err == nil { counter = binary.BigEndian.Uint64(data) log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter read from db at %v", self.key.Log(), self.priority, counter)) } else { log.Trace(fmt.Sprintf("syncDb[%v/%v] - counter starts at %v", self.key.Log(), self.priority, counter)) } LOOP: for { // waiting for item next in the buffer, or quit signal or batch request select { // buffer only closes when writing to db case req = <-buffer: // deliver request : this is blocking on network write so // it is passed the quit channel as argument, so that it returns // if syncdb is stopped. In this case we need to save the item to the db more = deliver(req, self.quit) if !more { log.Debug(fmt.Sprintf("syncDb[%v/%v] quit: switching to db. 
session tally (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)) // received quit signal, save request currently waiting delivery // by switching to db mode and closing the buffer buffer = nil db = self.buffer close(db) quit = nil // needs to block the quit case in select break // break from select, this item will be written to the db } self.total++ log.Trace(fmt.Sprintf("syncDb[%v/%v] deliver (db/total): %v/%v", self.key.Log(), self.priority, self.dbTotal, self.total)) // by the time deliver returns, there were new writes to the buffer // if buffer contention is detected, switch to db mode which drains // the buffer so no process will block on pushing store requests if len(buffer) == cap(buffer) { log.Debug(fmt.Sprintf("syncDb[%v/%v] buffer full %v: switching to db. session tally (db/total): %v/%v", self.key.Log(), self.priority, cap(buffer), self.dbTotal, self.total)) buffer = nil db = self.buffer } continue LOOP // incoming entry to put into db case req, more = <-db: if !more { // only if quit is called, saved all the buffer binary.BigEndian.PutUint64(counterValue, counter) batch.Put(self.counterKey, counterValue) // persist counter in batch self.writeSyncBatch(batch) // save batch log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save current batch to db", self.key.Log(), self.priority)) break LOOP } self.dbTotal++ self.total++ // otherwise break after select case dbSize = <-self.batch: // explicit request for batch if inBatch == 0 && quit != nil { // there was no writes since the last batch so db depleted // switch to buffer mode log.Debug(fmt.Sprintf("syncDb[%v/%v] empty db: switching to buffer", self.key.Log(), self.priority)) db = nil buffer = self.buffer dbSize <- 0 // indicates to 'caller' that batch has been written inDb = 0 continue LOOP } binary.BigEndian.PutUint64(counterValue, counter) batch.Put(self.counterKey, counterValue) log.Debug(fmt.Sprintf("syncDb[%v/%v] write batch %v/%v - %x - %x", self.key.Log(), self.priority, 
inBatch, counter, self.counterKey, counterValue)) batch = self.writeSyncBatch(batch) dbSize <- inBatch // indicates to 'caller' that batch has been written inBatch = 0 continue LOOP // closing syncDb#quit channel is used to signal to all goroutines to quit case <-quit: // need to save backlog, so switch to db mode db = self.buffer buffer = nil quit = nil log.Trace(fmt.Sprintf("syncDb[%v/%v] quitting: save buffer to db", self.key.Log(), self.priority)) close(db) continue LOOP } // only get here if we put req into db entry, err = self.newSyncDbEntry(req, counter) if err != nil { log.Warn(fmt.Sprintf("syncDb[%v/%v] saving request %v (#%v/%v) failed: %v", self.key.Log(), self.priority, req, inBatch, inDb, err)) continue LOOP } batch.Put(entry.key, entry.val) log.Trace(fmt.Sprintf("syncDb[%v/%v] to batch %v '%v' (#%v/%v/%v)", self.key.Log(), self.priority, req, entry, inBatch, inDb, counter)) // if just switched to db mode and not quitting, then launch dbRead // in a parallel go routine to send deliveries from db if inDb == 0 && quit != nil { log.Trace(fmt.Sprintf("syncDb[%v/%v] start dbRead", self.key.Log(), self.priority)) go self.dbRead(true, counter, deliver) } inDb++ inBatch++ counter++ // need to save the batch if it gets too large (== dbBatchSize) if inBatch%int(self.dbBatchSize) == 0 { batch = self.writeSyncBatch(batch) } } log.Info(fmt.Sprintf("syncDb[%v:%v]: saved %v keys (saved counter at %v)", self.key.Log(), self.priority, inBatch, counter)) close(self.done) } // writes the batch to the db and returns a new batch object func (self *syncDb) writeSyncBatch(batch *leveldb.Batch) *leveldb.Batch { err := self.db.Write(batch) if err != nil { log.Warn(fmt.Sprintf("syncDb[%v/%v] saving batch to db failed: %v", self.key.Log(), self.priority, err)) return batch } return new(leveldb.Batch) } // abstract type for db entries (TODO could be a feature of Receipts) type syncDbEntry struct { key, val []byte } func (self syncDbEntry) String() string { return 
fmt.Sprintf("key: %x, value: %x", self.key, self.val) } /* dbRead is iterating over store requests to be sent over to the peer this is mainly to prevent crashes due to network output buffer contention (???) as well as to make syncronisation resilient to disconnects the messages are supposed to be sent in the p2p priority queue. the request DB is shared between peers, but domains for each syncdb are disjoint. dbkeys (42 bytes) are structured: * 0: 0x00 (0x01 reserved for counter key) * 1: priorities - priority (so that high priority can be replayed first) * 2-33: peers address * 34-41: syncdb counter to preserve order (this field is missing for the counter key) values (40 bytes) are: * 0-31: key * 32-39: request id dbRead needs a boolean to indicate if on first round all the historical record is synced. Second argument to indicate current db counter The third is the function to apply */ func (self *syncDb) dbRead(useBatches bool, counter uint64, fun func(interface{}, chan bool) bool) { key := make([]byte, 42) copy(key, self.start) binary.BigEndian.PutUint64(key[34:], counter) var batches, n, cnt, total int var more bool var entry *syncDbEntry var it iterator.Iterator var del *leveldb.Batch batchSizes := make(chan int) for { // if useBatches is false, cnt is not set if useBatches { // this could be called before all cnt items sent out // so that loop is not blocking while delivering // only relevant if cnt is large select { case self.batch <- batchSizes: case <-self.quit: return } // wait for the write to finish and get the item count in the next batch cnt = <-batchSizes batches++ if cnt == 0 { // empty return } } it = self.db.NewIterator() it.Seek(key) if !it.Valid() { copy(key, self.start) useBatches = true continue } del = new(leveldb.Batch) log.Trace(fmt.Sprintf("syncDb[%v/%v]: new iterator: %x (batch %v, count %v)", self.key.Log(), self.priority, key, batches, cnt)) for n = 0; !useBatches || n < cnt; it.Next() { copy(key, it.Key()) if len(key) == 0 || key[0] != 
0 { copy(key, self.start) useBatches = true break } val := make([]byte, 40) copy(val, it.Value()) entry = &syncDbEntry{key, val} // log.Trace(fmt.Sprintf("syncDb[%v/%v] - %v, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, self.key.Log(), batches, total, self.dbTotal, self.total)) more = fun(entry, self.quit) if !more { // quit received when waiting to deliver entry, the entry will not be deleted log.Trace(fmt.Sprintf("syncDb[%v/%v] batch %v quit after %v/%v items", self.key.Log(), self.priority, batches, n, cnt)) break } // since subsequent batches of the same db session are indexed incrementally // deleting earlier batches can be delayed and parallelised // this could be batch delete when db is idle (but added complexity esp when quitting) del.Delete(key) n++ total++ } log.Debug(fmt.Sprintf("syncDb[%v/%v] - db session closed, batches: %v, total: %v, session total from db: %v/%v", self.key.Log(), self.priority, batches, total, self.dbTotal, self.total)) self.db.Write(del) // this could be async called only when db is idle it.Release() } } // func (self *syncDb) stop() { close(self.quit) <-self.done } // calculate a dbkey for the request, for the db to work // see syncdb for db key structure // polimorphic: accepted types, see syncer#addRequest func (self *syncDb) newSyncDbEntry(req interface{}, counter uint64) (entry *syncDbEntry, err error) { var key storage.Key var chunk *storage.Chunk var id uint64 var ok bool var sreq *storeRequestMsgData if key, ok = req.(storage.Key); ok { id = generateId() } else if chunk, ok = req.(*storage.Chunk); ok { key = chunk.Key id = generateId() } else if sreq, ok = req.(*storeRequestMsgData); ok { key = sreq.Key id = sreq.Id } else if entry, ok = req.(*syncDbEntry); !ok { return nil, fmt.Errorf("type not allowed: %v (%T)", req, req) } // order by peer > priority > seqid // value is request id if exists if entry == nil { dbkey := make([]byte, 42) dbval := make([]byte, 40) // encode key 
copy(dbkey[:], self.start[:34]) // db peer binary.BigEndian.PutUint64(dbkey[34:], counter) // encode value copy(dbval, key[:]) binary.BigEndian.PutUint64(dbval[32:], id) entry = &syncDbEntry{dbkey, dbval} } return }
1.78125
2
wechat.go
colin3dmax/wechat
0
709
package wechat import ( "net/http" "sync" "github.com/colin3dmax/wechat/cache" "github.com/colin3dmax/wechat/context" "github.com/colin3dmax/wechat/device" "github.com/colin3dmax/wechat/js" "github.com/colin3dmax/wechat/material" "github.com/colin3dmax/wechat/menu" "github.com/colin3dmax/wechat/message" "github.com/colin3dmax/wechat/miniprogram" "github.com/colin3dmax/wechat/oauth" "github.com/colin3dmax/wechat/pay" "github.com/colin3dmax/wechat/qr" "github.com/colin3dmax/wechat/server" "github.com/colin3dmax/wechat/tcb" "github.com/colin3dmax/wechat/user" ) // Wechat struct type Wechat struct { Context *context.Context } // Config for user type Config struct { AppID string AppSecret string Token string EncodingAESKey string PayMchID string //支付 - 商户 ID PayNotifyURL string //支付 - 接受微信支付结果通知的接口地址 PayKey string //支付 - 商户后台设置的支付 key Cache cache.Cache } // NewWechat init func NewWechat(cfg *Config) *Wechat { context := new(context.Context) copyConfigToContext(cfg, context) return &Wechat{context} } func copyConfigToContext(cfg *Config, context *context.Context) { context.AppID = cfg.AppID context.AppSecret = cfg.AppSecret context.Token = cfg.Token context.EncodingAESKey = cfg.EncodingAESKey context.PayMchID = cfg.PayMchID context.PayKey = cfg.PayKey context.PayNotifyURL = cfg.PayNotifyURL context.Cache = cfg.Cache context.SetAccessTokenLock(new(sync.RWMutex)) context.SetJsAPITicketLock(new(sync.RWMutex)) } // GetServer 消息管理 func (wc *Wechat) GetServer(req *http.Request, writer http.ResponseWriter) *server.Server { wc.Context.Request = req wc.Context.Writer = writer return server.NewServer(wc.Context) } //GetAccessToken 获取access_token func (wc *Wechat) GetAccessToken() (string, error) { return wc.Context.GetAccessToken() } //GetAccessToken 获取access_token func (wc *Wechat) GetAccessTokenAndOpenId(code string) (*context.ResAccessToken, error) { return wc.Context.GetAccessTokenAndOpenId(code) } // GetOauth oauth2网页授权 func (wc *Wechat) GetOauth() *oauth.Oauth { return 
oauth.NewOauth(wc.Context) } // GetMaterial 素材管理 func (wc *Wechat) GetMaterial() *material.Material { return material.NewMaterial(wc.Context) } // GetJs js-sdk配置 func (wc *Wechat) GetJs() *js.Js { return js.NewJs(wc.Context) } // GetMenu 菜单管理接口 func (wc *Wechat) GetMenu() *menu.Menu { return menu.NewMenu(wc.Context) } // GetUser 用户管理接口 func (wc *Wechat) GetUser() *user.User { return user.NewUser(wc.Context) } // GetTemplate 模板消息接口 func (wc *Wechat) GetTemplate() *message.Template { return message.NewTemplate(wc.Context) } // GetPay 返回支付消息的实例 func (wc *Wechat) GetPay() *pay.Pay { return pay.NewPay(wc.Context) } // GetQR 返回二维码的实例 func (wc *Wechat) GetQR() *qr.QR { return qr.NewQR(wc.Context) } // GetMiniProgram 获取小程序的实例 func (wc *Wechat) GetMiniProgram() *miniprogram.MiniProgram { return miniprogram.NewMiniProgram(wc.Context) } // GetDevice 获取智能设备的实例 func (wc *Wechat) GetDevice() *device.Device { return device.NewDevice(wc.Context) } // GetTcb 获取小程序-云开发的实例 func (wc *Wechat) GetTcb() *tcb.Tcb { return tcb.NewTcb(wc.Context) }
1.257813
1
pkg/image/mapping_test.go
alknopfler/oc-mirror
0
717
package image import ( "testing" "github.com/openshift/library-go/pkg/image/reference" "github.com/openshift/oc/pkg/cli/image/imagesource" "github.com/stretchr/testify/require" ) func TestByCategory(t *testing.T) { tests := []struct { name string input TypedImageMapping typ []ImageType expected TypedImageMapping err string }{{ name: "Valid/OneType", input: TypedImageMapping{ {TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}, {TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOCPRelease}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOCPRelease}, }, typ: []ImageType{TypeOperatorBundle}, expected: TypedImageMapping{{ TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}, }, }, { name: "Valid/PruneAllTypes", input: 
TypedImageMapping{{ TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}, }, typ: []ImageType{TypeGeneric}, expected: TypedImageMapping{}, }, { name: "Valid/MultipleTypes", input: TypedImageMapping{ {TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}, {TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorCatalog}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorCatalog}, {TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeGeneric}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: 
"disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeGeneric}, }, typ: []ImageType{TypeOperatorBundle, TypeOperatorCatalog}, expected: TypedImageMapping{ {TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}, {TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorCatalog}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry", Namespace: "namespace", Name: "image", ID: "digest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorCatalog}, }, }} for _, test := range tests { t.Run(test.name, func(t *testing.T) { mapping := ByCategory(test.input, test.typ...) 
require.Equal(t, test.expected, mapping) }) } } func TestReadImageMapping(t *testing.T) { tests := []struct { name string seperator string path string typ ImageType expected TypedImageMapping err string }{{ name: "Valid/Separator", path: "testdata/mappings/valid.txt", seperator: "=", typ: TypeOperatorBundle, expected: TypedImageMapping{{ TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "some-registry.com", Namespace: "namespace", Name: "image", Tag: "latest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}: { TypedImageReference: imagesource.TypedImageReference{ Ref: reference.DockerImageReference{ Registry: "disconn-registry.com", Namespace: "namespace", Name: "image", Tag: "latest", }, Type: imagesource.DestinationRegistry, }, Category: TypeOperatorBundle}, }, }, { name: "Invalid/NoSeparator", path: "testdata/mappings/invalid.txt", seperator: "=", err: "mapping \"=\" expected to have exactly one \"some-registry.com/namespace/image:latest==disconn-registry.com/namespace/image:latest\"", }} for _, test := range tests { t.Run(test.name, func(t *testing.T) { mapping, err := ReadImageMapping(test.path, test.seperator, test.typ) if test.err != "" { require.EqualError(t, err, test.err) } else { require.NoError(t, err) require.Equal(t, test.expected, mapping) } }) } }
1.085938
1
pkg/logging/logger_test.go
beykansen/kenobi
0
725
package logging import ( "errors" "github.com/ereb-or-od/kenobi/pkg/logging/options" "io/ioutil" "os" "testing" ) func TestNewLoggerShouldReturnLoggerWithDefaultOptionsWhenOptionsDoesNotSelected(t *testing.T) { defaultLogger, err := New() if err != nil { t.Error("error does not expected when default-logger initialized") } if defaultLogger == nil { t.Errorf("default-logger must be initialized") } } func TestNewLoggerWithOptionsShouldReturnLoggerWithSelectedOptionsWhenOptionsSelected(t *testing.T) { defaultLogger, err := NewWithOptions(options.NewDefaultLoggerOptions()) if err != nil { t.Error("error does not expected when default-logger initialized") } if defaultLogger == nil { t.Errorf("default-logger must be initialized") } } func TestInfoShouldBuildInfoLog(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Info("sample") w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("info log could not be written") } } func TestInfoShouldBuildInfoLogWithParameters(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Info("sample", map[string]interface{}{"sample": "foo"}) w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("info log could not be written") } } func TestDebugShouldBuildDebugLog(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Debug("sample") w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("Debug log could not be written") } } func TestDebugShouldBuildDebugLogWithParameters(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) 
defaultLogger.Debug("sample", map[string]interface{}{"sample": "foo"}) w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("Debug log could not be written") } } func TestWarnShouldBuildWarnLog(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Warn("sample") w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("Warn log could not be written") } } func TestWarnShouldBuildWarnLogWithParameters(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Warn("sample", map[string]interface{}{"sample": "foo"}) w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("Warn log could not be written") } } func TestErrorShouldBuildErrorLog(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Error("sample", errors.New("sample")) w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("ExtractError log could not be written") } } func TestErrorWithParametersShouldBuildErrorLog(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Error("sample", errors.New("sample"), map[string]interface{}{"sample": "foo"}) w.Close() out, _ := ioutil.ReadAll(r) os.Stdout = rescueStdout if len(out) == 0 { t.Error("ExtractError log could not be written") } } func TestErrorWithParametersShouldBuildErrorLogWhenErrorIsNil(t *testing.T) { rescueStdout := os.Stdout r, w, _ := os.Pipe() os.Stdout = w defaultLogger, _ := NewWithOptions(options.NewDefaultLoggerOptions()) defaultLogger.Error("sample", nil) w.Close() out, _ := ioutil.ReadAll(r) 
os.Stdout = rescueStdout if len(out) == 0 { t.Error("ExtractError log could not be written") } }
1.375
1
pkg/oam/discoverymapper/suit_test.go
seanly/kubevela
1
733
/* Copyright 2021 The KubeVela Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package discoverymapper import ( "context" "testing" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/envtest/printer" // +kubebuilder:scaffold:imports ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
var cfg *rest.Config var k8sClient client.Client var testEnv *envtest.Environment var scheme = runtime.NewScheme() func TestMapper(t *testing.T) { RegisterFailHandler(Fail) RunSpecsWithDefaultAndCustomReporters(t, "Test Mapper Suite", []Reporter{printer.NewlineReporter{}}) } var _ = BeforeSuite(func(done Done) { By("Bootstrapping test environment") testEnv = &envtest.Environment{ UseExistingCluster: pointer.BoolPtr(false), } var err error cfg, err = testEnv.Start() Expect(err).ToNot(HaveOccurred()) Expect(cfg).ToNot(BeNil()) Expect(crdv1.AddToScheme(scheme)).Should(BeNil()) // +kubebuilder:scaffold:scheme By("Create the k8s client") k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) Expect(err).ToNot(HaveOccurred()) Expect(k8sClient).ToNot(BeNil()) close(done) }, 60) var _ = AfterSuite(func() { By("Tearing down the test environment") err := testEnv.Stop() Expect(err).ToNot(HaveOccurred()) }) var _ = Describe("Mapper discovery resources", func() { It("discovery built-in k8s resource", func() { dism, err := New(cfg) Expect(err).Should(BeNil()) mapper, err := dism.GetMapper() Expect(err).Should(BeNil()) mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1") Expect(err).Should(BeNil()) Expect(mapping.Resource).Should(Equal(schema.GroupVersionResource{ Group: "apps", Version: "v1", Resource: "deployments", })) }) It("discovery CRD", func() { By("Check built-in resource") dism, err := New(cfg) Expect(err).Should(BeNil()) mapper, err := dism.GetMapper() Expect(err).Should(BeNil()) var mapping *meta.RESTMapping mapping, err = mapper.RESTMapping(schema.GroupKind{Group: "", Kind: "Pod"}, "v1") Expect(err).Should(BeNil()) Expect(mapping.Resource).Should(Equal(schema.GroupVersionResource{ Group: "", Version: "v1", Resource: "pods", })) By("CRD should be discovered after refresh") crd := crdv1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ Name: "foos.example.com", Labels: map[string]string{"crd": "dependency"}, }, 
Spec: crdv1.CustomResourceDefinitionSpec{ Group: "example.com", Names: crdv1.CustomResourceDefinitionNames{ Kind: "Foo", Plural: "foos", }, Versions: []crdv1.CustomResourceDefinitionVersion{{ Name: "v1", Served: true, Storage: true, Schema: &crdv1.CustomResourceValidation{ OpenAPIV3Schema: &crdv1.JSONSchemaProps{ Type: "object", }}, }, { Name: "v1beta1", Served: true, Schema: &crdv1.CustomResourceValidation{ OpenAPIV3Schema: &crdv1.JSONSchemaProps{ Type: "object", }}, }}, Scope: crdv1.NamespaceScoped, }, } Expect(k8sClient.Create(context.Background(), &crd)).Should(BeNil()) updatedCrdObj := crdv1.CustomResourceDefinition{} Eventually(func() bool { if err := k8sClient.Get(context.Background(), client.ObjectKey{Name: "foos.example.com"}, &updatedCrdObj); err != nil { return false } return len(updatedCrdObj.Spec.Versions) == 2 }, 3*time.Second, time.Second).Should(BeTrue()) Eventually(func() error { mapping, err = dism.RESTMapping(schema.GroupKind{Group: "example.com", Kind: "Foo"}, "v1") return err }, time.Second*2, time.Millisecond*300).Should(BeNil()) Expect(mapping.Resource).Should(Equal(schema.GroupVersionResource{ Group: "example.com", Version: "v1", Resource: "foos", })) var kinds []schema.GroupVersionKind Eventually(func() []schema.GroupVersionKind { kinds, _ = dism.KindsFor(schema.GroupVersionResource{Group: "example.com", Version: "", Resource: "foos"}) return kinds }, time.Second*30, time.Millisecond*300).Should(Equal([]schema.GroupVersionKind{ {Group: "example.com", Version: "v1", Kind: "Foo"}, {Group: "example.com", Version: "v1beta1", Kind: "Foo"}, })) kinds, err = dism.KindsFor(schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "foos"}) Expect(err).Should(BeNil()) Expect(kinds).Should(Equal([]schema.GroupVersionKind{{Group: "example.com", Version: "v1", Kind: "Foo"}})) }) It("get GVK from k8s resource", func() { dism, err := New(cfg) Expect(err).Should(BeNil()) By("Test Pod") podAPIVersion, podKind := "v1", "Pod" podGV, err := 
schema.ParseGroupVersion(podAPIVersion) Expect(err).Should(BeNil()) podGVR, err := dism.ResourcesFor(podGV.WithKind(podKind)) Expect(err).Should(BeNil()) Expect(podGVR).Should(Equal(schema.GroupVersionResource{ Version: "v1", Resource: "pods", })) By("Test Deployment") deploymentAPIVersion, deploymentKind := "apps/v1", "Deployment" deploymentGV, err := schema.ParseGroupVersion(deploymentAPIVersion) Expect(err).Should(BeNil()) deploymentGVR, err := dism.ResourcesFor(deploymentGV.WithKind(deploymentKind)) Expect(err).Should(BeNil()) Expect(deploymentGVR).Should(Equal(schema.GroupVersionResource{ Group: "apps", Version: "v1", Resource: "deployments", })) By("Test CronJob") cronJobAPIVersion, cronJobKind := "batch/v1", "Job" cronJobGV, err := schema.ParseGroupVersion(cronJobAPIVersion) Expect(err).Should(BeNil()) cronJobGVR, err := dism.ResourcesFor(cronJobGV.WithKind(cronJobKind)) Expect(err).Should(BeNil()) Expect(cronJobGVR).Should(Equal(schema.GroupVersionResource{ Group: "batch", Version: "v1", Resource: "jobs", })) By("Test Invalid GVK") apiVersion, kind := "apps/v1", "Job" gv, err := schema.ParseGroupVersion(apiVersion) Expect(err).Should(BeNil()) _, err = dism.ResourcesFor(gv.WithKind(kind)) Expect(err).Should(HaveOccurred()) }) })
1.375
1
k8sconntrack/iptables.go
Hyperpilotio/snap-plugin-collector-k8sconntrack
0
741
package k8sconntrack import ( "encoding/json" "fmt" "github.com/Hyperpilotio/snap-plugin-collector-k8sconntrack/pkg/log" "gopkg.in/resty.v1" ) var ( Tables = [...]string{"filter", "nat", "mangle", "raw"} ) /* Chain Name is the name of the chain Data is ex:[ "277786", // packets "493629126", // bytes "KUBE-SERVICES", // target "all", // prot "--", // opt "*", // in "*", // out "0.0.0.0/0", //source "0.0.0.0/0", // destination "\/* kubernetes service portals *\/" ] */ type Chain struct { Name string `json:"name"` Data [][]string `json:"data"` } type Table struct { Name string `json:"name"` Chains []Chain `json:"chains"` } // FIXME should let api server return all iptables instead of querying particular tables func (con *Conntrack) GetIptables() (map[string]Table, error) { endpoint := fmt.Sprintf("http://%s/iptables", con.Host) resp, err := resty.R().Get(endpoint) if err != nil { msg := fmt.Errorf("Unable to get iptables stats from k8sconntrack: %s", err.Error()) log.WithFields(log.Fields{"Host": con.Host}). Error(msg.Error()) return nil, msg } var metrics map[string]Table err = json.Unmarshal(resp.Body(), &metrics) if err != nil { log.Errorf("Unable to parse body of response: err: %s body: %s", err.Error(), resp.String()) return nil, err } return metrics, nil } func (con *Conntrack) ListChains() (*map[string][]string, error) { resp, err := resty.R().Get(fmt.Sprintf("http://%s/iptables/chains", con.Host)) if err != nil { msg := fmt.Errorf("Unable to get iptables stats from k8sconntrack: %s", err.Error()) log.WithFields(log.Fields{"Host": con.Host}). Error(msg.Error()) return nil, msg } var chains map[string][]string err = json.Unmarshal(resp.Body(), &chains) if err != nil { log.Errorf("Unable to parse body of response: err: %s body: %s", err.Error(), resp.String()) return nil, err } return &chains, nil }
1.523438
2
internal/api/middleware_test.go
darkowlzz/octant
0
749
/* Copyright (c) 2019 the Octant contributors. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package api import ( "context" "fmt" "io/ioutil" "net/http" "net/http/httptest" "testing" "github.com/spf13/viper" "github.com/stretchr/testify/require" ) func Test_rebindHandler(t *testing.T) { cases := []struct { name string host string origin string expectedCode int listenerKey string listenerAddr string disableCrossOriginKey string disableCrossOriginChecking bool errorMessage string }{ { name: "in general", expectedCode: http.StatusOK, }, { name: "rebind", host: "hacker.com", expectedCode: http.StatusForbidden, errorMessage: "forbidden host\n", }, { name: "invalid host", host: ":::::::::", expectedCode: http.StatusBadRequest, errorMessage: "bad request\n", }, { name: "custom host", host: "0.0.0.0", expectedCode: http.StatusOK, listenerKey: "listener-addr", listenerAddr: "0.0.0.0:0000", }, { name: "disable CORS", host: "example.com", origin: "hacker.com", expectedCode: http.StatusOK, disableCrossOriginKey: "disable-origin-check", disableCrossOriginChecking: true, listenerKey: "listener-addr", listenerAddr: "example.com:80", errorMessage: "response", }, { name: "fails CORS and invalid host", host: "example.com", origin: "hacker.com", expectedCode: http.StatusForbidden, errorMessage: "forbidden host: forbidden bad origin\n", }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { if tc.listenerKey != "" { viper.Set(tc.listenerKey, tc.listenerAddr) defer viper.Set(tc.listenerKey, "") } if tc.disableCrossOriginKey != "" { viper.Set(tc.disableCrossOriginKey, tc.disableCrossOriginChecking) defer viper.Set(tc.disableCrossOriginKey, false) } fake := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "response") }) wrapped := rebindHandler(context.TODO(), acceptedHosts())(fake) ts := httptest.NewServer(wrapped) defer ts.Close() req, err := http.NewRequest(http.MethodGet, ts.URL, nil) require.NoError(t, err) if tc.origin != "" { 
req.Header["Origin"] = []string{tc.origin} } if tc.host != "" { req.Host = tc.host } res, err := http.DefaultClient.Do(req) require.NoError(t, err) if tc.errorMessage != "" { message, err := ioutil.ReadAll(res.Body) require.NoError(t, err) require.Equal(t, tc.errorMessage, string(message)) } require.Equal(t, tc.expectedCode, res.StatusCode) }) } } func Test_shouldAllowHost(t *testing.T) { cases := []struct { name string host string acceptedHosts []string expected bool }{ { name: "0.0.0.0 allow all", host: "192.168.1.1", acceptedHosts: []string{"127.0.0.1", "localhost", "0.0.0.0"}, expected: true, }, { name: "deny 192.168.1.1", host: "192.168.1.1", acceptedHosts: []string{"127.0.0.1", "localhost"}, expected: false, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { require.Equal(t, tc.expected, shouldAllowHost(tc.host, tc.acceptedHosts)) }) } } func Test_checkSameOrigin(t *testing.T) { cases := []struct { name string host string origin string expected bool }{ { name: "host/origin match", host: "192.168.1.1:7777", origin: "http://192.168.1.1:7777", expected: true, }, { name: "host/origin do not match", host: "192.168.1.1:7777", origin: "http://127.0.0.1:7777", expected: false, }, } for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { r := &http.Request{ Host: tc.host, Header: make(http.Header, 1), } r.Header.Set("Origin", tc.origin) require.Equal(t, tc.expected, checkSameOrigin(r)) }) } }
1.492188
1
errortrace/call.go
talon-one/go-hit
0
757
package errortrace import ( "runtime" "strings" ) type Call struct { PackageName string FunctionPath string FunctionName string File string Line int PC uintptr Entry uintptr FullName string } func (c *Call) setFullName() { var sb strings.Builder if c.PackageName != "" { sb.WriteString(c.PackageName) sb.WriteRune('.') } if c.FunctionPath != "" { sb.WriteString(c.FunctionPath) sb.WriteRune('.') } sb.WriteString(c.FunctionName) c.FullName = sb.String() } func makeCall(frame runtime.Frame) Call { // find the last slash lastSlash := strings.LastIndexFunc(frame.Function, func(r rune) bool { return r == '/' }) if lastSlash <= -1 { lastSlash = 0 } call := Call{ File: frame.File, Line: frame.Line, PC: frame.PC, Entry: frame.Entry, } // the first dot after the slash ends the package name dot := strings.IndexRune(frame.Function[lastSlash:], '.') if dot < 0 { // no dot means no package call.FunctionName = frame.Function } else { dot += lastSlash call.PackageName = frame.Function[:dot] call.FunctionName = strings.TrimLeft(frame.Function[dot:], ".") } parts := strings.FieldsFunc(call.FunctionName, func(r rune) bool { return r == '.' }) size := len(parts) //nolint:gomnd if size <= 1 { call.setFullName() return call } size-- call.FunctionPath = strings.Join(parts[:size], ".") call.FunctionName = parts[size] call.setFullName() return call }
1.976563
2
Godeps/_workspace/src/github.com/lazyshot/go-hbase/types.go
GHawk1ns/GoGolfers
0
765
package hbase import ( pb "github.com/ghawk1ns/golf/Godeps/_workspace/src/github.com/golang/protobuf/proto" ) type regionInfo struct { server string startKey []byte endKey []byte name string ts string tableNamespace string tableName string } type action interface { toProto() pb.Message } type exception struct { msg string } func (m *exception) Reset() { *m = exception{} } func (m *exception) String() string { return m.msg } func (*exception) ProtoMessage() {} type TableInfo struct { TableName string Families []string }
0.75
1
compass/pkg/errors/errors.go
iancardosozup/charlescd
368
773
package errors import ( "encoding/json" "time" "github.com/google/uuid" ) const ( component = "compass" ) type Error interface { WithMeta(key, value string) *AdvancedError WithOperations(operation string) *AdvancedError Error() SimpleError ErrorWithOperations() AdvancedError } type ErrorList interface { Append(ers ...Error) *CustomErrorList Get() *CustomErrorList GetErrors() []Error } type SimpleError struct { ID uuid.UUID `json:"id"` Title string `json:"title"` Detail string `json:"detail"` Meta map[string]string `json:"meta"` } type AdvancedError struct { *SimpleError Operations []string `json:"operations"` } func NewError(title string, detail string) Error { return &AdvancedError{ SimpleError: &SimpleError{ ID: uuid.New(), Title: title, Detail: detail, Meta: map[string]string{ "component": component, "timestamp": time.Now().String(), }, }, Operations: []string{}, } } func (err *AdvancedError) ErrorWithOperations() AdvancedError { return *err } func (err *AdvancedError) Error() SimpleError { return *err.SimpleError } func (err *AdvancedError) ToJSON() ([]byte, error) { return json.Marshal(err) } func (err *AdvancedError) WithMeta(key, value string) *AdvancedError { err.Meta[key] = value return err } func (err *AdvancedError) WithOperations(operation string) *AdvancedError { err.Operations = append(err.Operations, operation) return err } type CustomErrorList struct { Errors []Error `json:"errors"` } func NewErrorList() ErrorList { return &CustomErrorList{} } func (errList *CustomErrorList) Append(ers ...Error) *CustomErrorList { errList.Errors = append(errList.Errors, ers...) return errList } func (errList *CustomErrorList) Get() *CustomErrorList { return errList } func (errList *CustomErrorList) GetErrors() []Error { return errList.Errors }
1.601563
2
internal/function/uast_utils.go
BakeRolls/gitbase
1
781
package function

import (
	"bytes"
	"fmt"
	"hash"

	"gopkg.in/bblfsh/client-go.v3/tools"
	"gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto"

	"github.com/sirupsen/logrus"
	"github.com/src-d/gitbase"
	bblfsh "gopkg.in/bblfsh/client-go.v3"
	"gopkg.in/bblfsh/sdk.v2/uast/nodes"
	errors "gopkg.in/src-d/go-errors.v1"
	"gopkg.in/src-d/go-mysql-server.v0/sql"
)

var (
	// ErrParseBlob is returned when the blob can't be parsed with bblfsh.
	ErrParseBlob = errors.NewKind("unable to parse the given blob using bblfsh: %s")

	// ErrUnmarshalUAST is returned when an error arises unmarshaling UASTs.
	ErrUnmarshalUAST = errors.NewKind("error unmarshaling UAST: %s")

	// ErrMarshalUAST is returned when an error arises marshaling UASTs.
	ErrMarshalUAST = errors.NewKind("error marshaling uast node: %s")
)

// exprToString evaluates e against row r and converts the result to a
// SQL text value. A nil expression or a nil (NULL) evaluation result
// yields the empty string.
func exprToString(
	ctx *sql.Context,
	e sql.Expression,
	r sql.Row,
) (string, error) {
	if e == nil {
		return "", nil
	}

	x, err := e.Eval(ctx, r)
	if err != nil {
		return "", err
	}

	if x == nil {
		return "", nil
	}

	x, err = sql.Text.Convert(x)
	if err != nil {
		return "", err
	}

	return x.(string), nil
}

// computeKey derives a cache key for a (mode, lang, blob) triple by
// hashing the three values with h. The hash is reset first, so the same
// hash.Hash can be reused across calls.
func computeKey(h hash.Hash, mode, lang string, blob []byte) (string, error) {
	h.Reset()
	if err := writeToHash(h, [][]byte{
		[]byte(mode),
		[]byte(lang),
		blob,
	}); err != nil {
		return "", err
	}

	return string(h.Sum(nil)), nil
}

// writeToHash feeds each element into h, failing if any write errors or
// is short.
func writeToHash(h hash.Hash, elements [][]byte) error {
	for _, e := range elements {
		n, err := h.Write(e)
		if err != nil {
			return err
		}

		if n != len(e) {
			return fmt.Errorf("cache key hash: " +
				"couldn't write all the content")
		}
	}

	return nil
}

// getUASTFromBblfsh parses blob using the session's bblfsh client and
// returns the resulting UAST root node. lang may be empty, in which
// case bblfsh auto-detects the language; when given, it must be
// supported by the server. NOTE(review): the xpath argument is not used
// in this function — presumably filtering is left to callers; confirm.
func getUASTFromBblfsh(ctx *sql.Context,
	blob []byte, lang, xpath string,
	mode bblfsh.Mode,
) (nodes.Node, error) {
	session, ok := ctx.Session.(*gitbase.Session)
	if !ok {
		return nil, gitbase.ErrInvalidGitbaseSession.New(ctx.Session)
	}

	client, err := session.BblfshClient()
	if err != nil {
		return nil, err
	}

	// If we have a language we must check if it's supported. If we don't, bblfsh
	// is the one that will have to identify the language.
	if lang != "" {
		ok, err = client.IsLanguageSupported(ctx, lang)
		if err != nil {
			return nil, err
		}

		if !ok {
			return nil, ErrParseBlob.New(
				fmt.Errorf("unsupported language %q", lang))
		}
	}

	node, _, err := client.ParseWithMode(ctx, mode, lang, blob)
	if err != nil {
		// Parse failures are logged but still surfaced to the caller.
		err := ErrParseBlob.New(err)
		logrus.Warn(err)
		return nil, err
	}

	return node, nil
}

// applyXpath filters node n with the given query and collects every
// match into an array.
func applyXpath(n nodes.Node, query string) (nodes.Array, error) {
	var filtered nodes.Array

	it, err := tools.Filter(n, query)
	if err != nil {
		return nil, err
	}

	for n := range tools.Iterate(it) {
		filtered = append(filtered, n)
	}

	return filtered, nil
}

// marshalNodes serializes a UAST node array with nodesproto. An empty
// array marshals to nil rather than an empty payload.
func marshalNodes(arr nodes.Array) (interface{}, error) {
	if len(arr) == 0 {
		return nil, nil
	}

	buf := &bytes.Buffer{}
	if err := nodesproto.WriteTo(buf, arr); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

// getNodes decodes a previously marshaled UAST value. nil input yields
// nil output; any underlying type other than []byte is rejected.
func getNodes(data interface{}) (nodes.Array, error) {
	if data == nil {
		return nil, nil
	}

	raw, ok := data.([]byte)
	if !ok {
		return nil, ErrUnmarshalUAST.New("wrong underlying UAST format")
	}

	return unmarshalNodes(raw)
}

// unmarshalNodes reads a nodesproto tree back from data and verifies the
// root is an array (the shape produced by marshalNodes). Empty input
// yields nil.
func unmarshalNodes(data []byte) (nodes.Array, error) {
	if len(data) == 0 {
		return nil, nil
	}

	buf := bytes.NewReader(data)
	n, err := nodesproto.ReadTree(buf)
	if err != nil {
		return nil, err
	}

	if n.Kind() != nodes.KindArray {
		return nil, fmt.Errorf("unmarshal: wrong kind of node found %q, expected %q",
			n.Kind(), nodes.KindArray.String())
	}

	return n.(nodes.Array), nil
}
1.382813
1
modules/agent/funcs/funcs.go
gzyr/falcon-plus
0
789
// Copyright 2017 Xiaomi, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package funcs import ( "github.com/open-falcon/falcon-plus/common/model" "github.com/open-falcon/falcon-plus/modules/agent/g" ) type FuncsAndInterval struct { Fs []func() []*model.MetricValue Interval int } var Mappers []FuncsAndInterval func BuildMappers() { interval := g.Config().Transfer.Interval Mappers = []FuncsAndInterval{ { Fs: []func() []*model.MetricValue{ AgentMetrics, CpuMetrics, NetMetrics, KernelMetrics, LoadAvgMetrics, MemMetrics, DiskIOMetrics, IOStatsMetrics, NetstatMetrics, ProcMetrics, UdpMetrics, }, Interval: interval, }, { Fs: []func() []*model.MetricValue{ DeviceMetrics, }, Interval: interval, }, { Fs: []func() []*model.MetricValue{ PortMetrics, SocketStatSummaryMetrics, }, Interval: interval, }, { Fs: []func() []*model.MetricValue{ DuMetrics, }, Interval: interval, }, { Fs: []func() []*model.MetricValue{ UrlMetrics, }, Interval: interval, }, //{ // Fs: []func() []*model.MetricValue{ // GpuMetrics, // }, // Interval: interval, //}, } }
1.3125
1
adapter/sql/adapter.go
feber/rel
0
797
// Package sql is general sql adapter that wraps database/sql.
package sql

import (
	"context"
	"database/sql"
	"errors"
	"strconv"

	"github.com/Fs02/rel"
)

// Config holds configuration for adapter.
type Config struct {
	Placeholder         string            // bind-parameter placeholder token used by the builder
	Ordinal             bool              // presumably numbers placeholders ($1, $2, ...) when true — confirm in NewBuilder
	InsertDefaultValues bool              // NOTE(review): presumably emits DEFAULT VALUES for empty inserts — confirm in NewBuilder
	EscapeChar          string            // presumably used to quote identifiers — confirm in NewBuilder
	ErrorFunc           func(error) error // translates driver errors into rel-level errors; applied on failure paths
	IncrementFunc       func(Adapter) int // id increment step for batch inserts; negative steps are handled by InsertAll
}

// Adapter is a rel adapter backed by database/sql. It operates on the
// root *sql.DB or, after Begin, on a *sql.Tx with savepoint tracking
// for nested transactions.
type Adapter struct {
	Instrumenter rel.Instrumenter
	Config       *Config
	DB           *sql.DB
	Tx           *sql.Tx // non-nil while a transaction is open
	savepoint    int     // nesting depth; >0 means this adapter represents a savepoint
}

// Compile-time check that Adapter satisfies rel.Adapter.
var _ rel.Adapter = (*Adapter)(nil)

// Close database connection.
func (adapter *Adapter) Close() error {
	return adapter.DB.Close()
}

// Instrumentation set instrumenter for this adapter.
func (adapter *Adapter) Instrumentation(instrumenter rel.Instrumenter) {
	adapter.Instrumenter = instrumenter
}

// Instrument call instrumenter, if no instrumenter is set, this will be a no op.
// The returned func must be called with the operation's resulting error.
func (adapter *Adapter) Instrument(ctx context.Context, op string, message string) func(err error) {
	if adapter.Instrumenter != nil {
		return adapter.Instrumenter(ctx, op, message)
	}

	return func(err error) {}
}

// Ping database.
func (adapter *Adapter) Ping(ctx context.Context) error {
	return adapter.DB.PingContext(ctx)
}

// Aggregate record using given query.
// The result is scanned into a nullable int, so a NULL aggregate (e.g.
// over zero rows) yields 0.
func (adapter *Adapter) Aggregate(ctx context.Context, query rel.Query, mode string, field string) (int, error) {
	var (
		err             error
		out             sql.NullInt64
		statement, args = NewBuilder(adapter.Config).Aggregate(query, mode, field)
	)

	finish := adapter.Instrument(ctx, "adapter-aggregate", statement)
	// Route through the open transaction when one exists.
	if adapter.Tx != nil {
		err = adapter.Tx.QueryRowContext(ctx, statement, args...).Scan(&out)
	} else {
		err = adapter.DB.QueryRowContext(ctx, statement, args...).Scan(&out)
	}
	finish(err)

	return int(out.Int64), err
}

// Query executes the given find query and returns a cursor over the result rows.
func (adapter *Adapter) Query(ctx context.Context, query rel.Query) (rel.Cursor, error) { var ( statement, args = NewBuilder(adapter.Config).Find(query) ) finish := adapter.Instrument(ctx, "adapter-query", statement) rows, err := adapter.query(ctx, statement, args) finish(err) return &Cursor{rows}, adapter.Config.ErrorFunc(err) } func (adapter *Adapter) query(ctx context.Context, statement string, args []interface{}) (*sql.Rows, error) { if adapter.Tx != nil { return adapter.Tx.QueryContext(ctx, statement, args...) } return adapter.DB.QueryContext(ctx, statement, args...) } // Exec performs exec operation. func (adapter *Adapter) Exec(ctx context.Context, statement string, args []interface{}) (int64, int64, error) { finish := adapter.Instrument(ctx, "adapter-exec", statement) res, err := adapter.exec(ctx, statement, args) finish(err) if err != nil { return 0, 0, adapter.Config.ErrorFunc(err) } lastID, _ := res.LastInsertId() rowCount, _ := res.RowsAffected() return lastID, rowCount, nil } func (adapter *Adapter) exec(ctx context.Context, statement string, args []interface{}) (sql.Result, error) { if adapter.Tx != nil { return adapter.Tx.ExecContext(ctx, statement, args...) } return adapter.DB.ExecContext(ctx, statement, args...) } // Insert inserts a record to database and returns its id. func (adapter *Adapter) Insert(ctx context.Context, query rel.Query, primaryField string, mutates map[string]rel.Mutate) (interface{}, error) { var ( statement, args = NewBuilder(adapter.Config).Insert(query.Table, mutates) id, _, err = adapter.Exec(ctx, statement, args) ) return id, err } // InsertAll inserts all record to database and returns its ids. 
// InsertAll inserts all record to database and returns its ids.
// fields lists the columns shared by the bulk insert; bulkMutates holds
// one mutation map per record.
func (adapter *Adapter) InsertAll(ctx context.Context, query rel.Query, primaryField string, fields []string, bulkMutates []map[string]rel.Mutate) ([]interface{}, error) {
	statement, args := NewBuilder(adapter.Config).InsertAll(query.Table, fields, bulkMutates)
	id, _, err := adapter.Exec(ctx, statement, args)
	if err != nil {
		return nil, err
	}

	var (
		ids = make([]interface{}, len(bulkMutates))
		inc = 1
	)

	if adapter.Config.IncrementFunc != nil {
		inc = adapter.Config.IncrementFunc(*adapter)
	}

	// A negative increment means generated ids count downward — presumably
	// for drivers whose LastInsertId reports the last row of the batch.
	// Rebase id to the first row and flip the step. TODO confirm.
	if inc < 0 {
		id = id + int64((len(bulkMutates)-1)*inc)
		inc *= -1
	}

	// Reconstruct per-record ids only when a primary field is known.
	if primaryField != "" {
		counter := 0
		for i := range ids {
			if mut, ok := bulkMutates[i][primaryField]; ok {
				// Caller supplied an explicit primary key: use it, and make
				// subsequent generated ids count from this value.
				ids[i] = mut.Value
				id = toInt64(ids[i])
				counter = 1
			} else {
				ids[i] = id + int64(counter*inc)
				counter++
			}
		}
	}

	return ids, nil
}

// Update updates a record in database.
// It returns the number of rows affected.
func (adapter *Adapter) Update(ctx context.Context, query rel.Query, mutates map[string]rel.Mutate) (int, error) {
	var (
		statement, args      = NewBuilder(adapter.Config).Update(query.Table, mutates, query.WhereQuery)
		_, updatedCount, err = adapter.Exec(ctx, statement, args)
	)

	return int(updatedCount), err
}

// Delete deletes all results that match the query.
// It returns the number of rows deleted.
func (adapter *Adapter) Delete(ctx context.Context, query rel.Query) (int, error) {
	var (
		statement, args      = NewBuilder(adapter.Config).Delete(query.Table, query.WhereQuery)
		_, deletedCount, err = adapter.Exec(ctx, statement, args)
	)

	return int(deletedCount), err
}

// Begin begins a new transaction (or a savepoint when already inside one).
func (adapter *Adapter) Begin(ctx context.Context) (rel.Adapter, error) { var ( tx *sql.Tx savepoint int err error ) finish := adapter.Instrument(ctx, "adapter-begin", "begin transaction") if adapter.Tx != nil { tx = adapter.Tx savepoint = adapter.savepoint + 1 _, _, err = adapter.Exec(ctx, "SAVEPOINT s"+strconv.Itoa(savepoint)+";", []interface{}{}) } else { tx, err = adapter.DB.BeginTx(ctx, nil) } finish(err) return &Adapter{ Instrumenter: adapter.Instrumenter, Config: adapter.Config, Tx: tx, savepoint: savepoint, }, err } // Commit commits current transaction. func (adapter *Adapter) Commit(ctx context.Context) error { var err error finish := adapter.Instrument(ctx, "adapter-commit", "commit transaction") if adapter.Tx == nil { err = errors.New("unable to commit outside transaction") } else if adapter.savepoint > 0 { _, _, err = adapter.Exec(ctx, "RELEASE SAVEPOINT s"+strconv.Itoa(adapter.savepoint)+";", []interface{}{}) } else { err = adapter.Tx.Commit() } finish(err) return adapter.Config.ErrorFunc(err) } // Rollback revert current transaction. func (adapter *Adapter) Rollback(ctx context.Context) error { var err error finish := adapter.Instrument(ctx, "adapter-rollback", "rollback transaction") if adapter.Tx == nil { err = errors.New("unable to rollback outside transaction") } else if adapter.savepoint > 0 { _, _, err = adapter.Exec(ctx, "ROLLBACK TO SAVEPOINT s"+strconv.Itoa(adapter.savepoint)+";", []interface{}{}) } else { err = adapter.Tx.Rollback() } finish(err) return adapter.Config.ErrorFunc(err) } // New initialize adapter without db. func New(config *Config) *Adapter { adapter := &Adapter{ Config: config, } return adapter }
1.671875
2