Search is not available for this dataset
max_stars_repo_path
stringlengths 4
435
| max_stars_repo_name
stringlengths 4
107
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 10
1.05M
| score
float64 -0.76
3.84
| int_score
int64 0
4
|
---|---|---|---|---|---|---|
src/app/api/endpoint/static.go | josephspurrier/govueapp | 5 | 5 | package endpoint
import (
"net/http"
"os"
"path/filepath"
"strings"
)
// StaticEndpoint handles the readiness route and serves static assets.
// It embeds Core to gain access to the router and response helpers.
type StaticEndpoint struct {
	Core
}
// SetupStatic registers the readiness and static-file routes on the
// router carried by core.
func SetupStatic(core Core) {
	p := new(StaticEndpoint)
	p.Core = core
	p.Router.Get("/api/v1", p.Index)
	// "..." appears to be the router's catch-all suffix for anything under
	// /api/static — NOTE(review): pattern syntax depends on the Router
	// implementation; confirm.
	p.Router.Get("/api/static...", p.Static)
}
// Index responds 200 OK with the payload "ready"; used as a readiness probe.
// swagger:route GET /api/v1 healthcheck Ready
//
// API is ready.
//
// Responses:
// 200: OKResponse
func (p StaticEndpoint) Index(w http.ResponseWriter, r *http.Request) (int, error) {
	return p.Response.OK(w, "ready")
}
// Static serves files for /api/static/... requests. The static directory is
// resolved relative to the directory containing the running executable; when
// it is not found there and GOPATH is set, the source tree under
// $GOPATH/src/app/api is used as a development-mode fallback.
func (p StaticEndpoint) Static(w http.ResponseWriter, r *http.Request) (int, error) {
	// A bare directory request is not served.
	if r.URL.Path == "/api/static/" {
		return http.StatusNotFound, nil
	}

	// Get the location of the executable. os.Executable returns the path of
	// the binary file itself, so take its directory — previously the file
	// path was used directly, which meant Join(exePath, "static") could
	// never exist and the GOPATH fallback always ran.
	exe, err := os.Executable()
	if err != nil {
		return http.StatusInternalServerError, nil
	}
	basepath := filepath.Dir(exe)

	// If a static folder sits next to the executable, serve from there;
	// otherwise fall back to the source tree under GOPATH (e.g. `go run`).
	staticPath := filepath.Join(basepath, "static")
	if stat, err := os.Stat(staticPath); err == nil && stat.IsDir() {
		// The static directory is found.
	} else if len(os.Getenv("GOPATH")) > 0 {
		// Else get the GOPATH.
		basepath = filepath.Join(os.Getenv("GOPATH"), "src/app/api")
	}

	// Map /api/<rest> onto <basepath>/<rest> and serve the file.
	http.ServeFile(w, r, filepath.Join(basepath, strings.TrimPrefix(r.URL.Path, "/api/")))
	return http.StatusOK, nil
}
| 1.453125 | 1 |
internal/crypto/libsodium/crypter.go | darora/wal-g | 2,154 | 13 | package libsodium
// #cgo CFLAGS: -I../../../tmp/libsodium/include
// #cgo LDFLAGS: -L../../../tmp/libsodium/lib -lsodium
// #include <sodium.h>
import "C"
import (
"fmt"
"io"
"io/ioutil"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/wal-g/wal-g/internal/crypto"
)
const (
chunkSize = 8192
libsodiumKeybytes = 32
minimalKeyLength = 25
)
// libsodium must be initialised once before any other libsodium call.
// NOTE(review): sodium_init's return value (-1 on failure) is ignored
// here — consider checking it and failing loudly.
func init() {
	C.sodium_init()
}
// Crypter is the libsodium crypto.Crypter implementation.
//
// The secret may be provided inline (KeyInline) or via a file (KeyPath);
// KeyTransform names how the raw key string is turned into the
// fixed-size binary key. The derived key is cached in key and guarded by
// mutex for lazy, concurrency-safe setup.
type Crypter struct {
	key []byte
	KeyInline string
	KeyPath string
	KeyTransform string
	mutex sync.RWMutex
}
// Name identifies this Crypter implementation.
func (crypter *Crypter) Name() string {
	return "Libsodium"
}
// CrypterFromKey creates a Crypter from an inline key string; the key is
// transformed according to keyTransform on first use.
func CrypterFromKey(key string, keyTransform string) crypto.Crypter {
	return &Crypter{KeyInline: key, KeyTransform: keyTransform}
}
// CrypterFromKeyPath creates a Crypter whose key is read from the file at
// path on first use, then transformed according to keyTransform.
func CrypterFromKeyPath(path string, keyTransform string) crypto.Crypter {
	return &Crypter{KeyPath: path, KeyTransform: keyTransform}
}
// setup lazily derives and caches the binary key using double-checked
// locking: a read-locked fast path when the key already exists, then the
// write lock with a re-check so the derivation happens exactly once.
func (crypter *Crypter) setup() (err error) {
	// Fast path: key already derived.
	crypter.mutex.RLock()
	if crypter.key != nil {
		crypter.mutex.RUnlock()
		return nil
	}
	crypter.mutex.RUnlock()
	crypter.mutex.Lock()
	defer crypter.mutex.Unlock()
	// Re-check under the write lock: another goroutine may have won the race.
	if crypter.key != nil {
		return nil
	}
	if crypter.KeyInline == "" && crypter.KeyPath == "" {
		return errors.New("libsodium Crypter: must have a key or key path")
	}
	keyString := crypter.KeyInline
	if keyString == "" {
		// No inline key: read it from the configured file.
		keyFileContents, err := ioutil.ReadFile(crypter.KeyPath)
		if err != nil {
			return fmt.Errorf("libsodium Crypter: unable to read key from file: %v", err)
		}
		keyString = strings.TrimSpace(string(keyFileContents))
	}
	// Derive the fixed-size binary key from the key string.
	key, err := keyTransform(keyString, crypter.KeyTransform, libsodiumKeybytes)
	if err != nil {
		return fmt.Errorf("libsodium Crypter: during key transform: %v", err)
	}
	crypter.key = key
	return nil
}
// Encrypt wraps writer in an encrypting WriteCloser, deriving the key on
// first use via setup.
func (crypter *Crypter) Encrypt(writer io.Writer) (io.WriteCloser, error) {
	if err := crypter.setup(); err != nil {
		return nil, err
	}
	return NewWriter(writer, crypter.key), nil
}
// Decrypt wraps reader in a decrypting Reader, deriving the key on first
// use via setup.
func (crypter *Crypter) Decrypt(reader io.Reader) (io.Reader, error) {
	if err := crypter.setup(); err != nil {
		return nil, err
	}
	return NewReader(reader, crypter.key), nil
}
// Compile-time assertion that ErrShortKey satisfies error.
var _ error = &ErrShortKey{}

// ErrShortKey reports a key shorter than minimalKeyLength.
type ErrShortKey struct {
	keyLength int
}

// Error implements the error interface.
func (e ErrShortKey) Error() string {
	return fmt.Sprintf("key length must not be less than %v, got %v", minimalKeyLength, e.keyLength)
}

// newErrShortKey builds an ErrShortKey recording the observed length.
func newErrShortKey(keyLength int) *ErrShortKey {
	return &ErrShortKey{
		keyLength: keyLength,
	}
}
| 1.5 | 2 |
main.go | kevinlebrun/tvshows | 2 | 21 | package main
import (
"flag"
"fmt"
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"strconv"
"strings"
"github.com/PuerkitoBio/goquery"
)
// Show is a TV show together with its (optionally loaded) episodes.
type Show struct {
	Name string
	Episodes []Episode
}

// Episode is a single episode of a show.
type Episode struct {
	Name string
	Season int64
	Num int64
	// Aired reports whether the episode has already been broadcast.
	Aired bool
}
// Catalog is a client for the pogdesign.co.uk TV calendar; its HTTP
// client keeps session cookies between requests.
type Catalog struct {
	Client *http.Client
}

// NewCatalog returns a Catalog whose HTTP client carries a cookie jar so
// the login session from Auth survives across subsequent calls.
func NewCatalog() *Catalog {
	// cookiejar.New with nil options never returns an error.
	jar, _ := cookiejar.New(nil)
	return &Catalog{Client: &http.Client{Jar: jar}}
}
// Auth logs in to the catalog with the given credentials. On success the
// session cookie is stored in the client's cookie jar and reused by the
// other methods.
func (c *Catalog) Auth(username, password string) error {
	// Fix: the username/password parameters were previously ignored and
	// placeholder literals were posted instead; post the real credentials.
	form := make(url.Values)
	form.Add("username", username)
	form.Add("password", password)
	form.Add("sub_login", "Account Login")
	data := strings.NewReader(form.Encode())
	req, err := http.NewRequest("POST", "http://www.pogdesign.co.uk/cat/", data)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
	resp, err := c.Client.Do(req)
	if err != nil {
		return err
	}
	// The body is not needed; close it so the connection can be reused.
	resp.Body.Close()
	return nil
}
// Followed returns the shows the logged-in user follows (names only; the
// Episodes slices are left empty). Auth must have been called first.
func (c *Catalog) Followed() ([]Show, error) {
	req, err := http.NewRequest("GET", "http://www.pogdesign.co.uk/cat/profile/all-shows", nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, err
	}
	shows := make([]Show, 0)
	doc.Find("a.prfimg.prfmed").Each(func(i int, s *goquery.Selection) {
		// Drop the <strong> badge inside the span so only the title remains.
		s.Find("span > strong").Remove()
		show := Show{
			Name: strings.Trim(s.Find("span").Text(), " \n\t"),
		}
		shows = append(shows, show)
	})
	return shows, nil
}
// Unwatched returns the shows that have unwatched episodes, each show
// populated with those episodes. Auth must have been called first.
func (c *Catalog) Unwatched() ([]Show, error) {
	req, err := http.NewRequest("GET", "http://www.pogdesign.co.uk/cat/profile/unwatched-episodes", nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, err
	}
	shows := make([]Show, 0)
	// Fix: a failure while fetching a show's episode list previously
	// panicked from inside the Each closure; record the first error and
	// return it through the function's error result instead.
	var firstErr error
	doc.Find("a.prfimg.prfmed").Each(func(i int, s *goquery.Selection) {
		if firstErr != nil {
			return
		}
		if url, exists := s.Attr("href"); exists {
			episodes, err := c.UnwatchedEpisodesByURL(url)
			if err != nil {
				firstErr = err
				return
			}
			show := Show{
				Name: strings.Trim(s.Find("span").Text(), " \n\t"),
				Episodes: episodes,
			}
			shows = append(shows, show)
		}
	})
	if firstErr != nil {
		return nil, firstErr
	}
	return shows, nil
}
// UnwatchedEpisodesByURL fetches a show page (url is a path relative to
// www.pogdesign.co.uk) and scrapes its unwatched episodes.
func (c *Catalog) UnwatchedEpisodesByURL(url string) ([]Episode, error) {
	req, err := http.NewRequest("GET", "http://www.pogdesign.co.uk"+url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.Client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil, err
	}
	episodes := make([]Episode, 0)
	doc.Find(".ep.info").Each(func(i int, s *goquery.Selection) {
		// Parse errors yield 0 for the episode/season numbers (ignored).
		num, _ := strconv.ParseInt(s.Find(".pnumber").Text(), 10, 64)
		// The season number is carried in the id of the nearest preceding <h2>.
		season, _ := strconv.ParseInt(s.PrevAllFiltered("h2.xxla").Eq(0).AttrOr("id", ""), 10, 64)
		// Clone before stripping markup so the live document is untouched.
		name := s.Clone()
		name.Find("span").Remove()
		name.Find("label").Remove()
		episode := Episode{
			Name: strings.Trim(name.Text(), " \n\t"),
			Num: num,
			Season: season,
			Aired: s.Children().Eq(1).Text() == "AIRED",
		}
		episodes = append(episodes, episode)
	})
	return episodes, nil
}
// main authenticates against www.pogdesign.co.uk/cat with the -username
// and -password flags and runs one command: "followed" prints the names
// of followed shows; "unwatched" prints aired, unwatched episodes.
func main() {
	var err error
	var shows []Show
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s <command>\n", os.Args[0])
		flag.PrintDefaults()
	}
	var (
		username = flag.String("username", "", "www.pogdesign.co.uk/cat username")
		password = flag.String("password", "", "www.pogdesign.co.uk/cat password")
	)
	flag.Parse()
	// Exactly one positional argument (the command) is required.
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}
	command := flag.Arg(0)
	catalog := NewCatalog()
	err = catalog.Auth(*username, *password)
	if err != nil {
		panic(err)
	}
	switch command {
	case "followed":
		shows, err = catalog.Followed()
		if err != nil {
			panic(err)
		}
		for _, show := range shows {
			fmt.Println(show.Name)
		}
	case "unwatched":
		shows, err = catalog.Unwatched()
		if err != nil {
			panic(err)
		}
		for _, show := range shows {
			for _, episode := range show.Episodes {
				// Only episodes that have already aired are printed.
				if episode.Aired {
					fmt.Printf("%s s%02d e%02d [%s]\n", show.Name, episode.Season, episode.Num, episode.Name)
				}
			}
		}
	default:
		fmt.Printf("Unknown command %q\n", command)
		os.Exit(1)
	}
}
| 1.421875 | 1 |
src/crypto/aes/cipher_asm.go | zos-go/go | 22 | 29 | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64 s390x
package aes
// The following functions are defined in asm_$GOARCH.s.

// hasAsm reports whether the CPU provides the AES instructions used by
// the assembly implementations below.
func hasAsm() bool

// encryptBlockAsm/decryptBlockAsm perform one AES block operation with
// the nr-round expanded key schedule at xk.
func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)

// expandKeyAsm expands key into the encryption and decryption schedules.
func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)

// useAsm is decided once at package init.
var useAsm = hasAsm()
// encryptBlock encrypts one block from src into dst with the expanded key
// xk, preferring the assembly implementation when available.
func encryptBlock(xk []uint32, dst, src []byte) {
	if useAsm {
		// len(xk)/4-1 is the round count implied by the schedule length.
		encryptBlockAsm(len(xk)/4-1, &xk[0], &dst[0], &src[0])
	} else {
		encryptBlockGo(xk, dst, src)
	}
}
// decryptBlock decrypts one block from src into dst with the expanded key
// xk, preferring the assembly implementation when available.
func decryptBlock(xk []uint32, dst, src []byte) {
	if useAsm {
		// len(xk)/4-1 is the round count implied by the schedule length.
		decryptBlockAsm(len(xk)/4-1, &xk[0], &dst[0], &src[0])
	} else {
		decryptBlockGo(xk, dst, src)
	}
}
// expandKey derives the encryption and decryption key schedules from key.
func expandKey(key []byte, enc, dec []uint32) {
	if useAsm {
		// Round count by key size per FIPS-197: 128->10, 192->12, 256->14.
		rounds := 10
		switch len(key) {
		case 128 / 8:
			rounds = 10
		case 192 / 8:
			rounds = 12
		case 256 / 8:
			rounds = 14
		}
		// NOTE(review): any other key length silently falls back to 10
		// rounds; presumably callers have validated len(key) — confirm.
		expandKeyAsm(rounds, &key[0], &enc[0], &dec[0])
	} else {
		expandKeyGo(key, enc, dec)
	}
}
| 1.835938 | 2 |
db/security/auth.go | fossabot/noah | 3 | 37 | /*
* Copyright (c) 2019 Ready Stock
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package security
import (
"crypto/tls"
"github.com/readystock/noah/db/util/protoutil"
"github.com/pkg/errors"
)
const (
// NodeUser is used by nodes for intra-cluster traffic.
NodeUser = "node"
// RootUser is the default cluster administrator.
RootUser = "root"
)
// UserAuthHook authenticates a user based on their username and whether
// their connection originates from a client (true) or another node in the
// cluster (false).
type UserAuthHook func(string, bool) error
// GetCertificateUser extracts the username from the CommonName of the
// first client certificate. It fails when the connection is not TLS or
// presented no client certificates.
func GetCertificateUser(tlsState *tls.ConnectionState) (string, error) {
	if tlsState == nil {
		return "", errors.Errorf("request is not using TLS")
	}
	if len(tlsState.PeerCertificates) == 0 {
		return "", errors.Errorf("no client certificates in request")
	}
	// The go server handshake code verifies the first certificate, using
	// any following certificates as intermediates. See:
	// https://github.com/golang/go/blob/go1.8.1/src/crypto/tls/handshake_server.go#L723:L742
	return tlsState.PeerCertificates[0].Subject.CommonName, nil
}
// RequestWithUser must be implemented by `roachpb.Request`s which are
// arguments to methods that are not permitted to skip user checks.
// GetUser returns the user on whose behalf the request is made.
type RequestWithUser interface {
	GetUser() string
}
// ProtoAuthHook builds an authentication hook based on the security
// mode and client certificate.
// The protoutil.Message passed to the hook must implement RequestWithUser;
// the actual user check is delegated to UserAuthCertHook.
func ProtoAuthHook(
	insecureMode bool, tlsState *tls.ConnectionState,
) (func(protoutil.Message, bool) error, error) {
	userHook, err := UserAuthCertHook(insecureMode, tlsState)
	if err != nil {
		return nil, err
	}
	return func(request protoutil.Message, clientConnection bool) error {
		// RequestWithUser must be implemented.
		requestWithUser, ok := request.(RequestWithUser)
		if !ok {
			return errors.Errorf("unknown request type: %T", request)
		}
		if err := userHook(requestWithUser.GetUser(), clientConnection); err != nil {
			return errors.Errorf("%s error in request: %s", err, request)
		}
		return nil
	}, nil
}
// UserAuthCertHook builds an authentication hook based on the security
// mode and client certificate. In secure mode the certificate's
// CommonName is extracted once, up front, and compared against the
// requested user on every call of the returned hook.
func UserAuthCertHook(insecureMode bool, tlsState *tls.ConnectionState) (UserAuthHook, error) {
	var certUser string
	if !insecureMode {
		var err error
		certUser, err = GetCertificateUser(tlsState)
		if err != nil {
			return nil, err
		}
	}
	return func(requestedUser string, clientConnection bool) error {
		// TODO(marc): we may eventually need stricter user syntax rules.
		if len(requestedUser) == 0 {
			return errors.New("user is missing")
		}
		// Intra-cluster (non-client) traffic may only act as NodeUser.
		if !clientConnection && requestedUser != NodeUser {
			return errors.Errorf("user %s is not allowed", requestedUser)
		}
		// If running in insecure mode, we have nothing to verify it against.
		if insecureMode {
			return nil
		}
		// The client certificate user must match the requested user,
		// except if the certificate user is NodeUser, which is allowed to
		// act on behalf of all other users.
		if !(certUser == NodeUser || certUser == requestedUser) {
			return errors.Errorf("requested user is %s, but certificate is for %s", requestedUser, certUser)
		}
		return nil
	}, nil
}
// UserAuthPasswordHook builds an authentication hook based on the security
// mode, password, and its potentially matching hash. Password auth is only
// available to client connections, and RootUser must use certificates.
func UserAuthPasswordHook(insecureMode bool, password string, hashedPassword []byte) UserAuthHook {
	return func(requestedUser string, clientConnection bool) error {
		if len(requestedUser) == 0 {
			return errors.New("user is missing")
		}
		if !clientConnection {
			return errors.New("password authentication is only available for client connections")
		}
		if insecureMode {
			return nil
		}
		if requestedUser == RootUser {
			return errors.Errorf("user %s must use certificate authentication instead of password authentication", RootUser)
		}
		// If the requested user has an empty password, disallow authentication.
		// An empty submitted password short-circuits without touching the hash.
		if len(password) == 0 || CompareHashAndPassword(hashedPassword, password) != nil {
			return errors.New("invalid password")
		}
		return nil
	}
}
| 1.492188 | 1 |
utils/entity/cve_sa/request.go | zhang-jian-jun/cve-sa-backend | 2 | 45 | package cveSa
// RequestData is the search request for CVE/SA queries.
type RequestData struct {
	KeyWord string `json:"keyword"`
	Type string `json:"type"`
	// Year is interface{} — presumably callers send either a number or a
	// string; TODO confirm against the API consumers.
	Year interface{} `json:"year"`
	Status string `json:"status"`
	PackageName string `json:"packageName"`
	Pages Pages `json:"pages"`
}

// Pages carries pagination parameters (1-based page number and page size
// — NOTE(review): base not verifiable from this file; confirm).
type Pages struct {
	Page int `json:"page"`
	Size int `json:"size"`
}

// OeCompSearchRequest is the search request for compatibility queries.
type OeCompSearchRequest struct {
	Os string `json:"os"`
	Architecture string `json:"architecture"`
	KeyWord string `json:"keyword"`
	Lang string `json:"lang"`
	Cpu string `json:"cpu"`
	Pages Pages `json:"pages"`
}

// RequestOsv is the search request for OS-vendor (OSV) queries.
type RequestOsv struct {
	KeyWord string `json:"keyword"`
	OsvName string `json:"osvName"`
	Type string `json:"type"`
	Pages Pages `json:"pages"`
}

// Osv describes an OS-vendor release and its test results.
type Osv struct {
	Arch string `json:"arch"`
	OsvName string `json:"osv_name"`
	OsVersion string `json:"os_version"`
	OsDownloadLink string `json:"os_download_link"`
	Type string `json:"type"`
	Date string `json:"date"`
	Details string `json:"details"`
	FriendlyLink string `json:"friendly_link"`
	// TotalResult is a string here — presumably a serialized count; confirm.
	TotalResult string `json:"total_result"`
	CheckSum string `json:"checksum"`
	BaseOpeneulerVersion string `json:"base_openeuler_version"`
	ToolsResult []Record `json:"tools_result"`
	PlatformResult []Record `json:"platform_result"`
}

// Record is a single named test result with its pass percentage.
type Record struct {
	Name string `json:"name"`
	Percent string `json:"percent"`
	Result string `json:"result"`
}
| 0.742188 | 1 |
vendor/github.com/google/gopacket/layers/tcpip.go | rhuss/dash2alex | 52 | 53 | // Copyright 2012 Google, Inc. All rights reserved.
// Copyright 2009-2011 <NAME>. All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
package layers
import (
"errors"
"fmt"
"github.com/google/gopacket"
)
// Checksum computation for TCP/UDP.

// tcpipchecksum is embedded by transport layers to carry the
// network-layer pseudoheader used when computing the transport checksum.
type tcpipchecksum struct {
	pseudoheader tcpipPseudoHeader
}

// tcpipPseudoHeader is satisfied by the network layers (IPv4/IPv6 below)
// that can contribute a pseudoheader sum.
type tcpipPseudoHeader interface {
	pseudoheaderChecksum() (uint32, error)
}
// pseudoheaderChecksum sums the IPv4 source and destination addresses as
// big-endian 16-bit words — the address portion of the TCP/UDP
// pseudoheader. AddressTo4 normalizes the addresses to 4-byte form first.
func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
	if err := ip.AddressTo4(); err != nil {
		return 0, err
	}
	csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
	csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
	csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
	csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
	return csum, nil
}
// pseudoheaderChecksum sums the 16-byte IPv6 source and destination
// addresses as big-endian 16-bit words — the address portion of the
// TCP/UDP pseudoheader. AddressTo16 normalizes the addresses first.
func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
	if err := ip.AddressTo16(); err != nil {
		return 0, err
	}
	for i := 0; i < 16; i += 2 {
		csum += uint32(ip.SrcIP[i]) << 8
		csum += uint32(ip.SrcIP[i+1])
		csum += uint32(ip.DstIP[i]) << 8
		csum += uint32(ip.DstIP[i+1])
	}
	return csum, nil
}
// tcpipChecksum calculates the TCP/IP checksum defined in RFC 1071 over
// data. The passed-in csum is any initial checksum contribution already
// computed (e.g. the pseudoheader sum); the result is the one's
// complement of the folded 16-bit word sum.
func tcpipChecksum(data []byte, csum uint32) uint16 {
	// Sum the data as big-endian 16-bit words. Indexing manually here is
	// measurably faster than binary.BigEndian.Uint16 per word.
	i := 0
	for ; i+1 < len(data); i += 2 {
		csum += uint32(data[i])<<8 | uint32(data[i+1])
	}
	// A trailing odd byte is treated as the high half of a final word.
	if i < len(data) {
		csum += uint32(data[i]) << 8
	}
	// Fold the carries back into the low 16 bits.
	for csum > 0xffff {
		csum = (csum >> 16) + (csum & 0xffff)
	}
	return ^uint16(csum + (csum >> 16))
}
// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the
// serialized TCP or UDP header plus its payload, with the checksum zero'd
// out. headerProtocol is the IP protocol number of the upper-layer header.
// SetNetworkLayerForChecksum must have been called first so that the
// pseudoheader (addresses, protocol, length) can be folded in.
func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) {
	if c.pseudoheader == nil {
		return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
	}
	length := uint32(len(headerAndPayload))
	csum, err := c.pseudoheader.pseudoheaderChecksum()
	if err != nil {
		return 0, err
	}
	// Remaining pseudoheader fields: protocol number and 32-bit length
	// contributed as two 16-bit halves.
	csum += uint32(headerProtocol)
	csum += length & 0xffff
	csum += length >> 16
	return tcpipChecksum(headerAndPayload, csum), nil
}
// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
// This is needed for computing the checksum when serializing, since TCP/IP transport
// layer checksums depends on fields in the IPv4 or IPv6 layer that contains it.
// The passed in layer must be an *IPv4 or *IPv6; any other type is rejected.
func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
	switch v := l.(type) {
	case *IPv4:
		i.pseudoheader = v
	case *IPv6:
		i.pseudoheader = v
	default:
		return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
	}
	return nil
}
| 2.125 | 2 |
service/iam/api_op_AttachRolePolicy.go | int-tt/aws-sdk-go-v2 | 1 | 61 | // Code generated by smithy-go-codegen DO NOT EDIT.
package iam
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Attaches the specified managed policy to the specified IAM role. When you attach
// a managed policy to a role, the managed policy becomes part of the role's
// permission (access) policy. You cannot use a managed policy as the role's trust
// policy. The role's trust policy is created at the same time as the role, using
// CreateRole. You can update a role's trust policy using UpdateAssumeRolePolicy.
// Use this API to attach a managed policy to a role. To embed an inline policy in
// a role, use PutRolePolicy. For more information about policies, see Managed
// Policies and Inline Policies
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
// in the IAM User Guide.
func (c *Client) AttachRolePolicy(ctx context.Context, params *AttachRolePolicyInput, optFns ...func(*Options)) (*AttachRolePolicyOutput, error) {
if params == nil {
params = &AttachRolePolicyInput{}
}
result, metadata, err := c.invokeOperation(ctx, "AttachRolePolicy", params, optFns, addOperationAttachRolePolicyMiddlewares)
if err != nil {
return nil, err
}
out := result.(*AttachRolePolicyOutput)
out.ResultMetadata = metadata
return out, nil
}
type AttachRolePolicyInput struct {
// The Amazon Resource Name (ARN) of the IAM policy you want to attach. For more
// information about ARNs, see Amazon Resource Names (ARNs) and AWS Service
// Namespaces
// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
// the AWS General Reference.
//
// This member is required.
PolicyArn *string
// The name (friendly name, not ARN) of the role to attach the policy to. This
// parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) a
// string of characters consisting of upper and lowercase alphanumeric characters
// with no spaces. You can also include any of the following characters: _+=,.@-
//
// This member is required.
RoleName *string
}
type AttachRolePolicyOutput struct {
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func addOperationAttachRolePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsquery_serializeOpAttachRolePolicy{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAttachRolePolicy{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addOpAttachRolePolicyValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAttachRolePolicy(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opAttachRolePolicy(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "iam",
OperationName: "AttachRolePolicy",
}
}
| 1.382813 | 1 |
memcache_test.go | ahampton/memcache | 0 | 69 | /*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package memcache provides a client for the memcached cache server.
package memcache
import (
"fmt"
"net"
"os"
"os/exec"
"strings"
"testing"
"time"
)
const testServer = "localhost:11211"
func (c *Client) totalOpen() int {
c.mu.Lock()
defer c.mu.Unlock()
count := 0
for _, v := range c.freeconn {
count += len(v)
}
return count
}
// newLocalhostServer connects to a memcached expected at testServer,
// flushes it so every test starts from a clean slate, and returns a
// Client. The calling test is skipped when nothing is listening there.
func newLocalhostServer(tb testing.TB) *Client {
	c, err := net.Dial("tcp", testServer)
	if err != nil {
		tb.Skipf("skipping test; no server running at %s", testServer)
		return nil
	}
	// Best-effort flush over the raw connection before building the client.
	c.Write([]byte("flush_all\r\n"))
	c.Close()
	client, err := New(testServer)
	if err != nil {
		tb.Fatal(err)
	}
	return client
}
// newUnixServer launches a memcached child process listening on a
// per-process unix socket and returns the running command plus a Client
// connected to it. The calling test is skipped when the memcached binary
// is not installed.
func newUnixServer(tb testing.TB) (*exec.Cmd, *Client) {
	sock := fmt.Sprintf("/tmp/test-gomemcache-%d.sock", os.Getpid())
	os.Remove(sock)
	cmd := exec.Command("memcached", "-s", sock)
	if err := cmd.Start(); err != nil {
		tb.Skip("skipping test; couldn't find memcached")
		return nil, nil
	}
	// Wait a bit for the socket to appear.
	// NOTE(review): if it never appears we fall through and let New report
	// the failure — consider failing explicitly after the loop.
	for i := 0; i < 10; i++ {
		if _, err := os.Stat(sock); err == nil {
			break
		}
		time.Sleep(time.Duration(25*i) * time.Millisecond)
	}
	c, err := New(sock)
	if err != nil {
		tb.Fatal(err)
	}
	return cmd, c
}
// TestLocalhost runs the shared suite against a memcached at testServer.
func TestLocalhost(t *testing.T) {
	testWithClient(t, newLocalhostServer(t))
}
// Run the memcached binary as a child process and connect to its unix socket.
func TestUnixSocket(t *testing.T) {
	cmd, c := newUnixServer(t)
	// Kill the child, then reap it, when the test finishes (defers run LIFO).
	defer cmd.Wait()
	defer cmd.Process.Kill()
	testWithClient(t, c)
}
func testWithClient(t *testing.T, c *Client) {
checkErr := func(err error, format string, args ...interface{}) {
if err != nil {
t.Fatalf(format, args...)
}
}
mustSet := func(it *Item) {
if err := c.Set(it); err != nil {
t.Fatalf("failed to Set %#v: %v", *it, err)
}
}
// Set
foo := &Item{Key: "foo", Value: []byte("fooval"), Flags: 123}
err := c.Set(foo)
checkErr(err, "first set(foo): %v", err)
err = c.Set(foo)
checkErr(err, "second set(foo): %v", err)
// Get
it, err := c.Get("foo")
checkErr(err, "get(foo): %v", err)
if it.Key != "foo" {
t.Errorf("get(foo) Key = %q, want foo", it.Key)
}
if string(it.Value) != "fooval" {
t.Errorf("get(foo) Value = %q, want fooval", string(it.Value))
}
if it.Flags != 123 {
t.Errorf("get(foo) Flags = %v, want 123", it.Flags)
}
// Get non-existant
_, err = c.Get("not-exists")
if err != ErrCacheMiss {
t.Errorf("get(not-exists): expecting %v, got %v instead", ErrCacheMiss, err)
}
// Get and set a unicode key
quxKey := "Hello_世界"
qux := &Item{Key: quxKey, Value: []byte("hello world")}
err = c.Set(qux)
checkErr(err, "first set(Hello_世界): %v", err)
it, err = c.Get(quxKey)
checkErr(err, "get(Hello_世界): %v", err)
if it.Key != quxKey {
t.Errorf("get(Hello_世界) Key = %q, want Hello_世界", it.Key)
}
if string(it.Value) != "hello world" {
t.Errorf("get(Hello_世界) Value = %q, want hello world", string(it.Value))
}
// Set malformed keys
malFormed := &Item{Key: "foo bar", Value: []byte("foobarval")}
err = c.Set(malFormed)
if err != ErrMalformedKey {
t.Errorf("set(foo bar) should return ErrMalformedKey instead of %v", err)
}
malFormed = &Item{Key: "foo" + string(0x7f), Value: []byte("foobarval")}
err = c.Set(malFormed)
if err != ErrMalformedKey {
t.Errorf("set(foo<0x7f>) should return ErrMalformedKey instead of %v", err)
}
// SetQuietly
quiet := &Item{Key: "quiet", Value: []byte("Shhh")}
err = c.SetQuietly(quiet)
checkErr(err, "setQuietly: %v", err)
it, err = c.Get(quiet.Key)
checkErr(err, "setQuietly: get: %v", err)
if it.Key != quiet.Key {
t.Errorf("setQuietly: get: Key = %q, want %s", it.Key, quiet.Key)
}
if string(it.Value) != string(quiet.Value) {
t.Errorf("setQuietly: get: Value = %q, want %q", string(it.Value), string(quiet.Value))
}
// Add
bar := &Item{Key: "bar", Value: []byte("barval")}
err = c.Add(bar)
checkErr(err, "first add(bar): %v", err)
if err := c.Add(bar); err != ErrNotStored {
t.Fatalf("second add(bar) want ErrNotStored, got %v", err)
}
// GetMulti
m, err := c.GetMulti([]string{"foo", "bar"})
checkErr(err, "GetMulti: %v", err)
if g, e := len(m), 2; g != e {
t.Errorf("GetMulti: got len(map) = %d, want = %d", g, e)
}
if _, ok := m["foo"]; !ok {
t.Fatalf("GetMulti: didn't get key 'foo'")
}
if _, ok := m["bar"]; !ok {
t.Fatalf("GetMulti: didn't get key 'bar'")
}
if g, e := string(m["foo"].Value), "fooval"; g != e {
t.Errorf("GetMulti: foo: got %q, want %q", g, e)
}
if g, e := string(m["bar"].Value), "barval"; g != e {
t.Errorf("GetMulti: bar: got %q, want %q", g, e)
}
// SetMulti
baz1 := &Item{Key: "baz1", Value: []byte("baz1val")}
baz2 := &Item{Key: "baz2", Value: []byte("baz2val"), Flags: 123}
err = c.SetMulti([]*Item{baz1, baz2})
checkErr(err, "first SetMulti: %v", err)
err = c.SetMulti([]*Item{baz1, baz2})
checkErr(err, "second SetMulti: %v", err)
m, err = c.GetMulti([]string{baz1.Key, baz2.Key})
checkErr(err, "SetMulti: %v", err)
if g, e := len(m), 2; g != e {
t.Errorf("SetMulti: got len(map) = %d, want = %d", g, e)
}
if _, ok := m[baz1.Key]; !ok {
t.Fatalf("SetMulti: didn't get key '%s'", baz1.Key)
}
if _, ok := m[baz2.Key]; !ok {
t.Fatalf("SetMulti: didn't get key '%s'", baz2.Key)
}
if g, e := string(m[baz1.Key].Value), string(baz1.Value); g != e {
t.Errorf("SetMulti: got %q, want %q", g, e)
}
if g, e := string(m[baz2.Key].Value), string(baz2.Value); g != e {
t.Errorf("SetMulti: got %q, want %q", g, e)
}
if m[baz1.Key].Flags != baz1.Flags {
t.Errorf("SetMulti: Flags = %v, want %v", m[baz1.Key].Flags, baz1.Flags)
}
if m[baz2.Key].Flags != baz2.Flags {
t.Errorf("SetMulti: Flags = %v, want %v", m[baz2.Key].Flags, baz2.Flags)
}
// SetMultiQuietly
quiet1 := &Item{Key: "quiet1", Value: []byte("quiet1val")}
quiet2 := &Item{Key: "quiet2", Value: []byte("quiet2val"), Flags: 123}
err = c.SetMulti([]*Item{quiet1, quiet2})
checkErr(err, "first SetMultiQuietly: %v", err)
err = c.SetMulti([]*Item{quiet1, quiet2})
checkErr(err, "second SetMultiQuietly: %v", err)
m, err = c.GetMulti([]string{quiet1.Key, quiet2.Key})
checkErr(err, "SetMultiQuietly: %v", err)
if g, e := len(m), 2; g != e {
t.Errorf("SetMultiQuietly: got len(map) = %d, want = %d", g, e)
}
if _, ok := m[quiet1.Key]; !ok {
t.Fatalf("SetMultiQuietly: didn't get key '%s'", quiet1.Key)
}
if _, ok := m[quiet2.Key]; !ok {
t.Fatalf("SetMultiQuietly: didn't get key '%s'", quiet2.Key)
}
if g, e := string(m[quiet1.Key].Value), string(quiet1.Value); g != e {
t.Errorf("SetMultiQuietly: got %q, want %q", g, e)
}
if g, e := string(m[quiet2.Key].Value), string(quiet2.Value); g != e {
t.Errorf("SetMultiQuietly: got %q, want %q", g, e)
}
if m[quiet1.Key].Flags != quiet1.Flags {
t.Errorf("SetMultiQuietly: Flags = %v, want %v", m[quiet1.Key].Flags, quiet1.Flags)
}
if m[quiet2.Key].Flags != quiet2.Flags {
t.Errorf("SetMultiQuietly: Flags = %v, want %v", m[quiet2.Key].Flags, quiet2.Flags)
}
// Delete
key := "foo"
item, err := c.Get(key)
checkErr(err, "pre-Delete: %v", err)
if item == nil {
t.Error("pre-Delete want item, got nil")
}
err = c.Delete(key)
checkErr(err, "Delete: %v", err)
_, err = c.Get(key)
if err != ErrCacheMiss {
t.Error("post-Delete want ErrCacheMiss, got nil")
}
err = c.Delete(key)
if err != ErrCacheMiss {
t.Error("post-Delete want ErrCacheMiss, got nil")
}
// DeleteQuietly
key = "quiet"
item, err = c.Get(key)
checkErr(err, "pre-DeleteQuietly: %v", err)
if item == nil {
t.Error("pre-DeleteQuietly want item, got nil")
}
err = c.DeleteQuietly(key)
checkErr(err, "DeleteQuietly: %v", err)
_, err = c.Get(key)
if err != ErrCacheMiss {
t.Errorf("post-DeleteQuietly want ErrCacheMiss, got %v", err)
}
err = c.DeleteQuietly(key)
if err != nil {
t.Errorf("post-DeleteQuietly want nil err, got %v", err)
}
// DeleteMulti
keys := []string{"baz1", "baz2"}
items, err := c.GetMulti(keys)
checkErr(err, "pre-DeleteMulti: %v", err)
if len(items) != len(keys) {
t.Errorf("pre-DeleteMulti want results, got %v", items)
}
err = c.DeleteMulti(keys)
checkErr(err, "DeleteMulti: %v", err)
items, err = c.GetMulti(keys)
checkErr(err, "post-DeleteMulti: %v", err)
if len(items) != 0 {
t.Errorf("post-DeleteMulti want no results, got %v", items)
}
err = c.DeleteMulti(keys)
if err == nil {
t.Error("post-DeleteMulti want err, got nil")
}
// DeleteMultiQuietly
keys = []string{"quiet1", "quiet2"}
items, err = c.GetMulti(keys)
checkErr(err, "pre-DeleteMultiQuietly: %v", err)
if len(items) != len(keys) {
t.Errorf("pre-DeleteMultiQuietly want results, got %v", items)
}
err = c.DeleteMultiQuietly(keys)
checkErr(err, "DeleteMultiQuietly: %v", err)
items, err = c.GetMulti(keys)
checkErr(err, "post-DeleteMultiQuietly: %v", err)
if len(items) != 0 {
t.Errorf("post-DeleteMultiQuietly want no results, got %v", items)
}
err = c.DeleteMultiQuietly(keys)
if err != nil {
t.Errorf("post-DeleteMultiQuietly want nil err, got %v", err)
}
// Incr/Decr
mustSet(&Item{Key: "num", Value: []byte("42")})
n, err := c.Increment("num", 8)
checkErr(err, "Increment num + 8: %v", err)
if n != 50 {
t.Fatalf("Increment num + 8: want=50, got=%d", n)
}
n, err = c.Decrement("num", 49)
checkErr(err, "Decrement: %v", err)
if n != 1 {
t.Fatalf("Decrement 49: want=1, got=%d", n)
}
err = c.Delete("num")
checkErr(err, "delete num: %v", err)
n, err = c.Increment("num", 1)
if err != ErrCacheMiss {
t.Fatalf("increment post-delete: want ErrCacheMiss, got %v", err)
}
mustSet(&Item{Key: "num", Value: []byte("not-numeric")})
n, err = c.Increment("num", 1)
if err != ErrBadIncrDec {
t.Fatalf("increment non-number: want %v, got %v", ErrBadIncrDec, err)
}
// Invalid key
if err := c.Set(&Item{Key: strings.Repeat("f", 251), Value: []byte("bar")}); err != ErrMalformedKey {
t.Errorf("expecting ErrMalformedKey when using key too long, got nil")
}
// Flush
_, err = c.Get("bar")
checkErr(err, "get(bar): %v", err)
err = c.Flush(0)
checkErr(err, "flush: %v", err)
_, err = c.Get("bar")
if err != ErrCacheMiss {
t.Fatalf("post-flush: want ErrCacheMiss, got %v", err)
}
}
| 1.820313 | 2 |
Godeps/_workspace/src/github.com/ugorji/go/codec/noop.go | lhuard1A/origin | 37 | 77 | // Copyright (c) 2012-2015 <NAME>. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math/rand"
"time"
)
// NoopHandle returns a no-op handle. It basically does nothing.
// It is only useful for benchmarking, as it gives an idea of the
// overhead from the codec framework.
//
// LIBRARY USERS: *** DO NOT USE ***
// NoopHandle returns a no-op handle. It basically does nothing.
// It is only useful for benchmarking, as it gives an idea of the
// overhead from the codec framework.
//
// The handle is seeded with slen pregenerated strings/byte slices of
// increasing length (1..slen), each filled with a single repeated letter.
//
// LIBRARY USERS: *** DO NOT USE ***
func NoopHandle(slen int) *noopHandle {
	h := &noopHandle{}
	h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
	h.B = make([][]byte, slen)
	h.S = make([]string, slen)
	for i := range h.S {
		// Each entry i is (i+1) copies of the byte 'a'+i.
		buf := make([]byte, i+1)
		for j := range buf {
			buf[j] = 'a' + byte(i)
		}
		h.B[i] = buf
		h.S[i] = string(buf)
	}
	return h
}
// noopHandle does nothing.
// It is used to simulate the overhead of the codec framework.
type noopHandle struct {
	BasicHandle
	binaryEncodingType
	noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
}

// noopDrv is a combined encDriver/decDriver whose encode methods are no-ops
// and whose decode methods return values drawn from pregenerated fixtures.
type noopDrv struct {
	i    int        // call counter driving the deterministic m() sequence
	S    []string   // pregenerated strings served by DecodeString
	B    [][]byte   // pregenerated byte slices served by DecodeBytes
	mks  []bool     // stack. if map (true), else if array (false)
	mk   bool       // top of stack. what container are we on? map or array?
	ct   valueType  // last request for IsContainerType.
	cb   bool       // last response for IsContainerType.
	rand *rand.Rand // RNG behind r(); seeded in NoopHandle
}
// r returns a random int in [0, v) from the handle's private RNG.
func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }

// m advances the call counter and returns it modulo v, yielding a
// deterministic, cycling sequence of pseudo-values.
func (h *noopDrv) m(v int) int { h.i++; return h.i % v }

// The same noopDrv instance serves as both the encode and decode driver.
func (h *noopDrv) newEncDriver(_ *Encoder) encDriver { return h }
func (h *noopDrv) newDecDriver(_ *Decoder) decDriver { return h }
// --- encDriver
// stack functions (for map and array)
// start pushes a new container frame onto the stack; b records the
// container kind (see the mks field comment) and becomes the current top.
func (h *noopDrv) start(b bool) {
	h.mks = append(h.mks, b)
	h.mk = b
}

// end pops the current container frame and restores the parent frame's
// kind as the new top (false once the stack is empty).
func (h *noopDrv) end() {
	n := len(h.mks) - 1
	h.mks = h.mks[:n]
	h.mk = false
	if n > 0 {
		h.mk = h.mks[n-1]
	}
}
// Encode methods are all no-ops: nothing is written anywhere. Only the
// container start/end calls have an effect, maintaining the mks stack.
func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) EncodeNil()                              {}
func (h *noopDrv) EncodeInt(i int64)                       {}
func (h *noopDrv) EncodeUint(i uint64)                     {}
func (h *noopDrv) EncodeBool(b bool)                       {}
func (h *noopDrv) EncodeFloat32(f float32)                 {}
func (h *noopDrv) EncodeFloat64(f float64)                 {}
func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder)     {}

// NOTE(review): EncodeArrayStart pushes true and EncodeMapStart pushes false,
// the opposite of the mks field comment ("if map (true)") and of
// ReadMapStart/ReadArrayStart below. Harmless for a no-op driver (mk only
// biases decode pseudo-values), but worth confirming against upstream.
func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
func (h *noopDrv) EncodeMapStart(length int)   { h.start(false) }
func (h *noopDrv) EncodeEnd()                  { h.end() }

func (h *noopDrv) EncodeString(c charEncoding, v string)      {}
func (h *noopDrv) EncodeSymbol(v string)                      {}
func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}

func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
// ---- decDriver
// Decode methods return pseudo-values from the m() counter or from the
// pregenerated S/B fixtures; no input stream is ever consumed.
func (h *noopDrv) initReadNext()                              {}
func (h *noopDrv) CheckBreak() bool                           { return false }
func (h *noopDrv) IsBuiltinType(rt uintptr) bool              { return false }
func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{})    {}
func (h *noopDrv) DecodeInt(bitsize uint8) (i int64)          { return int64(h.m(15)) }
func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64)       { return uint64(h.m(35)) }
func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
func (h *noopDrv) DecodeBool() (b bool)                       { return h.m(2) == 0 }
func (h *noopDrv) DecodeString() (s string)                   { return h.S[h.m(8)] }

// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) }

// DecodeBytes ignores its arguments and serves one of the fixture slices.
func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }

func (h *noopDrv) ReadEnd() { h.end() }
// toggle map/slice
// ReadMapStart/ReadArrayStart push a container frame and report a random
// small length (0..9); the framework then drives that many element reads.
func (h *noopDrv) ReadMapStart() int   { h.start(true); return h.m(10) }
func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }

// IsContainerType pseudo-randomly answers container-type queries, with one
// special case: consecutive map/array queries alternate their answers so
// struct decoding (which probes both) always gets exactly one "yes".
func (h *noopDrv) IsContainerType(vt valueType) bool {
	// return h.m(2) == 0
	// handle kStruct
	if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
		h.cb = !h.cb
		h.ct = vt
		return h.cb
	}
	// go in a loop and check it.
	h.ct = vt
	h.cb = h.m(7) == 0
	return h.cb
}
// TryDecodeAsNil pseudo-randomly reports a nil value roughly once every
// eight calls — except while decoding a map key (h.mk), which must never
// be nil.
func (h *noopDrv) TryDecodeAsNil() bool {
	return !h.mk && h.m(8) == 0
}
// DecodeExt is a no-op: rv is left untouched and the returned tag is always 0.
func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
	return 0
}
// DecodeNaked fabricates a random naked value: it picks a value kind at
// random and returns a matching pseudo-value (or, for arrays/maps, signals
// the framework to decode further). Map keys are restricted to scalar,
// non-nil kinds (cases 1-7).
func (h *noopDrv) DecodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
	// use h.r (random) not h.m() because h.m() could cause the same value to be given.
	var sk int
	if h.mk {
		// if mapkey, do not support values of nil OR bytes, array, map or rawext
		sk = h.r(7) + 1
	} else {
		sk = h.r(12)
	}
	switch sk {
	case 0:
		vt = valueTypeNil
	case 1:
		vt, v = valueTypeBool, false
	case 2:
		vt, v = valueTypeBool, true
	case 3:
		vt, v = valueTypeInt, h.DecodeInt(64)
	case 4:
		vt, v = valueTypeUint, h.DecodeUint(64)
	case 5:
		vt, v = valueTypeFloat, h.DecodeFloat(true)
	case 6:
		vt, v = valueTypeFloat, h.DecodeFloat(false)
	case 7:
		vt, v = valueTypeString, h.DecodeString()
	case 8:
		vt, v = valueTypeBytes, h.B[h.m(len(h.B))]
	case 9:
		vt, decodeFurther = valueTypeArray, true
	case 10:
		vt, decodeFurther = valueTypeMap, true
	default:
		vt, v = valueTypeExt, &RawExt{Tag: h.DecodeUint(64), Data: h.B[h.m(len(h.B))]}
	}
	h.ct = vt
	return
}
| 2.125 | 2 |
handlebars/base_test.go | imantung/raymond | 1 | 85 | package handlebars
import (
"fmt"
"strings"
"io/ioutil"
"path"
"strconv"
"testing"
"github.com/imantung/mario"
"github.com/imantung/mario/ast"
)
// cf. https://github.com/aymerick/go-fuzz-tests/raymond
// dumpTpl, when enabled, writes every test template to ./dump_tpl/<n> so the
// inputs can be replayed by external fuzzing tools.
const dumpTpl = false

// dumpTplNb is the monotonically increasing filename counter for dumped templates.
var dumpTplNb = 0

// Test describes a single template-rendering test case.
type Test struct {
	name     string                 // label used in failure messages
	input    string                 // template source to parse and execute
	data     interface{}            // root context passed to the template
	privData map[string]interface{} // private data frame values; may be nil
	helpers  map[string]interface{} // helper functions registered on the template
	partials map[string]string      // partial templates, keyed by name
	output   interface{}            // expected output: a string, or []string of accepted alternatives
}
// launchTests parses, configures, and executes each test's template, then
// compares the rendered output against the expected value. test.output may
// be a single string or a []string of acceptable alternatives (useful when
// iteration order makes several renderings valid).
func launchTests(t *testing.T, tests []Test) {
	t.Parallel()

	for _, test := range tests {
		var err error
		var tpl *mario.Template

		if dumpTpl {
			// Dump the raw template input for offline fuzzing/replay.
			filename := strconv.Itoa(dumpTplNb)
			if err := ioutil.WriteFile(path.Join(".", "dump_tpl", filename), []byte(test.input), 0644); err != nil {
				panic(err)
			}

			dumpTplNb++
		}

		// parse template
		tpl, err = mario.New().Parse(test.input)
		if err != nil {
			t.Errorf("Test '%s' failed - Failed to parse template\ninput:\n\t'%s'\nerror:\n\t%s", test.name, test.input, err)
		} else {
			// Register helpers and partials before execution.
			for name, fn := range test.helpers {
				tpl.WithHelperFunc(name, fn)
			}

			for name, source := range test.partials {
				tpl.WithPartial(name, mario.Must(mario.New().Parse(source)))
			}

			// setup private data frame
			var privData *mario.DataFrame
			if test.privData != nil {
				privData = mario.NewDataFrame()
				for k, v := range test.privData {
					privData.Set(k, v)
				}
			}

			// render template
			var b strings.Builder
			if err := tpl.ExecuteWith(&b, test.data, privData); err != nil {
				t.Errorf("Test '%s' failed\ninput:\n\t'%s'\ndata:\n\t%s\nerror:\n\t%s\nAST:\n\t%s", test.name, test.input, mario.Str(test.data), err, ast.Print(tpl.Program()))
			} else {
				output := b.String()
				// check output
				var expectedArr []string
				expectedArr, ok := test.output.([]string)
				if ok {
					// Any one of the listed alternatives counts as a pass.
					match := false
					for _, expectedStr := range expectedArr {
						if expectedStr == output {
							match = true
							break
						}
					}

					if !match {
						t.Errorf("Test '%s' failed\ninput:\n\t'%s'\ndata:\n\t%s\npartials:\n\t%s\nexpected\n\t%q\ngot\n\t%q\nAST:\n%s", test.name, test.input, mario.Str(test.data), mario.Str(test.partials), expectedArr, output, ast.Print(tpl.Program()))
					}
				} else {
					expectedStr, ok := test.output.(string)
					if !ok {
						panic(fmt.Errorf("Erroneous test output description: %q", test.output))
					}

					if expectedStr != output {
						t.Errorf("Test '%s' failed\ninput:\n\t'%s'\ndata:\n\t%s\npartials:\n\t%s\nexpected\n\t%q\ngot\n\t%q\nAST:\n%s", test.name, test.input, mario.Str(test.data), mario.Str(test.partials), expectedStr, output, ast.Print(tpl.Program()))
					}
				}
			}
		}
	}
}
| 1.703125 | 2 |
pkg/render/manager.go | IoannisMatzaris/operator | 0 | 93 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package render
import (
"fmt"
"strconv"
"strings"
ocsv1 "github.com/openshift/api/security/v1"
v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"
operatorv1 "github.com/tigera/operator/api/v1"
"github.com/tigera/operator/pkg/common"
"github.com/tigera/operator/pkg/components"
"github.com/tigera/operator/pkg/render/common/authentication"
tigerakvc "github.com/tigera/operator/pkg/render/common/authentication/tigera/key_validator_config"
"github.com/tigera/operator/pkg/render/common/configmap"
relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch"
rkibana "github.com/tigera/operator/pkg/render/common/kibana"
rmeta "github.com/tigera/operator/pkg/render/common/meta"
"github.com/tigera/operator/pkg/render/common/podaffinity"
"github.com/tigera/operator/pkg/render/common/podsecuritycontext"
"github.com/tigera/operator/pkg/render/common/podsecuritypolicy"
"github.com/tigera/operator/pkg/render/common/secret"
"github.com/tigera/operator/pkg/tls/certificatemanagement"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
policyv1beta1 "k8s.io/api/policy/v1beta1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
	// Port the manager Service exposes and the port its containers listen on.
	managerPort       = 9443
	managerTargetPort = 9443

	ManagerServiceName    = "tigera-manager"
	ManagerNamespace      = "tigera-manager"
	ManagerServiceIP      = "localhost"
	ManagerServiceAccount = "tigera-manager"
	ManagerClusterRole    = "tigera-manager-role"
	ManagerClusterRoleBinding = "tigera-manager-binding"
	// TLS secret names for external (ManagerTLSSecretName) and in-cluster
	// (ManagerInternalTLSSecretName) manager traffic.
	ManagerTLSSecretName         = "manager-tls"
	ManagerInternalTLSSecretName = "internal-manager-tls"

	// UISettingsGroup names and the cluster-wide UISettings created for the UI.
	ManagerClusterSettings             = "cluster-settings"
	ManagerUserSettings                = "user-settings"
	ManagerClusterSettingsLayerTigera  = "cluster-settings.layer.tigera-infrastructure"
	ManagerClusterSettingsViewDefault  = "cluster-settings.view.default"

	ElasticsearchManagerUserSecret  = "tigera-ee-manager-elasticsearch-access"
	// Pod-template annotation keys used to roll the deployment when the
	// referenced secrets change.
	TlsSecretHashAnnotation         = "hash.operator.tigera.io/tls-secret"
	KibanaTLSHashAnnotation         = "hash.operator.tigera.io/kibana-secrets"
	ElasticsearchUserHashAnnotation = "hash.operator.tigera.io/elasticsearch-user"
	PrometheusTLSSecretName         = "calico-node-prometheus-tls"
)

// ManagementClusterConnection configuration constants
const (
	VoltronName              = "tigera-voltron"
	VoltronTunnelSecretName  = "tigera-management-cluster-connection"
	defaultVoltronPort       = "9443"
	// Port Voltron listens on for tunnel connections from managed clusters.
	defaultTunnelVoltronPort = "9449"
)
// Manager returns a component for rendering the Tigera Secure manager
// resources (UI backend, Voltron proxy, ES proxy, and supporting RBAC).
//
// It pre-computes the pod-template annotations that hash the TLS material
// so the deployment rolls whenever a certificate or trusted bundle changes.
func Manager(cfg *ManagerConfiguration) (Component, error) {
	var tlsSecrets []*corev1.Secret
	tlsAnnotations := cfg.TrustedCertBundle.HashAnnotations()
	tlsAnnotations[KibanaTLSHashAnnotation] = rmeta.SecretsAnnotationHash(cfg.KibanaSecrets...)
	tlsAnnotations[cfg.TLSKeyPair.HashAnnotationKey()] = cfg.TLSKeyPair.HashAnnotationValue()

	if cfg.KeyValidatorConfig != nil {
		// OIDC/Dex: carry the validator's secrets and restart annotations too.
		tlsSecrets = append(tlsSecrets, cfg.KeyValidatorConfig.RequiredSecrets(ManagerNamespace)...)
		for key, value := range cfg.KeyValidatorConfig.RequiredAnnotations() {
			tlsAnnotations[key] = value
		}
	}

	if cfg.ManagementCluster != nil {
		tlsAnnotations[cfg.InternalTrafficSecret.HashAnnotationKey()] = cfg.InternalTrafficSecret.HashAnnotationValue()
		// BUGFIX: previously the tunnel secret's annotation was assigned the
		// internal-traffic secret's hash, so rotating the tunnel certificate
		// never triggered a rolling restart of the manager deployment.
		tlsAnnotations[cfg.TunnelSecret.HashAnnotationKey()] = cfg.TunnelSecret.HashAnnotationValue()
	}

	return &managerComponent{
		cfg:            cfg,
		tlsSecrets:     tlsSecrets,
		tlsAnnotations: tlsAnnotations,
	}, nil
}
// ManagerConfiguration contains all the config information needed to render the component.
type ManagerConfiguration struct {
KeyValidatorConfig authentication.KeyValidatorConfig
ESSecrets []*corev1.Secret
KibanaSecrets []*corev1.Secret
TrustedCertBundle certificatemanagement.TrustedBundle
ESClusterConfig *relasticsearch.ClusterConfig
TLSKeyPair certificatemanagement.KeyPairInterface
PullSecrets []*corev1.Secret
Openshift bool
Installation *operatorv1.InstallationSpec
ManagementCluster *operatorv1.ManagementCluster
TunnelSecret certificatemanagement.KeyPairInterface
InternalTrafficSecret certificatemanagement.KeyPairInterface
ClusterDomain string
ESLicenseType ElasticsearchLicenseType
Replicas *int32
}
// managerComponent renders all manager resources. The image fields are
// populated by ResolveImages before Objects is called.
type managerComponent struct {
	cfg            *ManagerConfiguration
	tlsSecrets     []*corev1.Secret  // secrets emitted alongside the deployment (see getTLSObjects)
	tlsAnnotations map[string]string // pod-template annotations forcing restarts on cert change
	managerImage   string
	proxyImage     string
	esProxyImage   string
}
// ResolveImages resolves the manager, Voltron proxy, and ES proxy image
// references from the installation's registry settings and the optional
// ImageSet. Lookup failures are collected so a single error reports every
// missing image at once.
func (c *managerComponent) ResolveImages(is *operatorv1.ImageSet) error {
	reg := c.cfg.Installation.Registry
	path := c.cfg.Installation.ImagePath
	prefix := c.cfg.Installation.ImagePrefix

	var err error
	var errMsgs []string

	c.managerImage, err = components.GetReference(components.ComponentManager, reg, path, prefix, is)
	if err != nil {
		errMsgs = append(errMsgs, err.Error())
	}

	c.proxyImage, err = components.GetReference(components.ComponentManagerProxy, reg, path, prefix, is)
	if err != nil {
		errMsgs = append(errMsgs, err.Error())
	}

	c.esProxyImage, err = components.GetReference(components.ComponentEsProxy, reg, path, prefix, is)
	if err != nil {
		errMsgs = append(errMsgs, err.Error())
	}

	if len(errMsgs) != 0 {
		// Use an explicit format verb: passing the joined string directly as
		// the format argument fails `go vet` and would misinterpret any '%'
		// contained in the underlying error messages.
		return fmt.Errorf("%s", strings.Join(errMsgs, ","))
	}
	return nil
}
// SupportedOSType reports the only OS the manager component runs on: Linux.
func (c *managerComponent) SupportedOSType() rmeta.OSType {
	return rmeta.OSTypeLinux
}
// Objects returns the objects to create/update for the manager component
// (first return) and the objects to delete (second return, always nil here).
// Ordering matters: namespace and pull secrets first, then RBAC/UI settings,
// TLS material, the Service, platform security objects, copied secrets, and
// finally the Deployment.
func (c *managerComponent) Objects() ([]client.Object, []client.Object) {
	objs := []client.Object{
		CreateNamespace(ManagerNamespace, c.cfg.Installation.KubernetesProvider),
	}
	objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(ManagerNamespace, c.cfg.PullSecrets...)...)...)
	objs = append(objs,
		managerServiceAccount(),
		managerClusterRole(c.cfg.ManagementCluster != nil, false, c.cfg.Openshift),
		managerClusterRoleBinding(),
		managerClusterWideSettingsGroup(),
		managerUserSpecificSettingsGroup(),
		managerClusterWideTigeraLayer(),
		managerClusterWideDefaultView(),
	)
	objs = append(objs, c.getTLSObjects()...)
	objs = append(objs,
		c.managerService(),
	)

	// If we're running on openshift, we need to add in an SCC.
	if c.cfg.Openshift {
		objs = append(objs, c.securityContextConstraints())
	} else {
		// If we're not running openshift, we need to add pod security policies.
		objs = append(objs, c.managerPodSecurityPolicy())
	}
	objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(ManagerNamespace, c.cfg.ESSecrets...)...)...)
	objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(ManagerNamespace, c.cfg.KibanaSecrets...)...)...)
	objs = append(objs, c.managerDeployment())
	if c.cfg.KeyValidatorConfig != nil {
		objs = append(objs, configmap.ToRuntimeObjects(c.cfg.KeyValidatorConfig.RequiredConfigMaps(ManagerNamespace)...)...)
	}

	return objs, nil
}
// Ready reports whether the component can be rendered; the manager has no
// external preconditions, so this is always true.
func (c *managerComponent) Ready() bool {
	return true
}
// managerDeployment creates a deployment for the Tigera Secure manager component.
func (c *managerComponent) managerDeployment() *appsv1.Deployment {
var initContainers []corev1.Container
if c.cfg.TLSKeyPair.UseCertificateManagement() {
initContainers = append(initContainers, c.cfg.TLSKeyPair.InitContainer(ManagerNamespace))
}
podTemplate := relasticsearch.DecorateAnnotations(&corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: "tigera-manager",
Namespace: ManagerNamespace,
Labels: map[string]string{
"k8s-app": "tigera-manager",
},
Annotations: c.tlsAnnotations,
},
Spec: relasticsearch.PodSpecDecorate(corev1.PodSpec{
NodeSelector: c.cfg.Installation.ControlPlaneNodeSelector,
ServiceAccountName: ManagerServiceAccount,
Tolerations: c.managerTolerations(),
ImagePullSecrets: secret.GetReferenceList(c.cfg.PullSecrets),
InitContainers: initContainers,
Containers: []corev1.Container{
relasticsearch.ContainerDecorate(c.managerContainer(), c.cfg.ESClusterConfig.ClusterName(), ElasticsearchManagerUserSecret, c.cfg.ClusterDomain, c.SupportedOSType()),
relasticsearch.ContainerDecorate(c.managerEsProxyContainer(), c.cfg.ESClusterConfig.ClusterName(), ElasticsearchManagerUserSecret, c.cfg.ClusterDomain, c.SupportedOSType()),
c.managerProxyContainer(),
},
Volumes: c.managerVolumes(),
}),
}, c.cfg.ESClusterConfig, c.cfg.ESSecrets).(*corev1.PodTemplateSpec)
if c.cfg.Replicas != nil && *c.cfg.Replicas > 1 {
podTemplate.Spec.Affinity = podaffinity.NewPodAntiAffinity("tigera-manager", ManagerNamespace)
}
d := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
ObjectMeta: metav1.ObjectMeta{
Name: "tigera-manager",
Namespace: ManagerNamespace,
Labels: map[string]string{
"k8s-app": "tigera-manager",
},
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "tigera-manager",
},
},
Replicas: c.cfg.Replicas,
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RecreateDeploymentStrategyType,
},
Template: *podTemplate,
},
}
return d
}
// managerVolumes returns the volumes for the Tigera Secure manager component.
func (c *managerComponent) managerVolumeMounts() []corev1.VolumeMount {
if c.cfg.KeyValidatorConfig != nil {
return c.cfg.KeyValidatorConfig.RequiredVolumeMounts()
}
return []corev1.VolumeMount{}
}
// managerVolumes returns the volumes for the Tigera Secure manager component.
func (c *managerComponent) managerVolumes() []corev1.Volume {
v := []corev1.Volume{
c.cfg.TLSKeyPair.Volume(),
c.cfg.TrustedCertBundle.Volume(),
{
Name: KibanaPublicCertSecret,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: KibanaPublicCertSecret,
},
},
},
}
if c.cfg.ManagementCluster != nil {
v = append(v,
c.cfg.InternalTrafficSecret.Volume(),
c.cfg.TunnelSecret.Volume(),
)
}
if c.cfg.KeyValidatorConfig != nil {
v = append(v, c.cfg.KeyValidatorConfig.RequiredVolumes()...)
}
return v
}
// managerProbe returns the probe for the manager container.
func (c *managerComponent) managerProbe() *corev1.Probe {
return &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(managerPort),
Scheme: corev1.URISchemeHTTPS,
},
},
InitialDelaySeconds: 90,
PeriodSeconds: 10,
}
}
// managerEsProxyProbe returns the probe for the ES proxy container.
func (c *managerComponent) managerEsProxyProbe() *corev1.Probe {
return &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/tigera-elasticsearch/version",
Port: intstr.FromInt(managerPort),
Scheme: corev1.URISchemeHTTPS,
},
},
InitialDelaySeconds: 90,
PeriodSeconds: 10,
}
}
// managerProxyProbe returns the probe for the proxy container.
func (c *managerComponent) managerProxyProbe() *corev1.Probe {
return &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/voltron/api/health",
Port: intstr.FromInt(managerPort),
Scheme: corev1.URISchemeHTTPS,
},
},
InitialDelaySeconds: 90,
PeriodSeconds: 10,
}
}
// managerEnvVars returns the envvars for the manager container.
func (c *managerComponent) managerEnvVars() []corev1.EnvVar {
envs := []corev1.EnvVar{
{Name: "CNX_PROMETHEUS_API_URL", Value: fmt.Sprintf("/api/v1/namespaces/%s/services/calico-node-prometheus:9090/proxy/api/v1", common.TigeraPrometheusNamespace)},
{Name: "CNX_COMPLIANCE_REPORTS_API_URL", Value: "/compliance/reports"},
{Name: "CNX_QUERY_API_URL", Value: "/api/v1/namespaces/tigera-system/services/https:tigera-api:8080/proxy"},
{Name: "CNX_ELASTICSEARCH_API_URL", Value: "/tigera-elasticsearch"},
{Name: "CNX_ELASTICSEARCH_KIBANA_URL", Value: fmt.Sprintf("/%s", KibanaBasePath)},
{Name: "CNX_ENABLE_ERROR_TRACKING", Value: "false"},
{Name: "CNX_ALP_SUPPORT", Value: "true"},
{Name: "CNX_CLUSTER_NAME", Value: "cluster"},
{Name: "CNX_POLICY_RECOMMENDATION_SUPPORT", Value: "true"},
{Name: "ENABLE_MULTI_CLUSTER_MANAGEMENT", Value: strconv.FormatBool(c.cfg.ManagementCluster != nil)},
}
envs = append(envs, c.managerOAuth2EnvVars()...)
return envs
}
// managerContainer returns the manager container.
func (c *managerComponent) managerContainer() corev1.Container {
tm := corev1.Container{
Name: "tigera-manager",
Image: c.managerImage,
Env: c.managerEnvVars(),
LivenessProbe: c.managerProbe(),
SecurityContext: podsecuritycontext.NewBaseContext(),
VolumeMounts: c.managerVolumeMounts(),
}
return tm
}
// managerOAuth2EnvVars returns the OAuth2/OIDC envvars depending on the authentication type.
func (c *managerComponent) managerOAuth2EnvVars() []corev1.EnvVar {
var envs []corev1.EnvVar
if c.cfg.KeyValidatorConfig == nil {
envs = []corev1.EnvVar{{Name: "CNX_WEB_AUTHENTICATION_TYPE", Value: "Token"}}
} else {
envs = []corev1.EnvVar{
{Name: "CNX_WEB_AUTHENTICATION_TYPE", Value: "OIDC"},
{Name: "CNX_WEB_OIDC_CLIENT_ID", Value: c.cfg.KeyValidatorConfig.ClientID()}}
switch c.cfg.KeyValidatorConfig.(type) {
case *DexKeyValidatorConfig:
envs = append(envs, corev1.EnvVar{Name: "CNX_WEB_OIDC_AUTHORITY", Value: c.cfg.KeyValidatorConfig.Issuer()})
case *tigerakvc.KeyValidatorConfig:
envs = append(envs, corev1.EnvVar{Name: "CNX_WEB_OIDC_AUTHORITY", Value: ""})
}
}
return envs
}
// managerProxyContainer returns the container for the manager proxy container.
func (c *managerComponent) managerProxyContainer() corev1.Container {
var keyPath, certPath, intKeyPath, intCertPath, tunnelKeyPath, tunnelCertPath string
if c.cfg.TLSKeyPair != nil {
keyPath, certPath = c.cfg.TLSKeyPair.VolumeMountKeyFilePath(), c.cfg.TLSKeyPair.VolumeMountCertificateFilePath()
}
if c.cfg.InternalTrafficSecret != nil {
intKeyPath, intCertPath = c.cfg.InternalTrafficSecret.VolumeMountKeyFilePath(), c.cfg.InternalTrafficSecret.VolumeMountCertificateFilePath()
}
if c.cfg.TunnelSecret != nil {
tunnelKeyPath, tunnelCertPath = c.cfg.TunnelSecret.VolumeMountKeyFilePath(), c.cfg.TunnelSecret.VolumeMountCertificateFilePath()
}
env := []corev1.EnvVar{
{Name: "VOLTRON_PORT", Value: defaultVoltronPort},
{Name: "VOLTRON_COMPLIANCE_ENDPOINT", Value: fmt.Sprintf("https://compliance.%s.svc.%s", ComplianceNamespace, c.cfg.ClusterDomain)},
{Name: "VOLTRON_LOGLEVEL", Value: "Info"},
{Name: "VOLTRON_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)},
{Name: "VOLTRON_KIBANA_BASE_PATH", Value: fmt.Sprintf("/%s/", KibanaBasePath)},
{Name: "VOLTRON_KIBANA_CA_BUNDLE_PATH", Value: "/certs/kibana/tls.crt"},
{Name: "VOLTRON_PACKET_CAPTURE_CA_BUNDLE_PATH", Value: c.cfg.TrustedCertBundle.MountPath()},
{Name: "VOLTRON_PROMETHEUS_CA_BUNDLE_PATH", Value: c.cfg.TrustedCertBundle.MountPath()},
{Name: "VOLTRON_COMPLIANCE_CA_BUNDLE_PATH", Value: c.cfg.TrustedCertBundle.MountPath()},
{Name: "VOLTRON_HTTPS_KEY", Value: keyPath},
{Name: "VOLTRON_HTTPS_CERT", Value: certPath},
{Name: "VOLTRON_TUNNEL_KEY", Value: tunnelKeyPath},
{Name: "VOLTRON_TUNNEL_CERT", Value: tunnelCertPath},
{Name: "VOLTRON_INTERNAL_HTTPS_KEY", Value: intKeyPath},
{Name: "VOLTRON_INTERNAL_HTTPS_CERT", Value: intCertPath},
{Name: "VOLTRON_ENABLE_MULTI_CLUSTER_MANAGEMENT", Value: strconv.FormatBool(c.cfg.ManagementCluster != nil)},
{Name: "VOLTRON_TUNNEL_PORT", Value: defaultTunnelVoltronPort},
{Name: "VOLTRON_DEFAULT_FORWARD_SERVER", Value: "tigera-secure-es-gateway-http.tigera-elasticsearch.svc:9200"},
}
if c.cfg.KeyValidatorConfig != nil {
env = append(env, c.cfg.KeyValidatorConfig.RequiredEnv("VOLTRON_")...)
}
if _, ok := c.cfg.TrustedCertBundle.HashAnnotations()[complianceServerTLSHashAnnotation]; !ok {
env = append(env, corev1.EnvVar{Name: "VOLTRON_ENABLE_COMPLIANCE", Value: "false"})
}
return corev1.Container{
Name: VoltronName,
Image: c.proxyImage,
Env: env,
VolumeMounts: c.volumeMountsForProxyManager(),
LivenessProbe: c.managerProxyProbe(),
SecurityContext: podsecuritycontext.NewBaseContext(),
}
}
// volumeMountsForProxyManager returns the volume mounts for the Voltron
// proxy container: manager TLS, the Kibana public cert, and the trusted CA
// bundle, plus tunnel/internal-traffic certs on management clusters and any
// key validator mounts.
func (c *managerComponent) volumeMountsForProxyManager() []corev1.VolumeMount {
	vm := []corev1.VolumeMount{
		{Name: ManagerTLSSecretName, MountPath: "/manager-tls", ReadOnly: true},
		{Name: KibanaPublicCertSecret, MountPath: "/certs/kibana", ReadOnly: true},
		c.cfg.TrustedCertBundle.VolumeMount(),
	}

	if c.cfg.ManagementCluster != nil {
		vm = append(vm, c.cfg.InternalTrafficSecret.VolumeMount(), c.cfg.TunnelSecret.VolumeMount())
	}

	if kvc := c.cfg.KeyValidatorConfig; kvc != nil {
		vm = append(vm, kvc.RequiredVolumeMounts()...)
	}

	return vm
}
// managerEsProxyContainer returns the ES proxy container
func (c *managerComponent) managerEsProxyContainer() corev1.Container {
env := []corev1.EnvVar{
{Name: "ELASTIC_LICENSE_TYPE", Value: string(c.cfg.ESLicenseType)},
{Name: "ELASTIC_KIBANA_ENDPOINT", Value: rkibana.HTTPSEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain)},
}
var volumeMounts []corev1.VolumeMount
if c.cfg.ManagementCluster != nil {
volumeMounts = append(volumeMounts, c.cfg.TrustedCertBundle.VolumeMount())
env = append(env, corev1.EnvVar{Name: "VOLTRON_CA_PATH", Value: certificatemanagement.TrustedCertBundleMountPath})
}
if c.cfg.KeyValidatorConfig != nil {
env = append(env, c.cfg.KeyValidatorConfig.RequiredEnv("")...)
volumeMounts = append(volumeMounts, c.cfg.KeyValidatorConfig.RequiredVolumeMounts()...)
}
return corev1.Container{
Name: "tigera-es-proxy",
Image: c.esProxyImage,
LivenessProbe: c.managerEsProxyProbe(),
SecurityContext: podsecuritycontext.NewBaseContext(),
Env: env,
VolumeMounts: volumeMounts,
}
}
// managerTolerations returns the tolerations for the Tigera Secure manager deployment pods.
func (c *managerComponent) managerTolerations() []corev1.Toleration {
return append(c.cfg.Installation.ControlPlaneTolerations, rmeta.TolerateMaster, rmeta.TolerateCriticalAddonsOnly)
}
// managerService returns the service exposing the Tigera Secure web app.
func (c *managerComponent) managerService() *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{
Name: "tigera-manager",
Namespace: ManagerNamespace,
},
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Port: managerPort,
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(managerTargetPort),
},
},
Selector: map[string]string{
"k8s-app": "tigera-manager",
},
},
}
}
// managerServiceAccount creates the serviceaccount used by the Tigera Secure web app.
func managerServiceAccount() *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"},
ObjectMeta: metav1.ObjectMeta{Name: ManagerServiceAccount, Namespace: ManagerNamespace},
}
}
// managerClusterRole returns a clusterrole that allows authn/authz review requests.
func managerClusterRole(managementCluster, managedCluster, openshift bool) *rbacv1.ClusterRole {
cr := &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"},
ObjectMeta: metav1.ObjectMeta{
Name: ManagerClusterRole,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"authorization.k8s.io"},
Resources: []string{"subjectaccessreviews"},
Verbs: []string{"create"},
},
{
APIGroups: []string{"authentication.k8s.io"},
Resources: []string{"tokenreviews"},
Verbs: []string{"create"},
},
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{
"networksets",
"globalnetworksets",
"globalnetworkpolicies",
"tier.globalnetworkpolicies",
"networkpolicies",
"tier.networkpolicies",
"stagedglobalnetworkpolicies",
"tier.stagedglobalnetworkpolicies",
"stagednetworkpolicies",
"tier.stagednetworkpolicies",
"stagedkubernetesnetworkpolicies",
},
Verbs: []string{"list"},
},
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{
"tiers",
},
Verbs: []string{"get", "list"},
},
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{
"hostendpoints",
},
Verbs: []string{"list"},
},
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{
"felixconfigurations",
},
ResourceNames: []string{
"default",
},
Verbs: []string{"get"},
},
{
APIGroups: []string{"projectcalico.org"},
Resources: []string{
"alertexceptions",
},
Verbs: []string{"get", "list", "update"},
},
{
APIGroups: []string{"networking.k8s.io"},
Resources: []string{"networkpolicies"},
Verbs: []string{"get", "list"},
},
{
APIGroups: []string{""},
Resources: []string{"serviceaccounts", "namespaces", "nodes", "events", "services", "pods"},
Verbs: []string{"list"},
},
{
APIGroups: []string{"apps"},
Resources: []string{"replicasets", "statefulsets", "daemonsets"},
Verbs: []string{"list"},
},
// When a request is made in the manager UI, they are proxied through the Voltron backend server. If the
// request is targeting a k8s api or when it is targeting a managed cluster, Voltron will authenticate the
// user based on the auth header and then impersonate the user.
{
APIGroups: []string{""},
Resources: []string{"users", "groups", "serviceaccounts"},
Verbs: []string{"impersonate"},
},
},
}
if !managedCluster {
cr.Rules = append(cr.Rules,
rbacv1.PolicyRule{
APIGroups: []string{"projectcalico.org"},
Resources: []string{"managedclusters"},
Verbs: []string{"list", "get", "watch", "update"},
},
)
}
if !openshift {
// Allow access to the pod security policy in case this is enforced on the cluster
cr.Rules = append(cr.Rules,
rbacv1.PolicyRule{
APIGroups: []string{"policy"},
Resources: []string{"podsecuritypolicies"},
Verbs: []string{"use"},
ResourceNames: []string{"tigera-manager"},
},
)
}
return cr
}
// managerClusterRoleBinding returns a clusterrolebinding that gives the tigera-manager serviceaccount
// the permissions in the tigera-manager-role.
// managerClusterRoleBinding binds the tigera-manager serviceaccount to the
// tigera-manager-role ClusterRole.
func managerClusterRoleBinding() *rbacv1.ClusterRoleBinding {
	return &rbacv1.ClusterRoleBinding{
		TypeMeta:   metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"},
		ObjectMeta: metav1.ObjectMeta{Name: ManagerClusterRoleBinding},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     ManagerClusterRole,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      ManagerServiceAccount,
				Namespace: ManagerNamespace,
			},
		},
	}
}
// TODO: Can we get rid of this and instead just bind to default ones?
func (c *managerComponent) securityContextConstraints() *ocsv1.SecurityContextConstraints {
privilegeEscalation := false
return &ocsv1.SecurityContextConstraints{
TypeMeta: metav1.TypeMeta{Kind: "SecurityContextConstraints", APIVersion: "security.openshift.io/v1"},
ObjectMeta: metav1.ObjectMeta{Name: ManagerNamespace},
AllowHostDirVolumePlugin: true,
AllowHostIPC: false,
AllowHostNetwork: false,
AllowHostPID: true,
AllowHostPorts: false,
AllowPrivilegeEscalation: &privilegeEscalation,
AllowPrivilegedContainer: false,
FSGroup: ocsv1.FSGroupStrategyOptions{Type: ocsv1.FSGroupStrategyRunAsAny},
RunAsUser: ocsv1.RunAsUserStrategyOptions{Type: ocsv1.RunAsUserStrategyRunAsAny},
ReadOnlyRootFilesystem: false,
SELinuxContext: ocsv1.SELinuxContextStrategyOptions{Type: ocsv1.SELinuxStrategyMustRunAs},
SupplementalGroups: ocsv1.SupplementalGroupsStrategyOptions{Type: ocsv1.SupplementalGroupsStrategyRunAsAny},
Users: []string{fmt.Sprintf("system:serviceaccount:%s:tigera-manager", ManagerNamespace)},
Volumes: []ocsv1.FSType{"*"},
}
}
// getTLSObjects returns the manager's TLS secrets as a generic client.Object
// slice so they can be appended to the rendered object list.
func (c *managerComponent) getTLSObjects() []client.Object {
	// Pre-size the slice: the final length is known, so avoid repeated growth.
	objs := make([]client.Object, 0, len(c.tlsSecrets))
	for _, s := range c.tlsSecrets {
		objs = append(objs, s)
	}
	return objs
}
// managerPodSecurityPolicy returns the base PodSecurityPolicy, renamed for the
// tigera-manager, to be used where PSPs are enforced on the cluster.
func (c *managerComponent) managerPodSecurityPolicy() *policyv1beta1.PodSecurityPolicy {
	psp := podsecuritypolicy.NewBasePolicy()
	psp.GetObjectMeta().SetName("tigera-manager")
	return psp
}
// managerClusterWideSettingsGroup returns a UISettingsGroup with the description "cluster-wide settings"
//
// Calico Enterprise only
func managerClusterWideSettingsGroup() *v3.UISettingsGroup {
	return &v3.UISettingsGroup{
		TypeMeta: metav1.TypeMeta{Kind: "UISettingsGroup", APIVersion: "projectcalico.org/v3"},
		ObjectMeta: metav1.ObjectMeta{
			Name: ManagerClusterSettings,
		},
		Spec: v3.UISettingsGroupSpec{
			Description: "Cluster Settings",
		},
	}
}
// managerUserSpecificSettingsGroup returns a UISettingsGroup with the description "user settings"
//
// Calico Enterprise only
func managerUserSpecificSettingsGroup() *v3.UISettingsGroup {
	return &v3.UISettingsGroup{
		TypeMeta: metav1.TypeMeta{Kind: "UISettingsGroup", APIVersion: "projectcalico.org/v3"},
		ObjectMeta: metav1.ObjectMeta{
			Name: ManagerUserSettings,
		},
		Spec: v3.UISettingsGroupSpec{
			Description: "User Settings",
			// NOTE(review): FilterTypeUser presumably scopes settings in this
			// group to the requesting user — confirm against the v3 API docs.
			FilterType: v3.FilterTypeUser,
		},
	}
}
// managerClusterWideTigeraLayer returns a UISettings layer, belonging to the
// cluster-wide settings group, that bundles every tigera/calico namespace into
// a single "Tigera Infrastructure" graph layer.
//
// Calico Enterprise only
func managerClusterWideTigeraLayer() *v3.UISettings {
	namespaces := []string{
		"tigera-compliance",
		"tigera-dex",
		"tigera-dpi",
		"tigera-eck-operator",
		"tigera-elasticsearch",
		"tigera-fluentd",
		"tigera-guardian",
		"tigera-intrusion-detection",
		"tigera-kibana",
		"tigera-manager",
		"tigera-operator",
		"tigera-packetcapture",
		"tigera-prometheus",
		"tigera-system",
		"calico-system",
	}
	// One graph node per namespace.
	nodes := make([]v3.UIGraphNode, 0, len(namespaces))
	for _, ns := range namespaces {
		nodes = append(nodes, v3.UIGraphNode{
			ID:   "namespace/" + ns,
			Type: "namespace",
			Name: ns,
		})
	}
	return &v3.UISettings{
		TypeMeta: metav1.TypeMeta{Kind: "UISettings", APIVersion: "projectcalico.org/v3"},
		ObjectMeta: metav1.ObjectMeta{
			Name: ManagerClusterSettingsLayerTigera,
		},
		Spec: v3.UISettingsSpec{
			Group:       "cluster-settings",
			Description: "Tigera Infrastructure",
			Layer: &v3.UIGraphLayer{
				Nodes: nodes,
			},
		},
	}
}
// managerClusterWideDefaultView returns a UISettings view belonging to the cluster-wide settings group that shows
// everything and uses the tigera-infrastructure layer.
//
// Calico Enterprise only
func managerClusterWideDefaultView() *v3.UISettings {
	return &v3.UISettings{
		TypeMeta: metav1.TypeMeta{Kind: "UISettings", APIVersion: "projectcalico.org/v3"},
		ObjectMeta: metav1.ObjectMeta{
			Name: ManagerClusterSettingsViewDefault,
		},
		Spec: v3.UISettingsSpec{
			Group:       "cluster-settings",
			Description: "Default",
			View: &v3.UIGraphView{
				// The single node references the tigera-infrastructure layer
				// created by managerClusterWideTigeraLayer.
				Nodes: []v3.UIGraphNodeView{{
					UIGraphNode: v3.UIGraphNode{
						ID:   "layer/cluster-settings.layer.tigera-infrastructure",
						Type: "layer",
						Name: "cluster-settings.layer.tigera-infrastructure",
					},
				}},
			},
		},
	}
}
| 1.132813 | 1 |
internal/pkg/edgectl/install/ui.go | Asher-Wang/ambassador | 0 | 101 | package edgectl
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"log"
"os"
"os/exec"
"regexp"
"strings"
"time"
"github.com/pkg/errors"
)
// validEmailAddress is an RFC 5322-style sanity check for user-supplied email
// addresses (local part plus dotted domain labels); it is not full validation.
var validEmailAddress = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$")
// getEmailAddress prompts on stdin for an email address until a valid one is
// entered. An empty answer accepts defaultEmail when one was supplied. On a
// read error or EOF it returns the fallback: defaultEmail if non-empty,
// otherwise a placeholder address.
func getEmailAddress(defaultEmail string, log *log.Logger) string {
	prompt := fmt.Sprintf("Email address [%s]: ", defaultEmail)
	errorFallback := defaultEmail
	if defaultEmail == "" {
		prompt = "Email address: "
		errorFallback = "<EMAIL>"
	}
	// Create the scanner once: a fresh Scanner per attempt could discard input
	// already buffered from stdin.
	scanner := bufio.NewScanner(os.Stdin)
	for {
		fmt.Print(prompt)
		if !scanner.Scan() {
			// Scan returns false on EOF (Err() == nil) or a read error. The
			// original re-prompted here, which loops forever on a closed
			// stdin; bail out with the fallback instead.
			if err := scanner.Err(); err != nil {
				log.Printf("Email query failed: %+v", err)
			}
			return errorFallback
		}
		text := strings.TrimSpace(scanner.Text())
		if defaultEmail != "" && text == "" {
			return defaultEmail
		}
		if validEmailAddress.MatchString(text) {
			return text
		}
		fmt.Printf("Sorry, %q does not appear to be a valid email address. Please check it and try again.\n", text)
	}
}
// AskEmail determines the user's email address: it seeds a default from the
// global git config (discarded if invalid), then prompts interactively. The
// prompt runs in a goroutine so that cancellation of i.ctx (e.g. Ctrl-C)
// aborts the wait; in that case an email-request error Result is returned.
func (i *Installer) AskEmail() (string, Result) {
	// Attempt to grab a reasonable default for the user's email address
	defaultEmail, err := i.Capture("get email", true, "", "git", "config", "--global", "user.email")
	if err != nil {
		i.log.Print(err)
		defaultEmail = ""
	} else {
		defaultEmail = strings.TrimSpace(defaultEmail)
		if !validEmailAddress.MatchString(defaultEmail) {
			defaultEmail = ""
		}
	}
	// Ask for the user's email address
	i.ShowRequestEmail()
	// Do the goroutine dance to let the user hit Ctrl-C at the email prompt.
	// Note: if the context is cancelled the prompt goroutine is abandoned,
	// still blocked on stdin; the process is expected to exit shortly after.
	gotEmail := make(chan string)
	var emailAddress string
	go func() {
		gotEmail <- getEmailAddress(defaultEmail, i.log)
		close(gotEmail)
	}()
	select {
	case emailAddress = <-gotEmail:
		// Continue
	case <-i.ctx.Done():
		return "", i.resEmailRequestError(errors.New("Interrupted"))
	}
	i.log.Printf("Using email address %q", emailAddress)
	return emailAddress, Result{}
}
// LoopFailedError is a fatal error for loopUntil(...): returning it from the
// polled function aborts the loop immediately instead of retrying.
type LoopFailedError string
// Error implements error
func (s LoopFailedError) Error() string {
	return string(s)
}
// loopConfig tunes the retry cadence of loopUntil.
type loopConfig struct {
	sleepTime    time.Duration // How long to sleep between calls
	progressTime time.Duration // How long until we explain why we're waiting
	timeout      time.Duration // How long until we give up
}
// lc2: fast polling, ~2 minute overall budget.
var lc2 = &loopConfig{
	sleepTime:    500 * time.Millisecond,
	progressTime: 15 * time.Second,
	timeout:      120 * time.Second,
}
// lc5: slower polling, 5 minute overall budget.
var lc5 = &loopConfig{
	sleepTime:    3 * time.Second,
	progressTime: 30 * time.Second,
	timeout:      5 * time.Minute,
}
// lc10: slower polling, 10 minute overall budget.
var lc10 = &loopConfig{
	sleepTime:    3 * time.Second,
	progressTime: 30 * time.Second,
	timeout:      10 * time.Minute,
}
// loopUntil repeatedly calls a function until it succeeds, using a
// (presently-fixed) loop period and timeout.
//
// `how` is retried every lc.sleepTime until it returns nil (success), returns
// a LoopFailedError (immediate failure), or the lc.timeout/context deadline
// expires. After lc.progressTime a one-off "still waiting" message is shown.
func (i *Installer) loopUntil(what string, how func() error, lc *loopConfig) error {
	ctx, cancel := context.WithTimeout(i.ctx, lc.timeout)
	defer cancel()
	start := time.Now()
	i.log.Printf("Waiting for %s", what)
	defer func() { i.log.Printf("Wait for %s took %.1f seconds", what, time.Since(start).Seconds()) }()
	// progTimer fires once; ShowWaiting is therefore shown at most one time.
	progTimer := time.NewTimer(lc.progressTime)
	defer progTimer.Stop()
	for {
		err := how()
		if err == nil {
			return nil // Success
		} else if _, ok := err.(LoopFailedError); ok {
			return err // Immediate failure
		}
		// Wait and try again
		// NOTE: time.After allocates a fresh timer each iteration; fine at
		// this polling rate, but not a pattern for hot loops.
		select {
		case <-progTimer.C:
			i.ShowWaiting(what)
		case <-time.After(lc.sleepTime):
			// Try again
		case <-ctx.Done():
			i.ShowTimedOut(what)
			return errors.Errorf("timed out waiting for %s (or interrupted)", what)
		}
	}
}
// ShowWrapped displays the given text items to the user (via the show logger)
// with 79-column word wrapping. Leading/trailing newlines of each item are
// stripped (handy for multiline constants) while interior newlines are kept
// as paragraph breaks; pass an empty string item to emit a blank line.
func (i *Installer) ShowWrapped(texts ...string) {
	for _, text := range texts {
		trimmed := strings.Trim(text, "\n")
		for _, paragraph := range strings.Split(trimmed, "\n") {
			wrapped := doWordWrap(paragraph, "", 79)
			for _, line := range wrapped {
				i.show.Println(line)
			}
		}
	}
}
// doWordWrap greedily wraps text into lines of at most lineWidth characters,
// prefixing each line with prefix. A word that does not fit starts a new line
// (over-long words are emitted on a line of their own). Empty or
// whitespace-only text yields a single empty line.
func doWordWrap(text string, prefix string, lineWidth int) []string {
	words := strings.Fields(strings.TrimSpace(text))
	if len(words) == 0 {
		return []string{""}
	}
	var lines []string
	current := prefix + words[0]
	for _, word := range words[1:] {
		if len(word)+1 > lineWidth-len(current) {
			// No room for " "+word on this line; flush and start a new one.
			lines = append(lines, current)
			current = prefix + word
		} else {
			current += " " + word
		}
	}
	return append(lines, current)
}
// Capture calls a command and returns its stdout.
//
// name labels the command in wrapped errors; when logToStdout is true the
// child's stdout is also streamed to i.cmdOut while being captured. input is
// fed to the child's stdin; args[0] is the executable. Stderr always goes to
// i.cmdErr. The captured stdout is returned even when the command fails.
func (i *Installer) Capture(name string, logToStdout bool, input string, args ...string) (res string, err error) {
	res = ""
	resAsBytes := &bytes.Buffer{}
	i.log.Printf("$ %s", strings.Join(args, " "))
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdin = strings.NewReader(input)
	if logToStdout {
		// Tee stdout: user-visible log and the capture buffer.
		cmd.Stdout = io.MultiWriter(NewLoggingWriter(i.cmdOut), resAsBytes)
	} else {
		cmd.Stdout = resAsBytes
	}
	cmd.Stderr = NewLoggingWriter(i.cmdErr)
	err = cmd.Run()
	if err != nil {
		err = errors.Wrap(err, name)
	}
	res = resAsBytes.String()
	return
}
| 1.859375 | 2 |
packages/arb-rpc-node/aggregator/aggregator.go | EazyReal/arbitrum | 1 | 109 | /*
* Copyright 2020-2021, Offchain Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package aggregator
import (
"context"
"github.com/offchainlabs/arbitrum/packages/arb-util/arblog"
"math/big"
"github.com/offchainlabs/arbitrum/packages/arb-rpc-node/batcher"
"github.com/offchainlabs/arbitrum/packages/arb-rpc-node/snapshot"
"github.com/offchainlabs/arbitrum/packages/arb-rpc-node/txdb"
"github.com/offchainlabs/arbitrum/packages/arb-util/core"
"github.com/pkg/errors"
ethcommon "github.com/ethereum/go-ethereum/common"
ethcore "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"github.com/offchainlabs/arbitrum/packages/arb-evm/evm"
"github.com/offchainlabs/arbitrum/packages/arb-util/common"
"github.com/offchainlabs/arbitrum/packages/arb-util/machine"
)
// logger is the package-wide structured logger tagged with this component.
var logger = arblog.Logger.With().Str("component", "aggregator").Logger()
// Server serves aggregated L2 chain data: it accepts transactions for
// batching and answers read queries from the transaction database.
type Server struct {
	chainId *big.Int                   // L2 chain ID, reported by ChainId
	batch   batcher.TransactionBatcher // queues transactions for inclusion
	db      *txdb.TxDB                 // indexed chain state and results
	scope   event.SubscriptionScope    // tracks event subscriptions
}
// NewServer returns a new instance of the Server class
func NewServer(
	batch batcher.TransactionBatcher,
	chainId *big.Int,
	db *txdb.TxDB,
) *Server {
	return &Server{
		chainId: chainId,
		batch:   batch,
		db:      db,
	}
}
// SendTransaction takes a request signed transaction l2message from a Client
// and puts it in a queue to be included in the next transaction batch
func (m *Server) SendTransaction(ctx context.Context, tx *types.Transaction) error {
	return m.batch.SendTransaction(ctx, tx)
}
// GetBlockCount returns the number of L2 blocks currently indexed in the
// transaction database.
func (m *Server) GetBlockCount() (uint64, error) {
	count, err := m.db.BlockCount()
	if err != nil {
		return 0, err
	}
	return count, nil
}
// BlockNum resolves an RPC block specifier to a concrete block height.
// "latest" and "pending" both map to the newest indexed block; any other
// negative specifier is rejected. A nil specifier is an error.
func (m *Server) BlockNum(block *rpc.BlockNumber) (uint64, error) {
	switch {
	case block == nil:
		return 0, errors.New("block number must not be null")
	case *block == rpc.LatestBlockNumber || *block == rpc.PendingBlockNumber:
		latest, err := m.db.LatestBlock()
		if err != nil {
			return 0, err
		}
		return latest.Header.Number.Uint64(), nil
	case *block >= 0:
		return uint64(*block), nil
	default:
		return 0, errors.Errorf("unsupported BlockNumber: %v", block.Int64())
	}
}
// LatestBlockHeader returns the header of the newest indexed block, or nil
// (with nil error) when no block is available yet.
func (m *Server) LatestBlockHeader() (*types.Header, error) {
	latest, err := m.db.LatestBlock()
	if err != nil || latest == nil {
		return nil, err
	}
	return latest.Header, nil
}
// GetRequestResult returns the value output by the VM in response to the
// l2message with the given hash
func (m *Server) GetRequestResult(requestId common.Hash) (*evm.TxResult, core.InboxState, *big.Int, error) {
	return m.db.GetRequest(requestId)
}
// GetL2ToL1Proof returns a Merkle proof for the message at the given index
// within the given outgoing message batch; errors if the batch is unknown.
func (m *Server) GetL2ToL1Proof(batchNumber *big.Int, index uint64) (*evm.MerkleRootProof, error) {
	batch, err := m.db.GetMessageBatch(batchNumber)
	if err != nil {
		return nil, err
	}
	if batch == nil {
		return nil, errors.New("batch doesn't exist")
	}
	return batch.GenerateProof(index)
}
// ChainId returns the L2 chain ID this server was configured with.
func (m *Server) ChainId() *big.Int {
	return m.chainId
}
// BlockInfoByNumber looks up block info by block height.
func (m *Server) BlockInfoByNumber(height uint64) (*machine.BlockInfo, error) {
	return m.db.GetBlock(height)
}
// BlockLogFromInfo returns the L2 block log recorded for the given block.
func (m *Server) BlockLogFromInfo(block *machine.BlockInfo) (*evm.BlockInfo, error) {
	return m.db.GetL2Block(block)
}
// BlockInfoByHash looks up block info by block hash.
func (m *Server) BlockInfoByHash(hash common.Hash) (*machine.BlockInfo, error) {
	return m.db.GetBlockWithHash(hash)
}
// GetMachineBlockResults returns the block log and per-transaction results
// for the given block.
func (m *Server) GetMachineBlockResults(block *machine.BlockInfo) (*evm.BlockInfo, []*evm.TxResult, error) {
	return m.db.GetBlockResults(block)
}
// GetTxInBlockAtIndexResults returns the result of the transaction at the
// given index within the block described by res. It returns (nil, nil) when
// there is no such log or when the log found does not belong to this block.
func (m *Server) GetTxInBlockAtIndexResults(res *machine.BlockInfo, index uint64) (*evm.TxResult, error) {
	// The block's logs start at InitialLogIndex; fetch the one at +index.
	avmLog, err := core.GetZeroOrOneLog(m.db.Lookup, new(big.Int).SetUint64(res.InitialLogIndex()+index))
	if err != nil || avmLog.Value == nil {
		return nil, err
	}
	evmRes, err := evm.NewTxResultFromValue(avmLog.Value)
	if err != nil {
		return nil, err
	}
	// Guard against an index past the end of the block: the log must claim
	// the same L2 block number.
	if evmRes.IncomingRequest.L2BlockNumber.Cmp(res.Header.Number) != 0 {
		return nil, nil
	}
	return evmRes, nil
}
// GetSnapshot returns a state snapshot as of the given block height.
func (m *Server) GetSnapshot(ctx context.Context, blockHeight uint64) (*snapshot.Snapshot, error) {
	return m.db.GetSnapshot(ctx, blockHeight)
}
// LatestSnapshot returns a snapshot of the latest indexed state.
func (m *Server) LatestSnapshot(ctx context.Context) (*snapshot.Snapshot, error) {
	return m.db.LatestSnapshot(ctx)
}
// PendingSnapshot returns the batcher's pending-state snapshot, falling back
// to the latest committed snapshot when nothing is pending.
func (m *Server) PendingSnapshot(ctx context.Context) (*snapshot.Snapshot, error) {
	pending, err := m.batch.PendingSnapshot(ctx)
	if err != nil {
		return nil, err
	}
	if pending == nil {
		return m.LatestSnapshot(ctx)
	}
	return pending, nil
}
// Aggregator returns the address of the aggregator batching transactions.
func (m *Server) Aggregator() *common.Address {
	return m.batch.Aggregator()
}
// PendingTransactionCount returns the batcher's pending nonce for account,
// or nil if the batcher has no pending information for it.
func (m *Server) PendingTransactionCount(ctx context.Context, account common.Address) (*uint64, error) {
	return m.batch.PendingTransactionCount(ctx, account)
}
// ChainDb is unused here; it exists to satisfy an upstream interface.
func (m *Server) ChainDb() ethdb.Database {
	return nil
}
// HeaderByNumber resolves an RPC block specifier and returns that block's
// header, or nil (with nil error) if the block is not indexed. It returns
// early when ctx is already cancelled.
func (m *Server) HeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) {
	// Fast-path cancellation check; the lookups below don't take ctx.
	select {
	case <-ctx.Done():
		return nil, errors.New("context cancelled")
	default:
	}
	height, err := m.BlockNum(&blockNumber)
	if err != nil {
		return nil, err
	}
	info, err := m.db.GetBlock(height)
	if err != nil || info == nil {
		return nil, err
	}
	return info.Header, nil
}
// HeaderByHash returns the header of the block with the given hash, or nil
// (with nil error) when the block is not indexed.
func (m *Server) HeaderByHash(_ context.Context, blockHash ethcommon.Hash) (*types.Header, error) {
	info, err := m.BlockInfoByHash(common.NewHashFromEth(blockHash))
	if err != nil || info == nil {
		return nil, err
	}
	return info.Header, nil
}
// GetReceipts returns one Ethereum-style receipt per transaction in the block
// with the given hash; nil when the block or its results are unavailable.
func (m *Server) GetReceipts(_ context.Context, blockHash ethcommon.Hash) (types.Receipts, error) {
	info, err := m.db.GetBlockWithHash(common.NewHashFromEth(blockHash))
	if err != nil || info == nil {
		return nil, err
	}
	_, results, err := m.GetMachineBlockResults(info)
	if err != nil || results == nil {
		return nil, err
	}
	receipts := make(types.Receipts, 0, len(results))
	for _, res := range results {
		receipts = append(receipts, res.ToEthReceipt(common.NewHashFromEth(blockHash)))
	}
	return receipts, nil
}
// GetLogs returns, for each transaction in the block with the given hash, the
// slice of EVM logs that transaction emitted. It returns nil (with nil error)
// when the block or its results are unavailable.
func (m *Server) GetLogs(_ context.Context, blockHash ethcommon.Hash) ([][]*types.Log, error) {
	hash := common.NewHashFromEth(blockHash)
	info, err := m.db.GetBlockWithHash(hash)
	if err != nil || info == nil {
		return nil, err
	}
	_, results, err := m.GetMachineBlockResults(info)
	if err != nil || results == nil {
		return nil, err
	}
	logs := make([][]*types.Log, len(results))
	for i, res := range results {
		logs[i] = res.EthLogs(hash)
	}
	return logs, nil
}
// BloomStatus reports no bloom-bits indexing (sections of size 0, 0 indexed).
func (m *Server) BloomStatus() (uint64, uint64) {
	return 0, 0
}
func (m *Server) ServiceFilter(_ context.Context, _ *bloombits.MatcherSession) {
	// Currently not implemented
}
// The Subscribe* methods below forward to the underlying TxDB and register
// the returned subscription in m.scope so they can be torn down together.
func (m *Server) SubscribeNewTxsEvent(ch chan<- ethcore.NewTxsEvent) event.Subscription {
	return m.scope.Track(m.db.SubscribeNewTxsEvent(ch))
}
func (m *Server) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return m.scope.Track(m.db.SubscribePendingLogsEvent(ch))
}
func (m *Server) SubscribeChainEvent(ch chan<- ethcore.ChainEvent) event.Subscription {
	return m.scope.Track(m.db.SubscribeChainEvent(ch))
}
func (m *Server) SubscribeChainHeadEvent(ch chan<- ethcore.ChainEvent) event.Subscription {
	return m.scope.Track(m.db.SubscribeChainHeadEvent(ch))
}
func (m *Server) SubscribeChainSideEvent(ch chan<- ethcore.ChainEvent) event.Subscription {
	return m.scope.Track(m.db.SubscribeChainSideEvent(ch))
}
func (m *Server) SubscribeRemovedLogsEvent(ch chan<- ethcore.RemovedLogsEvent) event.Subscription {
	return m.scope.Track(m.db.SubscribeRemovedLogsEvent(ch))
}
func (m *Server) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
	return m.scope.Track(m.db.SubscribeLogsEvent(ch))
}
func (m *Server) SubscribeBlockProcessingEvent(ch chan<- []*types.Log) event.Subscription {
	return m.scope.Track(m.db.SubscribeBlockProcessingEvent(ch))
}
// GetLookup exposes the underlying ArbCore lookup interface.
func (m *Server) GetLookup() core.ArbCoreLookup {
	return m.db.Lookup
}
| 1.148438 | 1 |
types/methods.go | mdempsky/amigo | 65 | 117 | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package types
// This file defines utilities for population of method sets.
import (
"fmt"
)
// MethodValue returns the Function implementing method sel, building
// wrapper methods on demand. It returns nil if sel denotes an
// abstract (interface) method.
//
// Precondition: sel.Kind() == MethodVal.
//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) MethodValue(sel *Selection) *Function {
	if sel.Kind() != MethodVal {
		panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
	}
	T := sel.Recv()
	if isInterface(T) {
		return nil // abstract method
	}
	if prog.mode&LogSource != 0 {
		defer logStack("MethodValue %s %v", T, sel)()
	}
	// Serialize method-set creation/population across goroutines.
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()
	return prog.addMethod(prog.createMethodSet(T), sel)
}
// LookupMethod returns the implementation of the method of type T
// identified by (pkg, name). It returns nil if the method exists but
// is abstract, and panics if T has no such method.
//
func (prog *Program) LookupMethod(T Type, pkg *Package, name string) *Function {
	sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
	if sel == nil {
		panic(fmt.Sprintf("%s has no method %s", T, Id(pkg, name)))
	}
	return prog.MethodValue(sel)
}
// ssaMethodSet contains the (concrete) methods of a non-interface type.
type ssaMethodSet struct {
	mapping  map[string]*Function // populated lazily
	complete bool                 // mapping contains all methods
}
// createMethodSet returns the ssaMethodSet for T, creating and caching an
// empty one on first use.
//
// Precondition: !isInterface(T).
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) createMethodSet(T Type) *ssaMethodSet {
	mset, ok := prog.methodSets.At(T).(*ssaMethodSet)
	if !ok {
		mset = &ssaMethodSet{mapping: make(map[string]*Function)}
		prog.methodSets.Set(T, mset)
	}
	return mset
}
// addMethod returns (building and memoizing on first use) the Function for
// the method denoted by sel within mset. Promoted or indirected methods get
// a synthesized wrapper; otherwise the declared function is used directly.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
func (prog *Program) addMethod(mset *ssaMethodSet, sel *Selection) *Function {
	if sel.Kind() == MethodExpr {
		panic(sel)
	}
	id := sel.Obj().Id()
	fn := mset.mapping[id]
	if fn == nil {
		obj := sel.Obj().(*Func)
		// Promotion: the method comes from an embedded field (path length > 1).
		needsPromotion := len(sel.Index()) > 1
		// Indirection: value-receiver method selected through a pointer.
		needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
		if needsPromotion || needsIndirection {
			fn = prog.makeWrapper(sel)
		} else {
			fn = prog.declaredFunc(obj)
		}
		if fn.Signature.Recv() == nil {
			panic(fn) // missing receiver
		}
		mset.mapping[id] = fn
	}
	return fn
}
// RuntimeTypes returns a new unordered slice containing all
// concrete types in the program for which a complete (non-empty)
// method set is required at run-time.
//
// Thread-safe.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) RuntimeTypes() []Type {
	prog.methodsMu.Lock()
	defer prog.methodsMu.Unlock()
	var res []Type
	prog.methodSets.Iterate(func(T Type, v interface{}) {
		// Only method sets fully populated by needMethods qualify.
		if v.(*ssaMethodSet).complete {
			res = append(res, T)
		}
	})
	return res
}
// declaredFunc returns the concrete function/method denoted by obj.
// Panic ensues if there is none.
//
func (prog *Program) declaredFunc(obj *Func) *Function {
	if v := prog.packageLevelValue(obj); v != nil {
		return v.(*Function)
	}
	panic("no concrete method: " + obj.String())
}
// needMethodsOf ensures that runtime type information (including the
// complete method set) is available for the specified type T and all
// its subcomponents.
//
// needMethodsOf must be called for at least every type that is an
// operand of some MakeInterface instruction, and for the type of
// every exported package member.
//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
//
// Thread-safe. (Called via emitConv from multiple builder goroutines.)
//
// TODO(adonovan): make this faster. It accounts for 20% of SSA build time.
//
// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
//
func (prog *Program) needMethodsOf(T Type) {
	prog.methodsMu.Lock()
	prog.needMethods(T, false)
	prog.methodsMu.Unlock()
}
// needMethods recursively materializes runtime method sets for T and every
// type reachable from it (element, key, field, parameter and result types).
//
// Precondition: T is not a method signature (*Signature with Recv()!=nil).
// Recursive case: skip => don't create methods for T.
//
// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
//
func (prog *Program) needMethods(T Type, skip bool) {
	// Each package maintains its own set of types it has visited.
	if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
		// needMethods(T) was previously called
		if !prevSkip || skip {
			return // already seen, with same or false 'skip' value
		}
	}
	prog.runtimeTypes.Set(T, skip)
	tmset := prog.MethodSets.MethodSet(T)
	if !skip && !isInterface(T) && tmset.Len() > 0 {
		// Create methods of T.
		mset := prog.createMethodSet(T)
		if !mset.complete {
			mset.complete = true
			n := tmset.Len()
			for i := 0; i < n; i++ {
				prog.addMethod(mset, tmset.At(i))
			}
		}
	}
	// Recursion over signatures of each method.
	for i := 0; i < tmset.Len(); i++ {
		sig := tmset.At(i).Type().(*Signature)
		prog.needMethods(sig.Params(), false)
		prog.needMethods(sig.Results(), false)
	}
	// Recursion over T's structure.
	switch t := T.(type) {
	case *Basic:
		// nop
	case *Interface, *TypeParam:
		// nop---handled by recursion over method set.
	case *Pointer:
		prog.needMethods(t.Elem(), false)
	case *Slice:
		prog.needMethods(t.Elem(), false)
	case *Chan:
		prog.needMethods(t.Elem(), false)
	case *Map:
		prog.needMethods(t.Key(), false)
		prog.needMethods(t.Elem(), false)
	case *Signature:
		if t.Recv() != nil {
			panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
		}
		prog.needMethods(t.Params(), false)
		prog.needMethods(t.Results(), false)
	case *Named:
		// A pointer-to-named type can be derived from a named
		// type via reflection. It may have methods too.
		prog.needMethods(NewPointer(T), false)
		// Consider 'type T struct{S}' where S has methods.
		// Reflection provides no way to get from T to struct{S},
		// only to S, so the method set of struct{S} is unwanted,
		// so set 'skip' flag during recursion.
		prog.needMethods(t.Underlying(), true)
	case *Array:
		prog.needMethods(t.Elem(), false)
	case *Struct:
		for i, n := 0, t.NumFields(); i < n; i++ {
			prog.needMethods(t.Field(i).Type(), false)
		}
	case *Tuple:
		for i, n := 0, t.Len(); i < n; i++ {
			prog.needMethods(t.At(i).Type(), false)
		}
	default:
		panic(T)
	}
}
| 1.75 | 2 |
pkg/api/http/job/job/job.go | onedomain/lastbackend | 0 | 125 | //
// KULADO INC. CONFIDENTIAL
// __________________
//
// [2014] - [2019] KULADO INC.
// All Rights Reserved.
//
// NOTICE: All information contained herein is, and remains
// the property of KULADO INC. and its suppliers,
// if any. The intellectual and technical concepts contained
// herein are proprietary to KULADO INC.
// and its suppliers and may be covered by Russian Federation and Foreign Patents,
// patents in process, and are protected by trade secret or copyright law.
// Dissemination of this information or reproduction of this material
// is strictly forbidden unless prior written permission is obtained
// from KULADO INC..
//
package job
import (
"context"
"github.com/onedomain/lastbackend/pkg/api/envs"
"github.com/onedomain/lastbackend/pkg/api/types/v1/request"
"github.com/onedomain/lastbackend/pkg/distribution"
"github.com/onedomain/lastbackend/pkg/distribution/errors"
"github.com/onedomain/lastbackend/pkg/distribution/types"
"github.com/onedomain/lastbackend/pkg/log"
"github.com/onedomain/lastbackend/pkg/util/resource"
"net/http"
)
const (
	// logPrefix tags every log line emitted by this handler package.
	logPrefix = "api:handler:job"
	// logLevel is the verbosity at which this package logs.
	logLevel = 3
)
// Fetch loads the job with the given name from the given namespace. It
// returns a NotFound error when the job does not exist and an internal-server
// error on storage failure.
func Fetch(ctx context.Context, namespace, name string) (*types.Job, *errors.Err) {
	jm := distribution.NewJobModel(ctx, envs.Get().GetStorage())
	job, err := jm.Get(types.NewJobSelfLink(namespace, name).String())
	if err != nil {
		log.V(logLevel).Errorf("%s:fetch:> err: %s", logPrefix, err.Error())
		return nil, errors.New("job").InternalServerError(err)
	}
	if job == nil {
		// Local error used only for the log line; callers get NotFound.
		err := errors.New("job not found")
		log.V(logLevel).Errorf("%s:fetch:> err: %s", logPrefix, err.Error())
		return nil, errors.New("job").NotFound()
	}
	return job, nil
}
// Apply upserts a job from the manifest: it creates the job when it does not
// yet exist in the namespace, otherwise it updates the existing one. The
// manifest must carry a name.
func Apply(ctx context.Context, ns *types.Namespace, mf *request.JobManifest) (*types.Job, *errors.Err) {
	if mf.Meta.Name == nil {
		return nil, errors.New("job").BadParameter("meta.name")
	}
	job, err := Fetch(ctx, ns.Meta.Name, *mf.Meta.Name)
	if err != nil {
		// NOTE(review): this treats err.Code == http.StatusText(NotFound) as
		// "job absent" — confirm errors.Err.Code really stores status text.
		if err.Code != http.StatusText(http.StatusNotFound) {
			return nil, errors.New("job").InternalServerError()
		}
	}
	if job == nil {
		return Create(ctx, ns, mf)
	}
	return Update(ctx, ns, job, mf)
}
// Create builds a job from the manifest in the given namespace, applies
// default per-container resource limits when the namespace enforces limits,
// charges the job's resource request against the namespace quota, and stores
// the job. The name must be unique within the namespace.
func Create(ctx context.Context, ns *types.Namespace, mf *request.JobManifest) (*types.Job, *errors.Err) {
	jm := distribution.NewJobModel(ctx, envs.Get().GetStorage())
	nm := distribution.NewNamespaceModel(ctx, envs.Get().GetStorage())
	// Reject duplicate job names within the namespace.
	if mf.Meta.Name != nil {
		job, err := jm.Get(types.NewJobSelfLink(ns.Meta.Name, *mf.Meta.Name).String())
		if err != nil {
			log.V(logLevel).Errorf("%s:create:> get job by name `%s` in namespace `%s` err: %s", logPrefix, mf.Meta.Name, ns.Meta.Name, err.Error())
			return nil, errors.New("job").InternalServerError()
		}
		if job != nil {
			log.V(logLevel).Warnf("%s:create:> job name `%s` in namespace `%s` not unique", logPrefix, mf.Meta.Name, ns.Meta.Name)
			return nil, errors.New("job").NotUnique("name")
		}
	}
	job := new(types.Job)
	mf.SetJobMeta(job)
	job.Meta.SelfLink = *types.NewJobSelfLink(ns.Meta.Name, *mf.Meta.Name)
	job.Meta.Namespace = ns.Meta.Name
	if err := mf.SetJobSpec(job); err != nil {
		return nil, errors.New("job").BadRequest(err.Error())
	}
	// When the namespace enforces limits, fill in defaults for containers
	// that did not declare their own.
	if ns.Spec.Resources.Limits.RAM != 0 || ns.Spec.Resources.Limits.CPU != 0 {
		for _, c := range job.Spec.Task.Template.Containers {
			if c.Resources.Limits.RAM == 0 {
				c.Resources.Limits.RAM, _ = resource.DecodeMemoryResource(types.DEFAULT_RESOURCE_LIMITS_RAM)
			}
			if c.Resources.Limits.CPU == 0 {
				c.Resources.Limits.CPU, _ = resource.DecodeCpuResource(types.DEFAULT_RESOURCE_LIMITS_CPU)
			}
		}
	}
	// Reserve quota in the namespace before persisting the job.
	if err := ns.AllocateResources(job.Spec.GetResourceRequest()); err != nil {
		log.V(logLevel).Errorf("%s:create:> %s", logPrefix, err.Error())
		return nil, errors.New("job").BadRequest(err.Error())
	} else {
		if err := nm.Update(ns); err != nil {
			log.V(logLevel).Errorf("%s:update:> update namespace err: %s", logPrefix, err.Error())
			return nil, errors.New("job").InternalServerError()
		}
	}
	job, err := jm.Create(job)
	if err != nil {
		log.V(logLevel).Errorf("%s:create:> create job err: %s", logPrefix, err.Error())
		return nil, errors.New("job").InternalServerError()
	}
	return job, nil
}
// Update applies the manifest to an existing job. When the job's resource
// request changes, the namespace quota is re-balanced (old request released,
// new one allocated; the allocation is rolled back on failure) before the job
// is persisted.
func Update(ctx context.Context, ns *types.Namespace, job *types.Job, mf *request.JobManifest) (*types.Job, *errors.Err) {
	jm := distribution.NewJobModel(ctx, envs.Get().GetStorage())
	nm := distribution.NewNamespaceModel(ctx, envs.Get().GetStorage())
	// Snapshot the request before the manifest mutates the spec.
	resources := job.Spec.GetResourceRequest()
	mf.SetJobMeta(job)
	if err := mf.SetJobSpec(job); err != nil {
		return nil, errors.New("job").BadRequest(err.Error())
	}
	requestedResources := job.Spec.GetResourceRequest()
	if !resources.Equal(requestedResources) {
		// Keep a copy so we can restore quota if the new allocation fails.
		allocatedResources := ns.Status.Resources.Allocated
		ns.ReleaseResources(resources)
		if err := ns.AllocateResources(job.Spec.GetResourceRequest()); err != nil {
			ns.Status.Resources.Allocated = allocatedResources
			log.V(logLevel).Errorf("%s:update:> %s", logPrefix, err.Error())
			return nil, errors.New("job").BadRequest(err.Error())
		} else {
			if err := nm.Update(ns); err != nil {
				log.V(logLevel).Errorf("%s:update:> update namespace err: %s", logPrefix, err.Error())
				return nil, errors.New("job").InternalServerError()
			}
		}
	}
	if err := jm.Set(job); err != nil {
		log.V(logLevel).Errorf("%s:update:> update job err: %s", logPrefix, err.Error())
		return nil, errors.New("job").InternalServerError()
	}
	return job, nil
}
| 1.21875 | 1 |
internal/services/iam/resource_iam_activation_email_test.go | jdelucaa/terraform-provider-hsdp | 26 | 133 | package iam_test
import (
"fmt"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/philips-software/terraform-provider-hsdp/internal/acctest"
)
// TestResourceIAMActivationEmail_basic is an acceptance test: it applies the
// activation-email resource for a user, checks the user_id attribute, then
// verifies the resource imports cleanly.
func TestResourceIAMActivationEmail_basic(t *testing.T) {
	resourceName := "hsdp_iam_activation_email.test"
	userID := "foo"
	resource.ParallelTest(t, resource.TestCase{
		PreCheck:          func() { acctest.PreCheck(t) },
		ProviderFactories: acctest.ProviderFactories,
		Steps: []resource.TestStep{
			{
				Config: testAccResourceIAMActivationEmailConfig(userID),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(resourceName, "user_id", userID),
				),
			},
			{
				// Round-trip the resource through import to verify state.
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// testAccResourceIAMActivationEmailConfig renders the Terraform configuration
// under test for the given IAM user ID (quoted via %q).
func testAccResourceIAMActivationEmailConfig(id string) string {
	const tmpl = `
resource "hsdp_iam_activation_email" "test" {
  user_id = %[1]q
}`
	return fmt.Sprintf(tmpl, id)
}
| 0.980469 | 1 |
core/codec/capture_test.go | v2pro/wallaby | 5 | 141 | package codec
import (
"testing"
"net/http"
"github.com/stretchr/testify/require"
"net/http/httputil"
"bytes"
"fmt"
"bufio"
)
// Test_bufio demonstrates reading two pipelined HTTP requests from a single
// bufio.Reader: two dumped requests are written into one buffer and parsed
// back with http.ReadRequest. It only prints results (including the reader's
// remaining buffered byte count) and asserts nothing beyond the setup errors.
func Test_bufio(t *testing.T) {
	should := require.New(t)
	req, err := http.NewRequest("GET", "/", nil)
	should.Nil(err)
	reqBytes, err := httputil.DumpRequest(req, true)
	should.Nil(err)
	buf := &bytes.Buffer{}
	// Two back-to-back requests in the same stream.
	buf.Write(reqBytes)
	buf.Write(reqBytes)
	reader := bufio.NewReaderSize(buf, 2048)
	fmt.Println(http.ReadRequest(reader))
	fmt.Println(reader.Buffered())
	fmt.Println(http.ReadRequest(reader))
	fmt.Println(reader.Buffered())
}
core/v1/core.go | tuxlinuxien/lesspass | 10 | 149 | // Package v1 provides core functions to build LessPass password.
package v1
import (
"crypto/hmac"
"crypto/sha256"
"fmt"
"strconv"
"golang.org/x/crypto/pbkdf2"
)
const (
	// iterations is the PBKDF2 round count used by EncryptLogin.
	iterations = 8192
	// keylen is the derived key length in bytes (before hex encoding).
	keylen = 32
)
// EncryptLogin derives the "encrypted login" by running PBKDF2-SHA256 over
// the master password salted with the login, returning the lowercase hex
// encoding of the derived key as bytes.
func EncryptLogin(login, password string) []byte {
	derived := pbkdf2.Key([]byte(password), []byte(login), iterations, keylen, sha256.New)
	return []byte(fmt.Sprintf("%x", derived))
}
// RenderPassword returns the generated password.
func RenderPassword(encLogin []byte, site string, len, counter int, template string) string {
derivedEncryptedLogin := deriveEncryptedLogin(encLogin, site, len, counter)
return prettyPrint(derivedEncryptedLogin, template)
}
// createHmac returns the lowercase hex encoding of HMAC-SHA256 over salt,
// keyed with the encrypted login.
func createHmac(encLogin []byte, salt string) []byte {
	digest := hmac.New(sha256.New, encLogin)
	_, _ = digest.Write([]byte(salt)) // hash.Hash.Write never returns an error
	sum := digest.Sum(nil)
	return []byte(fmt.Sprintf("%x", sum))
}
// deriveEncryptedLogin HMACs the encrypted login with site+counter as salt
// and keeps the first `length` hex characters as password raw material.
func deriveEncryptedLogin(encLogin []byte, site string, length, counter int) []byte {
	salt := site + strconv.Itoa(counter)
	return createHmac(encLogin, salt)[:length]
}
// passwordsChars maps a template character class to the set of characters the
// class can produce. Hoisted to package level so the table is built once
// instead of being reconstructed on every getPasswordChar call.
var passwordsChars = map[byte]string{
	'V': "AEIOUY",
	'C': "BC<KEY>",
	'v': "aeiouy",
	'c': "bcdfghjklmnpqrstvwxz",
	'A': "AE<KEY>",
	'a': "<KEY>",
	'n': "0123456789",
	's': "@&%?,=[]_:-+*$#!'^~;()/.",
	'x': "AEIOUYaeiouyBCDFGHJKLMNPQRSTVWXZbcdfghjklmnpqrstvwxz0123456789@&%?,=[]_:-+*$#!'^~;()/.",
}

// getPasswordChar picks the character at index (modulo set size) from the
// character set associated with charType.
func getPasswordChar(charType byte, index int) byte {
	var passwordChar = passwordsChars[charType]
	return passwordChar[index%len(passwordChar)]
}
// getCharType returns the template character class governing position index,
// cycling through the template when the password is longer than the template.
func getCharType(template string, index int) byte {
	position := index % len(template)
	return template[position]
}
// prettyPrint converts hash bytes into password characters: position i uses
// the template class at i (cycled) and the hash byte selects a character
// within that class's set.
func prettyPrint(hash []byte, template string) string {
	result := make([]byte, len(hash))
	for i, hashByte := range hash {
		class := getCharType(template, i)
		result[i] = getPasswordChar(class, int(hashByte))
	}
	return string(result)
}
| 1.875 | 2 |
utils/switchable/snapshot.go | Enlighten-Fund/go-opera | 0 | 157 | package switchable
import (
"sync"
"github.com/Fantom-foundation/lachesis-base/kvdb"
"github.com/ethereum/go-ethereum/common"
)
// Snapshot wraps a kvdb.Snapshot whose backing snapshot can be swapped at
// runtime via SwitchTo; mu guards every access against a concurrent switch.
type Snapshot struct {
	kvdb.Snapshot
	mu sync.RWMutex
}
// SwitchTo atomically installs snap as the wrapped snapshot and returns
// the snapshot that was previously installed.
func (s *Snapshot) SwitchTo(snap kvdb.Snapshot) kvdb.Snapshot {
	s.mu.Lock()
	prev := s.Snapshot
	s.Snapshot = snap
	s.mu.Unlock()
	return prev
}
// Wrap returns a switchable Snapshot initially backed by snap.
func Wrap(snap kvdb.Snapshot) *Snapshot {
	wrapped := new(Snapshot)
	wrapped.SwitchTo(snap)
	return wrapped
}
// Has reports whether key exists in the currently installed snapshot.
func (s *Snapshot) Has(key []byte) (bool, error) {
	s.mu.RLock()
	found, err := s.Snapshot.Has(key)
	s.mu.RUnlock()
	return found, err
}
// Get returns the value stored for key in the currently installed
// snapshot.
func (s *Snapshot) Get(key []byte) ([]byte, error) {
	s.mu.RLock()
	value, err := s.Snapshot.Get(key)
	s.mu.RUnlock()
	return value, err
}
// Release releases the currently installed snapshot.
func (s *Snapshot) Release() {
	s.mu.Lock()
	s.Snapshot.Release()
	s.mu.Unlock()
}
// NewIterator creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
//
// The returned iterator keeps a pointer to s.Snapshot (upd) so it can
// detect when SwitchTo installs a new snapshot and transparently reopen
// itself against the new one.
func (s *Snapshot) NewIterator(prefix []byte, start []byte) kvdb.Iterator {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return &switchableIterator{
		mu:       &s.mu,
		upd:      &s.Snapshot,
		cur:      s.Snapshot,
		parentIt: s.Snapshot.NewIterator(prefix, start),
		prefix:   prefix,
		start:    start,
	}
}
/*
* Iterator
*/
// switchableIterator lazily follows snapshot switches: cur is the
// snapshot the parent iterator was opened on, while upd points at the
// owning Snapshot's current value so a switch can be detected on the
// next operation.
type switchableIterator struct {
	mu  *sync.RWMutex
	upd *kvdb.Snapshot
	cur kvdb.Snapshot

	parentIt kvdb.Iterator

	// Original iteration window, used to reopen after a switch.
	prefix, start []byte
	// Last returned pair; nil key means not started or exhausted.
	key, value []byte
}
// mayReopen re-creates the parent iterator when the owning Snapshot was
// switched since the last call, resuming just after the last key this
// iterator returned so callers observe a continuous sequence.
// Callers must hold mu; a read lock suffices because SwitchTo takes the
// write lock, so *it.upd cannot change while it is held.
func (it *switchableIterator) mayReopen() {
	if it.cur != *it.upd {
		// reopen iterator if DB was switched
		it.cur = *it.upd
		if it.key != nil {
			// Resume from the last returned key; the prefix is stripped
			// because NewIterator re-applies it.
			it.start = common.CopyBytes(it.key[len(it.prefix):])
		}
		it.parentIt = it.cur.NewIterator(it.prefix, it.start)
		if it.key != nil {
			_ = it.parentIt.Next() // skip previous key
		}
	}
}
// Next advances the iterator in lexicographic key order, reopening it
// first if the underlying snapshot was switched. The current key/value
// pair is cached so a later reopen can resume from the right position.
// It returns false when the iterator is exhausted.
func (it *switchableIterator) Next() bool {
	it.mu.RLock()
	defer it.mu.RUnlock()
	it.mayReopen()
	ok := it.parentIt.Next()
	if !ok {
		// Exhausted: clear the cached pair so Key/Value report nil.
		it.key = nil
		it.value = nil
		return false
	}
	it.key = it.parentIt.Key()
	it.value = it.parentIt.Value()
	return true
}
// Error returns any accumulated error from the underlying iterator.
// Exhausting all key/value pairs is not considered an error.
func (it *switchableIterator) Error() error {
	it.mu.RLock()
	it.mayReopen()
	err := it.parentIt.Error()
	it.mu.RUnlock()
	return err
}
// Key returns the key of the current key/value pair, or nil if done. The caller
// should not modify the contents of the returned slice, and its contents may
// change on the next call to Next.
// No lock is taken: the cached pair is only mutated by Next on the
// caller's own goroutine.
func (it *switchableIterator) Key() []byte {
	return it.key
}
// Value returns the value of the current key/value pair, or nil if done. The
// caller should not modify the contents of the returned slice, and its contents
// may change on the next call to Next.
// No lock is taken: the cached pair is only mutated by Next on the
// caller's own goroutine.
func (it *switchableIterator) Value() []byte {
	return it.value
}
// Release releases the underlying iterator's resources. Release should
// always succeed and can be called multiple times without causing error.
func (it *switchableIterator) Release() {
	it.mu.RLock()
	it.mayReopen()
	it.parentIt.Release()
	it.mu.RUnlock()
}
| 1.8125 | 2 |
pkg/jwt/time_test.go | josestg/justforfun | 0 | 165 | package jwt
import (
"reflect"
"testing"
"time"
)
// TestNewTime round-trips a Time through MarshalJSON/UnmarshalJSON and
// verifies the decoded value equals the original.
func TestNewTime(t *testing.T) {
	original := NewTime(time.Now())
	decoded := new(Time)

	encoded, err := original.MarshalJSON()
	if err != nil {
		t.Fatalf("expecting error nil but got %v", err)
	}
	if err = decoded.UnmarshalJSON(encoded); err != nil {
		t.Fatalf("expecting error nil but got %v", err)
	}
	if !reflect.DeepEqual(original, decoded) {
		t.Fatalf("expecting t1 and t2 are equal")
	}
}
| 1.109375 | 1 |
server/leaderboard_rank_cache.go | kokizzu/nakama | 0 | 173 | // Copyright 2018 The Nakama Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"database/sql"
"sync"
"time"
"github.com/heroiclabs/nakama/v3/internal/skiplist"
"github.com/heroiclabs/nakama-common/api"
"github.com/gofrs/uuid"
"go.uber.org/zap"
)
// LeaderboardRankCache maintains in-memory rank positions for
// leaderboard records, keyed by leaderboard ID and expiry time.
type LeaderboardRankCache interface {
	// Get returns the cached rank of ownerID, or 0 when unknown or caching is disabled.
	Get(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) int64
	// Fill populates the Rank field of each record in place.
	Fill(leaderboardId string, expiryUnix int64, records []*api.LeaderboardRecord)
	// Insert records a score for the owner and returns the resulting rank.
	Insert(leaderboardId string, expiryUnix int64, sortOrder int, ownerID uuid.UUID, score, subscore int64) int64
	// Delete removes the owner's entry from the cached leaderboard.
	Delete(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) bool
	// DeleteLeaderboard drops the entire cached leaderboard/expiry pair.
	DeleteLeaderboard(leaderboardId string, expiryUnix int64) bool
	// TrimExpired evicts cached pairs whose expiry is at or before nowUnix.
	TrimExpired(nowUnix int64) bool
}
// LeaderboardWithExpiry identifies one cached ranking: a leaderboard
// together with the expiry window its scores belong to.
type LeaderboardWithExpiry struct {
	LeaderboardId string
	Expiry        int64
}

// RankAsc is a skiplist element for ascending leaderboards: lower
// scores rank first.
type RankAsc struct {
	OwnerId  uuid.UUID
	Score    int64
	Subscore int64
}
// Less orders ascending entries by score, then subscore, with the
// owner ID string as a final deterministic tie-breaker.
func (r *RankAsc) Less(other interface{}) bool {
	o := other.(*RankAsc)
	switch {
	case r.Score != o.Score:
		return r.Score < o.Score
	case r.Subscore != o.Subscore:
		return r.Subscore < o.Subscore
	default:
		return r.OwnerId.String() < o.OwnerId.String()
	}
}
// RankDesc is a skiplist element for descending leaderboards: higher
// scores rank first.
type RankDesc struct {
	OwnerId  uuid.UUID
	Score    int64
	Subscore int64
}
// Less orders descending entries by score, then subscore (both
// reversed), with the owner ID string as a final deterministic
// tie-breaker.
func (r *RankDesc) Less(other interface{}) bool {
	o := other.(*RankDesc)
	switch {
	case o.Score != r.Score:
		return o.Score < r.Score
	case o.Subscore != r.Subscore:
		return o.Subscore < r.Subscore
	default:
		return o.OwnerId.String() < r.OwnerId.String()
	}
}
// RankCache holds the ranked entries of a single leaderboard/expiry
// pair: a skiplist ordered by score plus an owner index into it.
type RankCache struct {
	sync.RWMutex
	owners map[uuid.UUID]skiplist.Interface
	cache  *skiplist.SkipList
}

// LocalLeaderboardRankCache is an in-memory LeaderboardRankCache.
// Caching can be disabled per leaderboard (blacklistIds) or globally
// (blacklistAll, configured via the "*" wildcard).
type LocalLeaderboardRankCache struct {
	sync.RWMutex
	blacklistAll bool
	blacklistIds map[string]struct{}
	cache        map[LeaderboardWithExpiry]*RankCache
}

// Compile-time interface conformance check.
var _ LeaderboardRankCache = &LocalLeaderboardRankCache{}
// NewLocalLeaderboardRankCache builds the in-memory rank cache and,
// unless caching is disabled entirely (blacklist "*"), asynchronously
// preloads current ranks for every known leaderboard from the database
// so server startup is not blocked.
func NewLocalLeaderboardRankCache(startupLogger *zap.Logger, db *sql.DB, config *LeaderboardConfig, leaderboardCache LeaderboardCache) LeaderboardRankCache {
	cache := &LocalLeaderboardRankCache{
		blacklistIds: make(map[string]struct{}, len(config.BlacklistRankCache)),
		blacklistAll: len(config.BlacklistRankCache) == 1 && config.BlacklistRankCache[0] == "*",
		cache:        make(map[LeaderboardWithExpiry]*RankCache, 0),
	}

	// If caching is disabled completely do not preload any records.
	if cache.blacklistAll {
		startupLogger.Info("Skipping leaderboard rank cache initialization")
		return cache
	}

	startupLogger.Info("Initializing leaderboard rank cache")

	nowTime := time.Now().UTC()

	// Preload in a background goroutine; readers see partially filled
	// caches until it completes.
	go func() {
		skippedLeaderboards := make([]string, 0, 10)
		leaderboards := leaderboardCache.GetAllLeaderboards()
		cachedLeaderboards := make([]string, 0, len(leaderboards))
		for _, leaderboard := range leaderboards {
			if _, ok := cache.blacklistIds[leaderboard.Id]; ok {
				startupLogger.Debug("Skip caching leaderboard ranks", zap.String("leaderboard_id", leaderboard.Id))
				skippedLeaderboards = append(skippedLeaderboards, leaderboard.Id)
				continue
			}

			cachedLeaderboards = append(cachedLeaderboards, leaderboard.Id)
			startupLogger.Debug("Caching leaderboard ranks", zap.String("leaderboard_id", leaderboard.Id))

			// Current expiry for this leaderboard.
			// This matches calculateTournamentDeadlines
			var expiryUnix int64
			if leaderboard.ResetSchedule != nil {
				expiryUnix = leaderboard.ResetSchedule.Next(nowTime).UTC().Unix()
				if leaderboard.EndTime > 0 && expiryUnix > leaderboard.EndTime {
					expiryUnix = leaderboard.EndTime
				}
			} else {
				expiryUnix = leaderboard.EndTime
			}

			// Prepare structure to receive rank data.
			key := LeaderboardWithExpiry{LeaderboardId: leaderboard.Id, Expiry: expiryUnix}
			cache.Lock()
			rankCache, found := cache.cache[key]
			if !found {
				rankCache = &RankCache{
					owners: make(map[uuid.UUID]skiplist.Interface),
					cache:  skiplist.New(),
				}
				cache.cache[key] = rankCache
			}
			cache.Unlock()

			// Look up all active records for this leaderboard.
			query := `
SELECT owner_id, score, subscore
FROM leaderboard_record
WHERE leaderboard_id = $1 AND expiry_time = $2`
			rows, err := db.Query(query, leaderboard.Id, time.Unix(expiryUnix, 0).UTC())
			if err != nil {
				startupLogger.Error("Failed to caching leaderboard ranks", zap.String("leaderboard_id", leaderboard.Id), zap.Error(err))
				continue
			}

			// Process the records.
			for rows.Next() {
				var ownerIDStr string
				var score int64
				var subscore int64
				if err = rows.Scan(&ownerIDStr, &score, &subscore); err != nil {
					startupLogger.Error("Failed to scan leaderboard rank data", zap.String("leaderboard_id", leaderboard.Id), zap.Error(err))
					break
				}
				ownerID, err := uuid.FromString(ownerIDStr)
				if err != nil {
					startupLogger.Error("Failed to parse scanned leaderboard rank data", zap.String("leaderboard_id", leaderboard.Id), zap.String("owner_id", ownerIDStr), zap.Error(err))
					break
				}

				// Prepare new rank data for this leaderboard entry.
				var rankData skiplist.Interface
				if leaderboard.SortOrder == LeaderboardSortOrderDescending {
					rankData = &RankDesc{
						OwnerId:  ownerID,
						Score:    score,
						Subscore: subscore,
					}
				} else {
					rankData = &RankAsc{
						OwnerId:  ownerID,
						Score:    score,
						Subscore: subscore,
					}
				}

				rankCache.Lock()
				// Skip owners already inserted (e.g. by concurrent writes
				// arriving while preload is running).
				if _, alreadyInserted := rankCache.owners[ownerID]; alreadyInserted {
					rankCache.Unlock()
					continue
				}
				rankCache.owners[ownerID] = rankData
				rankCache.cache.Insert(rankData)
				rankCache.Unlock()
			}
			_ = rows.Close()
		}

		startupLogger.Info("Leaderboard rank cache initialization completed successfully", zap.Strings("cached", cachedLeaderboards), zap.Strings("skipped", skippedLeaderboards))
	}()

	return cache
}
// Get returns the cached rank of ownerID on the given leaderboard and
// expiry window, or 0 when caching is disabled for the leaderboard or
// the owner has no cached entry.
func (l *LocalLeaderboardRankCache) Get(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) int64 {
	if l.blacklistAll {
		// All rank caching is disabled.
		return 0
	}
	if _, blacklisted := l.blacklistIds[leaderboardId]; blacklisted {
		// Rank caching is disabled for this particular leaderboard.
		return 0
	}

	// Locate the rank map for this leaderboard/expiry pair.
	key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix}
	l.RLock()
	rankCache, found := l.cache[key]
	l.RUnlock()
	if !found {
		return 0
	}

	// Resolve the owner's entry to its current rank.
	rankCache.RLock()
	defer rankCache.RUnlock()
	rankData, found := rankCache.owners[ownerID]
	if !found {
		return 0
	}
	return int64(rankCache.cache.GetRank(rankData))
}
// Fill populates the Rank field of the given records in place from the
// cache. Records with malformed or unknown owner IDs keep their
// existing Rank value.
func (l *LocalLeaderboardRankCache) Fill(leaderboardId string, expiryUnix int64, records []*api.LeaderboardRecord) {
	if l.blacklistAll {
		// If all rank caching is disabled.
		return
	}
	if _, ok := l.blacklistIds[leaderboardId]; ok {
		// If rank caching is disabled for this particular leaderboard.
		return
	}

	if len(records) == 0 {
		// Nothing to do.
		return
	}

	// Find rank map for this leaderboard/expiry pair.
	key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix}
	l.RLock()
	rankCache, ok := l.cache[key]
	l.RUnlock()
	if !ok {
		return
	}

	// Find rank data for each owner.
	rankCache.RLock()
	for _, record := range records {
		ownerID, err := uuid.FromString(record.OwnerId)
		if err != nil {
			// Skip malformed owner IDs rather than failing the batch.
			continue
		}

		rankData, ok := rankCache.owners[ownerID]
		if !ok {
			continue
		}

		record.Rank = int64(rankCache.cache.GetRank(rankData))
	}
	rankCache.RUnlock()
}
// Insert records (or replaces) the owner's score on the given
// leaderboard/expiry pair and returns the resulting 1-based rank, or 0
// when caching is disabled. The rank map is created lazily using a
// double-checked locking pattern.
func (l *LocalLeaderboardRankCache) Insert(leaderboardId string, expiryUnix int64, sortOrder int, ownerID uuid.UUID, score, subscore int64) int64 {
	if l.blacklistAll {
		// If all rank caching is disabled.
		return 0
	}
	if _, ok := l.blacklistIds[leaderboardId]; ok {
		// If rank caching is disabled for this particular leaderboard.
		return 0
	}

	// No existing rank map for this leaderboard/expiry pair, prepare to create a new one.
	key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix}
	l.RLock()
	rankCache, ok := l.cache[key]
	l.RUnlock()
	if !ok {
		// Build the candidate outside the write lock to keep it short.
		newRankCache := &RankCache{
			owners: make(map[uuid.UUID]skiplist.Interface),
			cache:  skiplist.New(),
		}
		l.Lock()
		// Last check if rank map was created by another writer just after last read.
		rankCache, ok = l.cache[key]
		if !ok {
			rankCache = newRankCache
			l.cache[key] = rankCache
		}
		l.Unlock()
	}

	// Prepare new rank data for this leaderboard entry.
	var rankData skiplist.Interface
	if sortOrder == LeaderboardSortOrderDescending {
		rankData = &RankDesc{
			OwnerId:  ownerID,
			Score:    score,
			Subscore: subscore,
		}
	} else {
		rankData = &RankAsc{
			OwnerId:  ownerID,
			Score:    score,
			Subscore: subscore,
		}
	}

	// Check for and remove any previous rank entry, then insert the new rank data and get its rank.
	rankCache.Lock()
	if oldRankData, ok := rankCache.owners[ownerID]; ok {
		rankCache.cache.Delete(oldRankData)
	}

	rankCache.owners[ownerID] = rankData
	rankCache.cache.Insert(rankData)
	rank := rankCache.cache.GetRank(rankData)
	rankCache.Unlock()

	return int64(rank)
}
// Delete removes the owner's rank entry from the cached
// leaderboard/expiry pair. It returns false only when caching is
// disabled; a missing cache or missing owner counts as success.
func (l *LocalLeaderboardRankCache) Delete(leaderboardId string, expiryUnix int64, ownerID uuid.UUID) bool {
	if l.blacklistAll {
		// If all rank caching is disabled.
		return false
	}
	if _, ok := l.blacklistIds[leaderboardId]; ok {
		// If rank caching is disabled for this particular leaderboard.
		return false
	}

	// Find the rank map for this leaderboard/expiry pair.
	key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix}

	l.RLock()
	rankCache, ok := l.cache[key]
	l.RUnlock()
	if !ok {
		// No rank cache for this leaderboard and expiry combination.
		return true
	}

	// Remove any existing rank entry.
	rankCache.Lock()
	rankData, ok := rankCache.owners[ownerID]
	if !ok {
		rankCache.Unlock()
		return true
	}

	delete(rankCache.owners, ownerID)
	rankCache.cache.Delete(rankData)
	rankCache.Unlock()

	return true
}
// DeleteLeaderboard drops the entire cached rank map for the given
// leaderboard/expiry pair. It returns false only when caching is
// disabled for this leaderboard.
func (l *LocalLeaderboardRankCache) DeleteLeaderboard(leaderboardId string, expiryUnix int64) bool {
	if l.blacklistAll {
		// All rank caching is disabled.
		return false
	}
	if _, blacklisted := l.blacklistIds[leaderboardId]; blacklisted {
		// Rank caching is disabled for this particular leaderboard.
		return false
	}

	key := LeaderboardWithExpiry{LeaderboardId: leaderboardId, Expiry: expiryUnix}
	l.Lock()
	defer l.Unlock()
	delete(l.cache, key)
	return true
}
// TrimExpired evicts every cached leaderboard whose expiry is non-zero
// and at or before nowUnix. Intended to be driven by a periodic timer.
func (l *LocalLeaderboardRankCache) TrimExpired(nowUnix int64) bool {
	if l.blacklistAll {
		// All rank caching is disabled.
		return false
	}

	l.Lock()
	defer l.Unlock()
	for key := range l.cache {
		if key.Expiry != 0 && key.Expiry <= nowUnix {
			delete(l.cache, key)
		}
	}
	return true
}
| 1.632813 | 2 |
pkgs/sops-pgp-hook/hook_test.go | starcraft66/sops-nix | 360 | 181 | package main
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
)
// ok fails the test immediately when err is non-nil, printing the
// caller's file and line for easier diagnosis.
func ok(tb testing.TB, err error) {
	if err == nil {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	fmt.Printf("\033[31m%s:%d: unexpected error: %s\033[39m\n\n", filepath.Base(file), line, err.Error())
	tb.FailNow()
}
// TestShellHook runs the nix-shell hook from the test assets directory
// and verifies SOPS_PGP_FP contains the expected key fingerprints,
// excludes subkeys, and reports missing key files on stderr.
// NOTE(review): requires nix-shell on PATH and the TEST_ASSETS fixtures;
// it is an integration test, not a unit test.
func TestShellHook(t *testing.T) {
	assets := os.Getenv("TEST_ASSETS")
	if assets == "" {
		// Fall back to the test-assets directory next to this source file.
		_, filename, _, _ := runtime.Caller(0)
		assets = path.Join(path.Dir(filename), "test-assets")
	}

	// Isolated GNUPGHOME so the user's keyring is never touched.
	tempdir, err := ioutil.TempDir("", "testdir")
	ok(t, err)
	defer os.RemoveAll(tempdir)

	cmd := exec.Command("nix-shell", "shell.nix", "--run", "echo SOPS_PGP_FP=$SOPS_PGP_FP")
	cmd.Env = append(os.Environ(), fmt.Sprintf("GNUPGHOME=%s", tempdir))
	var stdoutBuf, stderrBuf bytes.Buffer
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf
	cmd.Dir = assets
	err = cmd.Run()
	stdout := stdoutBuf.String()
	stderr := stderrBuf.String()
	fmt.Printf("$ %s\nstdout: \n%s\nstderr: \n%s\n", strings.Join(cmd.Args, " "), stdout, stderr)
	ok(t, err)
	// Fingerprints of the fixture keys expected in SOPS_PGP_FP.
	// NOTE(review): one entry is redacted as "<KEY>" in this copy — restore from upstream.
	expectedKeys := []string{
		"C6DA56E69A7C756564A8AFEB4A6B05B714D13EFD",
		"4EC40F8E04A945339F7F7C0032C5225271038E3F",
		"7FB89715AADA920D65D25E63F9BA9DEBD03F57C0",
		"<KEY>",
	}
	for _, key := range expectedKeys {
		if !strings.Contains(stdout, key) {
			t.Fatalf("'%v' not in '%v'", key, stdout)
		}
	}
	// it should ignore subkeys from ./keys/key-with-subkeys.asc
	subkey := "94F174F588090494E73D0835A79B1680BC4D9A54"
	if strings.Contains(stdout, subkey) {
		t.Fatalf("subkey found in %s", stdout)
	}

	expectedStderr := "./non-existing-key.gpg does not exists"
	if !strings.Contains(stderr, expectedStderr) {
		t.Fatalf("'%v' not in '%v'", expectedStderr, stdout)
	}
}
| 1.414063 | 1 |
examples/firmata_gpio_max7219.go | stevebargelt/gobot | 0 | 189 | // +build example
//
// Do not build by default.
/*
How to setup
This examples requires you to daisy-chain 4 led matrices based on MAX7219.
It will turn on one led at a time, from the first led at the first matrix to the last led of the last matrix.
How to run
Pass serial port to use as the first param:
go run examples/firmata_gpio_max7219.go /dev/ttyACM0
*/
package main
import (
"os"
"time"
"github.com/stevebargelt/gobot"
"github.com/stevebargelt/gobot/drivers/gpio"
"github.com/stevebargelt/gobot/platforms/firmata"
)
// main drives four daisy-chained MAX7219 LED matrices over Firmata,
// lighting one LED at a time from the first LED of the first matrix to
// the last LED of the last matrix.
func main() {
	firmataAdaptor := firmata.NewAdaptor(os.Args[1])
	max := gpio.NewMAX7219Driver(firmataAdaptor, "11", "10", "9", 4)

	var digit byte = 1 // digit address goes from 0x01 (MAX7219Digit0) to 0x08 (MAX7219Digit8)
	var bits byte = 1
	var module uint
	count := 0

	work := func() {
		gobot.Every(100*time.Millisecond, func() {
			max.ClearAll()
			max.One(module, digit, bits)
			// Shift to the next LED within the current digit row.
			bits = bits << 1

			count++
			if count > 7 {
				// Row exhausted: move to the next digit.
				count = 0
				digit++
				bits = 1
				if digit > 8 {
					// Matrix exhausted: move to the next chained module.
					digit = 1
					module++
					if module >= 4 {
						module = 0
						count = 0
					}
				}
			}
		})
	}

	robot := gobot.NewRobot("Max7219Bot",
		// BUG FIX: the original referenced an undefined `esp8266`
		// variable; the adaptor created above is `firmataAdaptor`.
		[]gobot.Connection{firmataAdaptor},
		[]gobot.Device{max},
		work,
	)

	robot.Start()
}
| 2.25 | 2 |
service/autopilot/v1/assistant/model_builds/api_op_client.go | RJPearson94/twilio-sdk-go | 14 | 197 | // Package model_builds contains auto-generated files. DO NOT MODIFY
package model_builds
import "github.com/RJPearson94/twilio-sdk-go/client"
// Client for managing model build resources.
// See https://www.twilio.com/docs/autopilot/api/model-build for more details
type Client struct {
	client *client.Client // underlying HTTP API client

	assistantSid string // SID of the assistant that owns the model builds
}
// ClientProperties are the properties required to manage the model builds resources.
type ClientProperties struct {
	// AssistantSid is the SID of the parent assistant resource.
	AssistantSid string
}
// New creates a new instance of the model builds client scoped to the
// assistant identified in properties.
func New(client *client.Client, properties ClientProperties) *Client {
	c := new(Client)
	c.client = client
	c.assistantSid = properties.AssistantSid
	return c
}
| 0.761719 | 1 |
cmd/siva/impl/unpack_test.go | vmarkovtsev/go-siva | 94 | 205 | package impl
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
. "gopkg.in/check.v1"
)
// UnpackSuite exercises the `siva unpack` command against fixture
// archives; folder is a per-test temporary output directory.
type UnpackSuite struct {
	folder string
}

// Register the suite with gocheck.
var _ = Suite(&UnpackSuite{})
// SetUpTest creates a fresh temporary directory for each test.
func (s *UnpackSuite) SetUpTest(c *C) {
	dir, err := ioutil.TempDir("", "siva-cmd-unpack")
	s.folder = dir
	c.Assert(err, IsNil)
}
// TearDownTest removes the test's temporary directory.
func (s *UnpackSuite) TearDownTest(c *C) {
	c.Assert(os.RemoveAll(s.folder), IsNil)
}
// TestBasic unpacks fixtures/perms.siva and verifies file names,
// contents, and permission strings (Windows reports fixed 0666 perms).
func (s *UnpackSuite) TestBasic(c *C) {
	cmd := &CmdUnpack{}
	cmd.Output.Path = filepath.Join(s.folder, "files")
	cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "perms.siva")
	cmd.Overwrite = true
	err := cmd.Execute(nil)
	c.Assert(err, IsNil)
	dir, err := ioutil.ReadDir(cmd.Output.Path)
	c.Assert(err, IsNil)
	c.Assert(dir, HasLen, 3)
	// Expected permission strings per fixture entry; Windows does not
	// preserve POSIX modes.
	perms := []string{"-rwxr-xr-x", "-rw-------", "-rw-r--r--"}
	if runtime.GOOS == "windows" {
		perms = []string{"-rw-rw-rw-", "-rw-rw-rw-", "-rw-rw-rw-"}
	}
	for i, f := range dir {
		// `files` is a fixture table declared elsewhere in this package.
		c.Assert(f.Name(), Equals, files[i].Name)
		data, err := ioutil.ReadFile(filepath.Join(s.folder, "files", f.Name()))
		c.Assert(err, IsNil)
		c.Assert(string(data), Equals, files[i].Body)
		c.Assert(f.Mode().String(), Equals, perms[i])
	}
}
// TestIgnorePerms unpacks with IgnorePerms set and verifies every file
// receives the package's default permissions instead of the archived ones.
func (s *UnpackSuite) TestIgnorePerms(c *C) {
	cmd := &CmdUnpack{}
	cmd.Output.Path = filepath.Join(s.folder, "files")
	cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "perms.siva")
	cmd.IgnorePerms = true
	err := cmd.Execute(nil)
	c.Assert(err, IsNil)
	dir, err := ioutil.ReadDir(cmd.Output.Path)
	c.Assert(err, IsNil)
	c.Assert(dir, HasLen, 3)
	for _, f := range dir {
		// defaultPerms is declared elsewhere in this package.
		c.Assert(f.Mode(), Equals, os.FileMode(defaultPerms))
	}
}
// TestMatch unpacks with a regexp filter and verifies only the matching
// entry is extracted.
func (s *UnpackSuite) TestMatch(c *C) {
	cmd := &CmdUnpack{}
	cmd.Output.Path = filepath.Join(s.folder, "files")
	cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "basic.siva")
	cmd.Match = "gopher(.*)"
	err := cmd.Execute(nil)
	c.Assert(err, IsNil)
	dir, err := ioutil.ReadDir(cmd.Output.Path)
	c.Assert(err, IsNil)
	c.Assert(dir, HasLen, 1)
	c.Assert(dir[0].Name(), Equals, "gopher.txt")
}
// TestOverwrite unpacks an archive containing duplicate entries with
// Overwrite enabled and verifies extraction succeeds with the expected
// number of files.
func (s *UnpackSuite) TestOverwrite(c *C) {
	cmd := &CmdUnpack{}
	cmd.Output.Path = filepath.Join(s.folder, "files")
	cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "duplicate.siva")
	cmd.Overwrite = true
	err := cmd.Execute(nil)
	c.Assert(err, IsNil)
	dir, err := ioutil.ReadDir(cmd.Output.Path)
	c.Assert(err, IsNil)
	c.Assert(dir, HasLen, 3)
}
// TestZipSlip verifies path-traversal protection: unpacking an archive
// whose entries escape the output directory must fail and must not
// create files outside it.
func (s *UnpackSuite) TestZipSlip(c *C) {
	cmd := &CmdUnpack{}
	cmd.Output.Path = filepath.Join(s.folder, "files/inside")
	cmd.Args.File = filepath.Join("..", "..", "..", "fixtures", "zipslip.siva")
	err := cmd.Execute(nil)
	c.Assert(err, NotNil)
	// The traversal target must not exist after the failed extraction.
	_, err = os.Stat(filepath.Join(s.folder, "files"))
	c.Assert(err, NotNil)
	c.Assert(os.IsNotExist(err), Equals, true)
}
| 1.265625 | 1 |
cmd/repositories/update_repository.go | krok-o/krokctl | 0 | 213 | package repositories
import (
"fmt"
"github.com/krok-o/krok/pkg/models"
"github.com/spf13/cobra"
"github.com/krok-o/krokctl/cmd"
"github.com/krok-o/krokctl/pkg/formatter"
)
var (
	// UpdateRepositoryCmd updates an existing repository with the given values.
	UpdateRepositoryCmd = &cobra.Command{
		Use:   "repository",
		Short: "Update repository",
		Run:   runUpdateRepositoryCmd,
	}
	// updateRepoArgs holds the flag values for the update command.
	updateRepoArgs struct {
		name string // new repository name
		id   int    // ID of the repository to update (required)
	}
)
// init wires the update-repository command into the root `update`
// command and registers its flags; --id is mandatory.
func init() {
	cmd.UpdateCmd.AddCommand(UpdateRepositoryCmd)
	f := UpdateRepositoryCmd.PersistentFlags()
	f.StringVar(&updateRepoArgs.name, "name", "", "The name of the repository.")
	f.IntVar(&updateRepoArgs.id, "id", -1, "The ID of the repository to update.")
	if err := UpdateRepositoryCmd.MarkPersistentFlagRequired("id"); err != nil {
		cmd.CLILog.Fatal().Err(err).Msg("Failed to mark required flag.")
	}
}
// runUpdateRepositoryCmd updates the repository identified by --id with
// the flag values and prints the formatted result.
func runUpdateRepositoryCmd(c *cobra.Command, args []string) {
	// BUG FIX: debug message previously said "Creating repository..."
	// (copy-paste from the create command) although this path updates.
	cmd.CLILog.Debug().Msg("Updating repository...")
	repo := &models.Repository{
		Name: updateRepoArgs.name,
		ID:   updateRepoArgs.id,
	}
	repo, err := cmd.KC.RepositoryClient.Update(repo)
	if err != nil {
		cmd.CLILog.Fatal().Err(err).Msg("Failed to update repository.")
	}
	fmt.Print(formatter.FormatRepository(repo, cmd.KrokArgs.Formatter))
}
| 1.703125 | 2 |
pkg/snowflake/system_get_snowflake_platform_info_test.go | gary-beautypie/terraform-provider-snowflake | 0 | 221 | package snowflake
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestSystemGetSnowflakePlatformInfoQuery verifies the generated SQL text.
func TestSystemGetSnowflakePlatformInfoQuery(t *testing.T) {
	r := require.New(t)
	query := SystemGetSnowflakePlatformInfoQuery()
	r.Equal(query, `SELECT SYSTEM$GET_SNOWFLAKE_PLATFORM_INFO() AS "info"`)
}
// TestSystemGetSnowflakePlatformInfoGetStructuredConfigAws verifies that
// AWS VPC IDs are parsed and the Azure field stays nil.
func TestSystemGetSnowflakePlatformInfoGetStructuredConfigAws(t *testing.T) {
	r := require.New(t)
	rawInfo := &RawSnowflakePlatformInfo{
		Info: `{"snowflake-vpc-id": ["vpc-1", "vpc-2"]}`,
	}

	parsed, err := rawInfo.GetStructuredConfig()
	r.Nil(err)
	r.Equal([]string{"vpc-1", "vpc-2"}, parsed.AwsVpcIds)
	r.Equal([]string(nil), parsed.AzureVnetSubnetIds)
}
// TestSystemGetSnowflakePlatformInfoGetStructuredConfigAzure verifies
// that Azure VNet subnet IDs are parsed and the AWS field stays nil.
func TestSystemGetSnowflakePlatformInfoGetStructuredConfigAzure(t *testing.T) {
	r := require.New(t)
	rawInfo := &RawSnowflakePlatformInfo{
		Info: `{"snowflake-vnet-subnet-id": ["/subscription/1/1", "/subscription/1/2"]}`,
	}

	parsed, err := rawInfo.GetStructuredConfig()
	r.Nil(err)
	r.Equal([]string{"/subscription/1/1", "/subscription/1/2"}, parsed.AzureVnetSubnetIds)
	r.Equal([]string(nil), parsed.AwsVpcIds)
}
| 1.257813 | 1 |
opamp/observiq/identity.go | observIQ/observiq-otel-collector | 1 | 229 | // Copyright observIQ, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package observiq
import (
"runtime"
ios "github.com/observiq/observiq-otel-collector/internal/os"
"github.com/observiq/observiq-otel-collector/internal/version"
"github.com/observiq/observiq-otel-collector/opamp"
"github.com/open-telemetry/opamp-go/protobufs"
"go.uber.org/zap"
)
// identity contains identifying information about the Collector.
type identity struct {
	agentID     string  // unique agent identifier from config
	agentName   *string // optional human-readable name; hostname is used when nil
	serviceName string  // fixed service identifier reported to the server
	version     string  // collector build version
	labels      *string // optional user-supplied labels from config
	oSArch      string  // runtime.GOARCH
	oSDetails   string  // OS description from the internal os helper
	oSFamily    string  // runtime.GOOS
	hostname    string  // host name; may be empty if lookup failed
	mac         string  // primary MAC address
}
// newIdentity constructs a new identity for this collector. Failures to
// read host details are logged and produce a partial identity rather
// than an error.
func newIdentity(logger *zap.Logger, config opamp.Config) *identity {
	// Grab various fields from OS
	hostname, err := ios.Hostname()
	if err != nil {
		logger.Warn("Failed to retrieve hostname for collector. Creating partial identity", zap.Error(err))
	}

	name, err := ios.Name()
	if err != nil {
		logger.Warn("Failed to retrieve host details on collector. Creating partial identity", zap.Error(err))
	}

	return &identity{
		agentID:     config.AgentID,
		agentName:   config.AgentName,
		serviceName: "com.observiq.collector", // Hardcoded defines this type of agent to the server
		version:     version.Version(),
		labels:      config.Labels,
		oSArch:      runtime.GOARCH,
		oSDetails:   name,
		oSFamily:    runtime.GOOS,
		hostname:    hostname,
		mac:         ios.MACAddress(),
	}
}
// Copy creates a deep copy of this identity; the optional *string
// fields are duplicated so the copy shares no pointers with the original.
func (i identity) Copy() *identity {
	cloneString := func(src *string) *string {
		if src == nil {
			return nil
		}
		dst := new(string)
		*dst = *src
		return dst
	}

	return &identity{
		agentID:     i.agentID,
		agentName:   cloneString(i.agentName),
		serviceName: i.serviceName,
		version:     i.version,
		labels:      cloneString(i.labels),
		oSArch:      i.oSArch,
		oSDetails:   i.oSDetails,
		oSFamily:    i.oSFamily,
		hostname:    i.hostname,
		mac:         i.mac,
	}
}
// ToAgentDescription converts the identity into an OpAMP
// AgentDescription. The hostname substitutes for a missing agent name,
// and labels are reported only when set.
func (i *identity) ToAgentDescription() *protobufs.AgentDescription {
	instanceName := i.hostname
	if i.agentName != nil {
		instanceName = *i.agentName
	}

	identifying := []*protobufs.KeyValue{
		opamp.StringKeyValue("service.instance.id", i.agentID),
		opamp.StringKeyValue("service.name", i.serviceName),
		opamp.StringKeyValue("service.version", i.version),
		opamp.StringKeyValue("service.instance.name", instanceName),
	}

	nonIdentifying := []*protobufs.KeyValue{
		opamp.StringKeyValue("os.arch", i.oSArch),
		opamp.StringKeyValue("os.details", i.oSDetails),
		opamp.StringKeyValue("os.family", i.oSFamily),
		opamp.StringKeyValue("host.name", i.hostname),
		opamp.StringKeyValue("host.mac_address", i.mac),
	}
	if i.labels != nil {
		nonIdentifying = append(nonIdentifying, opamp.StringKeyValue("service.labels", *i.labels))
	}

	return &protobufs.AgentDescription{
		IdentifyingAttributes:    identifying,
		NonIdentifyingAttributes: nonIdentifying,
	}
}
| 1.3125 | 1 |
timerange.go | sent-hil/timerange | 0 | 237 | package timerange
import (
"errors"
"fmt"
"strings"
"time"
)
var (
	// ErrValueIsEmpty is returned by Set when given an empty string.
	ErrValueIsEmpty = errors.New("ERROR: value is empty.")
	// ErrInvalidRange is returned when a range does not split into
	// exactly two values around the separator.
	ErrInvalidRange = errors.New("ERROR: values in time range are invalid.")
	// ErrDateOrdering is returned when the start date does not precede
	// the end date.
	// NOTE(review): these messages deviate from Go convention
	// (lowercase, no punctuation); left unchanged because callers may
	// match on the exact text.
	ErrDateOrdering = errors.New("ERROR: start date is after end date")
)
var (
	// DefaultTimeLayout is the reference layout used to parse dates.
	DefaultTimeLayout = "2006/01/02"
	// DefaultRangeSeperator separates the start and end of a range.
	DefaultRangeSeperator = ".."
)
// Timerange accumulates parsed dates from single values ("2006/01/02")
// and inclusive ranges ("2006/01/02..2006/01/05").
type Timerange struct {
	TimeValues     []time.Time // all dates collected so far
	TimeLayout     string      // layout used by time.Parse
	RangeSeparator string      // token separating range endpoints
}
// Parse is a convenience wrapper: it parses value (a single date or a
// range) with the default settings and returns the resulting dates.
func Parse(value string) ([]time.Time, error) {
	tr := NewTimerange()
	if err := tr.Set(value); err != nil {
		return nil, err
	}
	return tr.TimeValues, nil
}
// NewTimerange returns an empty Timerange using the package defaults
// for layout and range separator.
func NewTimerange() *Timerange {
	tr := new(Timerange)
	tr.TimeValues = []time.Time{}
	tr.TimeLayout = DefaultTimeLayout
	tr.RangeSeparator = DefaultRangeSeperator
	return tr
}
// String renders the collected time values using fmt's default slice
// formatting.
func (t *Timerange) String() string {
	return fmt.Sprint(t.TimeValues)
}
// Set parses value — either a single date or a separator-delimited
// range — and appends the resulting dates to TimeValues. An empty
// value yields ErrValueIsEmpty.
func (t *Timerange) Set(value string) error {
	if value == "" {
		return ErrValueIsEmpty
	}

	var parsed []time.Time
	if t.hasRangeSeperator(value) {
		values, err := t.parseRangeIntoTimeValues(value)
		if err != nil {
			return err
		}
		parsed = values
	} else {
		single, err := t.parseTimeFromValue(value)
		if err != nil {
			return err
		}
		parsed = []time.Time{single}
	}

	t.TimeValues = append(t.TimeValues, parsed...)
	return nil
}
// hasRangeSeperator reports whether value contains the configured range
// separator.
func (t *Timerange) hasRangeSeperator(value string) bool {
	return strings.Contains(value, t.RangeSeparator)
}
// parseTimeFromValue parses a single date string using the configured
// layout.
func (t *Timerange) parseTimeFromValue(value string) (time.Time, error) {
	return time.Parse(t.TimeLayout, value)
}
// parseRangeIntoTimeValues expands "start..end" into the inclusive list
// of daily time values from start to end.
//
// BUG FIX: the original rejected ranges whose start equals the end
// (`duration >= 0`) with ErrDateOrdering, even though the start is not
// "after" the end; an equal pair now yields a single date.
func (t *Timerange) parseRangeIntoTimeValues(rangeValue string) (timeValues []time.Time, err error) {
	parts := strings.Split(rangeValue, t.RangeSeparator)
	if len(parts) != 2 {
		return nil, ErrInvalidRange
	}

	startDate, err := t.parseTimeFromValue(parts[0])
	if err != nil {
		return nil, err
	}
	endDate, err := t.parseTimeFromValue(parts[1])
	if err != nil {
		return nil, err
	}

	if startDate.After(endDate) {
		return nil, ErrDateOrdering
	}

	// Both dates come from the same wall-clock layout, so whole days
	// divide evenly into 24h spans.
	days := int(endDate.Sub(startDate).Hours() / 24)
	timeValues = make([]time.Time, 0, days+1)
	for i := 0; i <= days; i++ {
		timeValues = append(timeValues, startDate.Add(time.Duration(i)*24*time.Hour))
	}
	return timeValues, nil
}
| 1.9375 | 2 |
internal/pkg/options/flags.go | cucxabong/Reloader | 2 | 245 | package options
var (
	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in configmaps
	ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in secrets
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
	// ReloaderAutoAnnotation is an annotation that enables automatic reload
	// for all referenced configmaps and secrets (original comment was a
	// copy-paste of the secret annotation's).
	ReloaderAutoAnnotation = "reloader.stakater.com/auto"
	// LogFormat is the log format to use (json, or empty string for default)
	LogFormat = ""
)
| 0.412109 | 0 |
utils/x/crypto/openpgp/packet/public_key_v3.go | c2matrix/mqant | 2 | 253 | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"crypto"
"crypto/md5"
"crypto/rsa"
"encoding/binary"
"fmt"
"hash"
"io"
"math/big"
"strconv"
"time"
"github.com/liangdas/mqant/utils/x/crypto/openpgp/errors"
)
// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
// should not be used for signing or encrypting. They are supported here only for
// parsing version 3 key material and validating signatures.
// See RFC 4880, section 5.5.2.
type PublicKeyV3 struct {
	CreationTime time.Time
	DaysToExpire uint16 // expiry in days after CreationTime; 0 means never
	PubKeyAlgo   PublicKeyAlgorithm
	PublicKey    *rsa.PublicKey // v3 keys are RSA only
	Fingerprint  [16]byte       // MD5 of modulus || exponent (RFC 4880, 12.2)
	KeyId        uint64         // low 8 bytes of the modulus
	IsSubkey     bool

	// Raw MPI-encoded modulus (n) and exponent (e) as read off the wire.
	n, e parsedMPI
}
// newRSAPublicKeyV3 returns a PublicKeyV3 that wraps the given
// rsa.PublicKey. Included here for testing purposes only. RFC 4880,
// section 5.5.2: "an implementation MUST NOT generate a V3 key, but MAY
// accept it."
func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 {
	pk := new(PublicKeyV3)
	pk.CreationTime = creationTime
	pk.PublicKey = pub
	pk.n = fromBig(pub.N)
	pk.e = fromBig(big.NewInt(int64(pub.E)))
	pk.setFingerPrintAndKeyId()
	return pk
}
// parse reads a version 2/3 public key packet body from r.
// Wire layout (RFC 4880, section 5.5.2): 1-byte version, 4-byte
// creation time, 2-byte days-to-expire, 1-byte algorithm, then
// algorithm-specific MPIs.
func (pk *PublicKeyV3) parse(r io.Reader) (err error) {
	// RFC 4880, section 5.5.2
	var buf [8]byte
	if _, err = readFull(r, buf[:]); err != nil {
		return
	}
	// Versions 2 and 3 share this format; anything else is unsupported.
	if buf[0] < 2 || buf[0] > 3 {
		return errors.UnsupportedError("public key version")
	}
	pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
	pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7])
	pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7])
	switch pk.PubKeyAlgo {
	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
		// v3 keys are RSA only.
		err = pk.parseRSA(r)
	default:
		err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
	}
	if err != nil {
		return
	}

	pk.setFingerPrintAndKeyId()
	return
}
// setFingerPrintAndKeyId derives the v3 fingerprint (MD5 over the raw
// modulus then exponent bytes) and the key ID (low 8 bytes of the
// modulus), per RFC 4880, section 12.2. The hash input order matters.
func (pk *PublicKeyV3) setFingerPrintAndKeyId() {
	// RFC 4880, section 12.2
	fingerPrint := md5.New()
	fingerPrint.Write(pk.n.bytes)
	fingerPrint.Write(pk.e.bytes)
	fingerPrint.Sum(pk.Fingerprint[:0])
	pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:])
}
// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
// section 5.5.2. It reads the modulus and exponent MPIs and assembles
// an *rsa.PublicKey.
func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) {
	if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil {
		return
	}
	if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil {
		return
	}

	// RFC 4880 Section 12.2 requires the low 8 bytes of the
	// modulus to form the key id.
	if len(pk.n.bytes) < 8 {
		return errors.StructuralError("v3 public key modulus is too short")
	}
	// The exponent must fit in an int; 3 bytes keeps it well in range.
	if len(pk.e.bytes) > 3 {
		err = errors.UnsupportedError("large public exponent")
		return
	}
	rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)}
	// Assemble the big-endian exponent bytes into an int.
	for i := 0; i < len(pk.e.bytes); i++ {
		rsa.E <<= 8
		rsa.E |= int(pk.e.bytes[i])
	}
	pk.PublicKey = rsa
	return
}
// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
// The prefix is used when calculating a signature over this public key. See
// RFC 4880, section 5.2.4. The Writer is typically a hash, so write errors
// are intentionally ignored.
func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
    var pLength uint16
    switch pk.PubKeyAlgo {
    case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
        // Each MPI contributes a 2-byte bit-length prefix plus its bytes.
        pLength += 2 + uint16(len(pk.n.bytes))
        pLength += 2 + uint16(len(pk.e.bytes))
    default:
        panic("unknown public key algorithm")
    }
    pLength += 6 // fixed v3 header minus the version byte accounting
    // 0x99 tag followed by the big-endian 16-bit body length.
    w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
}
// Serialize writes pk to w as an OpenPGP public-key (or public-subkey)
// packet, including the packet header.
func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
    length := 8 // 8 byte header
    switch pk.PubKeyAlgo {
    case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
        // 2-byte MPI bit-length prefix plus the MPI bytes, for n and e.
        length += 2 + len(pk.n.bytes)
        length += 2 + len(pk.e.bytes)
    default:
        panic("unknown public key algorithm")
    }
    packetType := packetTypePublicKey
    if pk.IsSubkey {
        packetType = packetTypePublicSubkey
    }
    if err = serializeHeader(w, packetType, length); err != nil {
        return
    }
    return pk.serializeWithoutHeaders(w)
}
// serializeWithoutHeaders marshals the PublicKey to w in the form of an
// OpenPGP public key packet, not including the packet header.
func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
    // Fixed v3 header: version(1) || creation time(4) || days-to-expire(2) || algo(1).
    var hdr [8]byte
    hdr[0] = 3 // version 3
    binary.BigEndian.PutUint32(hdr[1:5], uint32(pk.CreationTime.Unix()))
    binary.BigEndian.PutUint16(hdr[5:7], pk.DaysToExpire)
    hdr[7] = byte(pk.PubKeyAlgo)
    if _, err = w.Write(hdr[:]); err != nil {
        return
    }
    switch pk.PubKeyAlgo {
    case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
        return writeMPIs(w, pk.n, pk.e)
    }
    return errors.InvalidArgumentError("bad public-key algorithm")
}
// CanSign returns true iff this public key can generate signatures.
func (pk *PublicKeyV3) CanSign() bool {
    // Every v3 RSA variant except the encrypt-only one may sign.
    if pk.PubKeyAlgo == PubKeyAlgoRSAEncryptOnly {
        return false
    }
    return true
}
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of the data hashed into signed. signed is mutated by this call.
func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
    if !pk.CanSign() {
        return errors.InvalidArgumentError("public key cannot generate signatures")
    }
    // V3 signatures hash a 5-byte trailer: signature type || creation time.
    suffix := make([]byte, 5)
    suffix[0] = byte(sig.SigType)
    binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
    signed.Write(suffix)
    hashBytes := signed.Sum(nil)
    // The HashTag is a quick 16-bit check that the right data was hashed.
    if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
        return errors.SignatureError("hash tag doesn't match")
    }
    if pk.PubKeyAlgo != sig.PubKeyAlgo {
        return errors.InvalidArgumentError("public key and signature use different algorithms")
    }
    switch pk.PubKeyAlgo {
    case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
        if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
            return errors.SignatureError("RSA verification failure")
        }
        return
    default:
        // V3 public keys only support RSA.
        panic("shouldn't happen")
    }
}
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
// public key, that id is the identity of pub.
func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
    h, hashErr := userIdSignatureV3Hash(id, pk, sig.Hash)
    if hashErr != nil {
        return hashErr
    }
    return pk.VerifySignatureV3(h, sig)
}
// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
// public key, of signed.
func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) {
    h, hashErr := keySignatureHash(pk, signed, sig.Hash)
    if hashErr != nil {
        return hashErr
    }
    return pk.VerifySignatureV3(h, sig)
}
// userIdSignatureV3Hash returns a Hash of the message that needs to be signed
// to assert that pk is a valid key for id.
func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) {
    // The hash function must be linked into the binary, else New would panic.
    if !hfn.Available() {
        return nil, errors.UnsupportedError("hash function")
    }
    h = hfn.New()
    // RFC 4880, section 5.2.4
    // Hash the key packet (prefix + body), then the raw user id bytes.
    pk.SerializeSignaturePrefix(h)
    pk.serializeWithoutHeaders(h)
    h.Write([]byte(id))
    return
}
// KeyIdString returns the public key's fingerprint in capital hex
// (e.g. "6C7EE1B8621CC013").
func (pk *PublicKeyV3) KeyIdString() string {
    id := pk.KeyId
    return fmt.Sprintf("%X", id)
}
// KeyIdShortString returns the short form of public key's fingerprint
// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
func (pk *PublicKeyV3) KeyIdShortString() string {
    // Keep only the low 32 bits of the key id.
    short := pk.KeyId & 0xFFFFFFFF
    return fmt.Sprintf("%X", short)
}
// BitLength returns the bit length for the given public key.
func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) {
    switch pk.PubKeyAlgo {
    case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
        // The modulus width is the key size for every RSA variant.
        return pk.n.bitLength, nil
    default:
        return 0, errors.InvalidArgumentError("bad public-key algorithm")
    }
}
| 1.789063 | 2 |
ecr/ref_test.go | choo-stripe/amazon-ecr-containerd-resolver | 0 | 261 | /*
* Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You
* may not use this file except in compliance with the License. A copy of
* the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
* ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package ecr
import (
"errors"
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/stretchr/testify/assert"
)
// TestRefRepresentations round-trips ECR references through ParseRef,
// Canonical and ARN, covering malformed refs, tag-only, digest-only and
// tag+digest forms.
func TestRefRepresentations(t *testing.T) {
    cases := []struct {
        ref  string // "ecr.aws/"-prefixed reference under test
        arn  string // expected ARN() output; empty for error cases
        spec ECRSpec
        err  error // expected ParseRef failure, nil for success
    }{
        {
            ref: "invalid",
            err: invalidARN,
        },
        {
            ref: "ecr.aws/arn:nope",
            err: errors.New("arn: not enough sections"),
        },
        {
            // A bare ARN without the "ecr.aws/" scheme prefix is rejected.
            ref: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar",
            err: invalidARN,
        },
        {
            ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar",
            arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar",
            spec: ECRSpec{
                arn: arn.ARN{
                    Partition: "aws",
                    Region:    "us-west-2",
                    AccountID: "123456789012",
                    Service:   "ecr",
                    Resource:  "repository/foo/bar",
                },
                Repository: "foo/bar",
            },
        },
        {
            ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar:latest",
            arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar",
            spec: ECRSpec{
                arn: arn.ARN{
                    Partition: "aws",
                    Region:    "us-west-2",
                    AccountID: "123456789012",
                    Service:   "ecr",
                    Resource:  "repository/foo/bar",
                },
                Repository: "foo/bar",
                Object:     "latest",
            },
        },
        {
            ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar:latest@sha256:digest",
            arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar",
            spec: ECRSpec{
                arn: arn.ARN{
                    Partition: "aws",
                    Region:    "us-west-2",
                    AccountID: "123456789012",
                    Service:   "ecr",
                    Resource:  "repository/foo/bar",
                },
                Repository: "foo/bar",
                Object:     "latest@sha256:digest",
            },
        },
        {
            ref: "ecr.aws/arn:aws:ecr:us-west-2:123456789012:repository/foo/bar@sha256:digest",
            arn: "arn:aws:ecr:us-west-2:123456789012:repository/foo/bar",
            spec: ECRSpec{
                arn: arn.ARN{
                    Partition: "aws",
                    Region:    "us-west-2",
                    AccountID: "123456789012",
                    Service:   "ecr",
                    Resource:  "repository/foo/bar",
                },
                Repository: "foo/bar",
                Object:     "@sha256:digest",
            },
        },
    }
    for _, tc := range cases {
        t.Run(fmt.Sprintf("ParseRef-%s", tc.ref), func(t *testing.T) {
            spec, err := ParseRef(tc.ref)
            assert.Equal(t, tc.spec, spec)
            if tc.err == nil {
                assert.Nil(t, err)
            } else {
                assert.Equal(t, tc.err, err)
            }
        })
        // Canonical/ARN are only meaningful for refs that parse.
        if tc.err != nil {
            continue
        }
        t.Run(fmt.Sprintf("Canonical-%s", tc.ref), func(t *testing.T) {
            assert.Equal(t, tc.ref, tc.spec.Canonical())
        })
        t.Run(fmt.Sprintf("ARN-%s", tc.ref), func(t *testing.T) {
            assert.Equal(t, tc.arn, tc.spec.ARN())
        })
    }
}
// TestImageID checks that ECRSpec.ImageID maps the Object field to the
// right ecr.ImageIdentifier tag/digest combination.
func TestImageID(t *testing.T) {
    cases := []struct {
        name    string
        spec    ECRSpec
        imageID *ecr.ImageIdentifier
    }{
        {
            // No Object at all yields an empty identifier.
            name: "blank",
            spec: ECRSpec{
                Repository: "foo/bar",
            },
            imageID: &ecr.ImageIdentifier{},
        },
        {
            name: "tag",
            spec: ECRSpec{
                Repository: "foo/bar",
                Object:     "latest",
            },
            imageID: &ecr.ImageIdentifier{
                ImageTag: aws.String("latest"),
            },
        },
        {
            // A leading "@" marks a digest-only object.
            name: "digest",
            spec: ECRSpec{
                Repository: "foo/bar",
                Object:     "@sha256:digest",
            },
            imageID: &ecr.ImageIdentifier{
                ImageDigest: aws.String("sha256:digest"),
            },
        },
        {
            name: "tag+digest",
            spec: ECRSpec{
                Repository: "foo/bar",
                Object:     "latest@sha256:digest",
            },
            imageID: &ecr.ImageIdentifier{
                ImageTag:    aws.String("latest"),
                ImageDigest: aws.String("sha256:digest"),
            },
        },
    }
    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            assert.Equal(t, tc.imageID, tc.spec.ImageID())
        })
    }
}
| 1.132813 | 1 |
src/internal/poll/fd_unix.go | kavindyasinthasilva/go | 3 | 269 | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build aix darwin dragonfly freebsd js,wasm linux netbsd openbsd solaris
package poll
import (
"io"
"runtime"
"sync/atomic"
"syscall"
)
// FD is a file descriptor. The net and os packages use this type as a
// field of a larger type representing a network connection or OS file.
type FD struct {
    // Lock sysfd and serialize access to Read and Write methods.
    fdmu fdMutex
    // System file descriptor. Immutable until Close.
    Sysfd int
    // I/O poller.
    pd pollDesc
    // Writev cache.
    iovecs *[]syscall.Iovec
    // Semaphore signaled when file is closed.
    csema uint32
    // Non-zero if this file has been set to blocking mode.
    // Accessed atomically (see SetBlocking).
    isBlocking uint32
    // Whether this is a streaming descriptor, as opposed to a
    // packet-based descriptor like a UDP socket. Immutable.
    IsStream bool
    // Whether a zero byte read indicates EOF. This is false for a
    // message based socket connection.
    ZeroReadIsEOF bool
    // Whether this is a file rather than a network socket.
    isFile bool
}
// Init initializes the FD. The Sysfd field should already be set.
// This can be called multiple times on a single FD.
// The net argument is a network name from the net package (e.g., "tcp"),
// or "file".
// Set pollable to true if fd should be managed by runtime netpoll.
func (fd *FD) Init(net string, pollable bool) error {
    // We don't actually care about the various network types.
    if net == "file" {
        fd.isFile = true
    }
    if !pollable {
        // Caller asked for blocking mode; skip poller registration.
        fd.isBlocking = 1
        return nil
    }
    err := fd.pd.init(fd)
    if err != nil {
        // If we could not initialize the runtime poller,
        // assume we are using blocking mode.
        fd.isBlocking = 1
    }
    return err
}
// destroy closes the file descriptor. This is called when there are
// no remaining references.
func (fd *FD) destroy() error {
    // Poller may want to unregister fd in readiness notification mechanism,
    // so this must be executed before CloseFunc.
    fd.pd.close()
    err := CloseFunc(fd.Sysfd)
    fd.Sysfd = -1
    // Wake anyone blocked in Close waiting for the fd to be released.
    runtime_Semrelease(&fd.csema)
    return err
}
// Close closes the FD. The underlying file descriptor is closed by the
// destroy method when there are no remaining references.
func (fd *FD) Close() error {
    if !fd.fdmu.increfAndClose() {
        return errClosing(fd.isFile)
    }
    // Unblock any I/O. Once it all unblocks and returns,
    // so that it cannot be referring to fd.sysfd anymore,
    // the final decref will close fd.sysfd. This should happen
    // fairly quickly, since all the I/O is non-blocking, and any
    // attempts to block in the pollDesc will return errClosing(fd.isFile).
    fd.pd.evict()
    // The call to decref will call destroy if there are no other
    // references.
    err := fd.decref()
    // Wait until the descriptor is closed. If this was the only
    // reference, it is already closed. Only wait if the file has
    // not been set to blocking mode, as otherwise any current I/O
    // may be blocking, and that would block the Close.
    // No need for an atomic read of isBlocking, increfAndClose means
    // we have exclusive access to fd.
    if fd.isBlocking == 0 {
        runtime_Semacquire(&fd.csema)
    }
    return err
}
// Shutdown wraps the shutdown network call.
func (fd *FD) Shutdown(how int) error {
    err := fd.incref()
    if err != nil {
        return err
    }
    defer fd.decref()
    return syscall.Shutdown(fd.Sysfd, how)
}
// SetBlocking puts the file into blocking mode.
func (fd *FD) SetBlocking() error {
    if err := fd.incref(); err != nil {
        return err
    }
    defer fd.decref()
    // Atomic store so that concurrent calls to SetBlocking
    // do not cause a race condition. isBlocking only ever goes
    // from 0 to 1 so there is no real race here.
    atomic.StoreUint32(&fd.isBlocking, 1)
    return syscall.SetNonblock(fd.Sysfd, false)
}
// Darwin and FreeBSD can't read or write 2GB+ files at a time,
// even on 64-bit systems.
// The same is true of socket implementations on many systems.
// See golang.org/issue/7812 and golang.org/issue/16266.
// Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned.
const maxRW = 1 << 30 // 1 GiB cap applied per syscall on stream descriptors
// Read implements io.Reader.
func (fd *FD) Read(p []byte) (int, error) {
    if err := fd.readLock(); err != nil {
        return 0, err
    }
    defer fd.readUnlock()
    if len(p) == 0 {
        // If the caller wanted a zero byte read, return immediately
        // without trying (but after acquiring the readLock).
        // Otherwise syscall.Read returns 0, nil which looks like
        // io.EOF.
        // TODO(bradfitz): make it wait for readability? (Issue 15735)
        return 0, nil
    }
    if err := fd.pd.prepareRead(fd.isFile); err != nil {
        return 0, err
    }
    if fd.IsStream && len(p) > maxRW {
        // Cap a single read at 1 GiB; see maxRW above.
        p = p[:maxRW]
    }
    for {
        n, err := syscall.Read(fd.Sysfd, p)
        if err != nil {
            n = 0
            // EAGAIN on a pollable fd: park in the netpoller and retry.
            if err == syscall.EAGAIN && fd.pd.pollable() {
                if err = fd.pd.waitRead(fd.isFile); err == nil {
                    continue
                }
            }
            // On MacOS we can see EINTR here if the user
            // pressed ^Z. See issue #22838.
            if runtime.GOOS == "darwin" && err == syscall.EINTR {
                continue
            }
        }
        err = fd.eofError(n, err)
        return n, err
    }
}
// Pread wraps the pread system call.
func (fd *FD) Pread(p []byte, off int64) (int, error) {
    // Call incref, not readLock, because since pread specifies the
    // offset it is independent from other reads.
    // Similarly, using the poller doesn't make sense for pread.
    if err := fd.incref(); err != nil {
        return 0, err
    }
    if fd.IsStream && len(p) > maxRW {
        // Cap a single read at 1 GiB; see maxRW above.
        p = p[:maxRW]
    }
    n, err := syscall.Pread(fd.Sysfd, p, off)
    if err != nil {
        n = 0
    }
    fd.decref()
    err = fd.eofError(n, err)
    return n, err
}
// ReadFrom wraps the recvfrom network call.
func (fd *FD) ReadFrom(p []byte) (int, syscall.Sockaddr, error) {
    if err := fd.readLock(); err != nil {
        return 0, nil, err
    }
    defer fd.readUnlock()
    if err := fd.pd.prepareRead(fd.isFile); err != nil {
        return 0, nil, err
    }
    for {
        n, sa, err := syscall.Recvfrom(fd.Sysfd, p, 0)
        if err != nil {
            n = 0
            // EAGAIN on a pollable fd: park in the netpoller and retry.
            if err == syscall.EAGAIN && fd.pd.pollable() {
                if err = fd.pd.waitRead(fd.isFile); err == nil {
                    continue
                }
            }
        }
        err = fd.eofError(n, err)
        return n, sa, err
    }
}
// ReadMsg wraps the recvmsg network call.
// It returns the payload length, out-of-band length, flags and peer address.
func (fd *FD) ReadMsg(p []byte, oob []byte) (int, int, int, syscall.Sockaddr, error) {
    if err := fd.readLock(); err != nil {
        return 0, 0, 0, nil, err
    }
    defer fd.readUnlock()
    if err := fd.pd.prepareRead(fd.isFile); err != nil {
        return 0, 0, 0, nil, err
    }
    for {
        n, oobn, flags, sa, err := syscall.Recvmsg(fd.Sysfd, p, oob, 0)
        if err != nil {
            // TODO(dfc) should n and oobn be set to 0
            // EAGAIN on a pollable fd: park in the netpoller and retry.
            if err == syscall.EAGAIN && fd.pd.pollable() {
                if err = fd.pd.waitRead(fd.isFile); err == nil {
                    continue
                }
            }
        }
        err = fd.eofError(n, err)
        return n, oobn, flags, sa, err
    }
}
// Write implements io.Writer.
// It loops until all of p is written, an error occurs, or the write
// would block and the poller reports an error while waiting.
func (fd *FD) Write(p []byte) (int, error) {
    if err := fd.writeLock(); err != nil {
        return 0, err
    }
    defer fd.writeUnlock()
    if err := fd.pd.prepareWrite(fd.isFile); err != nil {
        return 0, err
    }
    var nn int // total bytes written so far
    for {
        max := len(p)
        if fd.IsStream && max-nn > maxRW {
            // Cap each chunk at 1 GiB; see maxRW above.
            max = nn + maxRW
        }
        n, err := syscall.Write(fd.Sysfd, p[nn:max])
        if n > 0 {
            nn += n
        }
        if nn == len(p) {
            return nn, err
        }
        if err == syscall.EAGAIN && fd.pd.pollable() {
            if err = fd.pd.waitWrite(fd.isFile); err == nil {
                continue
            }
        }
        if err != nil {
            return nn, err
        }
        if n == 0 {
            // A zero-length write with no error should not happen;
            // treat it as an unexpected EOF rather than spinning.
            return nn, io.ErrUnexpectedEOF
        }
    }
}
// Pwrite wraps the pwrite system call.
func (fd *FD) Pwrite(p []byte, off int64) (int, error) {
    // Call incref, not writeLock, because since pwrite specifies the
    // offset it is independent from other writes.
    // Similarly, using the poller doesn't make sense for pwrite.
    if err := fd.incref(); err != nil {
        return 0, err
    }
    defer fd.decref()
    var nn int // total bytes written so far
    for {
        max := len(p)
        if fd.IsStream && max-nn > maxRW {
            // Cap each chunk at 1 GiB; see maxRW above.
            max = nn + maxRW
        }
        n, err := syscall.Pwrite(fd.Sysfd, p[nn:max], off+int64(nn))
        if n > 0 {
            nn += n
        }
        if nn == len(p) {
            return nn, err
        }
        if err != nil {
            return nn, err
        }
        if n == 0 {
            // Zero-length write with no error: avoid spinning forever.
            return nn, io.ErrUnexpectedEOF
        }
    }
}
// WriteTo wraps the sendto network call.
// On success it reports len(p): sendto on a datagram socket either
// sends the whole payload or fails.
func (fd *FD) WriteTo(p []byte, sa syscall.Sockaddr) (int, error) {
    if err := fd.writeLock(); err != nil {
        return 0, err
    }
    defer fd.writeUnlock()
    if err := fd.pd.prepareWrite(fd.isFile); err != nil {
        return 0, err
    }
    for {
        err := syscall.Sendto(fd.Sysfd, p, 0, sa)
        // EAGAIN on a pollable fd: park in the netpoller and retry.
        if err == syscall.EAGAIN && fd.pd.pollable() {
            if err = fd.pd.waitWrite(fd.isFile); err == nil {
                continue
            }
        }
        if err != nil {
            return 0, err
        }
        return len(p), nil
    }
}
// WriteMsg wraps the sendmsg network call.
// It returns the number of payload bytes and out-of-band bytes written.
func (fd *FD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (int, int, error) {
    if err := fd.writeLock(); err != nil {
        return 0, 0, err
    }
    defer fd.writeUnlock()
    if err := fd.pd.prepareWrite(fd.isFile); err != nil {
        return 0, 0, err
    }
    for {
        n, err := syscall.SendmsgN(fd.Sysfd, p, oob, sa, 0)
        // EAGAIN on a pollable fd: park in the netpoller and retry.
        if err == syscall.EAGAIN && fd.pd.pollable() {
            if err = fd.pd.waitWrite(fd.isFile); err == nil {
                continue
            }
        }
        if err != nil {
            return n, 0, err
        }
        return n, len(oob), err
    }
}
// Accept wraps the accept network call.
// It returns the new descriptor, the peer address, and on failure the
// name of the syscall that produced the error.
func (fd *FD) Accept() (int, syscall.Sockaddr, string, error) {
    if err := fd.readLock(); err != nil {
        return -1, nil, "", err
    }
    defer fd.readUnlock()
    if err := fd.pd.prepareRead(fd.isFile); err != nil {
        return -1, nil, "", err
    }
    for {
        s, rsa, errcall, err := accept(fd.Sysfd)
        if err == nil {
            return s, rsa, "", err
        }
        switch err {
        case syscall.EAGAIN:
            // Nothing queued yet: park in the netpoller and retry.
            if fd.pd.pollable() {
                if err = fd.pd.waitRead(fd.isFile); err == nil {
                    continue
                }
            }
        case syscall.ECONNABORTED:
            // This means that a socket on the listen
            // queue was closed before we Accept()ed it;
            // it's a silly error, so try again.
            continue
        }
        return -1, nil, errcall, err
    }
}
// Seek wraps syscall.Seek.
func (fd *FD) Seek(offset int64, whence int) (int64, error) {
    err := fd.incref()
    if err != nil {
        return 0, err
    }
    defer fd.decref()
    return syscall.Seek(fd.Sysfd, offset, whence)
}
// ReadDirent wraps syscall.ReadDirent.
// We treat this like an ordinary system call rather than a call
// that tries to fill the buffer.
func (fd *FD) ReadDirent(buf []byte) (int, error) {
    if err := fd.incref(); err != nil {
        return 0, err
    }
    defer fd.decref()
    for {
        n, err := syscall.ReadDirent(fd.Sysfd, buf)
        if err != nil {
            n = 0
            // EAGAIN on a pollable fd: park in the netpoller and retry.
            if err == syscall.EAGAIN && fd.pd.pollable() {
                if err = fd.pd.waitRead(fd.isFile); err == nil {
                    continue
                }
            }
        }
        // Do not call eofError; caller does not expect to see io.EOF.
        return n, err
    }
}
// Fchdir wraps syscall.Fchdir.
func (fd *FD) Fchdir() error {
    err := fd.incref()
    if err != nil {
        return err
    }
    defer fd.decref()
    return syscall.Fchdir(fd.Sysfd)
}
// Fstat wraps syscall.Fstat.
func (fd *FD) Fstat(s *syscall.Stat_t) error {
    err := fd.incref()
    if err != nil {
        return err
    }
    defer fd.decref()
    return syscall.Fstat(fd.Sysfd, s)
}
// tryDupCloexec indicates whether F_DUPFD_CLOEXEC should be used.
// If the kernel doesn't support it, this is set to 0.
// Accessed atomically from DupCloseOnExec.
var tryDupCloexec = int32(1)
// DupCloseOnExec dups fd and marks it close-on-exec.
// On failure it returns the name of the failing syscall alongside the error.
func DupCloseOnExec(fd int) (int, string, error) {
    if atomic.LoadInt32(&tryDupCloexec) == 1 {
        // Fast path: one fcntl both dups and sets CLOEXEC atomically.
        r0, e1 := fcntl(fd, syscall.F_DUPFD_CLOEXEC, 0)
        if e1 == nil {
            return r0, "", nil
        }
        switch e1.(syscall.Errno) {
        case syscall.EINVAL, syscall.ENOSYS:
            // Old kernel, or js/wasm (which returns
            // ENOSYS). Fall back to the portable way from
            // now on.
            atomic.StoreInt32(&tryDupCloexec, 0)
        default:
            return -1, "fcntl", e1
        }
    }
    return dupCloseOnExecOld(fd)
}
// dupCloseOnExecOld is the traditional way to dup an fd and
// set its O_CLOEXEC bit, using two system calls.
func dupCloseOnExecOld(fd int) (int, string, error) {
    // Hold ForkLock so no fork happens between Dup and CloseOnExec,
    // which would leak the fd into the child without CLOEXEC set.
    syscall.ForkLock.RLock()
    defer syscall.ForkLock.RUnlock()
    newfd, err := syscall.Dup(fd)
    if err != nil {
        return -1, "dup", err
    }
    syscall.CloseOnExec(newfd)
    return newfd, "", nil
}
// Dup duplicates the file descriptor.
func (fd *FD) Dup() (int, string, error) {
    err := fd.incref()
    if err != nil {
        return -1, "", err
    }
    defer fd.decref()
    return DupCloseOnExec(fd.Sysfd)
}
// On Unix variants only, expose the IO event for the net code.

// WaitWrite waits until data can be written to fd.
func (fd *FD) WaitWrite() error {
    return fd.pd.waitWrite(fd.isFile)
}
// WriteOnce is for testing only. It makes a single write call.
func (fd *FD) WriteOnce(p []byte) (int, error) {
    err := fd.writeLock()
    if err != nil {
        return 0, err
    }
    defer fd.writeUnlock()
    return syscall.Write(fd.Sysfd, p)
}
// RawControl invokes the user-defined function f for a non-IO
// operation.
func (fd *FD) RawControl(f func(uintptr)) error {
    err := fd.incref()
    if err != nil {
        return err
    }
    defer fd.decref()
    f(uintptr(fd.Sysfd))
    return nil
}
// RawRead invokes the user-defined function f for a read operation.
// f reports whether it is done; until then we wait for readability.
func (fd *FD) RawRead(f func(uintptr) bool) error {
    if err := fd.readLock(); err != nil {
        return err
    }
    defer fd.readUnlock()
    if err := fd.pd.prepareRead(fd.isFile); err != nil {
        return err
    }
    for {
        if f(uintptr(fd.Sysfd)) {
            return nil
        }
        if err := fd.pd.waitRead(fd.isFile); err != nil {
            return err
        }
    }
}
// RawWrite invokes the user-defined function f for a write operation.
// f reports whether it is done; until then we wait for writability.
func (fd *FD) RawWrite(f func(uintptr) bool) error {
    if err := fd.writeLock(); err != nil {
        return err
    }
    defer fd.writeUnlock()
    if err := fd.pd.prepareWrite(fd.isFile); err != nil {
        return err
    }
    for {
        if f(uintptr(fd.Sysfd)) {
            return nil
        }
        if err := fd.pd.waitWrite(fd.isFile); err != nil {
            return err
        }
    }
}
| 1.765625 | 2 |
appgo/pkg/mus/caller.go | goecology/ecology | 4 | 277 | package mus
import (
"time"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
"github.com/i2eco/muses/pkg/cache/mixcache"
mmysql "github.com/i2eco/muses/pkg/database/mysql"
"github.com/i2eco/muses/pkg/logger"
"github.com/i2eco/muses/pkg/open/github"
"github.com/i2eco/muses/pkg/oss"
musgin "github.com/i2eco/muses/pkg/server/gin"
"github.com/i2eco/muses/pkg/session/ginsession"
"github.com/jinzhu/gorm"
)
// Shared service clients and handlers, populated once by Init and read
// by the rest of the application.
var (
    Cfg musgin.Cfg
    Logger *logger.Client
    Gin *gin.Engine
    Db *gorm.DB
    Session gin.HandlerFunc
    Oss *oss.Client
    Mixcache *mixcache.Client
    GithubClient *github.Client
    JsonRestyClient *resty.Client // HTTP client preset for JSON bodies
    FormRestyClient *resty.Client // HTTP client preset for multipart form bodies
)
// Init initializes the muses-related containers: config, database, logger,
// HTTP engine, object storage, cache, session middleware and REST clients.
// It must be called before any of the package-level variables are used.
func Init() error {
    Cfg = musgin.Config()
    Db = mmysql.Caller("ecology")
    Logger = logger.Caller("system")
    Gin = musgin.Caller()
    Oss = oss.Caller("ecology")
    Mixcache = mixcache.Caller("ecology")
    Session = ginsession.Caller()
    // Debug mode logs every request/response; timeouts differ per content type.
    FormRestyClient = resty.New().SetDebug(true).SetTimeout(3*time.Second).SetHeader("Content-Type", "multipart/form-data")
    JsonRestyClient = resty.New().SetDebug(true).SetTimeout(10*time.Second).SetHeader("Content-Type", "application/json;charset=utf-8")
    GithubClient = github.Caller()
    return nil
}
| 1.195313 | 1 |
libs/cosmos-sdk/x/gov/types/msgs_test.go | tokenchain/exchain | 162 | 285 | package types
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
sdk "github.com/okex/exchain/libs/cosmos-sdk/types"
)
// Shared fixtures for the message validation tests below.
var (
    coinsPos   = sdk.NewCoins(sdk.NewInt64Coin(sdk.DefaultBondDenom, 1000))
    coinsZero  = sdk.NewCoins() // empty coin set
    coinsMulti = sdk.NewCoins(sdk.NewInt64Coin(sdk.DefaultBondDenom, 1000), sdk.NewInt64Coin("foo", 10000))
    addrs      = []sdk.AccAddress{
        sdk.AccAddress("test1"),
        sdk.AccAddress("test2"),
    }
)
// init sorts the multi-denom fixture so it satisfies the sdk.Coins
// canonical-order invariant expected by ValidateBasic.
func init() {
    coinsMulti.Sort()
}
// test ValidateBasic for MsgCreateValidator
func TestMsgSubmitProposal(t *testing.T) {
tests := []struct {
title, description string
proposalType string
proposerAddr sdk.AccAddress
initialDeposit sdk.Coins
expectPass bool
}{
{"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsPos, true},
{"", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsPos, false},
{"Test Proposal", "", ProposalTypeText, addrs[0], coinsPos, false},
{"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, sdk.AccAddress{}, coinsPos, false},
{"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsZero, true},
{"Test Proposal", "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsMulti, true},
{strings.Repeat("#", MaxTitleLength*2), "the purpose of this proposal is to test", ProposalTypeText, addrs[0], coinsMulti, false},
{"Test Proposal", strings.Repeat("#", MaxDescriptionLength*2), ProposalTypeText, addrs[0], coinsMulti, false},
}
for i, tc := range tests {
msg := NewMsgSubmitProposal(
ContentFromProposalType(tc.title, tc.description, tc.proposalType),
tc.initialDeposit,
tc.proposerAddr,
)
if tc.expectPass {
require.NoError(t, msg.ValidateBasic(), "test: %v", i)
} else {
require.Error(t, msg.ValidateBasic(), "test: %v", i)
}
}
}
// TestMsgDepositGetSignBytes pins the canonical JSON sign-bytes encoding
// of MsgDeposit; any change to it would break existing signatures.
func TestMsgDepositGetSignBytes(t *testing.T) {
    addr := sdk.AccAddress("addr1")
    msg := NewMsgDeposit(addr, 0, coinsPos)
    res := msg.GetSignBytes()
    expected := `{"type":"cosmos-sdk/MsgDeposit","value":{"amount":[{"amount":"1000.000000000000000000","denom":"okt"}],"depositor":"cosmos1v9jxgu33kfsgr5","proposal_id":"0"}}`
    require.Equal(t, expected, string(res))
}
// test ValidateBasic for MsgDeposit
func TestMsgDeposit(t *testing.T) {
    tests := []struct {
        proposalID    uint64
        depositorAddr sdk.AccAddress
        depositAmount sdk.Coins
        expectPass    bool
    }{
        {0, addrs[0], coinsPos, true},
        // An empty depositor address is invalid.
        {1, sdk.AccAddress{}, coinsPos, false},
        {1, addrs[0], coinsZero, true},
        {1, addrs[0], coinsMulti, true},
    }
    for i, tc := range tests {
        msg := NewMsgDeposit(tc.depositorAddr, tc.proposalID, tc.depositAmount)
        if tc.expectPass {
            require.NoError(t, msg.ValidateBasic(), "test: %v", i)
        } else {
            require.Error(t, msg.ValidateBasic(), "test: %v", i)
        }
    }
}
// test ValidateBasic for MsgVote
func TestMsgVote(t *testing.T) {
    tests := []struct {
        proposalID uint64
        voterAddr  sdk.AccAddress
        option     VoteOption
        expectPass bool
    }{
        {0, addrs[0], OptionYes, true},
        // An empty voter address is invalid.
        {0, sdk.AccAddress{}, OptionYes, false},
        {0, addrs[0], OptionNo, true},
        {0, addrs[0], OptionNoWithVeto, true},
        {0, addrs[0], OptionAbstain, true},
        // An out-of-range option value must be rejected.
        {0, addrs[0], VoteOption(0x13), false},
    }
    for i, tc := range tests {
        msg := NewMsgVote(tc.voterAddr, tc.proposalID, tc.option)
        if tc.expectPass {
            require.Nil(t, msg.ValidateBasic(), "test: %v", i)
        } else {
            require.NotNil(t, msg.ValidateBasic(), "test: %v", i)
        }
    }
}
| 1.507813 | 2 |
agent/input/telegram/conn.go | 2733284198/go-micro | 1 | 293 | package telegram
import (
"errors"
"strings"
"sync"
"github.com/forestgiant/sliceutil"
"github.com/micro/go-micro/v2/agent/input"
log "github.com/micro/go-micro/v2/logger"
tgbotapi "gopkg.in/telegram-bot-api.v4"
)
// telegramConn adapts a Telegram bot update stream to the agent input
// Conn interface (Recv/Send/Close).
type telegramConn struct {
    input *telegramInput
    recv <-chan tgbotapi.Update // update stream; set asynchronously by run
    exit chan bool              // NOTE(review): never initialized or closed in this file — confirm intended
    syncCond *sync.Cond // signals Recv once recv has been set by run
    mutex sync.Mutex
}
// newConn builds a telegramConn for the given input and starts its
// background update-pump goroutine. The returned error is always nil.
func newConn(input *telegramInput) (*telegramConn, error) {
    conn := &telegramConn{
        input: input,
    }
    conn.syncCond = sync.NewCond(&conn.mutex)
    go conn.run()
    return conn, nil
}
// run subscribes to the bot's long-poll update channel, publishes it on
// tc.recv, wakes any waiting Recv, then blocks until tc.exit fires.
func (tc *telegramConn) run() {
    u := tgbotapi.NewUpdate(0)
    u.Timeout = 60
    updates, err := tc.input.api.GetUpdatesChan(u)
    if err != nil {
        // NOTE(review): Recv waits on syncCond; returning here without a
        // Signal can leave Recv blocked forever — confirm intended.
        return
    }
    tc.recv = updates
    // NOTE(review): Signal without holding tc.mutex can race with Recv's
    // Wait and lose the wakeup — confirm against sync.Cond contract.
    tc.syncCond.Signal()
    // Block until exit; equivalent to <-tc.exit.
    select {
    case <-tc.exit:
        return
    }
}
// Close implements the Conn interface. It is currently a no-op: the
// run goroutine is not stopped here (tc.exit is never signaled).
func (tc *telegramConn) Close() error {
    return nil
}
// Recv blocks until a whitelisted text message arrives and fills event
// with its sender, text and chat metadata for later replies via Send.
func (tc *telegramConn) Recv(event *input.Event) error {
    if event == nil {
        return errors.New("event cannot be nil")
    }
    for {
        if tc.recv == nil {
            // Wait for run() to publish the update channel.
            // NOTE(review): the mutex is locked here and never unlocked
            // after Wait returns, and tc.recv is read without the lock —
            // confirm this synchronization is intended.
            tc.mutex.Lock()
            tc.syncCond.Wait()
        }
        update := <-tc.recv
        // Drop non-message updates and senders outside the whitelist.
        if update.Message == nil || (len(tc.input.whitelist) > 0 && !sliceutil.Contains(tc.input.whitelist, update.Message.From.UserName)) {
            continue
        }
        if event.Meta == nil {
            event.Meta = make(map[string]interface{})
        }
        event.Type = input.TextEvent
        event.From = update.Message.From.UserName
        event.To = tc.input.api.Self.UserName
        event.Data = []byte(update.Message.Text)
        // Needed by Send to address the reply.
        event.Meta["chatId"] = update.Message.Chat.ID
        event.Meta["chatType"] = update.Message.Chat.Type
        event.Meta["messageId"] = update.Message.MessageID
        return nil
    }
}
// Send delivers event.Data as an HTML-formatted message back to the chat
// recorded in event.Meta. In group chats it replies to the original message.
func (tc *telegramConn) Send(event *input.Event) error {
    messageText := strings.TrimSpace(string(event.Data))
    chatId := event.Meta["chatId"].(int64)
    chatType := ChatType(event.Meta["chatType"].(string))
    msgConfig := tgbotapi.NewMessage(chatId, messageText)
    msgConfig.ParseMode = tgbotapi.ModeHTML
    if sliceutil.Contains([]ChatType{Group, Supergroup}, chatType) {
        msgConfig.ReplyToMessageID = event.Meta["messageId"].(int)
    }
    _, err := tc.input.api.Send(msgConfig)
    if err != nil {
        // probably it could be because of nested HTML tags -- telegram doesn't allow nested tags
        log.Error("[telegram][Send] error:", err)
        // Best-effort fallback so the user still gets a response.
        msgConfig.Text = "This bot couldn't send the response (Internal error)"
        tc.input.api.Send(msgConfig)
    }
    return nil
}
| 1.367188 | 1 |
pkg/scheme/value.go | liuzhen21/core | 21 | 301 | /*
Copyright 2021 The tKeel Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheme
import (
"encoding/json"
logf "github.com/tkeel-io/core/pkg/logfield"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
xerrors "github.com/tkeel-io/core/pkg/errors"
"github.com/tkeel-io/core/pkg/util"
"github.com/tkeel-io/kit/log"
)
// Property type names and the keys used inside a Config's Define map.
const (
    PropertyTypeInt    = "int"
    PropertyTypeBool   = "bool"
    PropertyTypeFloat  = "float"
    PropertyTypeDouble = "double"
    PropertyTypeString = "string"
    PropertyTypeArray  = "array"
    PropertyTypeStruct = "struct"

    DefineFieldArrayLength  = "length"    // array: element count
    DefineFieldArrayElemCfg = "elem_type" // array: element Config
    DefineFieldStructFields = "fields"    // struct: map of field id -> Config
)
// Config describes one property in an entity scheme: its type, flags and
// a type-specific Define payload (array length/element or struct fields).
type Config struct {
    ID                string                 `json:"id" mapstructure:"id"`
    Type              string                 `json:"type" mapstructure:"type"` // one of the PropertyType* constants
    Name              string                 `json:"name" mapstructure:"name"`
    Weight            int                    `json:"weight" mapstructure:"weight"`
    Enabled           bool                   `json:"enabled" mapstructure:"enabled"`
    EnabledSearch     bool                   `json:"enabled_search" mapstructure:"enabled_search"`
    EnabledTimeSeries bool                   `json:"enabled_time_series" mapstructure:"enabled_time_series"`
    Description       string                 `json:"description" mapstructure:"description"`
    Define            map[string]interface{} `json:"define" mapstructure:"define"` // keys: DefineField* constants
    LastTime          int64                  `json:"last_time" mapstructure:"last_time"`
}
// getArrayDefine extracts the array definition from cfg.Define.
// NOTE(review): the type assertions expect in-memory int/Config values;
// for JSON-decoded Define maps they would fail and yield zero values —
// confirm callers only use this on programmatically built configs.
func (cfg *Config) getArrayDefine() DefineArray {
    length, _ := cfg.Define[DefineFieldArrayLength].(int)
    etype, _ := cfg.Define[DefineFieldArrayElemCfg].(Config)
    return DefineArray{Length: length, ElemType: etype}
}
// getStructDefine extracts the struct definition from cfg.Define,
// lazily installing an empty fields map if one is not present yet.
func (cfg *Config) getStructDefine() DefineStruct {
    fields, ok := cfg.Define[DefineFieldStructFields].(map[string]Config)
    if !ok {
        fields = make(map[string]Config)
        cfg.Define[DefineFieldStructFields] = fields
    }
    return DefineStruct{Fields: fields}
}
// GetConfig resolves the config node addressed by segs starting at index.
// It is the exported wrapper around getConfig; see getConfig for the
// meaning of the returned index and error.
func (cfg *Config) GetConfig(segs []string, index int) (int, *Config, error) {
    return cfg.getConfig(segs, index)
}
// getConfig recursively walks the struct-field tree along segs starting
// at index. It returns the deepest node reached, the index at which the
// walk stopped, and an error when a segment crosses a non-struct node
// (ErrPatchTypeInvalid) or names a missing field (ErrPatchPathLack).
func (cfg *Config) getConfig(segs []string, index int) (int, *Config, error) {
	// All segments consumed: cfg is the addressed node.
	if index >= len(segs) {
		return index, cfg, nil
	}
	// Only struct nodes can be descended into.
	if cfg.Type != PropertyTypeStruct {
		return index, cfg, xerrors.ErrPatchTypeInvalid
	}
	child, ok := cfg.getStructDefine().Fields[segs[index]]
	if !ok {
		return index, cfg, xerrors.ErrPatchPathLack
	}
	return child.getConfig(segs, index+1)
}
// AppendField inserts (or replaces) field c, keyed by c.ID, in this
// struct-typed config. It fails with ErrInvalidNodeType when cfg is not
// a struct node.
func (cfg *Config) AppendField(c Config) error {
	if cfg.Type != PropertyTypeStruct {
		return xerrors.ErrInvalidNodeType
	}
	cfg.getStructDefine().Fields[c.ID] = c
	return nil
}
// RemoveField deletes the field with the given id from this struct-typed
// config. Deleting a missing id is a no-op. It fails with
// ErrInvalidNodeType when cfg is not a struct node.
func (cfg *Config) RemoveField(id string) error {
	if cfg.Type != PropertyTypeStruct {
		return xerrors.ErrInvalidNodeType
	}
	delete(cfg.getStructDefine().Fields, id)
	return nil
}
// DefineStruct is the parsed form of a struct-typed property definition:
// a map from field ID to that field's own Config.
type DefineStruct struct {
	Fields map[string]Config `json:"fields" mapstructure:"fields"`
}
// newDefineStruct builds an empty DefineStruct with a non-nil,
// ready-to-populate Fields map.
func newDefineStruct() DefineStruct {
	var ds DefineStruct
	ds.Fields = map[string]Config{}
	return ds
}
// DefineArray is the parsed form of an array-typed property definition:
// a fixed element count plus the element type's Config.
type DefineArray struct {
	Length   int    `json:"length" mapstructure:"length"`
	ElemType Config `json:"elem_type" mapstructure:"elem_type"`
}
// Parse decodes a JSON document mapping config IDs to raw property
// configs and returns the successfully parsed entries keyed by ID.
// Entries that fail to parse are logged and skipped rather than failing
// the whole document.
func Parse(bytes []byte) (map[string]*Config, error) {
	// parse state config again.
	configs := make(map[string]interface{})
	if err := json.Unmarshal(bytes, &configs); nil != err {
		log.L().Error("json unmarshal", logf.Error(err), logf.String("configs", string(bytes)))
		return nil, errors.Wrap(err, "json unmarshal")
	}

	cfgs := make(map[string]*Config, len(configs))
	for key, val := range configs {
		// BUG FIX: cfg must be declared per-iteration. The previous
		// version reused one variable declared outside the loop and
		// stored &cfg for every key, so every map entry aliased the
		// same Config — all values ended up equal to the last parsed one.
		cfg, err := ParseConfigFrom(val)
		if nil != err {
			// TODO: dispose error.
			log.L().Error("parse configs", logf.Error(err))
			continue
		}
		cfgs[key] = &cfg
	}
	return cfgs, nil
}
// ParseFrom unmarshals a single JSON-encoded property config and runs it
// through ParseConfigFrom for validation and normalization.
func ParseFrom(bytes []byte) (*Config, error) {
	raw := map[string]interface{}{}
	if err := json.Unmarshal(bytes, &raw); nil != err {
		log.L().Error("unmarshal Config", logf.Error(err))
		return nil, errors.Wrap(err, "unmarshal Config")
	}
	cfg, err := ParseConfigFrom(raw)
	return &cfg, errors.Wrap(err, "parse Config")
}
// ParseConfigFrom decodes an arbitrary value (typically a
// map[string]interface{} from JSON) into a Config and normalizes it via
// parseField. On any failure the zero Config is returned with a wrapped
// error.
func ParseConfigFrom(data interface{}) (cfg Config, err error) {
	var decoded Config
	if err = mapstructure.Decode(data, &decoded); nil != err {
		return cfg, errors.Wrap(err, "decode property config failed")
	}
	decoded, err = parseField(decoded)
	if nil != err {
		return cfg, errors.Wrap(err, "parse config failed")
	}
	return decoded, nil
}
// parseField validates and normalizes a single property Config by type:
// scalars pass through, arrays must have a positive length and get their
// element type recursively normalized, and structs get every field
// recursively normalized with its map key written back as the field ID.
// Unknown types yield ErrEntityConfigInvalid. The last-modified timestamp
// is backfilled when unset.
func parseField(in Config) (out Config, err error) {
	switch in.Type {
	case PropertyTypeInt, PropertyTypeBool, PropertyTypeFloat,
		PropertyTypeDouble, PropertyTypeString:
		// Scalar types carry no nested definition to process.
	case PropertyTypeArray:
		arrDefine := DefineArray{}
		if err = mapstructure.Decode(in.Define, &arrDefine); nil != err {
			return out, errors.Wrap(err, "parse property config failed")
		} else if arrDefine.Length <= 0 {
			return out, xerrors.ErrEntityConfigInvalid
		}
		// Any error from normalizing the element type is carried to the
		// final wrapped return below, matching the original behavior.
		arrDefine.ElemType, err = parseField(arrDefine.ElemType)
		// in.Define is non-nil here: a nil Define would have decoded to
		// Length == 0 and returned ErrEntityConfigInvalid above.
		in.Define[DefineFieldArrayElemCfg] = arrDefine.ElemType
	case PropertyTypeStruct:
		jsonDefine, jsonDefine2 := newDefineStruct(), newDefineStruct()
		if err = mapstructure.Decode(in.Define, &jsonDefine); nil != err {
			return out, errors.Wrap(err, "parse property config failed")
		}
		for cfgID, field := range jsonDefine.Fields {
			var cfg Config
			if cfg, err = parseField(field); nil != err {
				return out, errors.Wrap(err, "parse property config failed")
			}
			cfg.ID = cfgID
			jsonDefine2.Fields[cfgID] = cfg
		}
		// BUG FIX: a struct config with no "define" key arrives with a
		// nil Define map; writing into it would panic. Allocate first.
		if in.Define == nil {
			in.Define = make(map[string]interface{})
		}
		in.Define[DefineFieldStructFields] = jsonDefine2.Fields
	default:
		return out, xerrors.ErrEntityConfigInvalid
	}
	in.LastTime = lastTimestamp(in.LastTime)
	return in, errors.Wrap(err, "parse property config failed")
}
// lastTimestamp returns the given unix-millisecond timestamp unchanged,
// or the current time in unix milliseconds when the input is zero.
func lastTimestamp(timestamp int64) int64 {
	if timestamp != 0 {
		return timestamp
	}
	return util.UnixMilli()
}
| 0.964844 | 1 |
apis/contact/func_all_test.go | jpbede/go-autodns- | 0 | 309 | package contact_test
import (
"context"
"github.com/stretchr/testify/assert"
"go.bnck.me/autodns/apis/contact"
"go.bnck.me/autodns/internal/transport"
"net/http"
"net/http/httptest"
"testing"
)
// TestClient_All verifies that All issues a POST to /contact/_search and
// decodes the server's contact list into exactly one entry.
func TestClient_All(t *testing.T) {
	handler := func(rw http.ResponseWriter, req *http.Request) {
		// The client must hit the search endpoint with a POST.
		assert.Equal(t, "/contact/_search", req.URL.String())
		assert.Equal(t, http.MethodPost, req.Method)

		rw.Write([]byte("{\"stid\":\"20210312-app1-demo-55340\",\"status\":{\"code\":\"S0304\",\"text\":\"Die Daten des Domain-Kontaktes wurden erfolgreich ermittelt.\",\"type\":\"SUCCESS\"},\"object\":{\"type\":\"Contact\",\"summary\":1},\"data\":[{\"created\":\"2021-03-12T15:06:54.000+0100\",\"updated\":\"2021-03-12T22:07:49.000+0100\",\"id\":31364475,\"owner\":{\"context\":1,\"user\":\"2021_03_11_jpbe_la\"},\"alias\":\"<NAME>\",\"type\":\"PERSON\",\"organization\":\"\",\"title\":\"\",\"city\":\"Musterhausen\",\"country\":\"DE\",\"state\":\"DE\",\"fname\":\"Jan-Philipp\",\"lname\":\"Benecke\",\"address\":[\"Musterstraße 1\"],\"pcode\":\"12345\"}]}"))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))

	tr := transport.New(server.URL)
	tr.HTTPClient = server.Client()
	tr.Credentials = &transport.APICredentials{Username: "abc", Password: "<PASSWORD>", Context: 1}

	contacts, err := contact.New(tr).All(context.Background())
	assert.NoError(t, err)
	assert.NotNil(t, contacts)
	assert.Len(t, contacts, 1)
}
// TestClient_All_InvalidJson verifies that All surfaces the JSON decoding
// error when the server returns a non-JSON body.
func TestClient_All_InvalidJson(t *testing.T) {
	handler := func(rw http.ResponseWriter, req *http.Request) {
		rw.Write([]byte("no json"))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))

	tr := transport.New(server.URL)
	tr.HTTPClient = server.Client()
	tr.Credentials = &transport.APICredentials{Username: "abc", Password: "<PASSWORD>", Context: 1}

	_, err := contact.New(tr).All(context.Background())
	assert.Error(t, err)
	assert.EqualError(t, err, "invalid character 'o' in literal null (expecting 'u')")
}
| 1.46875 | 1 |
End of preview. Expand
in Dataset Viewer.
README.md exists but content is empty.
Use the Edit dataset card button to edit it.
- Downloads last month
- 852