column                                       dtype           range / values
hexsha                                       stringlengths   40 .. 40
size                                         int64           2 .. 1.04M
ext                                          stringclasses   6 values
lang                                         stringclasses   1 value
max_stars_repo_path                          stringlengths   5 .. 244
max_stars_repo_name                          stringlengths   5 .. 96
max_stars_repo_head_hexsha                   stringlengths   40 .. 40
max_stars_repo_licenses                      sequence
max_stars_count                              int64           1 .. 84.9k
max_stars_repo_stars_event_min_datetime      stringlengths   24 .. 24
max_stars_repo_stars_event_max_datetime      stringlengths   24 .. 24
max_issues_repo_path                         stringlengths   5 .. 244
max_issues_repo_name                         stringlengths   5 .. 96
max_issues_repo_head_hexsha                  stringlengths   40 .. 40
max_issues_repo_licenses                     sequence
max_issues_count                             int64           1 .. 98.3k
max_issues_repo_issues_event_min_datetime    stringlengths   24 .. 24
max_issues_repo_issues_event_max_datetime    stringlengths   24 .. 24
max_forks_repo_path                          stringlengths   5 .. 244
max_forks_repo_name                          stringlengths   5 .. 96
max_forks_repo_head_hexsha                   stringlengths   40 .. 40
max_forks_repo_licenses                      sequence
max_forks_count                              int64           1 .. 36.6k
max_forks_repo_forks_event_min_datetime      stringlengths   24 .. 24
max_forks_repo_forks_event_max_datetime      stringlengths   24 .. 24
content                                      stringlengths   2 .. 1.04M
avg_line_length                              float64         1 .. 11.9k
max_line_length                              int64           1 .. 548k
alphanum_fraction                            float64         0 .. 1
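The columns above read like the schema of a per-file code-dataset export: a content hash, file size, extension and language, repository/path/license metadata for the most-starred, most-issued and most-forked repos containing the file, the raw file content, and three simple text statistics. The sketch below shows one way such records could be consumed, assuming they are available as JSON Lines keyed by these column names; the file name, function names and star threshold are illustrative assumptions, not part of the source.

import json

def load_records(path):
    # One JSON object per line, keyed by the column names listed above.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

def starred_yaml_files(records, min_stars=1):
    # Keep YAML files whose most-starred repository has at least `min_stars`
    # stars; the star fields may be null, as in the records shown below.
    for rec in records:
        if rec.get("ext") != "yaml":
            continue
        stars = rec.get("max_stars_count")
        if stars is not None and stars >= min_stars:
            yield rec["max_stars_repo_name"], rec["max_stars_repo_path"], stars

# Hypothetical usage:
# for name, path, stars in starred_yaml_files(load_records("records.jsonl")):
#     print(name, path, stars)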
14896b490701f8a8f5f0fe1da14267db97f18c30
313
yaml
YAML
monitoring/ingress/ingressContorller-serviceMonior.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingress/ingressContorller-serviceMonior.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingress/ingressContorller-serviceMonior.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    app: ingress-nginx
  name: ingress-nginx
  namespace: monitoring
spec:
  endpoints:
  - interval: 15s
    port: ingress-nginx
  namespaceSelector:
    matchNames:
    - ingress
  selector:
    matchLabels:
      app: ingress-nginx
17.388889
36
0.70607
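The last three values of each record (17.388889, 36 and 0.70607 here) correspond to the avg_line_length, max_line_length and alphanum_fraction columns. A plausible way these per-file statistics are derived from content is sketched below; the exact definitions used when the dataset was built are an assumption.

def content_stats(content: str) -> dict:
    # Plausible reconstruction of the per-file statistics; the precise
    # definitions used by the dataset builder are an assumption.
    lines = content.split("\n")
    return {
        "avg_line_length": sum(len(line) for line in lines) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        # Fraction of all characters that are alphanumeric.
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }

# Example on a tiny two-line snippet.
print(content_stats("apiVersion: v1\nkind: Service"))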
5c478d76b87aeee48870b81778ab610403a8d00b
202
yaml
YAML
monitoring/etcd/etcd-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/etcd/etcd-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/etcd/etcd-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: etcd
  labels:
    k8s-app: etcd
spec:
  selector:
    component: etcd
  ports:
  - name: port
    port: 2381
    targetPort: 2381
14.428571
24
0.663366
dc88dc33e62f1bfa144e494eb4b0666a802a68a4
336
yaml
YAML
monitoring/JVM/jvm-ServiceMonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/jvm-ServiceMonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/jvm-ServiceMonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: jvm-monitor
  namespace: monitoring
  labels:
    app: pod-jvm
spec:
  #jobLabel: app
  endpoints:
  - port: jvm-monitor
    interval: 15s
    scheme: http
    path: '/metrics'
  selector:
    matchLabels:
      app: pod-jvm
  namespaceSelector:
    any: true
16.8
36
0.675595
53c2278bb7113adf2a1ee7ba83768fd772dde3e3
22,814
yaml
YAML
ingress/values.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
ingress/values.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
ingress/values.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
## nginx configuration ## Ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/index.md ## ## Overrides for generated resource names # See templates/_helpers.tpl # nameOverride: # fullnameOverride: controller: name: controller image: repository: registry.cn-beijing.aliyuncs.com/openacl/ingress-nginx-controller tag: "v0.45.0" pullPolicy: IfNotPresent # www-data -> uid 101 runAsUser: 101 allowPrivilegeEscalation: true # Use an existing PSP instead of creating one existingPsp: "" # Configures the ports the nginx-controller listens on containerPort: http: 80 https: 443 # Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ config: {} ## Annotations to be added to the controller config configuration configmap ## configAnnotations: {} # Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/master/docs/examples/customization/custom-headers proxySetHeaders: {} # Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers addHeaders: {} # Optionally customize the pod dnsConfig. dnsConfig: {} # Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. dnsPolicy: ClusterFirstWithHostNet # Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network # Ingress status was blank because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply reportNodeInternalIp: false # Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 # is merged hostNetwork: true ## Use host ports 80 and 443 ## Disabled by default ## hostPort: enabled: false ports: http: 80 https: 443 ## Election ID to use for status update ## electionID: ingress-controller-leader ## Name of the ingress class to route through this controller ## ingressClass: nginx # labels to add to the pod container metadata podLabels: {} # key: value ## Security Context policies for controller pods ## podSecurityContext: {} ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for ## notes on enabling and using sysctls ### sysctls: {} # sysctls: # "net.core.somaxconn": "8192" ## Allows customization of the source of the IP address or FQDN to report ## in the ingress status field. By default, it reads the information provided ## by the service. If disable, the status field reports the IP address of the ## node or nodes where an ingress controller pod is running. 
publishService: enabled: true ## Allows overriding of the publish service to bind to ## Must be <namespace>/<service_name> ## pathOverride: "" ## Limit the scope of the controller ## scope: enabled: false namespace: "" # defaults to .Release.Namespace ## Allows customization of the configmap / nginx-configmap namespace ## configMapNamespace: "" # defaults to .Release.Namespace ## Allows customization of the tcp-services-configmap ## tcp: configMapNamespace: "" # defaults to .Release.Namespace ## Annotations to be added to the tcp config configmap annotations: {} ## Allows customization of the udp-services-configmap ## udp: configMapNamespace: "" # defaults to .Release.Namespace ## Annotations to be added to the udp config configmap annotations: {} # Maxmind license key to download GeoLite2 Databases # https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases maxmindLicenseKey: "" ## Additional command line arguments to pass to nginx-ingress-controller ## E.g. to specify the default SSL certificate you can use ## extraArgs: ## default-ssl-certificate: "<namespace>/<secret_name>" extraArgs: {} ## Additional environment variables to set extraEnvs: [] # extraEnvs: # - name: FOO # valueFrom: # secretKeyRef: # key: FOO # name: secret-resource ## DaemonSet or Deployment ## kind: DaemonSet ## Annotations to be added to the controller Deployment or DaemonSet ## annotations: {} # keel.sh/pollSchedule: "@every 60m" ## Labels to be added to the controller Deployment or DaemonSet ## labels: {} # keel.sh/policy: patch # keel.sh/trigger: poll # The update strategy to apply to the Deployment or DaemonSet ## updateStrategy: {} # rollingUpdate: # maxUnavailable: 1 # type: RollingUpdate # minReadySeconds to avoid killing pods before we are ready ## minReadySeconds: 0 ## Node tolerations for server scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: [] # - key: "key" # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" ## Affinity and anti-affinity ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## affinity: {} # # An example of preferred pod anti-affinity, weight is in the range 1-100 # podAntiAffinity: # preferredDuringSchedulingIgnoredDuringExecution: # - weight: 100 # podAffinityTerm: # labelSelector: # matchExpressions: # - key: app.kubernetes.io/name # operator: In # values: # - ingress-nginx # - key: app.kubernetes.io/instance # operator: In # values: # - ingress-nginx # - key: app.kubernetes.io/component # operator: In # values: # - controller # topologyKey: kubernetes.io/hostname # # An example of required pod anti-affinity # podAntiAffinity: # requiredDuringSchedulingIgnoredDuringExecution: # - labelSelector: # matchExpressions: # - key: app.kubernetes.io/name # operator: In # values: # - ingress-nginx # - key: app.kubernetes.io/instance # operator: In # values: # - ingress-nginx # - key: app.kubernetes.io/component # operator: In # values: # - controller # topologyKey: "kubernetes.io/hostname" ## Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ ## topologySpreadConstraints: [] # - maxSkew: 1 # topologyKey: failure-domain.beta.kubernetes.io/zone # whenUnsatisfiable: DoNotSchedule # labelSelector: # matchLabels: # app.kubernetes.io/instance: ingress-nginx-internal ## terminationGracePeriodSeconds ## wait up to five minutes for the drain of connections ## terminationGracePeriodSeconds: 300 ## Node labels for controller pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: kubernetes.io/os: linux ## Liveness and readiness probe values ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes ## livenessProbe: failureThreshold: 5 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 port: 10254 readinessProbe: failureThreshold: 3 initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 port: 10254 # Path of the health check endpoint. All requests received on the port defined by # the healthz-port parameter are forwarded internally to this path. healthCheckPath: "/healthz" ## Annotations to be added to controller pods ## podAnnotations: {} replicaCount: 1 minAvailable: 1 # Define requests resources to avoid probe issues due to CPU utilization in busy nodes # ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 # Ideally, there should be no limits. # https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ resources: # limits: # cpu: 100m # memory: 90Mi requests: cpu: 100m memory: 90Mi # Mutually exclusive with keda autoscaling autoscaling: enabled: false minReplicas: 1 maxReplicas: 11 targetCPUUtilizationPercentage: 50 targetMemoryUtilizationPercentage: 50 autoscalingTemplate: [] # Custom or additional autoscaling metrics # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics # - type: Pods # pods: # metric: # name: nginx_ingress_controller_nginx_process_requests_total # target: # type: AverageValue # averageValue: 10000m # Mutually exclusive with hpa autoscaling keda: apiVersion: "keda.sh/v1alpha1" # apiVersion changes with keda 1.x vs 2.x # 2.x = keda.sh/v1alpha1 # 1.x = keda.k8s.io/v1alpha1 enabled: false minReplicas: 1 maxReplicas: 11 pollingInterval: 30 cooldownPeriod: 300 restoreToOriginalReplicaCount: false scaledObject: annotations: {} # Custom annotations for ScaledObject resource # annotations: # key: value triggers: [] # - type: prometheus # metadata: # serverAddress: http://<prometheus-host>:9090 # metricName: http_requests_total # threshold: '100' # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) behavior: {} # scaleDown: # stabilizationWindowSeconds: 300 # policies: # - type: Pods # value: 1 # periodSeconds: 180 # scaleUp: # stabilizationWindowSeconds: 300 # policies: # - type: Pods # value: 2 # periodSeconds: 60 ## Enable mimalloc as a drop-in replacement for malloc. 
## ref: https://github.com/microsoft/mimalloc ## enableMimalloc: true ## Override NGINX template customTemplate: configMapName: "" configMapKey: "" service: enabled: true annotations: {} labels: {} # clusterIP: "" ## List of IP addresses at which the controller services are available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] # loadBalancerIP: "" loadBalancerSourceRanges: [] enableHttp: true enableHttps: true ## Set external traffic policy to: "Local" to preserve source IP on ## providers supporting it ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer # externalTrafficPolicy: "" # Must be either "None" or "ClientIP" if set. Kubernetes will default to "None". # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies # sessionAffinity: "" # specifies the health check node port (numeric port number) for the service. If healthCheckNodePort isn’t specified, # the service controller allocates a port from your cluster’s NodePort range. # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip # healthCheckNodePort: 0 ports: http: 80 https: 443 targetPorts: http: http https: https type: ClusterIP # type: NodePort # nodePorts: # http: 32080 # https: 32443 # tcp: # 8080: 32808 nodePorts: http: "" https: "" tcp: {} udp: {} ## Enables an additional internal load balancer (besides the external one). ## Annotations are mandatory for the load balancer to come up. Varies with the cloud service. internal: enabled: false annotations: {} # loadBalancerIP: "" ## Restrict access For LoadBalancer service. Defaults to 0.0.0.0/0. loadBalancerSourceRanges: [] ## Set external traffic policy to: "Local" to preserve source IP on ## providers supporting it ## Ref: https://kubernetes.io/docs/tutorials/services/source-ip/#source-ip-for-services-with-typeloadbalancer # externalTrafficPolicy: "" extraContainers: [] ## Additional containers to be added to the controller pod. ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. # - name: my-sidecar # image: nginx:latest # - name: lemonldap-ng-controller # image: lemonldapng/lemonldap-ng-controller:0.2.0 # args: # - /lemonldap-ng-controller # - --alsologtostderr # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration # env: # - name: POD_NAME # valueFrom: # fieldRef: # fieldPath: metadata.name # - name: POD_NAMESPACE # valueFrom: # fieldRef: # fieldPath: metadata.namespace # volumeMounts: # - name: copy-portal-skins # mountPath: /srv/var/lib/lemonldap-ng/portal/skins extraVolumeMounts: [] ## Additional volumeMounts to the controller main container. # - name: copy-portal-skins # mountPath: /var/lib/lemonldap-ng/portal/skins extraVolumes: [] ## Additional volumes to the controller pod. # - name: copy-portal-skins # emptyDir: {} extraInitContainers: [] ## Containers, which are run before the app containers are started. 
# - name: init-myservice # image: busybox # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] admissionWebhooks: annotations: {} enabled: true failurePolicy: Fail # timeoutSeconds: 10 port: 8443 certificate: "/usr/local/certificates/cert" key: "/usr/local/certificates/key" namespaceSelector: {} objectSelector: {} # Use an existing PSP instead of creating one existingPsp: "" service: annotations: {} # clusterIP: "" externalIPs: [] # loadBalancerIP: "" loadBalancerSourceRanges: [] servicePort: 443 type: ClusterIP patch: enabled: true image: repository: registry.cn-beijing.aliyuncs.com/openacl/kube-webhook-certgen tag: v1.5.1 pullPolicy: IfNotPresent ## Provide a priority class name to the webhook patching job ## priorityClassName: "" podAnnotations: {} nodeSelector: {} tolerations: [] runAsUser: 2000 metrics: port: 10254 # if this port is changed, change healthz-port: in extraArgs: accordingly enabled: false service: annotations: {} # prometheus.io/scrape: "true" # prometheus.io/port: "10254" # clusterIP: "" ## List of IP addresses at which the stats-exporter service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] # loadBalancerIP: "" loadBalancerSourceRanges: [] servicePort: 10254 type: ClusterIP # externalTrafficPolicy: "" # nodePort: "" serviceMonitor: enabled: false additionalLabels: {} # The label to use to retrieve the job name from. # jobLabel: "app.kubernetes.io/name" namespace: "" namespaceSelector: {} # Default: scrape .Release.Namespace only # To scrape all, use the following: # namespaceSelector: # any: true scrapeInterval: 30s # honorLabels: true targetLabels: [] metricRelabelings: [] prometheusRule: enabled: false additionalLabels: {} # namespace: "" rules: [] # # These are just examples rules, please adapt them to your needs # - alert: NGINXConfigFailed # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 # for: 1s # labels: # severity: critical # annotations: # description: bad ingress config - nginx config test failed # summary: uninstall the latest ingress changes to allow config reloads to resume # - alert: NGINXCertificateExpiry # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds) by (host) - time()) < 604800 # for: 1s # labels: # severity: critical # annotations: # description: ssl certificate(s) will expire in less then a week # summary: renew expiring certificates to avoid downtime # - alert: NGINXTooMany500s # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 # for: 1m # labels: # severity: warning # annotations: # description: Too many 5XXs # summary: More than 5% of all requests returned 5XX, this requires your attention # - alert: NGINXTooMany400s # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5 # for: 1m # labels: # severity: warning # annotations: # description: Too many 4XXs # summary: More than 5% of all requests returned 4XX, this requires your attention ## Improve connection draining when ingress controller pod is deleted using a lifecycle hook: ## With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds ## to 300, allowing the draining of connections up to five minutes. ## If the active connections end before that, the pod will terminate gracefully at that time. 
## To effectively take advantage of this feature, the Configmap feature ## worker-shutdown-timeout new value is 240s instead of 10s. ## lifecycle: preStop: exec: command: - /wait-shutdown priorityClassName: "" ## Rollback limit ## revisionHistoryLimit: 10 ## Default 404 backend ## defaultBackend: ## enabled: false name: defaultbackend image: repository: registry.cn-beijing.aliyuncs.com/openacl/defaultbackend-amd64 tag: "1.5" pullPolicy: IfNotPresent # nobody user -> uid 65534 runAsUser: 65534 runAsNonRoot: true readOnlyRootFilesystem: true allowPrivilegeEscalation: false # Use an existing PSP instead of creating one existingPsp: "" extraArgs: {} serviceAccount: create: true name: "" automountServiceAccountToken: true ## Additional environment variables to set for defaultBackend pods extraEnvs: [] port: 8080 ## Readiness and liveness probes for default backend ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ ## livenessProbe: failureThreshold: 3 initialDelaySeconds: 30 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 5 readinessProbe: failureThreshold: 6 initialDelaySeconds: 0 periodSeconds: 5 successThreshold: 1 timeoutSeconds: 5 ## Node tolerations for server scheduling to nodes with taints ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ ## tolerations: [] # - key: "key" # operator: "Equal|Exists" # value: "value" # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" affinity: {} ## Security Context policies for controller pods ## See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for ## notes on enabling and using sysctls ## podSecurityContext: {} # labels to add to the pod container metadata podLabels: {} # key: value ## Node labels for default backend pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## nodeSelector: {} ## Annotations to be added to default backend pods ## podAnnotations: {} replicaCount: 1 minAvailable: 1 resources: {} # limits: # cpu: 10m # memory: 20Mi # requests: # cpu: 10m # memory: 20Mi extraVolumeMounts: [] ## Additional volumeMounts to the default backend container. # - name: copy-portal-skins # mountPath: /var/lib/lemonldap-ng/portal/skins extraVolumes: [] ## Additional volumes to the default backend pod. 
# - name: copy-portal-skins # emptyDir: {} autoscaling: enabled: false minReplicas: 1 maxReplicas: 2 targetCPUUtilizationPercentage: 50 targetMemoryUtilizationPercentage: 50 service: annotations: {} # clusterIP: "" ## List of IP addresses at which the default backend service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips ## externalIPs: [] # loadBalancerIP: "" loadBalancerSourceRanges: [] servicePort: 80 type: ClusterIP priorityClassName: "" ## Enable RBAC as per https://github.com/kubernetes/ingress/tree/master/examples/rbac/nginx and https://github.com/kubernetes/ingress/issues/266 rbac: create: true scope: false # If true, create & use Pod Security Policy resources # https://kubernetes.io/docs/concepts/policy/pod-security-policy/ podSecurityPolicy: enabled: false serviceAccount: create: true name: "" automountServiceAccountToken: true ## Optional array of imagePullSecrets containing private registry credentials ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ imagePullSecrets: [] # - name: secretName # TCP service key:value pairs # Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/tcp ## tcp: {} # 8080: "default/example-tcp-svc:9000" # UDP service key:value pairs # Ref: https://github.com/kubernetes/contrib/tree/master/ingress/controllers/nginx/examples/udp ## udp: {} # 53: "kube-system/kube-dns:53" # A base64ed Diffie-Hellman parameter # This can be generated with: openssl dhparam 4096 2> /dev/null | base64 # Ref: https://github.com/krmichel/ingress-nginx/blob/master/docs/examples/customization/ssl-dh-param dhParam:
29.978975
217
0.670422
d804f441fe2099daa943e46f3e428be369a032a2
14,368
yaml
YAML
k8s-deploy/flannel.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
k8s-deploy/flannel.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
k8s-deploy/flannel.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
--- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: psp.flannel.unprivileged annotations: seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default spec: privileged: false volumes: - configMap - secret - emptyDir - hostPath allowedHostPaths: - pathPrefix: "/etc/cni/net.d" - pathPrefix: "/etc/kube-flannel" - pathPrefix: "/run/flannel" readOnlyRootFilesystem: false # Users and groups runAsUser: rule: RunAsAny supplementalGroups: rule: RunAsAny fsGroup: rule: RunAsAny # Privilege Escalation allowPrivilegeEscalation: false defaultAllowPrivilegeEscalation: false # Capabilities allowedCapabilities: ['NET_ADMIN'] defaultAddCapabilities: [] requiredDropCapabilities: [] # Host namespaces hostPID: false hostIPC: false hostNetwork: true hostPorts: - min: 0 max: 65535 # SELinux seLinux: # SELinux is unused in CaaSP rule: 'RunAsAny' --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: flannel rules: - apiGroups: ['extensions'] resources: ['podsecuritypolicies'] verbs: ['use'] resourceNames: ['psp.flannel.unprivileged'] - apiGroups: - "" resources: - pods verbs: - get - apiGroups: - "" resources: - nodes verbs: - list - watch - apiGroups: - "" resources: - nodes/status verbs: - patch --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: flannel roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: flannel subjects: - kind: ServiceAccount name: flannel namespace: kube-system --- apiVersion: v1 kind: ServiceAccount metadata: name: flannel namespace: kube-system --- kind: ConfigMap apiVersion: v1 metadata: name: kube-flannel-cfg namespace: kube-system labels: tier: node app: flannel data: cni-conf.json: | { "name": "cbr0", "cniVersion": "0.3.1", "plugins": [ { "type": "flannel", "delegate": { "hairpinMode": true, "isDefaultGateway": true } }, { "type": "portmap", "capabilities": { "portMappings": true } } ] } net-conf.json: | { "Network": "172.16.0.1/16", "Backend": { "Type": "host-gw" } } --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-amd64 namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - amd64 hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-amd64 command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-amd64 command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel 
- name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-arm64 namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - arm64 hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-arm64 command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-arm64 command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-arm namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - arm hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-arm command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-arm command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-ppc64le namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: 
- key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - ppc64le hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-ppc64le command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-ppc64le command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg --- apiVersion: apps/v1 kind: DaemonSet metadata: name: kube-flannel-ds-s390x namespace: kube-system labels: tier: node app: flannel spec: selector: matchLabels: app: flannel template: metadata: labels: tier: node app: flannel spec: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - matchExpressions: - key: kubernetes.io/os operator: In values: - linux - key: kubernetes.io/arch operator: In values: - s390x hostNetwork: true tolerations: - operator: Exists effect: NoSchedule serviceAccountName: flannel initContainers: - name: install-cni image: quay.io/coreos/flannel:v0.12.0-s390x command: - cp args: - -f - /etc/kube-flannel/cni-conf.json - /etc/cni/net.d/10-flannel.conflist volumeMounts: - name: cni mountPath: /etc/cni/net.d - name: flannel-cfg mountPath: /etc/kube-flannel/ containers: - name: kube-flannel image: quay.io/coreos/flannel:v0.12.0-s390x command: - /opt/bin/flanneld args: - --ip-masq - --kube-subnet-mgr resources: requests: cpu: "100m" memory: "50Mi" limits: cpu: "100m" memory: "50Mi" securityContext: privileged: false capabilities: add: ["NET_ADMIN"] env: - name: POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace volumeMounts: - name: run mountPath: /run/flannel - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - name: run hostPath: path: /run/flannel - name: cni hostPath: path: /etc/cni/net.d - name: flannel-cfg configMap: name: kube-flannel-cfg
23.827529
77
0.517817
d833defd896d44dd345ca8a0413679ec8b17cacc
1,175
yaml
YAML
monitoring/redis/redis+exporter-dp.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis+exporter-dp.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis+exporter-dp.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cache03
  namespace: test456
  labels:
    app: redis
    k8s-app: cache03
spec:
  replicas: 1
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: redis
      k8s-app: cache03
  strategy:
    rollingUpdate:
      maxSurge: 25%        # maximum number of extra replicas during the update; may be a percentage or an absolute number
      maxUnavailable: 25%  # maximum number of Pods that may be unavailable during the update; may be a percentage or an absolute number
    type: RollingUpdate    # rolling-update strategy
  template:
    metadata:
      annotations:
      labels:
        app: redis
        k8s-app: cache03
    spec:
      containers:
      - name: redis
        image: redis:4.0.14
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 6379
          protocol: TCP
        env:
        - name: TZ
          value: Asia/Shanghai
      - name: redis-exporter
        image: oliver006/redis_exporter:v1.16.0-alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9121
          protocol: TCP
        env:
        - name: TZ
          value: Asia/Shanghai
        - name: REDIS_ADDR
          value: 'redis://localhost:6379'
        args:
        - -include-system-metrics=true
23.039216
68
0.578723
d895027b37d4c16838b689ac676332c4e6c84c24
81,240
yaml
YAML
monitoring/JVM/prometheus-rules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/prometheus-rules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/prometheus-rules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: monitoring.coreos.com/v1 kind: PrometheusRule metadata: labels: prometheus: k8s role: alert-rules name: prometheus-k8s-rules namespace: monitoring spec: groups: - name: node-exporter.rules rules: - expr: | count without (cpu) ( count without (mode) ( node_cpu_seconds_total{job="node-exporter"} ) ) record: instance:node_num_cpu:sum - expr: | 1 - avg without (cpu, mode) ( rate(node_cpu_seconds_total{job="node-exporter", mode="idle"}[1m]) ) record: instance:node_cpu_utilisation:rate1m - expr: | ( node_load1{job="node-exporter"} / instance:node_num_cpu:sum{job="node-exporter"} ) record: instance:node_load1_per_cpu:ratio - expr: | 1 - ( node_memory_MemAvailable_bytes{job="node-exporter"} / node_memory_MemTotal_bytes{job="node-exporter"} ) record: instance:node_memory_utilisation:ratio - expr: | rate(node_vmstat_pgmajfault{job="node-exporter"}[1m]) record: instance:node_vmstat_pgmajfault:rate1m - expr: | rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) record: instance_device:node_disk_io_time_seconds:rate1m - expr: | rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) record: instance_device:node_disk_io_time_weighted_seconds:rate1m - expr: | sum without (device) ( rate(node_network_receive_bytes_total{job="node-exporter", device!="lo"}[1m]) ) record: instance:node_network_receive_bytes_excluding_lo:rate1m - expr: | sum without (device) ( rate(node_network_transmit_bytes_total{job="node-exporter", device!="lo"}[1m]) ) record: instance:node_network_transmit_bytes_excluding_lo:rate1m - expr: | sum without (device) ( rate(node_network_receive_drop_total{job="node-exporter", device!="lo"}[1m]) ) record: instance:node_network_receive_drop_excluding_lo:rate1m - expr: | sum without (device) ( rate(node_network_transmit_drop_total{job="node-exporter", device!="lo"}[1m]) ) record: instance:node_network_transmit_drop_excluding_lo:rate1m - name: kube-apiserver.rules rules: - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1d])) - ( sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[1d])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[1d])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[1d])) ) ) + # errors sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d])) labels: verb: read record: apiserver_request:burnrate1d - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1h])) - ( sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[1h])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[1h])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[1h])) ) ) + # errors sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h])) labels: verb: read record: apiserver_request:burnrate1h - expr: | ( ( # too slow 
sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[2h])) - ( sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[2h])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[2h])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[2h])) ) ) + # errors sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h])) labels: verb: read record: apiserver_request:burnrate2h - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30m])) - ( sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[30m])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[30m])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[30m])) ) ) + # errors sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m])) labels: verb: read record: apiserver_request:burnrate30m - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[3d])) - ( sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[3d])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[3d])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[3d])) ) ) + # errors sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d])) labels: verb: read record: apiserver_request:burnrate3d - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[5m])) - ( sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[5m])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[5m])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[5m])) ) ) + # errors sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) labels: verb: read record: apiserver_request:burnrate5m - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[6h])) - ( sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[6h])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[6h])) + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[6h])) ) ) + # errors sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h])) labels: verb: read record: apiserver_request:burnrate6h - expr: | ( ( # too slow 
sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) - sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1d])) ) + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) labels: verb: write record: apiserver_request:burnrate1d - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) - sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1h])) ) + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) labels: verb: write record: apiserver_request:burnrate1h - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) - sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[2h])) ) + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) labels: verb: write record: apiserver_request:burnrate2h - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) - sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[30m])) ) + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) labels: verb: write record: apiserver_request:burnrate30m - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) - sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[3d])) ) + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) labels: verb: write record: apiserver_request:burnrate3d - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) - sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[5m])) ) + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) labels: verb: write record: apiserver_request:burnrate5m - expr: | ( ( # too slow sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) - sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[6h])) ) + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h])) ) / sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) labels: verb: write record: apiserver_request:burnrate6h - expr: | 1 - ( ( # write too slow sum(increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) - 
sum(increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) ) + ( # read too slow sum(increase(apiserver_request_duration_seconds_count{verb=~"LIST|GET"}[30d])) - ( sum(increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="resource",le="0.1"}[30d])) + sum(increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="namespace",le="0.5"}[30d])) + sum(increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="cluster",le="5"}[30d])) ) ) + # errors sum(code:apiserver_request_total:increase30d{code=~"5.."}) ) / sum(code:apiserver_request_total:increase30d) labels: verb: all record: apiserver_request:availability30d - expr: | 1 - ( sum(increase(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30d])) - ( # too slow sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="resource",le="0.1"}[30d])) + sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[30d])) + sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[30d])) ) + # errors sum(code:apiserver_request_total:increase30d{verb="read",code=~"5.."}) ) / sum(code:apiserver_request_total:increase30d{verb="read"}) labels: verb: read record: apiserver_request:availability30d - expr: | 1 - ( ( # too slow sum(increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) - sum(increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) ) + # errors sum(code:apiserver_request_total:increase30d{verb="write",code=~"5.."}) ) / sum(code:apiserver_request_total:increase30d{verb="write"}) labels: verb: write record: apiserver_request:availability30d - expr: | sum by (code, verb) (increase(apiserver_request_total{job="apiserver"}[30d])) record: code_verb:apiserver_request_total:increase30d - expr: | sum by (code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"}) labels: verb: read record: code:apiserver_request_total:increase30d - expr: | sum by (code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) labels: verb: write record: code:apiserver_request_total:increase30d - expr: | sum by (code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) labels: verb: read record: code_resource:apiserver_request_total:rate5m - expr: | sum by (code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) labels: verb: write record: code_resource:apiserver_request_total:rate5m - expr: | histogram_quantile(0.99, sum by (le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m]))) > 0 labels: quantile: "0.99" verb: read record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.99, sum by (le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))) > 0 labels: quantile: "0.99" verb: write record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - expr: | sum(rate(apiserver_request_duration_seconds_sum{subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod) / sum(rate(apiserver_request_duration_seconds_count{subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod) 
record: cluster:apiserver_request_duration_seconds:mean5m - expr: | histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) labels: quantile: "0.99" record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.9, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) labels: quantile: "0.9" record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) labels: quantile: "0.5" record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile - name: k8s.rules rules: - expr: | sum(rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!="", container!="POD"}[5m])) by (namespace) record: namespace:container_cpu_usage_seconds_total:sum_rate - expr: | sum by (cluster, namespace, pod, container) ( rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!="", container!="POD"}[5m]) ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( 1, max by(cluster, namespace, pod, node) (kube_pod_info) ) record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate - expr: | container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info) ) record: node_namespace_pod_container:container_memory_working_set_bytes - expr: | container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info) ) record: node_namespace_pod_container:container_memory_rss - expr: | container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info) ) record: node_namespace_pod_container:container_memory_cache - expr: | container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info) ) record: node_namespace_pod_container:container_memory_swap - expr: | sum(container_memory_usage_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!="", container!="POD"}) by (namespace) record: namespace:container_memory_usage_bytes:sum - expr: | sum by (namespace) ( sum by (namespace, pod) ( max by (namespace, pod, container) ( kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"} ) * on(namespace, pod) group_left() max by (namespace, pod) ( kube_pod_status_phase{phase=~"Pending|Running"} == 1 ) ) ) record: namespace:kube_pod_container_resource_requests_memory_bytes:sum - expr: | sum by (namespace) ( sum by (namespace, pod) ( max by (namespace, pod, container) ( kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"} ) * on(namespace, pod) group_left() max by (namespace, pod) ( 
kube_pod_status_phase{phase=~"Pending|Running"} == 1 ) ) ) record: namespace:kube_pod_container_resource_requests_cpu_cores:sum - expr: | max by (cluster, namespace, workload, pod) ( label_replace( label_replace( kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, "replicaset", "$1", "owner_name", "(.*)" ) * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) ( 1, max by (replicaset, namespace, owner_name) ( kube_replicaset_owner{job="kube-state-metrics"} ) ), "workload", "$1", "owner_name", "(.*)" ) ) labels: workload_type: deployment record: mixin_pod_workload - expr: | max by (cluster, namespace, workload, pod) ( label_replace( kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, "workload", "$1", "owner_name", "(.*)" ) ) labels: workload_type: daemonset record: mixin_pod_workload - expr: | max by (cluster, namespace, workload, pod) ( label_replace( kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, "workload", "$1", "owner_name", "(.*)" ) ) labels: workload_type: statefulset record: mixin_pod_workload - name: kube-scheduler.rules rules: - expr: | histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.99" record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.99" record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.99" record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.9" record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.9" record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.9, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.9" record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.5" record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.5" record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.5, sum(rate(scheduler_binding_duration_seconds_bucket{job="kube-scheduler"}[5m])) without(instance, pod)) labels: quantile: "0.5" record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile - name: node.rules rules: - expr: | sum(min(kube_pod_info) by (cluster, node)) record: ':kube_pod_info_node_count:' - expr: | topk 
by(namespace, pod) (1, max by (node, namespace, pod) ( label_replace(kube_pod_info{job="kube-state-metrics"}, "pod", "$1", "pod", "(.*)") )) record: 'node_namespace_pod:kube_pod_info:' - expr: | count by (cluster, node) (sum by (node, cpu) ( node_cpu_seconds_total{job="node-exporter"} * on (namespace, pod) group_left(node) node_namespace_pod:kube_pod_info: )) record: node:node_num_cpu:sum - expr: | sum( node_memory_MemAvailable_bytes{job="node-exporter"} or ( node_memory_Buffers_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Slab_bytes{job="node-exporter"} ) ) by (cluster) record: :node_memory_MemAvailable_bytes:sum - name: kubelet.rules rules: - expr: | histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) labels: quantile: "0.99" record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) labels: quantile: "0.9" record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile - expr: | histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) labels: quantile: "0.5" record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile - name: kube-prometheus-node-recording.rules rules: - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[3m])) BY (instance) record: instance:node_cpu:rate:sum - expr: sum((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"})) BY (instance) record: instance:node_filesystem_usage:sum - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) record: instance:node_network_receive_bytes:rate:sum - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) record: instance:node_network_transmit_bytes:rate:sum - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance) record: instance:node_cpu:ratio - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) record: cluster:node_cpu:sum_rate5m - expr: cluster:node_cpu_seconds_total:rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu)) record: cluster:node_cpu:ratio - name: kube-prometheus-general.rules rules: - expr: count without(instance, pod, node) (up == 1) record: count:up1 - expr: count without(instance, pod, node) (up == 0) record: count:up0 - name: kube-state-metrics rules: - alert: KubeStateMetricsListErrors annotations: message: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatemetricslisterrors expr: | (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) / sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m]))) > 0.01 for: 15m labels: severity: critical - alert: KubeStateMetricsWatchErrors annotations: message: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatemetricswatcherrors expr: | (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) / sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m]))) > 0.01 for: 15m labels: severity: critical - name: node-exporter rules: - alert: NodeFilesystemSpaceFillingUp annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemspacefillingup summary: Filesystem is predicted to run out of space within the next 24 hours. expr: | ( node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 40 and predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: warning - alert: NodeFilesystemSpaceFillingUp annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemspacefillingup summary: Filesystem is predicted to run out of space within the next 4 hours. expr: | ( node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 15 and predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: critical - alert: NodeFilesystemAlmostOutOfSpace annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutofspace summary: Filesystem has less than 5% space left. expr: | ( node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 5 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: warning - alert: NodeFilesystemAlmostOutOfSpace annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutofspace summary: Filesystem has less than 3% space left. 
expr: | ( node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 3 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: critical - alert: NodeFilesystemFilesFillingUp annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemfilesfillingup summary: Filesystem is predicted to run out of inodes within the next 24 hours. expr: | ( node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 40 and predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: warning - alert: NodeFilesystemFilesFillingUp annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemfilesfillingup summary: Filesystem is predicted to run out of inodes within the next 4 hours. expr: | ( node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 20 and predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: critical - alert: NodeFilesystemAlmostOutOfFiles annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutoffiles summary: Filesystem has less than 5% inodes left. expr: | ( node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 5 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: warning - alert: NodeFilesystemAlmostOutOfFiles annotations: description: Filesystem on {{ $labels.device }} at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodefilesystemalmostoutoffiles summary: Filesystem has less than 3% inodes left. expr: | ( node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 3 and node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 ) for: 1h labels: severity: critical - alert: NodeNetworkReceiveErrs annotations: description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodenetworkreceiveerrs summary: Network interface is reporting many receive errors. 
expr: | increase(node_network_receive_errs_total[2m]) > 10 for: 1h labels: severity: warning - alert: NodeNetworkTransmitErrs annotations: description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodenetworktransmiterrs summary: Network interface is reporting many transmit errors. expr: | increase(node_network_transmit_errs_total[2m]) > 10 for: 1h labels: severity: warning - alert: NodeHighNumberConntrackEntriesUsed annotations: description: '{{ $value | humanizePercentage }} of conntrack entries are used' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodehighnumberconntrackentriesused summary: Number of conntrack are getting close to the limit expr: | (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75 labels: severity: warning - alert: NodeClockSkewDetected annotations: message: Clock on {{ $labels.instance }} is out of sync by more than 300s. Ensure NTP is configured correctly on this host. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodeclockskewdetected summary: Clock skew detected. expr: | ( node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0 ) or ( node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0 ) for: 10m labels: severity: warning - alert: NodeClockNotSynchronising annotations: message: Clock on {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodeclocknotsynchronising summary: Clock not synchronising. expr: | min_over_time(node_timex_sync_status[5m]) == 0 for: 10m labels: severity: warning - name: kubernetes-apps rules: - alert: KubePodCrashLooping annotations: message: Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is restarting {{ printf "%.2f" $value }} times / 5 minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping expr: | rate(kube_pod_container_status_restarts_total{job="kube-state-metrics"}[15m]) * 60 * 5 > 0 for: 15m labels: severity: critical - alert: KubePodNotReady annotations: message: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state for longer than 3 minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready expr: | sum by (namespace, pod) (max by(namespace, pod) (kube_pod_status_phase{job="kube-state-metrics", phase=~"Pending|Unknown"}) * on(namespace, pod) group_left(owner_kind) max by(namespace, pod, owner_kind) (kube_pod_owner{owner_kind!="Job"})) > 0 for: 3m labels: severity: critical - alert: KubeDeploymentGenerationMismatch annotations: message: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment }} does not match, this indicates that the Deployment has failed but has not been rolled back. 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentgenerationmismatch expr: | kube_deployment_status_observed_generation{job="kube-state-metrics"} != kube_deployment_metadata_generation{job="kube-state-metrics"} for: 15m labels: severity: critical - alert: KubeDeploymentReplicasMismatch annotations: message: Deployment {{ $labels.namespace }}/{{ $labels.deployment }} has not matched the expected number of replicas for longer than 15 minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch expr: | ( kube_deployment_spec_replicas{job="kube-state-metrics"} != kube_deployment_status_replicas_available{job="kube-state-metrics"} ) and ( changes(kube_deployment_status_replicas_updated{job="kube-state-metrics"}[5m]) == 0 ) for: 15m labels: severity: critical - alert: KubeStatefulSetReplicasMismatch annotations: message: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched the expected number of replicas for longer than 15 minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch expr: | ( kube_statefulset_status_replicas_ready{job="kube-state-metrics"} != kube_statefulset_status_replicas{job="kube-state-metrics"} ) and ( changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics"}[5m]) == 0 ) for: 15m labels: severity: critical - alert: KubeStatefulSetGenerationMismatch annotations: message: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset }} does not match, this indicates that the StatefulSet has failed but has not been rolled back. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetgenerationmismatch expr: | kube_statefulset_status_observed_generation{job="kube-state-metrics"} != kube_statefulset_metadata_generation{job="kube-state-metrics"} for: 15m labels: severity: critical - alert: KubeStatefulSetUpdateNotRolledOut annotations: message: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been rolled out. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetupdatenotrolledout expr: | max without (revision) ( kube_statefulset_status_current_revision{job="kube-state-metrics"} unless kube_statefulset_status_update_revision{job="kube-state-metrics"} ) * ( kube_statefulset_replicas{job="kube-state-metrics"} != kube_statefulset_status_replicas_updated{job="kube-state-metrics"} ) for: 15m labels: severity: critical - alert: KubeDaemonSetRolloutStuck annotations: message: Only {{ $value | humanizePercentage }} of the desired Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are scheduled and ready. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck expr: | kube_daemonset_status_number_ready{job="kube-state-metrics"} / kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics"} < 1.00 for: 15m labels: severity: critical - alert: KubeContainerWaiting annotations: message: Pod {{ $labels.namespace }}/{{ $labels.pod }} container {{ $labels.container}} has been in waiting state for longer than 1 m. 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontainerwaiting expr: | sum by (namespace, pod, container) (kube_pod_container_status_waiting_reason{job="kube-state-metrics"}) > 0 for: 1m labels: severity: warning - alert: KubeDaemonSetNotScheduled annotations: message: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled.' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetnotscheduled expr: | kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics"} - kube_daemonset_status_current_number_scheduled{job="kube-state-metrics"} > 0 for: 10m labels: severity: warning - alert: KubeDaemonSetMisScheduled annotations: message: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run.' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetmisscheduled expr: | kube_daemonset_status_number_misscheduled{job="kube-state-metrics"} > 0 for: 15m labels: severity: warning - alert: KubeCronJobRunning annotations: message: CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than 1h to complete. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecronjobrunning expr: | time() - kube_cronjob_next_schedule_time{job="kube-state-metrics"} > 3600 for: 1h labels: severity: warning - alert: KubeJobCompletion annotations: message: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than one hour to complete. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion expr: | kube_job_spec_completions{job="kube-state-metrics"} - kube_job_status_succeeded{job="kube-state-metrics"} > 0 for: 1h labels: severity: warning - alert: KubeJobFailed annotations: message: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed expr: | kube_job_failed{job="kube-state-metrics"} > 0 for: 15m labels: severity: warning - alert: KubeHpaReplicasMismatch annotations: message: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has not matched the desired number of replicas for longer than 15 minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubehpareplicasmismatch expr: | (kube_hpa_status_desired_replicas{job="kube-state-metrics"} != kube_hpa_status_current_replicas{job="kube-state-metrics"}) and changes(kube_hpa_status_current_replicas[15m]) == 0 for: 15m labels: severity: warning - alert: KubeHpaMaxedOut annotations: message: HPA {{ $labels.namespace }}/{{ $labels.hpa }} has been running at max replicas for longer than 15 minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubehpamaxedout expr: | kube_hpa_status_current_replicas{job="kube-state-metrics"} == kube_hpa_spec_max_replicas{job="kube-state-metrics"} for: 15m labels: severity: warning - name: kubernetes-resources rules: - alert: KubeCPUOvercommit annotations: message: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure. 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit expr: | sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum{}) / sum(kube_node_status_allocatable_cpu_cores) > (count(kube_node_status_allocatable_cpu_cores)-1) / count(kube_node_status_allocatable_cpu_cores) for: 5m labels: severity: warning - alert: KubeMemoryOvercommit annotations: message: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememoryovercommit expr: | sum(namespace:kube_pod_container_resource_requests_memory_bytes:sum{}) / sum(kube_node_status_allocatable_memory_bytes) > (count(kube_node_status_allocatable_memory_bytes)-1) / count(kube_node_status_allocatable_memory_bytes) for: 5m labels: severity: warning - alert: KubeCPUQuotaOvercommit annotations: message: Cluster has overcommitted CPU resource requests for Namespaces. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuquotaovercommit expr: | sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="cpu"}) / sum(kube_node_status_allocatable_cpu_cores) > 1.5 for: 5m labels: severity: warning - alert: KubeMemoryQuotaOvercommit annotations: message: Cluster has overcommitted memory resource requests for Namespaces. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememoryquotaovercommit expr: | sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="memory"}) / sum(kube_node_status_allocatable_memory_bytes{job="node-exporter"}) > 1.5 for: 5m labels: severity: warning - alert: KubeQuotaExceeded annotations: message: Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubequotaexceeded expr: | kube_resourcequota{job="kube-state-metrics", type="used"} / ignoring(instance, job, type) (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0) > 0.90 for: 15m labels: severity: warning - alert: CPUThrottlingHigh annotations: message: '{{ $value | humanizePercentage }} throttling of CPU in namespace {{ $labels.namespace }} for container {{ $labels.container }} in pod {{ $labels.pod }}.' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh expr: | sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (container, pod, namespace) / sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container, pod, namespace) > ( 25 / 100 ) for: 15m labels: severity: warning - name: kubernetes-storage rules: - alert: KubePersistentVolumeFillingUp annotations: message: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free. 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefillingup expr: | kubelet_volume_stats_available_bytes{job="kubelet", metrics_path="/metrics"} / kubelet_volume_stats_capacity_bytes{job="kubelet", metrics_path="/metrics"} < 0.03 for: 1m labels: severity: critical - alert: KubePersistentVolumeFillingUp annotations: message: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefillingup expr: | ( kubelet_volume_stats_available_bytes{job="kubelet", metrics_path="/metrics"} / kubelet_volume_stats_capacity_bytes{job="kubelet", metrics_path="/metrics"} ) < 0.15 and predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 for: 1h labels: severity: warning - alert: KubePersistentVolumeErrors annotations: message: The persistent volume {{ $labels.persistentvolume }} has status {{ $labels.phase }}. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeerrors expr: | kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0 for: 5m labels: severity: critical - name: kubernetes-system rules: - alert: KubeVersionMismatch annotations: message: There are {{ $value }} different semantic versions of Kubernetes components running. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeversionmismatch expr: | count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1 for: 15m labels: severity: warning - alert: KubeClientErrors annotations: message: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors.' 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclienterrors expr: | (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job) / sum(rate(rest_client_requests_total[5m])) by (instance, job)) > 0.01 for: 15m labels: severity: warning - name: kube-apiserver-slos rules: - alert: KubeAPIErrorBudgetBurn annotations: message: The API server is burning too much error budget runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorbudgetburn expr: | sum(apiserver_request:burnrate1h) > (14.40 * 0.01000) and sum(apiserver_request:burnrate5m) > (14.40 * 0.01000) for: 2m labels: severity: critical - alert: KubeAPIErrorBudgetBurn annotations: message: The API server is burning too much error budget runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorbudgetburn expr: | sum(apiserver_request:burnrate6h) > (6.00 * 0.01000) and sum(apiserver_request:burnrate30m) > (6.00 * 0.01000) for: 15m labels: severity: critical - alert: KubeAPIErrorBudgetBurn annotations: message: The API server is burning too much error budget runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorbudgetburn expr: | sum(apiserver_request:burnrate1d) > (3.00 * 0.01000) and sum(apiserver_request:burnrate2h) > (3.00 * 0.01000) for: 1h labels: severity: warning - alert: KubeAPIErrorBudgetBurn annotations: message: The API server is burning too much error budget runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorbudgetburn expr: | sum(apiserver_request:burnrate3d) > (1.00 * 0.01000) and sum(apiserver_request:burnrate6h) > (1.00 * 0.01000) for: 3h labels: severity: warning - name: kubernetes-system-apiserver rules: - alert: KubeAPILatencyHigh annotations: message: The API server has an abnormal latency of {{ $value }} seconds for {{ $labels.verb }} {{ $labels.resource }}. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapilatencyhigh expr: | ( cluster:apiserver_request_duration_seconds:mean5m{job="apiserver"} > on (verb) group_left() ( avg by (verb) (cluster:apiserver_request_duration_seconds:mean5m{job="apiserver"} >= 0) + 2*stddev by (verb) (cluster:apiserver_request_duration_seconds:mean5m{job="apiserver"} >= 0) ) ) > on (verb) group_left() 1.2 * avg by (verb) (cluster:apiserver_request_duration_seconds:mean5m{job="apiserver"} >= 0) and on (verb,resource) cluster_quantile:apiserver_request_duration_seconds:histogram_quantile{job="apiserver",quantile="0.99"} > 1 for: 5m labels: severity: warning - alert: KubeAPIErrorsHigh annotations: message: API server is returning errors for {{ $value | humanizePercentage }} of requests for {{ $labels.verb }} {{ $labels.resource }} {{ $labels.subresource }}. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorshigh expr: | sum(rate(apiserver_request_total{job="apiserver",code=~"5.."}[5m])) by (resource,subresource,verb) / sum(rate(apiserver_request_total{job="apiserver"}[5m])) by (resource,subresource,verb) > 0.05 for: 10m labels: severity: warning - alert: KubeClientCertificateExpiration annotations: message: A client certificate used to authenticate to the apiserver is expiring in less than 7.0 days. 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration expr: | apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800 labels: severity: warning - alert: KubeClientCertificateExpiration annotations: message: A client certificate used to authenticate to the apiserver is expiring in less than 24.0 hours. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration expr: | apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400 labels: severity: critical - alert: AggregatedAPIErrors annotations: message: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} has reported errors. The number of errors have increased for it in the past five minutes. High values indicate that the availability of the service changes too often. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-aggregatedapierrors expr: | sum by(name, namespace)(increase(aggregator_unavailable_apiservice_count[5m])) > 2 labels: severity: warning - alert: AggregatedAPIDown annotations: message: An aggregated API {{ $labels.name }}/{{ $labels.namespace }} is down. It has not been available at least for the past five minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-aggregatedapidown expr: | sum by(name, namespace)(sum_over_time(aggregator_unavailable_apiservice[5m])) > 0 for: 5m labels: severity: warning - alert: KubeAPIDown annotations: message: KubeAPI has disappeared from Prometheus target discovery. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapidown expr: | absent(up{job="apiserver"} == 1) for: 15m labels: severity: critical - name: kubernetes-system-kubelet rules: - alert: KubeNodeNotReady annotations: message: '{{ $labels.node }} has been unready for more than 15 minutes.' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodenotready expr: | kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 for: 15m labels: severity: warning - alert: KubeNodeUnreachable annotations: message: '{{ $labels.node }} is unreachable and some workloads may be rescheduled.' runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodeunreachable expr: | kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} == 1 for: 2m labels: severity: warning - alert: KubeletTooManyPods annotations: message: Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity. 
runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubelettoomanypods expr: | max(max(kubelet_running_pod_count{job="kubelet", metrics_path="/metrics"}) by(instance) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) by(node) / max(kube_node_status_capacity_pods{job="kube-state-metrics"} != 1) by(node) > 0.95 for: 15m labels: severity: warning - alert: KubeNodeReadinessFlapping annotations: message: The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodereadinessflapping expr: | sum(changes(kube_node_status_condition{status="true",condition="Ready"}[15m])) by (node) > 2 for: 15m labels: severity: warning - alert: KubeletPlegDurationHigh annotations: message: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }}. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletplegdurationhigh expr: | node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10 for: 5m labels: severity: warning - alert: KubeletPodStartUpLatencyHigh annotations: message: Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }}. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletpodstartuplatencyhigh expr: | histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (instance, le)) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60 for: 15m labels: severity: warning - alert: KubeletDown annotations: message: Kubelet has disappeared from Prometheus target discovery. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletdown expr: | absent(up{job="kubelet", metrics_path="/metrics"} == 1) for: 15m labels: severity: critical - name: kubernetes-system-scheduler rules: - alert: KubeSchedulerDown annotations: message: KubeScheduler has disappeared from Prometheus target discovery. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeschedulerdown expr: | absent(up{job="kube-scheduler"} == 1) for: 15m labels: severity: critical - name: kubernetes-system-controller-manager rules: - alert: KubeControllerManagerDown annotations: message: KubeControllerManager has disappeared from Prometheus target discovery. runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontrollermanagerdown expr: | absent(up{job="kube-controller-manager"} == 1) for: 15m labels: severity: critical - name: prometheus rules: - alert: PrometheusBadConfig annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload its configuration. summary: Failed Prometheus configuration reload. expr: | # Without max_over_time, failed scrapes could create false negatives, see # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
max_over_time(prometheus_config_last_reload_successful{job="prometheus-k8s",namespace="monitoring"}[5m]) == 0 for: 10m labels: severity: critical - alert: PrometheusNotificationQueueRunningFull annotations: description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}} is running full. summary: Prometheus alert notification queue predicted to run full in less than 30m. expr: | # Without min_over_time, failed scrapes could create false negatives, see # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. ( predict_linear(prometheus_notifications_queue_length{job="prometheus-k8s",namespace="monitoring"}[5m], 60 * 30) > min_over_time(prometheus_notifications_queue_capacity{job="prometheus-k8s",namespace="monitoring"}[5m]) ) for: 15m labels: severity: warning - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers annotations: description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.' summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager. expr: | ( rate(prometheus_notifications_errors_total{job="prometheus-k8s",namespace="monitoring"}[5m]) / rate(prometheus_notifications_sent_total{job="prometheus-k8s",namespace="monitoring"}[5m]) ) * 100 > 1 for: 15m labels: severity: warning - alert: PrometheusErrorSendingAlertsToAnyAlertmanager annotations: description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.' summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager. expr: | min without(alertmanager) ( rate(prometheus_notifications_errors_total{job="prometheus-k8s",namespace="monitoring"}[5m]) / rate(prometheus_notifications_sent_total{job="prometheus-k8s",namespace="monitoring"}[5m]) ) * 100 > 3 for: 15m labels: severity: critical - alert: PrometheusNotConnectedToAlertmanagers annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to any Alertmanagers. summary: Prometheus is not connected to any Alertmanagers. expr: | # Without max_over_time, failed scrapes could create false negatives, see # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus-k8s",namespace="monitoring"}[5m]) < 1 for: 10m labels: severity: warning - alert: PrometheusTSDBReloadsFailing annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} reload failures over the last 3h. summary: Prometheus has issues reloading blocks from disk. expr: | increase(prometheus_tsdb_reloads_failures_total{job="prometheus-k8s",namespace="monitoring"}[3h]) > 0 for: 4h labels: severity: warning - alert: PrometheusTSDBCompactionsFailing annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value | humanize}} compaction failures over the last 3h. summary: Prometheus has issues compacting blocks. expr: | increase(prometheus_tsdb_compactions_failed_total{job="prometheus-k8s",namespace="monitoring"}[3h]) > 0 for: 4h labels: severity: warning - alert: PrometheusNotIngestingSamples annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples. summary: Prometheus is not ingesting samples. 
expr: | rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-k8s",namespace="monitoring"}[5m]) <= 0 for: 10m labels: severity: warning - alert: PrometheusDuplicateTimestamps annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with different values but duplicated timestamp. summary: Prometheus is dropping samples with duplicate timestamps. expr: | rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0 for: 10m labels: severity: warning - alert: PrometheusOutOfOrderTimestamps annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{ printf "%.4g" $value }} samples/s with timestamps arriving out of order. summary: Prometheus drops samples with out-of-order timestamps. expr: | rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0 for: 10m labels: severity: warning - alert: PrometheusRemoteStorageFailures annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} failed to send {{ printf "%.1f" $value }}% of the samples to {{ $labels.remote_name}}:{{ $labels.url }} summary: Prometheus fails to send samples to remote storage. expr: | ( rate(prometheus_remote_storage_failed_samples_total{job="prometheus-k8s",namespace="monitoring"}[5m]) / ( rate(prometheus_remote_storage_failed_samples_total{job="prometheus-k8s",namespace="monitoring"}[5m]) + rate(prometheus_remote_storage_succeeded_samples_total{job="prometheus-k8s",namespace="monitoring"}[5m]) ) ) * 100 > 1 for: 15m labels: severity: critical - alert: PrometheusRemoteWriteBehind annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{ printf "%.1f" $value }}s behind for {{ $labels.remote_name}}:{{ $labels.url }}. summary: Prometheus remote write is behind. expr: | # Without max_over_time, failed scrapes could create false negatives, see # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. ( max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus-k8s",namespace="monitoring"}[5m]) - on(job, instance) group_right max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus-k8s",namespace="monitoring"}[5m]) ) > 120 for: 15m labels: severity: critical - alert: PrometheusRemoteWriteDesiredShards annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write desired shards calculation wants to run {{ $value }} shards for queue {{ $labels.remote_name}}:{{ $labels.url }}, which is more than the max of {{ printf `prometheus_remote_storage_shards_max{instance="%s",job="prometheus-k8s",namespace="monitoring"}` $labels.instance | query | first | value }}. summary: Prometheus remote write desired shards calculation wants to run more than configured max shards. expr: | # Without max_over_time, failed scrapes could create false negatives, see # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. ( max_over_time(prometheus_remote_storage_shards_desired{job="prometheus-k8s",namespace="monitoring"}[5m]) > max_over_time(prometheus_remote_storage_shards_max{job="prometheus-k8s",namespace="monitoring"}[5m]) ) for: 15m labels: severity: warning - alert: PrometheusRuleFailures annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate {{ printf "%.0f" $value }} rules in the last 5m. 
summary: Prometheus is failing rule evaluations. expr: | increase(prometheus_rule_evaluation_failures_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0 for: 15m labels: severity: critical - alert: PrometheusMissingRuleEvaluations annotations: description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf "%.0f" $value }} rule group evaluations in the last 5m. summary: Prometheus is missing rule evaluations due to slow rule group evaluation. expr: | increase(prometheus_rule_group_iterations_missed_total{job="prometheus-k8s",namespace="monitoring"}[5m]) > 0 for: 15m labels: severity: warning - name: alertmanager.rules rules: - alert: AlertmanagerConfigInconsistent annotations: message: | The configuration of the instances of the Alertmanager cluster `{{ $labels.namespace }}/{{ $labels.service }}` are out of sync. {{ range printf "alertmanager_config_hash{namespace=\"%s\",service=\"%s\"}" $labels.namespace $labels.service | query }} Configuration hash for pod {{ .Labels.pod }} is "{{ printf "%.f" .Value }}" {{ end }} expr: | count by(namespace,service) (count_values by(namespace,service) ("config_hash", alertmanager_config_hash{job="alertmanager-main",namespace="monitoring"})) != 1 for: 5m labels: severity: critical - alert: AlertmanagerFailedReload annotations: message: Reloading Alertmanager's configuration has failed for {{ $labels.namespace }}/{{ $labels.pod}}. expr: | alertmanager_config_last_reload_successful{job="alertmanager-main",namespace="monitoring"} == 0 for: 10m labels: severity: warning - alert: AlertmanagerMembersInconsistent annotations: message: Alertmanager has not found all other members of the cluster. expr: | alertmanager_cluster_members{job="alertmanager-main",namespace="monitoring"} != on (service) GROUP_LEFT() count by (service) (alertmanager_cluster_members{job="alertmanager-main",namespace="monitoring"}) for: 5m labels: severity: critical - name: general.rules rules: - alert: TargetDown annotations: message: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.' expr: 100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, namespace, service)) > 10 for: 10m labels: severity: warning - alert: Watchdog annotations: message: | This is an alert meant to ensure that the entire alerting pipeline is functional. This alert is always firing, therefore it should always be firing in Alertmanager and always fire against a receiver. There are integrations with various notification mechanisms that send a notification when this alert is not firing. For example the "DeadMansSnitch" integration in PagerDuty. expr: vector(1) labels: severity: none - name: node-network rules: - alert: NodeNetworkInterfaceFlapping annotations: message: Network interface "{{ $labels.device }}" changing it's up status often on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}" expr: | changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2 for: 2m labels: severity: warning - name: prometheus-operator rules: - alert: PrometheusOperatorReconcileErrors annotations: message: Errors while reconciling {{ $labels.controller }} in {{ $labels.namespace }} Namespace. expr: | rate(prometheus_operator_reconcile_errors_total{job="prometheus-operator",namespace="monitoring"}[5m]) > 0.1 for: 10m labels: severity: warning - alert: PrometheusOperatorNodeLookupErrors annotations: message: Errors while reconciling Prometheus in {{ $labels.namespace }} Namespace. 
expr: | rate(prometheus_operator_node_address_lookup_errors_total{job="prometheus-operator",namespace="monitoring"}[5m]) > 0.1 for: 10m labels: severity: warning #添加jvm报警规则 - name: JVM-Monitor rules: - alert: JvmMemoryFillingUp expr: | (sum by (instance)(jvm_memory_bytes_used{area="heap"}) / sum by (instance)(jvm_memory_bytes_max{area="heap"})) * 100 > 10 for: 2m labels: severity: warning annotations: summary: | JVM memory filling up (instance {{ $labels.instance }}) description: | JVM memory is filling up (> 10%) MemoryValue = {{ $value }}
45.717501
280
0.650357
664dad725cf65cb020fd90e7ce07c29759ce1c83
353
yaml
YAML
monitoring/ingressRelatedToPrometheus/grafana-ingress.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingressRelatedToPrometheus/grafana-ingress.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingressRelatedToPrometheus/grafana-ingress.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    k8s.kuboard.cn/workload: monitoring
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  rules:
  - host: gr.code404.cn
    http:
      paths:
      - backend:
          serviceName: grafana
          servicePort: 3000
        path: /
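# Note (added, hedged): extensions/v1beta1 Ingress is removed in Kubernetes 1.22+.
# A minimal networking.k8s.io/v1 equivalent of the manifest above might look like the
# sketch below; the ingressClassName "nginx" is an assumption and should be changed to
# match the ingress controller actually deployed in the cluster.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana
  namespace: monitoring
  labels:
    app: grafana
spec:
  ingressClassName: nginx   # assumed class name; adjust to your controller
  rules:
  - host: gr.code404.cn
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: grafana
            port:
              number: 3000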
18.578947
39
0.594901
706be4f0297cf40692e7b2ea2c135e8bac148120
356
yaml
YAML
monitoring/JVM/prometheus-clusterRole.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/prometheus-clusterRole.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/prometheus-clusterRole.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus-k8s
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get
# For cross-namespace monitoring, add one extra RBAC rule
- apiGroups:
  - ""
  resources:
  - nodes
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch
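# Note (added, hedged): a ClusterRole grants nothing on its own; it has to be bound to
# the ServiceAccount that Prometheus runs under. The binding below is a sketch that
# assumes the standard kube-prometheus ServiceAccount "prometheus-k8s" in the
# "monitoring" namespace; adjust the subject if your deployment uses different names.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus-k8s
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus-k8s
subjects:
- kind: ServiceAccount
  name: prometheus-k8s      # assumed ServiceAccount name
  namespace: monitoring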
12.714286
40
0.646067
f3f2da5fd4f31c850e3e32efb6cdb3f0f9fea7a6
253
yaml
YAML
monitoring/redis/redis-exporter-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis-exporter-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis-exporter-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  name: redis-monitor
  namespace: test123
  labels:
    app: redis
spec:
  type: ClusterIP
  ports:
  - name: redis-monitor
    port: 9121
    protocol: TCP
    targetPort: 9121
  selector:
    app: redis
14.882353
25
0.648221
4fcd994fc03bf100a130ff81f03f08e63ff45211
2,318
yaml
YAML
k8s-deploy/kuboard.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
1
2022-01-17T11:34:26.000Z
2022-01-17T11:34:26.000Z
addons-3rd/kuboard/kuboard-v2.yaml
cantevenl/k8s-ha-install
a0142dae609686174d3b7c48d51fd2f7b3199c67
[ "Apache-2.0" ]
null
null
null
addons-3rd/kuboard/kuboard-v2.yaml
cantevenl/k8s-ha-install
a0142dae609686174d3b7c48d51fd2f7b3199c67
[ "Apache-2.0" ]
null
null
null
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kuboard
  namespace: kube-system
  annotations:
    k8s.kuboard.cn/displayName: kuboard
    k8s.kuboard.cn/ingress: "true"
    k8s.kuboard.cn/service: NodePort
    k8s.kuboard.cn/workload: kuboard
  labels:
    k8s.kuboard.cn/layer: monitor
    k8s.kuboard.cn/name: kuboard
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s.kuboard.cn/layer: monitor
      k8s.kuboard.cn/name: kuboard
  template:
    metadata:
      labels:
        k8s.kuboard.cn/layer: monitor
        k8s.kuboard.cn/name: kuboard
    spec:
      containers:
      - name: kuboard
        image: eipwork/kuboard:latest
        imagePullPolicy: Always
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
        operator: Exists
---
apiVersion: v1
kind: Service
metadata:
  name: kuboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 32567
  selector:
    k8s.kuboard.cn/layer: monitor
    k8s.kuboard.cn/name: kuboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuboard-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kuboard-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kuboard-user
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuboard-viewer
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kuboard-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
- kind: ServiceAccount
  name: kuboard-viewer
  namespace: kube-system
# ---
# apiVersion: extensions/v1beta1
# kind: Ingress
# metadata:
#   name: kuboard
#   namespace: kube-system
#   annotations:
#     k8s.kuboard.cn/displayName: kuboard
#     k8s.kuboard.cn/workload: kuboard
#     nginx.org/websocket-services: "kuboard"
#     nginx.com/sticky-cookie-services: "serviceName=kuboard srv_id expires=1h path=/"
# spec:
#   rules:
#   - host: kuboard.yourdomain.com
#     http:
#       paths:
#       - path: /
#         backend:
#           serviceName: kuboard
#           servicePort: http
20.333333
86
0.684642
070b983a927a7984ca18725a00433864b003bccd
350
yaml
YAML
monitoring/redis/redis-serviceMonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis-serviceMonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis-serviceMonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: redis-monitor
  namespace: monitoring
  labels:
    k8s-app: redis
spec:
  #jobLabel: redis-monitor
  endpoints:
  - port: redis-monitor
    interval: 15s
    scheme: http
    path: '/metrics'
  selector:
    matchLabels:
      app: redis
  namespaceSelector:
    any: true
17.5
36
0.688571
475135db5057ad4d6bae9d743e8bb77e56867794
1,228
yaml
YAML
monitoring/redis/redis-monitor-AlertRules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis-monitor-AlertRules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/redis/redis-monitor-AlertRules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
#Redis-Alert
- name: Redis instance is down
  rules:
  - alert: RedisDown
    expr: redis_up == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      summary: Redis down (instance {{ $labels.instance }})
      description: |
        Redis instance is down
        Pod={{ $labels.pod }}
- name: Redis instance has too many connections
  rules:
  - alert: RedisTooManyConnections
    expr: redis_connected_clients > 100
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Redis too many connections (instance {{ $labels.instance }})
      description: |
        Redis instance has too many connections
        VALUE = {{ $value }}
        LABELS: {{ $labels }}
- name: Redis is running out of system memory (> 40%)
  rules:
  - alert: RedisOutOfSystemMemory
    expr: redis_memory_used_bytes / redis_total_system_memory_bytes * 100 > 40
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: Redis out of system memory (instance {{ $labels.instance }})
      description: |
        Redis is running out of system memory (> 40%)
        VALUE = {{ $value }}
        LABELS: {{ $labels }}
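# Note (added, hedged): the file above is a bare rule-group fragment rather than a
# complete Kubernetes object. With the Prometheus Operator such a fragment is normally
# delivered inside a PrometheusRule resource; the wrapper below is a sketch, and the
# "prometheus: k8s" / "role: alert-rules" labels are assumptions that must match the
# ruleSelector of the Prometheus custom resource in use.
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: redis-monitor-rules     # assumed name for illustration
  namespace: monitoring
  labels:
    prometheus: k8s             # assumed; must match the Prometheus CR's ruleSelector
    role: alert-rules           # assumed; must match the Prometheus CR's ruleSelector
spec:
  groups:
  - name: redis.rules
    rules:
    - alert: RedisDown
      expr: redis_up == 0
      for: 0m
      labels:
        severity: critical
      annotations:
        summary: Redis down (instance {{ $labels.instance }})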
31.487179
80
0.59202
9a69b076eb40e1d7a07e5d8366fec2d80bd2bc4c
487
yaml
YAML
monitoring/JVM/Jvm-AlertRules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/Jvm-AlertRules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/JVM/Jvm-AlertRules.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
#JVM-Alert
- name: JvmMemoryFillingUp
  rules:
  - alert: JvmMemoryFillingUp
    expr: |
      (sum by (instance)(jvm_memory_bytes_used{area="heap"}) / sum by (instance)(jvm_memory_bytes_max{area="heap"})) * 100 > 50
    for: 2m
    labels:
      severity: warning
    annotations:
      summary: |
        JVM memory filling up (instance {{ $labels.instance }})
      description: |
        JVM memory is filling up (> 50%)
        MemoryValue = {{ $value }}
32.466667
129
0.581109
a2a68bff0864ca327e8109c2aabe0e8fa015060e
20,823
yaml
YAML
k8s-deploy/calico.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
k8s-deploy/calico.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
k8s-deploy/calico.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
--- # Source: calico/templates/calico-config.yaml # This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap apiVersion: v1 metadata: name: calico-config namespace: kube-system data: # Typha is disabled. typha_service_name: "none" # Configure the backend to use. calico_backend: "bird" # Configure the MTU to use veth_mtu: "1440" # The CNI network configuration to install on each node. The special # values in this config will be automatically populated. cni_network_config: |- { "name": "k8s-pod-network", "cniVersion": "0.3.1", "plugins": [ { "type": "calico", "log_level": "info", "datastore_type": "kubernetes", "nodename": "__KUBERNETES_NODE_NAME__", "mtu": __CNI_MTU__, "ipam": { "type": "calico-ipam" }, "policy": { "type": "k8s" }, "kubernetes": { "kubeconfig": "__KUBECONFIG_FILEPATH__" } }, { "type": "portmap", "snat": true, "capabilities": {"portMappings": true} } ] } --- # Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: felixconfigurations.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: FelixConfiguration plural: felixconfigurations singular: felixconfiguration --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ipamblocks.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPAMBlock plural: ipamblocks singular: ipamblock --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: blockaffinities.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BlockAffinity plural: blockaffinities singular: blockaffinity --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ipamhandles.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPAMHandle plural: ipamhandles singular: ipamhandle --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ipamconfigs.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPAMConfig plural: ipamconfigs singular: ipamconfig --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgppeers.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BGPPeer plural: bgppeers singular: bgppeer --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgpconfigurations.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: BGPConfiguration plural: bgpconfigurations singular: bgpconfiguration --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ippools.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: IPPool plural: ippools singular: ippool --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: hostendpoints.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: HostEndpoint plural: hostendpoints singular: hostendpoint --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: clusterinformations.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: ClusterInformation plural: clusterinformations singular: 
clusterinformation --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworkpolicies.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: GlobalNetworkPolicy plural: globalnetworkpolicies singular: globalnetworkpolicy --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworksets.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org version: v1 names: kind: GlobalNetworkSet plural: globalnetworksets singular: globalnetworkset --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: networkpolicies.crd.projectcalico.org spec: scope: Namespaced group: crd.projectcalico.org version: v1 names: kind: NetworkPolicy plural: networkpolicies singular: networkpolicy --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: networksets.crd.projectcalico.org spec: scope: Namespaced group: crd.projectcalico.org version: v1 names: kind: NetworkSet plural: networksets singular: networkset --- # Source: calico/templates/rbac.yaml # Include a clusterrole for the kube-controllers component, # and bind it to the calico-kube-controllers serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers rules: # Nodes are watched to monitor for deletions. - apiGroups: [""] resources: - nodes verbs: - watch - list - get # Pods are queried to check for existence. - apiGroups: [""] resources: - pods verbs: - get # IPAM resources are manipulated when nodes are deleted. - apiGroups: ["crd.projectcalico.org"] resources: - ippools verbs: - list - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete # Needs access to update clusterinformations. - apiGroups: ["crd.projectcalico.org"] resources: - clusterinformations verbs: - get - create - update --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-kube-controllers roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-kube-controllers subjects: - kind: ServiceAccount name: calico-kube-controllers namespace: kube-system --- # Include a clusterrole for the calico-node DaemonSet, # and bind it to the calico-node serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico-node rules: # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] resources: - pods - nodes - namespaces verbs: - get - apiGroups: [""] resources: - endpoints - services verbs: # Used to discover service IPs for advertisement. - watch - list # Used to discover Typhas. - get - apiGroups: [""] resources: - nodes/status verbs: # Needed for clearing NodeNetworkUnavailable flag. - patch # Calico stores some configuration information in node annotations. - update # Watch for changes to Kubernetes NetworkPolicies. - apiGroups: ["networking.k8s.io"] resources: - networkpolicies verbs: - watch - list # Used by Calico for policy information. - apiGroups: [""] resources: - pods - namespaces - serviceaccounts verbs: - list - watch # The CNI plugin patches pods/status. - apiGroups: [""] resources: - pods/status verbs: - patch # Calico monitors various CRDs for config. 
- apiGroups: ["crd.projectcalico.org"] resources: - globalfelixconfigs - felixconfigurations - bgppeers - globalbgpconfigs - bgpconfigurations - ippools - ipamblocks - globalnetworkpolicies - globalnetworksets - networkpolicies - networksets - clusterinformations - hostendpoints verbs: - get - list - watch # Calico must create and update some CRDs on startup. - apiGroups: ["crd.projectcalico.org"] resources: - ippools - felixconfigurations - clusterinformations verbs: - create - update # Calico stores some configuration information on the node. - apiGroups: [""] resources: - nodes verbs: - get - list - watch # These permissions are only requried for upgrade from v2.6, and can # be removed after upgrade or on fresh installations. - apiGroups: ["crd.projectcalico.org"] resources: - bgpconfigurations - bgppeers verbs: - create - update # These permissions are required for Calico CNI to perform IPAM allocations. - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities - ipamblocks - ipamhandles verbs: - get - list - create - update - delete - apiGroups: ["crd.projectcalico.org"] resources: - ipamconfigs verbs: - get # Block affinities must also be watchable by confd for route aggregation. - apiGroups: ["crd.projectcalico.org"] resources: - blockaffinities verbs: - watch # The Calico IPAM migration needs to get daemonsets. These permissions can be # removed if not upgrading from an installation using host-local IPAM. - apiGroups: ["apps"] resources: - daemonsets verbs: - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: calico-node roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: calico-node subjects: - kind: ServiceAccount name: calico-node namespace: kube-system --- # Source: calico/templates/calico-node.yaml # This manifest installs the calico-node container, as well # as the CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: apps/v1 metadata: name: calico-node namespace: kube-system labels: k8s-app: calico-node spec: selector: matchLabels: k8s-app: calico-node updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 template: metadata: labels: k8s-app: calico-node annotations: # This, along with the CriticalAddonsOnly toleration below, # marks the pod as a critical add-on, ensuring it gets # priority scheduling and that its resources are reserved # if it ever gets evicted. scheduler.alpha.kubernetes.io/critical-pod: '' spec: nodeSelector: beta.kubernetes.io/os: linux hostNetwork: true tolerations: # Make sure calico-node gets scheduled on all nodes. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - effect: NoExecute operator: Exists serviceAccountName: calico-node # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 priorityClassName: system-node-critical initContainers: # This container performs upgrade from host-local IPAM to calico-ipam. # It can be deleted if this is a fresh installation, or if you have already # upgraded to use calico-ipam. 
- name: upgrade-ipam image: calico/cni:v3.8.9 command: ["/opt/cni/bin/calico-ipam", "-upgrade"] env: - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend volumeMounts: - mountPath: /var/lib/cni/networks name: host-local-net-dir - mountPath: /host/opt/cni/bin name: cni-bin-dir securityContext: privileged: true # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni image: calico/cni:v3.8.9 command: ["/install-cni.sh"] env: # Name of the CNI config file to create. - name: CNI_CONF_NAME value: "10-calico.conflist" # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: name: calico-config key: cni_network_config # Set the hostname based on the k8s node name. - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName # CNI MTU Config variable - name: CNI_MTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # Prevents the container from sleeping forever. - name: SLEEP value: "false" volumeMounts: - mountPath: /host/opt/cni/bin name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir securityContext: privileged: true # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes # to communicate with Felix over the Policy Sync API. - name: flexvol-driver image: calico/pod2daemon-flexvol:v3.8.9 volumeMounts: - name: flexvol-driver-host mountPath: /host/driver securityContext: privileged: true containers: # Runs calico-node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node image: calico/node:v3.8.9 env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE value: "kubernetes" # Wait for the datastore. - name: WAIT_FOR_DATASTORE value: "true" # Set based on the k8s node name. - name: NODENAME valueFrom: fieldRef: fieldPath: spec.nodeName # Choose the backend to use. - name: CALICO_NETWORKING_BACKEND valueFrom: configMapKeyRef: name: calico-config key: calico_backend # Cluster type to identify the deployment type - name: CLUSTER_TYPE value: "k8s,bgp" # Auto-detect the BGP IP address. - name: IP value: "autodetect" # Enable IPIP - name: CALICO_IPV4POOL_IPIP value: "Always" # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: name: calico-config key: veth_mtu # The default IPv4 pool to create on startup if none exists. Pod IPs will be # chosen from this range. Changing this value after installation will have # no effect. This should fall within `--cluster-cidr`. - name: CALICO_IPV4POOL_CIDR value: "172.16.0.0/16" # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" # Set Felix endpoint to host default action to ACCEPT. - name: FELIX_DEFAULTENDPOINTTOHOSTACTION value: "ACCEPT" # Disable IPv6 on Kubernetes. 
- name: FELIX_IPV6SUPPORT value: "false" # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN value: "info" - name: FELIX_HEALTHENABLED value: "true" securityContext: privileged: true resources: requests: cpu: 250m livenessProbe: exec: command: - /bin/calico-node - -felix-live - -bird-live periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 readinessProbe: exec: command: - /bin/calico-node - -bird-ready - -felix-ready periodSeconds: 10 volumeMounts: - mountPath: /lib/modules name: lib-modules readOnly: true - mountPath: /run/xtables.lock name: xtables-lock readOnly: false - mountPath: /var/run/calico name: var-run-calico readOnly: false - mountPath: /var/lib/calico name: var-lib-calico readOnly: false - name: policysync mountPath: /var/run/nodeagent volumes: # Used by calico-node. - name: lib-modules hostPath: path: /lib/modules - name: var-run-calico hostPath: path: /var/run/calico - name: var-lib-calico hostPath: path: /var/lib/calico - name: xtables-lock hostPath: path: /run/xtables.lock type: FileOrCreate # Used to install CNI. - name: cni-bin-dir hostPath: path: /opt/cni/bin - name: cni-net-dir hostPath: path: /etc/cni/net.d # Mount in the directory for host-local IPAM allocations. This is # used when upgrading from host-local to calico-ipam, and can be removed # if not using the upgrade-ipam init container. - name: host-local-net-dir hostPath: path: /var/lib/cni/networks # Used to create per-pod Unix Domain Sockets - name: policysync hostPath: type: DirectoryOrCreate path: /var/run/nodeagent # Used to install Flex Volume Driver - name: flexvol-driver-host hostPath: type: DirectoryOrCreate path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-node namespace: kube-system --- # Source: calico/templates/calico-kube-controllers.yaml # See https://github.com/projectcalico/kube-controllers apiVersion: apps/v1 kind: Deployment metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers spec: # The controllers can only have a single active instance. replicas: 1 selector: matchLabels: k8s-app: calico-kube-controllers strategy: type: Recreate template: metadata: name: calico-kube-controllers namespace: kube-system labels: k8s-app: calico-kube-controllers annotations: scheduler.alpha.kubernetes.io/critical-pod: '' spec: nodeSelector: beta.kubernetes.io/os: linux tolerations: # Mark the pod as a critical add-on for rescheduling. - key: CriticalAddonsOnly operator: Exists - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: calico-kube-controllers priorityClassName: system-cluster-critical containers: - name: calico-kube-controllers image: calico/kube-controllers:v3.8.9 env: # Choose which controllers to run. - name: ENABLED_CONTROLLERS value: node - name: DATASTORE_TYPE value: kubernetes readinessProbe: exec: command: - /usr/bin/check-status - -r --- apiVersion: v1 kind: ServiceAccount metadata: name: calico-kube-controllers namespace: kube-system --- # Source: calico/templates/calico-etcd-secrets.yaml --- # Source: calico/templates/calico-typha.yaml --- # Source: calico/templates/configure-canal.yaml
26.291667
95
0.618307
b670cd9d6ba1eb1d40948ceb5fa9a762cad99100
383
yaml
YAML
monitoring/controllerManager&Scheduler/kube-scheduler-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/controllerManager&Scheduler/kube-scheduler-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/controllerManager&Scheduler/kube-scheduler-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: kube-scheduler
  labels:
    k8s-app: kube-scheduler
spec:
  selector:
    component: kube-scheduler
  ports:
  - name: http-metrics
    port: 10251
    targetPort: 10251

#Edit the kube-scheduler static pod manifest /etc/kubernetes/manifests/kube-scheduler.yaml:
#change --bind-address to 0.0.0.0
#check cluster component status with `kubectl get cs`, and comment out --port=0
20.157895
68
0.710183
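The Service above only exposes the scheduler's metrics port; Prometheus Operator still needs a ServiceMonitor that selects it. A minimal sketch, assuming the monitoring namespace used elsewhere in this repo and an illustrative object name (neither is taken from this file):

apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: kube-scheduler          # illustrative name, not from this repo
  namespace: monitoring
  labels:
    k8s-app: kube-scheduler
spec:
  jobLabel: k8s-app
  endpoints:
  - port: http-metrics          # must match the port name of the Service above
    interval: 15s
  selector:
    matchLabels:
      k8s-app: kube-scheduler   # matches the labels on the Service above
  namespaceSelector:
    matchNames:
    - kube-system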
d0c67d5d8c2d0758c125bbab98062353df7b2453
2,653
yaml
YAML
monitoring/PromAlert/PrometheusAlert-Deployment.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/PromAlert/PrometheusAlert-Deployment.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/PromAlert/PrometheusAlert-Deployment.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
data:
  app.conf: |
    #---------------------↓ global settings -----------------------
    appname = PrometheusAlert
    #listen port
    httpport = 8080
    runmode = dev
    #accept JSON request bodies
    copyrequestbody = true
    #alert message title
    title=k8s-test
    #SMS alert level (alerts at level 3 are sent by SMS); levels: 0 info, 1 warning, 2 moderately serious, 3 serious, 4 disaster
    messagelevel=3
    #phone-call alert level (alerts at level 4 trigger a voice call); levels: 0 info, 1 warning, 2 moderately serious, 3 serious, 4 disaster
    phonecalllevel=4
    #automatic alert suppression (by default only the first, highest-severity alert from a given source is sent and the rest are muted, to cut down on duplicate messages from the same source and prevent alert storms; 0 = off, 1 = on)
    silent=1
    #---------------------↓ webhook -----------------------
    #whether to enable the DingTalk alert channel (multiple channels can be enabled at the same time; 0 = off, 1 = on)
    open-dingding=1
    #default DingTalk robot webhook URL
    ddurl=https://oapi.dingtalk.com/robot/send?access_token=2ea370c1fa0445b304448686909447ba9d5295a531d74b2e2b81b158610d5c5b
kind: ConfigMap
metadata:
  name: prometheus-alert-center-conf
  namespace: monitoring
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: prometheus-alert-center
    alertname: prometheus-alert-center
  name: prometheus-alert-center
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus-alert-center
      alertname: prometheus-alert-center
  template:
    metadata:
      labels:
        app: prometheus-alert-center
        alertname: prometheus-alert-center
    spec:
      containers:
      - image: feiyu563/prometheus-alert:4.1
        name: prometheus-alert-center
        env:
        - name: TZ
          value: "Asia/Shanghai"
        ports:
        - containerPort: 8080
          name: http
        resources:
          limits:
            cpu: 200m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: prometheus-alert-center-conf-map
          mountPath: /app/conf/app.conf
          subPath: app.conf
      volumes:
      - name: prometheus-alert-center-conf-map
        configMap:
          name: prometheus-alert-center-conf
          items:
          - key: app.conf
            path: app.conf
---
apiVersion: v1
kind: Service
metadata:
  labels:
    alertname: prometheus-alert-center
  name: prometheus-alert-center
  namespace: monitoring
spec:
  ports:
  - name: http
    port: 8080
    targetPort: http
  selector:
    app: prometheus-alert-center
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
  name: prometheus-alert-center
  namespace: monitoring
spec:
  rules:
  - host: alert.code404.cn
    http:
      paths:
      - backend:
          serviceName: prometheus-alert-center
          servicePort: 8080
        path: /
24.118182
124
0.617414
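PrometheusAlert only relays what it receives, so Alertmanager still has to be pointed at the Service above. A rough sketch of the receiver side, assuming the in-cluster Service DNS name and the /prometheusalert webhook path of feiyu563/prometheus-alert; the path, template parameters, and receiver name are assumptions, not taken from this repo:

route:
  receiver: prometheus-alert-center
receivers:
- name: prometheus-alert-center          # illustrative receiver name
  webhook_configs:
  - url: 'http://prometheus-alert-center.monitoring.svc:8080/prometheusalert?type=dd&tpl=prometheus-dd'   # path and params assumed
    send_resolved: true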
19fb4131ea238180ddeca9326beb6abe32a90afd
694
yaml
YAML
monitoring/missing-container-metrics/podmonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/missing-container-metrics/podmonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/missing-container-metrics/podmonitor.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
{{ if .Values.prometheusOperator.podMonitor.enabled }} apiVersion: monitoring.coreos.com/v1 kind: PodMonitor metadata: name: {{ include "missing-container-metrics.fullname" . }} {{- with .Values.prometheusOperator.podMonitor.namespace }} namespace: {{ . }} {{- end }} labels: {{- include "missing-container-metrics.labels" . | nindent 4 }} {{- with .Values.prometheusOperator.podMonitor.selector }} {{- toYaml . | nindent 4 }} {{- end }} spec: selector: matchLabels: {{- include "missing-container-metrics.selectorLabels" . | nindent 6 }} podMetricsEndpoints: - port: http namespaceSelector: matchNames: - {{ .Release.Namespace }} {{ end }}
30.173913
77
0.659942
be0a6dc00b9d578e429e3e59ff22ba736b02f15b
417
yaml
YAML
ingress/ingress-pv-pvc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
ingress/ingress-pv-pvc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
ingress/ingress-pv-pvc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  nfs:
    server: 10.4.7.78
    path: /data/volumes
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ingress-pvc
  namespace: ingress-nginx
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
16.038462
40
0.709832
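Nothing in this file consumes the claim; a minimal sketch of a pod mounting it, where the pod name, image, and mount path are illustrative assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: ingress-log-writer        # illustrative name
  namespace: ingress-nginx        # same namespace as the PVC above
spec:
  containers:
  - name: app
    image: busybox:1.32
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: ingress-data
      mountPath: /data            # illustrative mount path
  volumes:
  - name: ingress-data
    persistentVolumeClaim:
      claimName: ingress-pvc      # the claim defined above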
864e214a86d28abd4990699ac5f7941563e2276b
659
yaml
YAML
monitoring/missing-container-metrics/prometheusrule.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/missing-container-metrics/prometheusrule.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/missing-container-metrics/prometheusrule.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
{{ if .Values.prometheusOperator.prometheusRule.enabled }} apiVersion: monitoring.coreos.com/v1 kind: PrometheusRule metadata: name: {{ include "missing-container-metrics.fullname" . }} {{- with .Values.prometheusOperator.prometheusRule.namespace }} namespace: {{ . }} {{- end }} labels: {{- include "missing-container-metrics.labels" . | nindent 4 }} {{- with .Values.prometheusOperator.prometheusRule.selector }} {{- toYaml . | nindent 4 }} {{- end }} spec: groups: - name: {{ include "missing-container-metrics.fullname" . }} rules: {{- toYaml .Values.prometheusOperator.prometheusRule.rules | nindent 6 }} {{ end }}
34.684211
79
0.672231
ad00ddda92589a175f8100a4d517bc314cad926f
313
yaml
YAML
monitoring/ingress/ingressContorller-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingress/ingressContorller-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingress/ingressContorller-svc.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: v1
kind: Service
metadata:
  labels:
    app: ingress-nginx
  name: ingress-monitor
  namespace: ingress
spec:
  ports:
  - name: ingress-nginx
    port: 10254
    protocol: TCP
    targetPort: 10254
  selector:
    app: ingress-nginx
  type: ClusterIP

#add the app=ingress-nginx label to the ingress-nginx controller Deployment so this selector matches its pods
17.388889
47
0.71885
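The trailing comment asks for an app=ingress-nginx label on the controller pods so this Service's selector can find them. A sketch of that change as a strategic-merge patch to apply with kubectl patch against the controller Deployment in the ingress namespace (the file name is illustrative and the target Deployment name must be taken from the actual cluster):

# add-ingress-label.yaml (illustrative file name)
spec:
  template:
    metadata:
      labels:
        app: ingress-nginx        # lets the ingress-monitor Service select the controller pods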
2a9436cc00c13cc8d372820566a4fc2c614a633c
2,731
yaml
YAML
monitoring/missing-container-metrics/values.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/missing-container-metrics/values.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/missing-container-metrics/values.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
# Default values for missing-container-metrics. # This is a YAML-formatted file. # Declare variables to be passed into your templates. image: repository: dmilhdef/missing-container-metrics pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. tag: "v0.21.0" imagePullSecrets: [] nameOverride: "" fullnameOverride: "" podAnnotations: prometheus.io/scrape: "true" prometheus.io/port: "3001" podSecurityContext: {} # fsGroup: 2000 securityContext: {} # capabilities: # drop: # - ALL # readOnlyRootFilesystem: true # runAsNonRoot: true # runAsUser: 1000 resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. # limits: # cpu: 100m # memory: 128Mi # requests: # cpu: 100m # memory: 128Mi useDocker: true useContainerd: false ###添加 prometheusOperator: podMonitor: # Create a Prometheus Operator PodMonitor resource enabled: true # Namespace defaults to the Release namespace but can be overridden namespace: "" # Additional labels to add to the PodMonitor so it matches the Operator's podMonitorSelector selector: app.kubernetes.io/name: missing-container-metrics prometheusRule: # Create a Prometheus Operator PrometheusRule resource enabled: true # Namespace defaults to the Release namespace but can be overridden namespace: "" # Additional labels to add to the PrometheusRule so it matches the Operator's ruleSelector selector: prometheus: k8s role: alert-rules # The rules can be set here. An example is defined here but can be overridden. rules: - alert: ContainerOOMObserved annotations: message: A process in this Pod has been OOMKilled due to exceeding the Kubernetes memory limit at least twice in the last 15 minutes. Look at the metrics to determine if a memory limit increase is required. expr: sum(increase(container_ooms[15m])) by (exported_namespace, exported_pod) > 2 labels: severity: warning - alert: ContainerOOMObserved annotations: message: A process in this Pod has been OOMKilled due to exceeding the Kubernetes memory limit at least ten times in the last 15 minutes. Look at the metrics to determine if a memory limit increase is required. expr: sum(increase(container_ooms[15m])) by (exported_namespace, exported_pod) > 10 labels: severity: critical
39.014286
218
0.730135
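For orientation, roughly what the chart's podmonitor.yaml template shown earlier renders with these default values; the release name, namespace, and exact generated label set are assumptions, since they depend on how the chart is installed:

apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: missing-container-metrics              # assumes the release is named like the chart
  namespace: monitoring                        # podMonitor.namespace is empty, so the release namespace is used
  labels:
    app.kubernetes.io/name: missing-container-metrics
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: missing-container-metrics
      app.kubernetes.io/instance: missing-container-metrics   # assumed instance label
  podMetricsEndpoints:
  - port: http
  namespaceSelector:
    matchNames:
    - monitoring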
6e8003ab95318e77e42356cd320733d3b158c917
316
yaml
YAML
monitoring/etcd/prometheus-serviceMonitorEtcd.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/etcd/prometheus-serviceMonitorEtcd.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/etcd/prometheus-serviceMonitorEtcd.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: etcd-k8s
  namespace: monitoring
  labels:
    k8s-app: etcd-k8s
spec:
  jobLabel: k8s-app
  endpoints:
  - port: port
    interval: 15s
  selector:
    matchLabels:
      k8s-app: etcd
  namespaceSelector:
    matchNames:
    - kube-system
17.555556
36
0.689873
a070188dd1b910805c9b69246c767a648f3c1876
5,612
yaml
YAML
k8s-elk/webappFilebeatWithSidecar-deployment.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
k8s-elk/webappFilebeatWithSidecar-deployment.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
k8s-elk/webappFilebeatWithSidecar-deployment.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
# webapp Service
---
apiVersion: v1
kind: Service
metadata:
  name: webapp
  namespace: test
  labels:
    app: webapp
spec:
  selector:
    app: webapp
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    name: http

#ingress
---
kind: Ingress
apiVersion: extensions/v1beta1
#apiVersion: networking.k8s.io/v1
metadata:
  name: webapp
  namespace: test
spec:
  rules:
  - host: test.code404.net
    http:
      paths:
      - path: /
        backend:
          serviceName: webapp
          servicePort: 80

# filebeat ConfigMap
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: test
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: log
      json.keys_under_root: true
      json.overwrite_keys: true
      json.message_key: message
      paths:
      - /logm/*.log
      - /logm/*/*.log
      - /logm/*/*/*.log
      - /logm/*/*/*/*.log
      - /logm/*/*/*/*/*.log
      multiline.pattern: "^[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}"
      multiline.negate: true
      multiline.match: after
      tail_files: true
      fields:
        POD_NAME: '${POD_NAME}'
        POD_IP: '${POD_IP}'
        PROJ_ENV: '${PROJ_ENV}'
    setup.ilm.enabled: false
    setup.template.enabled: true
    setup.template.name: "${PROJ_NAME}-${PROJ_ENV}"
    setup.template.pattern: "${PROJ_NAME}-${PROJ_ENV}-*"
    output.elasticsearch:
      hosts: ["${ES_HOST-1}:9200"]
      index: "${PROJ_NAME}-${PROJ_ENV}-%{+yyyy.MM.dd}"
      ilm.enabled: false

# nginx.conf ConfigMap
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-conf
  namespace: test
  labels:
    app: webapp
data:
  nginx.conf: |-
    user  nginx;
    worker_processes  auto;
    error_log  /var/log/nginx/error.log warn;
    pid  /var/run/nginx.pid;
    events {
        worker_connections  1024;
    }
    http {
        #include  /etc/nginx/mime.types;
        default_type  application/octet-stream;
        log_format json '{"time": "$time_iso8601", "remote_addr": "$remote_addr",'
                        '"x-forward-for":"$proxy_add_x_forwarded_for", "remote_user": "$remote_user",'
                        '"bytes_sent": "$bytes_sent", "request_time": "$request_time",'
                        '"status": "$status", "vhost": "$host", "request_proto": "$server_protocol",'
                        '"path": "$uri","request_query": "$args", "request_length": "$request_length",'
                        '"duration": "$request_time", "method": "$request_method", "http_referrer": "$http_referer",'
                        '"http_user_agent": "$http_user_agent","upstream_addr": "$upstream_addr",'
                        '"upstream_response_length": "$upstream_response_length",'
                        '"upstream_response_time": "$upstream_response_time",'
                        '"upstream_status": "$upstream_status"}';
        access_log  /var/log/nginx/access.log  json;
        sendfile  on;
        #tcp_nopush  on;
        keepalive_timeout  65;
        gzip  on;
        include /etc/nginx/conf.d/*.conf;
    }

# deployment
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webapp
  namespace: test
spec:
  replicas: 1
  minReadySeconds: 10          #the pod only counts as ready this many seconds after a rolling update
  strategy:
    rollingUpdate:             #with replicas=2 the pod count stays between 1 and 3 during an upgrade
      maxSurge: 1              #start at most 1 extra pod during a rolling update
      maxUnavailable: 1        #maximum number of pods allowed to be unavailable during a rolling update
  selector:
    matchLabels:
      app: webapp
  template:
    metadata:
      labels:
        app: webapp
    spec:
      terminationGracePeriodSeconds: 30   #give the process 30 seconds to shut down gracefully
      containers:
      - image: registry.cn-beijing.aliyuncs.com/openacl/filebeat:7.2.0-es-y
        name: filebeat
        args: [
          "-c", "/etc/filebeat/filebeat.yml",
          "-e",
        ]
        env:
        - name: ES_HOST-1
          value: 10.4.7.100
        - name: PROJ_NAME
          value: webapp
        - name: PROJ_ENV
          value: dev
        - name: POD_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.podIP
        - name: POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        #- name: NS
        #  valueFrom:
        #    fieldRef:
        #      apiVersion: v1
        #      fieldPath: metadata.namespace
        securityContext:
          runAsUser: 0
        resources:
          limits:
            memory: 50Mi
          requests:
            cpu: 200m
            memory: 50Mi
        volumeMounts:
        - name: filebeat-config            #mount the ConfigMap contents into the container's local directory
          mountPath: /etc/filebeat/
          readOnly: true
        - name: logm                       #the two containers in this pod share the logm directory: one writes, the other reads
          mountPath: /logm
      - name: webapp
        image: nginx:1.16                  #the image that provides the actual service
        ports:
        - containerPort: 80
        volumeMounts:
        - name: logm                       #write nginx logs into the shared logm volume
          mountPath: /var/log/nginx/
        - name: nginx-conf
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf
          readOnly: true
      volumes:
      - name: logm                         #define logm as an emptyDir volume shared within the pod
        emptyDir: {}
      - name: filebeat-config
        configMap:
          defaultMode: 420
          name: filebeat-config            #use the ConfigMap defined above
          items:
          - key: filebeat.yml
            path: filebeat.yml
      - name: nginx-conf
        configMap:
          defaultMode: 420
          name: nginx-conf
          items:
          - key: nginx.conf
            path: nginx.conf
26.851675
119
0.546686
3bf5235325d00e6e4247328f8c31880b45115d29
385
yaml
YAML
monitoring/ingressRelatedToPrometheus/alertmanager-ingress.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingressRelatedToPrometheus/alertmanager-ingress.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
monitoring/ingressRelatedToPrometheus/alertmanager-ingress.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    k8s.kuboard.cn/workload: alertmanager
  labels:
    app: alertmanager
  name: alertmanager
  namespace: monitoring
spec:
  rules:
  - host: alertmanager.code404.cn
    http:
      paths:
      - backend:
          serviceName: alertmanager-main
          servicePort: 9093
        path: /
20.263158
44
0.625974
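extensions/v1beta1 Ingress objects are no longer served on Kubernetes v1.22+; the same route expressed against the networking.k8s.io/v1 schema is sketched below, assuming an "nginx" IngressClass exists in the cluster and the backend Service stays unchanged:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    k8s.kuboard.cn/workload: alertmanager
  labels:
    app: alertmanager
  name: alertmanager
  namespace: monitoring
spec:
  ingressClassName: nginx          # assumes the cluster defines an "nginx" IngressClass
  rules:
  - host: alertmanager.code404.cn
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: alertmanager-main
            port:
              number: 9093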
5abfb448586c8098cbe3e89989b5494981e63b70
1,458
yaml
YAML
ingress/ingress-nginx-configmap.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
ingress/ingress-nginx-configmap.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
ingress/ingress-nginx-configmap.yaml
00724/deploy-k8s
c4ad00ce58d741dbce2380828f42f45eb7f58896
[ "Apache-2.0" ]
null
null
null
kind: ConfigMap apiVersion: v1 metadata: name: ingress-nginx-controller namespace: ingress-nginx labels: #app.kubernetes.io/name: ingress-nginx ##app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller data: keepalive_timeout: "150" proxy-read-timeout: "150" proxy-send-timeout: "150" worker-connections: "65535" proxy-connect-timeout: "150" #log-format: "time": "$time_iso8601", "remote_addr": "$remote_addr","body_bytes_sent": "$body_bytes_sent", "request_time": "$request_time", "status": "$status", "request": "$request", "request_method": "$request_method", "http_referrer": "$http_referer", "http_x_forwarded_for": "$http_x_forwarded_for", "http_user_agent": "$http_user_agent" #log-format: '$time_iso8601 $remote_addr $body_bytes_sent $request_time $upstream_response_time $status $request $request_method $http_referer $http_x_forwarded_for $http_user_agent' log-format-upstream: '{"time": "$time_iso8601", "remote_addr": "$proxy_protocol_addr", "x_forward_for": "$proxy_add_x_forwarded_for", "request_id": "$req_id", "remote_user": "$remote_user", "bytes_sent": $bytes_sent, "request_time": $request_time, "status": $status, "vhost": "$host", "request_proto": "$server_protocol", "path": "$uri", "request_query": "$args", "request_length": $request_length, "duration": $request_time,"method": "$request_method", "http_referrer": "$http_referer", "http_user_agent": "$http_user_agent" }'
76.736842
530
0.737997
c37bf69342e8355e0744c1263cc78396b9ea0a0f
71
yml
YAML
.travis.yml
007300/ng-time-value-accessor
e21d02928a285b8443794df9047e1fa9dd9eb0fe
[ "MIT" ]
null
null
null
.travis.yml
007300/ng-time-value-accessor
e21d02928a285b8443794df9047e1fa9dd9eb0fe
[ "MIT" ]
3
2020-09-05T09:11:08.000Z
2021-05-07T20:30:14.000Z
.travis.yml
007300/ng-local-var
6cfb095c3851359bc65a57ba149cd005c5178d1c
[ "MIT" ]
null
null
null
cache: yarn
addons:
  chrome: stable
language: node_js
node_js:
  - "8"
11.833333
17
0.704225
8784f7df053ef3e8fc0125d8d170823c5069de87
1,179
yml
YAML
.github/workflows/azure.yml
007444/-
4dd086a34e6a48c91f42263022604c36c2294cae
[ "Apache-2.0" ]
1
2020-09-10T06:48:56.000Z
2020-09-10T06:48:56.000Z
.github/workflows/azure.yml
007444/-
4dd086a34e6a48c91f42263022604c36c2294cae
[ "Apache-2.0" ]
null
null
null
.github/workflows/azure.yml
007444/-
4dd086a34e6a48c91f42263022604c36c2294cae
[ "Apache-2.0" ]
null
null
null
# This workflow builds a Node.js application and pushes it to an Azure Web App when a release is created.
#
# It assumes you have already created the target Azure App Service web app.
# For instructions see https://docs.microsoft.com/azure/app-service/app-service-plan-manage#create-an-app-service-plan
#
# To configure this workflow:
#
# 1. Set up a secret in your repository named AZURE_WEBAPP_PUBLISH_PROFILE with the value of your publish profile.
#    For instructions on obtaining the publish profile see: https://docs.microsoft.com/azure/app-service/deploy-github-actions#configure-the-github-secret
#
# 2. Change the values of the AZURE_WEBAPP_NAME, AZURE_WEBAPP_PACKAGE_PATH and NODE_VERSION environment variables (below).
#
# For more information on GitHub Actions for Azure, see https://github.com/Azure/Actions
# For more samples to get started with GitHub Action workflows that deploy to Azure, see https://github.com/Azure/actions-workflow-samples

on:
  release:
    types: [created]

env:
  AZURE_WEBAPP_NAME: your-app-name    # set this to your application's name
  AZURE_WEBAPP_PACKAGE_PATH: '.'      # set this to the path of your web app project, defaults to the repository root
  NODE_VERSION: '10.x'                # set this to the node version to use

jobs:
  build-and-deploy:
    name: Build and Deploy
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Use Node.js ${{ env.NODE_VERSION }}
      uses: actions/setup-node@v1
      with:
        node-version: ${{ env.NODE_VERSION }}
    - name: npm install, build, and test
      run: |
        # Build and test the project, then
        # deploy to Azure Web App.
        npm install
        npm run build --if-present
        npm run test --if-present
    - name: 'Deploy to Azure WebApp'
      uses: azure/webapps-deploy@v2
      with:
        app-name: ${{ env.AZURE_WEBAPP_NAME }}
        publish-profile: ${{ secrets.AZURE_WEBAPP_PUBLISH_PROFILE }}
        package: ${{ env.AZURE_WEBAPP_PACKAGE_PATH }}
24.5625
112
0.807464
dd30bac604c5550e943aedf7286d4accce32d922
2,705
yml
YAML
_config.yml
007Alice/007Alice.github.io
8122aa520817436f4414cec23dbd2b18b2467630
[ "Apache-2.0" ]
null
null
null
_config.yml
007Alice/007Alice.github.io
8122aa520817436f4414cec23dbd2b18b2467630
[ "Apache-2.0" ]
null
null
null
_config.yml
007Alice/007Alice.github.io
8122aa520817436f4414cec23dbd2b18b2467630
[ "Apache-2.0" ]
null
null
null
# Site settings title: Alice Blog SEOTitle: Alice的博客 | Alice Blog header-img: img/home-bg.jpg email: [email protected] description: "Stay hungry, stay foolish." keyword: "" url: "https://007alice.github.io/" # your host, for absolute URL baseurl: "/" # for example, '/blog' if your blog hosted on 'host/blog' # SNS settings RSS: false # weibo_username: huxpro # zhihu_username: huxpro github_username: 007Alice # twitter_username: huxpro # facebook_username: huxpro # Build settings # from 2016, 'pygments' is unsupported on GitHub Pages. Use 'rouge' for highlighting instead. highlighter: rouge permalink: pretty paginate: 10 exclude: ["less","node_modules","Gruntfile.js","package.json","README.md"] anchorjs: true # if you want to customize anchor. check out line:181 of `post.html` # Gems # from PR#40, to support local preview for Jekyll 3.0 gems: [jekyll-paginate] # Markdown settings # replace redcarpet to kramdown, # although redcarpet can auto highlight code, the lack of header-id make the catalog impossible, so I switch to kramdown # document: http://jekyllrb.com/docs/configuration/#kramdown markdown: kramdown kramdown: input: GFM # use Github Flavored Markdown !important # Disqus settings #disqus_username: _your_disqus_short_name_ # Duoshuo settings duoshuo_username: huxblog # Share component is depend on Comment so we can NOT use share only. duoshuo_share: true # set to false if you want to use Comment without Sharing # Analytics settings # Baidu Analytics # ba_track_id: 4cc1f2d8f3067386cc5cdb626a202900 # Google Analytics # ga_track_id: 'UA-49627206-1' # Format: UA-xxxxxx-xx # ga_domain: huangxuan.me # Sidebar settings sidebar: true # whether or not using Sidebar. sidebar-about-description: "努力进化 ing,行万里路 ing。" sidebar-avatar: /img/owner.jpg # use absolute URL, seeing it's used in both `/` and `/about/` # Featured Tags featured-tags: true # whether or not using Feature-Tags featured-condition-size: 1 # A tag will be featured if the size of it is more than this condition value # Friends friends: [ { title: "Alice Blog", href: "https://007alice.github.io/" },{ title: "Foo", href: "#" },{ title: "Bar", href: "#" },{ title: "Example Friends", href: "#" },{ title: "It helps SEO", href: "#" } ] # Support for LiveRe comments system. # You can get your uid from https://livere.com/insight/myCode (General web site) livere_uid: MTAyMC81Mjg4MC8yOTM1Nw==
25.046296
120
0.662847
a5297fd323cfc32fd995859f722fe8d0dd5778be
2,691
yaml
YAML
flutter_fishredux/pubspec.yaml
007HelloWorld/FishRedux
fd74911ff808d947b9b1998b2817eacb12977e4e
[ "Apache-2.0" ]
1
2020-05-06T14:21:40.000Z
2020-05-06T14:21:40.000Z
flutter_fishredux/pubspec.yaml
007HelloWorld/FishRedux
fd74911ff808d947b9b1998b2817eacb12977e4e
[ "Apache-2.0" ]
null
null
null
flutter_fishredux/pubspec.yaml
007HelloWorld/FishRedux
fd74911ff808d947b9b1998b2817eacb12977e4e
[ "Apache-2.0" ]
null
null
null
name: flutter_fishredux description: A new Flutter project. # The following defines the version and build number for your application. # A version number is three numbers separated by dots, like 1.2.43 # followed by an optional build number separated by a +. # Both the version and the builder number may be overridden in flutter # build by specifying --build-name and --build-number, respectively. # In Android, build-name is used as versionName while build-number used as versionCode. # Read more about Android versioning at https://developer.android.com/studio/publish/versioning # In iOS, build-name is used as CFBundleShortVersionString while build-number used as CFBundleVersion. # Read more about iOS versioning at # https://developer.apple.com/library/archive/documentation/General/Reference/InfoPlistKeyReference/Articles/CoreFoundationKeys.html version: 1.0.0+1 environment: sdk: ">=2.1.0 <3.0.0" dependencies: flutter: sdk: flutter fish_redux: ^0.2.7 # The following adds the Cupertino Icons font to your application. # Use with the CupertinoIcons class for iOS style icons. cupertino_icons: ^0.1.2 dev_dependencies: flutter_test: sdk: flutter # For information on the generic Dart part of this file, see the # following page: https://dart.dev/tools/pub/pubspec # The following section is specific to Flutter. flutter: # The following line ensures that the Material Icons font is # included with your application, so that you can use the icons in # the material Icons class. uses-material-design: true # To add assets to your application, add an assets section, like this: # assets: # - images/a_dot_burr.jpeg # - images/a_dot_ham.jpeg # An image asset can refer to one or more resolution-specific "variants", see # https://flutter.dev/assets-and-images/#resolution-aware. # For details regarding adding assets from package dependencies, see # https://flutter.dev/assets-and-images/#from-packages # To add custom fonts to your application, add a fonts section here, # in this "flutter" section. Each entry in this list should have a # "family" key with the font family name, and a "fonts" key with a # list giving the asset and other descriptors for the font. For # example: # fonts: # - family: Schyler # fonts: # - asset: fonts/Schyler-Regular.ttf # - asset: fonts/Schyler-Italic.ttf # style: italic # - family: Trajan Pro # fonts: # - asset: fonts/TrajanPro.ttf # - asset: fonts/TrajanPro_Bold.ttf # weight: 700 # # For details regarding fonts from package dependencies, # see https://flutter.dev/custom-fonts/#from-packages
35.88
132
0.729097
3efff552b49904a1563e58419e1c31c402248328
2,659
yaml
YAML
channel/pubspec.yaml
007HelloWorld/FlutterChannel
52c9353971549d64c1ed73981448762c10dcf502
[ "Apache-2.0" ]
null
null
null
channel/pubspec.yaml
007HelloWorld/FlutterChannel
52c9353971549d64c1ed73981448762c10dcf502
[ "Apache-2.0" ]
null
null
null
channel/pubspec.yaml
007HelloWorld/FlutterChannel
52c9353971549d64c1ed73981448762c10dcf502
[ "Apache-2.0" ]
null
null
null
name: channel description: A new Flutter project. # The following defines the version and build number for your application. # A version number is three numbers separated by dots, like 1.2.43 # followed by an optional build number separated by a +. # Both the version and the builder number may be overridden in flutter # build by specifying --build-name and --build-number, respectively. # In Android, build-name is used as versionName while build-number used as versionCode. # Read more about Android versioning at https://developer.android.com/studio/publish/versioning # In iOS, build-name is used as CFBundleShortVersionString while build-number used as CFBundleVersion. # Read more about iOS versioning at # https://developer.apple.com/library/archive/documentation/General/Reference/InfoPlistKeyReference/Articles/CoreFoundationKeys.html version: 1.0.0+1 environment: sdk: ">=2.1.0 <3.0.0" dependencies: flutter: sdk: flutter # The following adds the Cupertino Icons font to your application. # Use with the CupertinoIcons class for iOS style icons. cupertino_icons: ^0.1.2 dev_dependencies: flutter_test: sdk: flutter # For information on the generic Dart part of this file, see the # following page: https://dart.dev/tools/pub/pubspec # The following section is specific to Flutter. flutter: # The following line ensures that the Material Icons font is # included with your application, so that you can use the icons in # the material Icons class. uses-material-design: true # To add assets to your application, add an assets section, like this: # assets: # - images/a_dot_burr.jpeg # - images/a_dot_ham.jpeg # An image asset can refer to one or more resolution-specific "variants", see # https://flutter.dev/assets-and-images/#resolution-aware. # For details regarding adding assets from package dependencies, see # https://flutter.dev/assets-and-images/#from-packages # To add custom fonts to your application, add a fonts section here, # in this "flutter" section. Each entry in this list should have a # "family" key with the font family name, and a "fonts" key with a # list giving the asset and other descriptors for the font. For # example: # fonts: # - family: Schyler # fonts: # - asset: fonts/Schyler-Regular.ttf # - asset: fonts/Schyler-Italic.ttf # style: italic # - family: Trajan Pro # fonts: # - asset: fonts/TrajanPro.ttf # - asset: fonts/TrajanPro_Bold.ttf # weight: 700 # # For details regarding fonts from package dependencies, # see https://flutter.dev/custom-fonts/#from-packages
36.424658
132
0.729222
2507929c34c948f6f1967c3631710e22742f2af0
2,144
yml
YAML
_config.yml
007Rohit/merge-conflict
a8a321495dbde03d1d19975c4dbcbde728977eab
[ "MIT" ]
null
null
null
_config.yml
007Rohit/merge-conflict
a8a321495dbde03d1d19975c4dbcbde728977eab
[ "MIT" ]
3
2018-11-04T13:54:42.000Z
2018-11-04T17:55:13.000Z
_config.yml
007Rohit/merge-conflict
a8a321495dbde03d1d19975c4dbcbde728977eab
[ "MIT" ]
null
null
null
# Site settings title: "Jekyll Resume Template" description: "A resume template for Jekyll and GitHub Pages sites." # Build settings markdown: kramdown sass: sass_dir: _sass style: compressed # Resume settings resume_avatar: "true" update-config resume_name: "GitHub Teacher" resume_title: "Professional Trainer" resume_contact_email: "[email protected]" resume_contact_telephone: "1-(877)-448-4820" resume_contact_address: "San Francisco, California" resume_header_contact_info: "San Francisco, California | 1-(877)-448-4820 | [email protected]" resume_header_intro: "<p>Charting the knowledge of the Internet, just like Galileo charted the stars.</p>" # use "yes" to display the email contact button, # "no" to display an "I'm not looking for work" message, # or remove the resume_looking_for_work option entirely # to leave blank resume_looking_for_work: "yes" # Decide which sections to use # comment out to hide resume_section_experience: true resume_section_education: true resume_section_projects: true resume_section_skills: true resume_section_recognition: true resume_section_links: true resume_section_associations: true # resume_section_interests: true # Resume social links # uncomment the options you wish to display, and add your own URL resume_social_links: resume_github_url: "https://github.com/jglovier/resume-template" resume_twitter_url: "http://twitter.com/jglovier" resume_dribbble_url: "https://dribbble.com/jag" # resume_facebook_url: "insert Facebook URL here" resume_linkedin_url: "https://www.linkedin.com/in/joelglovier" # resume_instagram_url: "insert your Instagram URL here" resume_website_url: "http://joelglovier.com" resume_print_social_links: true # Design settings resume_theme: default # Note: when editing locally, remember to restart # your Jekyll server when you edit this file. Changes # to _config.yml are only recognized when the server starts
35.733333
106
0.714086
8c4138d20eee04829afa257d7c0484fb0fbc4846
406
yml
YAML
_data/skills.yml
007Rohit/merge-conflict
a8a321495dbde03d1d19975c4dbcbde728977eab
[ "MIT" ]
null
null
null
_data/skills.yml
007Rohit/merge-conflict
a8a321495dbde03d1d19975c4dbcbde728977eab
[ "MIT" ]
3
2018-11-04T13:54:42.000Z
2018-11-04T17:55:13.000Z
_data/skills.yml
007Rohit/merge-conflict
a8a321495dbde03d1d19975c4dbcbde728977eab
[ "MIT" ]
null
null
null
# Skills
# I am a Java professional

# Education
- skill: Education
  description: Developed and maintained various conference talks, online training, and in-person trainings covering various topics including Git, GitHub, and Open Source.

# Leadership
- skill: Leadership
  description: Managed multiple asynchronous teams in the development, maintenance, and release of various web applications and websites.
40.6
170
0.810345
21ca680976d251190a1c927dd68f8c2f4a9f8c56
2,098
yml
YAML
_config.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
null
null
null
_config.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
6
2020-11-25T21:14:44.000Z
2020-12-24T16:24:11.000Z
_config.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
null
null
null
# Site settings title: "Jekyll Resume Template" description: "A resume template for Jekyll and GitHub Pages sites." # Build settings markdown: kramdown sass: sass_dir: _sass style: compressed # Resume settings resume_title: "Professional Trainer" resume_contact_telephone: "1-(877)-448-4820" resume_header_contact_info: "San Francisco, California | 1-(877)-448-4820 | [email protected]" resume_header_intro: "<p>Charting the knowledge of the Internet, just like Galileo charted the stars.</p>" resume_name: "Surftocat" resume_contact_email: "[email protected]" resume_contact_address: "San Diego, California" # use "yes" to display the email contact button, # "no" to display an "I'm not looking for work" message, # or remove the resume_looking_for_work option entirely # to leave blank resume_looking_for_work: "yes" # Decide which sections to use # comment out to hide resume_section_experience: true resume_section_education: true resume_section_projects: true resume_section_skills: true resume_section_recognition: true resume_section_links: true resume_section_associations: true # resume_section_interests: true # Resume social links # uncomment the options you wish to display, and add your own URL resume_social_links: resume_github_url: "https://github.com/jglovier/resume-template" resume_twitter_url: "http://twitter.com/jglovier" resume_dribbble_url: "https://dribbble.com/jag" # resume_facebook_url: "insert Facebook URL here" resume_linkedin_url: "https://www.linkedin.com/in/joelglovier" # resume_instagram_url: "insert your Instagram URL here" resume_website_url: "http://joelglovier.com" resume_print_social_links: true # Design settings resume_theme: default # Note: when editing locally, remember to restart # your Jekyll server when you edit this file. Changes # to _config.yml are only recognized when the server starts
32.78125
106
0.712583
9c516e1652cb88f6f0ca0fb430c848e575850371
570
yml
YAML
_data/experience.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
null
null
null
_data/experience.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
6
2020-11-25T21:14:44.000Z
2020-12-24T16:24:11.000Z
_data/experience.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
null
null
null
# Jobs
  duration: March, 2012 &mdash; Present
  summary: Teach all things Git, give away all the stickers, ensure world peace.

# Surftocat
- company: Self Employed
  position: Surftocat

# Job Title
- company: Conway Meats
  position: Web Developer
  duration: May, 2014 &mdash; May, 2015
  summary: Web developer for a restaurant meat provider.

# Supportocat
- company: GitHub, Inc.
  position: Supportocat, GitHub for Business
  duration: June 2016 &mdash; Present
  summary: Provide world class support to customers on the GitHub for Business platform
23.75
87
0.738596
99027e4b001dde23fe779b28b8b8eb3c3d2e07f3
447
yml
YAML
_data/interests.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
null
null
null
_data/interests.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
6
2020-11-25T21:14:44.000Z
2020-12-24T16:24:11.000Z
_data/interests.yml
007Salih2020/merge-conflicts
6a618668e8b7253fdd116e656a6f73309fe2c093
[ "MIT" ]
null
null
null
# Interests
- description: Electron, Atom, GraphQL, Webhooks, and most of all... Git
- description: Movies about a cappella singing
- description: Anaheim Theme Parks
- description: Travel
- description: Animated cartoons involving restaurants and burgers
- description: Amusement parks
- description: Open Source
- description: Empowering underrepresented groups in tech through education and free events.
- description: French Horn
24.833333
92
0.776286
b6bcd414ad49fed1d0663cb856f5c01eeeec890a
515
yml
YAML
.github/workflows/my-workflow.yml
007Salih2020/write-github-script
135f1b0869f42d0248ae45c07b9a08086c278679
[ "MIT" ]
null
null
null
.github/workflows/my-workflow.yml
007Salih2020/write-github-script
135f1b0869f42d0248ae45c07b9a08086c278679
[ "MIT" ]
7
2020-11-28T18:14:29.000Z
2020-12-24T19:43:03.000Z
.github/workflows/my-workflow.yml
007Salih2020/write-github-script
135f1b0869f42d0248ae45c07b9a08086c278679
[ "MIT" ]
null
null
null
name: Learning GitHub Script on: issues: types: [opened] jobs: comment: runs-on: ubuntu-latest steps: - uses: actions/[email protected] with: github-token: ${{secrets.GITHUB_TOKEN}} script: | github.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, body: "🎉 You've created this issue comment using GitHub Script!!!" })
24.52381
80
0.557282
b6bc59c4d28d57f6523d1d6983048a59ce0e67b5
508
yml
YAML
_config.yml
007Steve/007Steve.github.io
8b226c23503af816473081a97e9ec99aaaa366f1
[ "MIT" ]
null
null
null
_config.yml
007Steve/007Steve.github.io
8b226c23503af816473081a97e9ec99aaaa366f1
[ "MIT" ]
null
null
null
_config.yml
007Steve/007Steve.github.io
8b226c23503af816473081a97e9ec99aaaa366f1
[ "MIT" ]
null
null
null
# Site settings author: Stephen Plummer title: header-img: img/home-bg.png email: [email protected] copyright_name: Stephen Plummer description: > # this means to ignore newlines until "baseurl:" baseurl: "" # the subpath of your site, e.g. /blog twitter_username: github_username: 007Steve # Build settings markdown: kramdown highlighter: rouge permalink: pretty paginate: 5 exclude: ["less","node_modules","Gruntfile.js","package.json","README.md"] gems: [jekyll-paginate, jekyll-feed]
24.190476
74
0.759843
3e02f91524ebb44d823b21bbd55c6fda9a72ee2d
2,326
yml
YAML
.github/workflows/codeql-analysis.yml
007aneesh/IWT-Website
5cd91d106bed9dfb3ec5053dbe1e979e60d9337a
[ "MIT" ]
null
null
null
.github/workflows/codeql-analysis.yml
007aneesh/IWT-Website
5cd91d106bed9dfb3ec5053dbe1e979e60d9337a
[ "MIT" ]
null
null
null
.github/workflows/codeql-analysis.yml
007aneesh/IWT-Website
5cd91d106bed9dfb3ec5053dbe1e979e60d9337a
[ "MIT" ]
null
null
null
# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ main ] pull_request: # The branches below must be a subset of the branches above branches: [ main ] schedule: - cron: '44 2 * * 0' jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: [ 'javascript' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://git.io/codeql-language-support steps: - name: Checkout repository uses: actions/checkout@v2 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v1 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v1 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v1
32.760563
92
0.660791
f95ea36e816c0789586e4d1526357ba1d733afb0
1,044
yml
YAML
_config.yml
007aniketkumar/onepage-bio
6fc6cea7e4f89907687eece0b1892c8b79312a58
[ "Apache-2.0" ]
1
2021-03-02T07:08:56.000Z
2021-03-02T07:08:56.000Z
_config.yml
007aniketkumar/007aniketkumar.github.io
6fc6cea7e4f89907687eece0b1892c8b79312a58
[ "Apache-2.0" ]
null
null
null
_config.yml
007aniketkumar/007aniketkumar.github.io
6fc6cea7e4f89907687eece0b1892c8b79312a58
[ "Apache-2.0" ]
null
null
null
# Site settings name: Aniket Kumar email: [email protected] #canonical_url: https://aniketkumar.com keywords: "Software Engineer, Java, Microservices, API Gateway, Scalable, Lead" skills: "Senior Software Engineer in Morgan Stanley, India" baseurl: # Social networks usernames (many more available: google-plus, flickr, dribbble, pinterest, instagram, tumblr, linkedin, etc.) # You should replace these with your social networks, of course. :) social: - title: linkedin url: https://www.linkedin.com/in/aniket-kumar-23105a14/ #- title: medium #url: https://medium.com/@cgroom #- title: twitter # url: https://twitter.com/chuck_groom # Google webmaster tools google_verify: # https://ssl.bing.com/webmaster/configure/verify/ownership Option 2 content= goes here bing_verify: # Color settings (hex-codes without the leading hash-tag) color: primary: 2196F3 primary-rgb: "33, 150, 243" secondary: 285172 secondary-dark: 29455b section-tout: eaf1f1 # Build settings markdown: kramdown permalink: pretty
27.473684
126
0.748084
36cfcad0f0a4b57f3fa127800e3f55cdc6b97d8a
400
yml
YAML
exampleSite/data/en/banner.yml
007architect/meghna-hugo
2eca2ff78e9153d5994e560c043d8461bf6b33be
[ "CC-BY-3.0" ]
null
null
null
exampleSite/data/en/banner.yml
007architect/meghna-hugo
2eca2ff78e9153d5994e560c043d8461bf6b33be
[ "CC-BY-3.0" ]
null
null
null
exampleSite/data/en/banner.yml
007architect/meghna-hugo
2eca2ff78e9153d5994e560c043d8461bf6b33be
[ "CC-BY-3.0" ]
null
null
null
--- banner: enable: true bg_image_webp: images/backgrounds/hero-area.webp bg_image: images/backgrounds/hero-area.jpeg icon: '' title: ARCHITECT HUGH WOOD content: Lorem ipsum dolor sit amet consectetur adipisicing elit. Fugit, excepturi. At recusandae sit perferendis autem,iste tempora nostrum numquam sapiente! button: enable: true label: Explore Us link: "#services"
28.571429
85
0.74
114cfb944d97dd626830b9a44064afb649572424
8,360
yml
YAML
_config.yml
007bishesh/007bishesh.github.io
50fc6a3053fa8fc9214855d8c8a38aba978111ba
[ "BSD-3-Clause", "MIT" ]
null
null
null
_config.yml
007bishesh/007bishesh.github.io
50fc6a3053fa8fc9214855d8c8a38aba978111ba
[ "BSD-3-Clause", "MIT" ]
null
null
null
_config.yml
007bishesh/007bishesh.github.io
50fc6a3053fa8fc9214855d8c8a38aba978111ba
[ "BSD-3-Clause", "MIT" ]
1
2018-12-31T20:11:56.000Z
2018-12-31T20:11:56.000Z
# Welcome to Jekyll! # # This config file is meant for settings that affect your entire site, values # which you are expected to set up once and rarely need to edit after that. # For technical reasons, this file is *NOT* reloaded automatically when you use # `jekyll serve`. If you change this file, please restart the server process. # Theme Settings # # Review documentation to determine if you should use `theme` or `remote_theme` # https://mmistakes.github.io/minimal-mistakes/docs/quick-start-guide/#installing-the-theme # theme : "minimal-mistakes-jekyll" remote_theme : "mmistakes/minimal-mistakes" minimal_mistakes_skin : "default" # "air", "aqua", "contrast", "dark", "dirt", "neon", "mint", "plum", "sunrise" # Site Settings locale : "en-US" title : "Bishesh Manandhar" title_separator : "-" name : "Bishesh" description : "My Portfolio" url : # the base hostname & protocol for your site e.g. "https://mmistakes.github.io" baseurl : # the subpath of your site, e.g. "/blog" repository : # GitHub username/repo-name e.g. "mmistakes/minimal-mistakes" teaser : # path of fallback teaser image, e.g. "/assets/images/500x300.png" # breadcrumbs : false # true, false (default) words_per_minute : 200 comments: provider : # false (default), "disqus", "discourse", "facebook", "google-plus", "staticman", "staticman_v2" "custom" disqus: shortname : # https://help.disqus.com/customer/portal/articles/466208-what-s-a-shortname- discourse: server : # https://meta.discourse.org/t/embedding-discourse-comments-via-javascript/31963 , e.g.: meta.discourse.org facebook: # https://developers.facebook.com/docs/plugins/comments appid : num_posts : # 5 (default) colorscheme : # "light" (default), "dark" staticman: allowedFields : # ['name', 'email', 'url', 'message'] branch : # "master" commitMessage : # "New comment by {fields.name}" filename : # comment-{@timestamp} format : # "yml" moderation : # true path : # "/_data/comments/{options.slug}" (default) requiredFields : # ['name', 'email', 'message'] transforms: email : # "md5" generatedFields: date: type : # "date" options: format : # "iso8601" (default), "timestamp-seconds", "timestamp-milliseconds" endpoint : # URL of your own deployment with trailing slash, will fallback to the public instance reCaptcha: siteKey : secret : atom_feed: path : # blank (default) uses feed.xml search : # true, false (default) search_full_content : # true, false (default) search_provider : # lunr (default), algolia, google algolia: application_id : # YOUR_APPLICATION_ID index_name : # YOUR_INDEX_NAME search_only_api_key : # YOUR_SEARCH_ONLY_API_KEY powered_by : # true (default), false google: search_engine_id : # YOUR_SEARCH_ENGINE_ID instant_search : # false (default), true # SEO Related google_site_verification : bing_site_verification : yandex_site_verification : naver_site_verification : # Social Sharing twitter: username : facebook: username : app_id : publisher : og_image : # Open Graph/Twitter default site image # For specifying social profiles # - https://developers.google.com/structured-data/customize/social-profiles social: type : # Person or Organization (defaults to Person) name : # If the user or organization name differs from the site's name links: # An array of links to social media profiles # Analytics analytics: provider : false # false (default), "google", "google-universal", "custom" google: tracking_id : anonymize_ip : # true, false (default) # Site Author author: name : "Bishesh Manandhar" avatar : # path of avatar image, e.g. 
"/assets/images/bio-photo.jpg" bio : "Development Portfolio" location : "Toronto" email : links: - label: "Email" icon: "fas fa-fw fa-envelope-square" # url: mailto:[email protected] - label: "Website" icon: "fas fa-fw fa-link" # url: "https://your-website.com" - label: "Twitter" icon: "fab fa-fw fa-twitter-square" # url: "https://twitter.com/" - label: "Facebook" icon: "fab fa-fw fa-facebook-square" # url: "https://facebook.com/" - label: "GitHub" icon: "fab fa-fw fa-github" url: "https://github.com/007bishesh/" # Site Footer footer: links: - label: "Twitter" icon: "fab fa-fw fa-twitter-square" # url: - label: "Facebook" icon: "fab fa-fw fa-facebook-square" # url: - label: "GitHub" icon: "fab fa-fw fa-github" # url: - label: "GitLab" icon: "fab fa-fw fa-gitlab" # url: - label: "Bitbucket" icon: "fab fa-fw fa-bitbucket" # url: - label: "Instagram" icon: "fab fa-fw fa-instagram" # url: # Reading Files include: - .htaccess - _pages exclude: - "*.sublime-project" - "*.sublime-workspace" - vendor - .asset-cache - .bundle - .jekyll-assets-cache - .sass-cache - assets/js/plugins - assets/js/_main.js - assets/js/vendor - Capfile - CHANGELOG - config - Gemfile - Gruntfile.js - gulpfile.js - LICENSE - log - node_modules - package.json - Rakefile - README - tmp - /docs # ignore Minimal Mistakes /docs - /test # ignore Minimal Mistakes /test keep_files: - .git - .svn encoding: "utf-8" markdown_ext: "markdown,mkdown,mkdn,mkd,md" # Conversion markdown: kramdown highlighter: rouge lsi: false excerpt_separator: "\n\n" incremental: false # Markdown Processing kramdown: input: GFM hard_wrap: false auto_ids: true footnote_nr: 1 entity_output: as_char toc_levels: 1..6 smart_quotes: lsquo,rsquo,ldquo,rdquo enable_coderay: false # Sass/SCSS sass: sass_dir: _sass style: compressed # http://sass-lang.com/documentation/file.SASS_REFERENCE.html#output_style # Outputting permalink: /:categories/:title/ paginate: 5 # amount of posts to show paginate_path: /page:num/ timezone: # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones # Plugins (previously gems:) plugins: - jekyll-paginate - jekyll-sitemap - jekyll-gist - jekyll-feed - jemoji - jekyll-include-cache # mimic GitHub Pages with --safe whitelist: - jekyll-paginate - jekyll-sitemap - jekyll-gist - jekyll-feed - jemoji - jekyll-include-cache # Archives # Type # - GitHub Pages compatible archive pages built with Liquid ~> type: liquid (default) # - Jekyll Archives plugin archive pages ~> type: jekyll-archives # Path (examples) # - Archive page should exist at path when using Liquid method or you can # expect broken links (especially with breadcrumbs enabled) # - <base_path>/tags/my-awesome-tag/index.html ~> path: /tags/ # - <base_path/categories/my-awesome-category/index.html ~> path: /categories/ # - <base_path/my-awesome-category/index.html ~> path: / category_archive: type: liquid path: /categories/ tag_archive: type: liquid path: /tags/ # https://github.com/jekyll/jekyll-archives # jekyll-archives: # enabled: # - categories # - tags # layouts: # category: archive-taxonomy # tag: archive-taxonomy # permalinks: # category: /categories/:name/ # tag: /tags/:name/ # HTML Compression # - http://jch.penibelst.de/ compress_html: clippings: all ignore: envs: development # Defaults defaults: # _posts - scope: path: "" type: posts values: layout: single author_profile: true read_time: true comments: # true share: true related: true # _pages - scope: path:"" type:pages values: layout:single author_profile:true
28.338983
134
0.616866
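The Minimal Mistakes _config.yml in the record above ends with a `defaults` block whose second scope is written as `path:""`, `type:pages`, `layout:single`, `author_profile:true`, without the space YAML needs after each colon, so that scope would likely be folded into a plain scalar and never applied as intended. A minimal sketch of how that scope is conventionally written; the paths and values mirror the record and nothing new is introduced:

# Hedged sketch: the _pages front-matter defaults scope with standard YAML spacing.
defaults:
  # _pages
  - scope:
      path: ""              # every file
      type: pages           # in the pages collection
    values:
      layout: single
      author_profile: true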
bd29ef9da1c69e5a72fa27182293cedf66cd1bcb
38889
yaml
YAML
cassandra.yaml
007ebey/graknsurvey
98fac1104e576322bba2acac8715e64e20898e29
[ "MIT" ]
5
2018-05-22T12:54:09.000Z
2020-04-22T07:14:44.000Z
cassandra.yaml
007ebey/graknsurvey
98fac1104e576322bba2acac8715e64e20898e29
[ "MIT" ]
1
2018-05-23T23:07:00.000Z
2018-05-24T16:35:16.000Z
cassandra.yaml
007ebey/graknsurvey
98fac1104e576322bba2acac8715e64e20898e29
[ "MIT" ]
3
2018-08-05T05:37:30.000Z
2018-12-16T19:54:38.000Z
# Cassandra storage config YAML # NOTE: # See http://wiki.apache.org/cassandra/StorageConfiguration for # full explanations of configuration directives # /NOTE # The name of the cluster. This is mainly used to prevent machines in # one logical cluster from joining another. cluster_name: 'Grakn Example Cluster' # This defines the number of tokens randomly assigned to this node on the ring # The more tokens, relative to other nodes, the larger the proportion of data # that this node will store. You probably want all nodes to have the same number # of tokens assuming they have equal hardware capability. # # If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, # and will use the initial_token as described below. # # Specifying initial_token will override this setting on the node's initial start, # on subsequent starts, this setting will apply even if initial token is set. # # If you already have a cluster with 1 token per node, and wish to migrate to # multiple tokens per node, see http://wiki.apache.org/cassandra/Operations # # The default value that ships with Janus is smaller than the default value # that ships with Apache Cassandra. Fewer tokens means faster Faunus run # times on small datasets. That's convenient when testing Janus(-Hadoop) on # toy data. However, increasing num_tokens is strongly recommended for # production Janus instances backed by Cassandra. This low value is just # for development and testing. num_tokens: 4 # initial_token allows you to specify tokens manually. While you can use # it with # vnodes (num_tokens > 1, above) -- in which case you should provide a # comma-separated list -- it's primarily used when adding nodes # to legacy clusters # that do not have vnodes enabled. # initial_token: # See http://wiki.apache.org/cassandra/HintedHandoff # May either be "true" or "false" to enable globally, or contain a list # of data centers to enable per-datacenter. # hinted_handoff_enabled: DC1,DC2 hinted_handoff_enabled: true # this defines the maximum amount of time a dead host will have hints # generated. After it has been dead this long, new hints for it will not be # created until it has been seen alive and gone down again. max_hint_window_in_ms: 10800000 # 3 hours # Maximum throttle in KBs per second, per delivery thread. This will be # reduced proportionally to the number of nodes in the cluster. (If there # are two nodes in the cluster, each delivery thread will use the maximum # rate; if there are three, each will throttle to half of the maximum, # since we expect two nodes to be delivering hints simultaneously.) hinted_handoff_throttle_in_kb: 1024 # Number of threads with which to deliver hints; # Consider increasing this number when you have multi-dc deployments, since # cross-dc handoff tends to be slower max_hints_delivery_threads: 2 # Maximum throttle in KBs per second, total. This will be # reduced proportionally to the number of nodes in the cluster. batchlog_replay_throttle_in_kb: 1024 # Authentication backend, implementing IAuthenticator; used to identify users # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, # PasswordAuthenticator}. # # - AllowAllAuthenticator performs no checks - set it to disable authentication. # - PasswordAuthenticator relies on username/password pairs to authenticate # users. It keeps usernames and hashed passwords in system_auth.credentials table. # Please increase system_auth keyspace replication factor if you use this authenticator. 
authenticator: AllowAllAuthenticator # Authorization backend, implementing IAuthorizer; used to limit access/provide permissions # Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, # CassandraAuthorizer}. # # - AllowAllAuthorizer allows any action to any user - set it to disable authorization. # - CassandraAuthorizer stores permissions in system_auth.permissions table. Please # increase system_auth keyspace replication factor if you use this authorizer. authorizer: AllowAllAuthorizer # Validity period for permissions cache (fetching permissions can be an # expensive operation depending on the authorizer, CassandraAuthorizer is # one example). Defaults to 2000, set to 0 to disable. # Will be disabled automatically for AllowAllAuthorizer. permissions_validity_in_ms: 2000 # Refresh interval for permissions cache (if enabled). # After this interval, cache entries become eligible for refresh. Upon next # access, an async reload is scheduled and the old value returned until it # completes. If permissions_validity_in_ms is non-zero, then this must be # also. # Defaults to the same value as permissions_validity_in_ms. # permissions_update_interval_in_ms: 1000 # The partitioner is responsible for distributing groups of rows (by # partition key) across nodes in the cluster. You should leave this # alone for new clusters. The partitioner can NOT be changed without # reloading all data, so when upgrading you should set this to the # same partitioner you were already using. # # Besides Murmur3Partitioner, partitioners included for backwards # compatibility include RandomPartitioner, ByteOrderedPartitioner, and # OrderPreservingPartitioner. # partitioner: org.apache.cassandra.dht.Murmur3Partitioner # Directories where Cassandra should store data on disk. Cassandra # will spread data evenly across them, subject to the granularity of # the configured compaction strategy. data_file_directories: - db/cassandra/data # commit log commitlog_directory: db/cassandra/commitlog hints_directory: db/cassandra/data/hints cdc_raw_directory: db/cassandra/data/cdc_raw # policy for data disk failures: # die: shut down gossip and client transports and kill the JVM for any fs errors or # single-sstable errors, so the node can be replaced. # stop_paranoid: shut down gossip and client transports even for single-sstable errors, # kill the JVM for errors during startup. # stop: shut down gossip and client transports, leaving the node effectively dead, but # can still be inspected via JMX, kill the JVM for errors during startup. # best_effort: stop using the failed disk and respond to requests based on # remaining available sstables. This means you WILL see obsolete # data at CL.ONE! # ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra disk_failure_policy: stop # policy for commit disk failures: # die: shut down gossip and Thrift and kill the JVM, so the node can be replaced. # stop: shut down gossip and Thrift, leaving the node effectively dead, but # can still be inspected via JMX. # stop_commit: shutdown the commit log, letting writes collect but # continuing to service reads, as in pre-2.0.5 Cassandra # ignore: ignore fatal errors and let the batches fail commit_failure_policy: stop # Maximum size of the key cache in memory. # # Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the # minimum, sometimes more. The key cache is fairly tiny for the amount of # time it saves, so it's worthwhile to use it at large numbers. 
# The row cache saves even more time, but must contain the entire row, # so it is extremely space-intensive. It's best to only use the # row cache if you have hot rows or static rows. # # NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. # # Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. key_cache_size_in_mb: # Duration in seconds after which Cassandra should # save the key cache. Caches are saved to saved_caches_directory as # specified in this configuration file. # # Saved caches greatly improve cold-start speeds, and is relatively cheap in # terms of I/O for the key cache. Row cache saving is much more expensive and # has limited use. # # Default is 14400 or 4 hours. key_cache_save_period: 14400 # Number of keys from the key cache to save # Disabled by default, meaning all keys are going to be saved # key_cache_keys_to_save: 100 # Maximum size of the row cache in memory. # NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. # # Default value is 0, to disable row caching. row_cache_size_in_mb: 0 # Duration in seconds after which Cassandra should # save the row cache. Caches are saved to saved_caches_directory as specified # in this configuration file. # # Saved caches greatly improve cold-start speeds, and is relatively cheap in # terms of I/O for the key cache. Row cache saving is much more expensive and # has limited use. # # Default is 0 to disable saving the row cache. row_cache_save_period: 0 # Number of keys from the row cache to save # Disabled by default, meaning all keys are going to be saved # row_cache_keys_to_save: 100 # Maximum size of the counter cache in memory. # # Counter cache helps to reduce counter locks' contention for hot counter cells. # In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before # write entirely. With RF > 1 a counter cache hit will still help to reduce the duration # of the lock hold, helping with hot counter cell updates, but will not allow skipping # the read entirely. Only the local (clock, count) tuple of a counter cell is kept # in memory, not the whole counter, so it's relatively cheap. # # NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. # # Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. # NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. counter_cache_size_in_mb: # Duration in seconds after which Cassandra should # save the counter cache (keys only). Caches are saved to saved_caches_directory as # specified in this configuration file. # # Default is 7200 or 2 hours. counter_cache_save_period: 7200 # Number of keys from the counter cache to save # Disabled by default, meaning all keys are going to be saved # counter_cache_keys_to_save: 100 # The off-heap memory allocator. Affects storage engine metadata as # well as caches. Experiments show that JEMAlloc saves some memory # than the native GCC allocator (i.e., JEMalloc is more # fragmentation-resistant). # # Supported values are: NativeAllocator, JEMallocAllocator # # If you intend to use JEMallocAllocator you have to install JEMalloc as library and # modify cassandra-env.sh as directed in the file. # # Defaults to NativeAllocator # memory_allocator: NativeAllocator # saved caches saved_caches_directory: db/cassandra/saved_caches # commitlog_sync may be either "periodic" or "batch." 
# # When in batch mode, Cassandra won't ack writes until the commit log # has been fsynced to disk. It will wait # commitlog_sync_batch_window_in_ms milliseconds between fsyncs. # This window should be kept short because the writer threads will # be unable to do extra work while waiting. (You may need to increase # concurrent_writes for the same reason.) # # commitlog_sync: batch # commitlog_sync_batch_window_in_ms: 2 # # the other option is "periodic" where writes may be acked immediately # and the CommitLog is simply synced every commitlog_sync_period_in_ms # milliseconds. commitlog_sync: periodic commitlog_sync_period_in_ms: 10000 # The size of the individual commitlog file segments. A commitlog # segment may be archived, deleted, or recycled once all the data # in it (potentially from each columnfamily in the system) has been # flushed to sstables. # # The default size is 32, which is almost always fine, but if you are # archiving commitlog segments (see commitlog_archiving.properties), # then you probably want a finer granularity of archiving; 8 or 16 MB # is reasonable. commitlog_segment_size_in_mb: 32 # Reuse commit log files when possible. The default is false, and this # feature will be removed entirely in future versions of Cassandra. #commitlog_segment_recycling: false # any class that implements the SeedProvider interface and has a # constructor that takes a Map<String, String> of parameters will do. seed_provider: # Addresses of hosts that are deemed contact points. # Cassandra nodes use this list of hosts to find each other and learn # the topology of the ring. You must change this if you are running # multiple nodes! - class_name: org.apache.cassandra.locator.SimpleSeedProvider parameters: # seeds is actually a comma-delimited list of addresses. # Ex: "<ip1>,<ip2>,<ip3>" - seeds: "127.0.0.1" # For workloads with more data than can fit in memory, Cassandra's # bottleneck will be reads that need to fetch data from # disk. "concurrent_reads" should be set to (16 * number_of_drives) in # order to allow the operations to enqueue low enough in the stack # that the OS and drives can reorder them. Same applies to # "concurrent_counter_writes", since counter writes read the current # values before incrementing and writing them back. # # On the other hand, since writes are almost never IO bound, the ideal # number of "concurrent_writes" is dependent on the number of cores in # your system; (8 * number_of_cores) is a good rule of thumb. concurrent_reads: 32 concurrent_writes: 32 concurrent_counter_writes: 32 # Total memory to use for sstable-reading buffers. Defaults to # the smaller of 1/4 of heap or 512MB. # file_cache_size_in_mb: 512 # Total permitted memory to use for memtables. Cassandra will stop # accepting writes when the limit is exceeded until a flush completes, # and will trigger a flush based on memtable_cleanup_threshold # If omitted, Cassandra will set both to 1/4 the size of the heap. # memtable_heap_space_in_mb: 2048 # memtable_offheap_space_in_mb: 2048 # Ratio of occupied non-flushing memtable size to total permitted size # that will trigger a flush of the largest memtable. Lager mct will # mean larger flushes and hence less compaction, but also less concurrent # flush activity which can make it difficult to keep your disks fed # under heavy write load. # # memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) # memtable_cleanup_threshold: 0.11 # Specify the way Cassandra allocates and manages memtable memory. 
# Options are: # heap_buffers: on heap nio buffers # offheap_buffers: off heap (direct) nio buffers # offheap_objects: native memory, eliminating nio buffer heap overhead memtable_allocation_type: heap_buffers # Total space to use for commitlogs. Since commitlog segments are # mmapped, and hence use up address space, the default size is 32 # on 32-bit JVMs, and 8192 on 64-bit JVMs. # # If space gets above this value (it will round up to the next nearest # segment multiple), Cassandra will flush every dirty CF in the oldest # segment and remove it. So a small total commitlog space will tend # to cause more flush activity on less-active columnfamilies. # commitlog_total_space_in_mb: 8192 # This sets the amount of memtable flush writer threads. These will # be blocked by disk io, and each one will hold a memtable in memory # while blocked. # # memtable_flush_writers defaults to the smaller of (number of disks, # number of cores), with a minimum of 2 and a maximum of 8. # # If your data directories are backed by SSD, you should increase this # to the number of cores. #memtable_flush_writers: 8 # A fixed memory pool size in MB for for SSTable index summaries. If left # empty, this will default to 5% of the heap size. If the memory usage of # all index summaries exceeds this limit, SSTables with low read rates will # shrink their index summaries in order to meet this limit. However, this # is a best-effort process. In extreme conditions Cassandra may need to use # more than this amount of memory. index_summary_capacity_in_mb: # How frequently index summaries should be resampled. This is done # periodically to redistribute memory from the fixed-size pool to sstables # proportional their recent read rates. Setting to -1 will disable this # process, leaving existing index summaries at their current sampling level. index_summary_resize_interval_in_minutes: 60 # Whether to, when doing sequential writing, fsync() at intervals in # order to force the operating system to flush the dirty # buffers. Enable this to avoid sudden dirty buffer flushing from # impacting read latencies. Almost always a good idea on SSDs; not # necessarily on platters. trickle_fsync: false trickle_fsync_interval_in_kb: 10240 # TCP port, for commands and data # For security reasons, you should not expose this port to the internet. Firewall it if needed. storage_port: 7000 # SSL port, for encrypted communication. Unused unless enabled in # encryption_options # For security reasons, you should not expose this port to the internet. Firewall it if needed. ssl_storage_port: 7001 # Address or interface to bind to and tell other Cassandra nodes to connect to. # You _must_ change this if you want multiple nodes to be able to communicate! # # Set listen_address OR listen_interface, not both. Interfaces must correspond # to a single address, IP aliasing is not supported. # # Leaving it blank leaves it up to InetAddress.getLocalHost(). This # will always do the Right Thing _if_ the node is properly configured # (hostname, name resolution, etc), and the Right Thing is to use the # address associated with the hostname (it might not be). # # Setting listen_address to 0.0.0.0 is always wrong. # # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address # you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4 # address will be used. If true the first ipv6 address will be used. Defaults to false preferring # ipv4. 
If there is only one address it will be selected regardless of ipv4/ipv6. listen_address: localhost # listen_interface: eth0 # listen_interface_prefer_ipv6: false # Address to broadcast to other Cassandra nodes # Leaving this blank will set it to the same value as listen_address # broadcast_address: 1.2.3.4 # Internode authentication backend, implementing IInternodeAuthenticator; # used to allow/disallow connections from peer nodes. # internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator # Whether to start the native transport server. # Please note that the address on which the native transport is bound is the # same as the rpc_address. The port however is different and specified below. start_native_transport: true # port for the CQL native transport to listen for clients on native_transport_port: 9042 # The maximum threads for handling requests when the native transport is used. # This is similar to rpc_max_threads though the default differs slightly (and # there is no native_transport_min_threads, idle threads will always be stopped # after 30 seconds). # native_transport_max_threads: 128 # # The maximum size of allowed frame. Frame (requests) larger than this will # be rejected as invalid. The default is 256MB. # native_transport_max_frame_size_in_mb: 256 # The maximum number of concurrent client connections. # The default is -1, which means unlimited. # native_transport_max_concurrent_connections: -1 # The maximum number of concurrent client connections per source ip. # The default is -1, which means unlimited. # native_transport_max_concurrent_connections_per_ip: -1 # Whether to start the thrift rpc server. start_rpc: true # The address or interface to bind the Thrift RPC service and native transport # server to. # # Set rpc_address OR rpc_interface, not both. Interfaces must correspond # to a single address, IP aliasing is not supported. # # Leaving rpc_address blank has the same effect as on listen_address # (i.e. it will be based on the configured hostname of the node). # # Note that unlike listen_address, you can specify 0.0.0.0, but you must also # set broadcast_rpc_address to a value other than 0.0.0.0. # # For security reasons, you should not expose this port to the internet. Firewall it if needed. # # If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address # you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 # address will be used. If true the first ipv6 address will be used. Defaults to false preferring # ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. rpc_address: 0.0.0.0 # rpc_interface: eth1 # rpc_interface_prefer_ipv6: false # port for Thrift to listen for clients on rpc_port: 9160 # RPC address to broadcast to drivers and other Cassandra nodes. This cannot # be set to 0.0.0.0. If left blank, this will be set to the value of # rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must # be set. broadcast_rpc_address: 192.168.99.100 # enable or disable keepalive on rpc/native connections rpc_keepalive: true # Cassandra provides two out-of-the-box options for the RPC Server: # # sync -> One thread per thrift connection. For a very large number of clients, memory # will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size # per thread, and that will correspond to your use of virtual memory (but physical memory # may be limited depending on use of stack space). 
# # hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled # asynchronously using a small number of threads that does not vary with the amount # of thrift clients (and thus scales well to many clients). The rpc requests are still # synchronous (one thread per active request). If hsha is selected then it is essential # that rpc_max_threads is changed from the default value of unlimited. # # The default is sync because on Windows hsha is about 30% slower. On Linux, # sync/hsha performance is about the same, with hsha of course using less memory. # # Alternatively, can provide your own RPC server by providing the fully-qualified class name # of an o.a.c.t.TServerFactory that can create an instance of it. rpc_server_type: sync # Uncomment rpc_min|max_thread to set request pool size limits. # # Regardless of your choice of RPC server (see above), the number of maximum requests in the # RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync # RPC server, it also dictates the number of clients that can be connected at all). # # The default is unlimited and thus provides no protection against clients overwhelming the server. You are # encouraged to set a maximum that makes sense for you in production, but do keep in mind that # rpc_max_threads represents the maximum number of client requests this server may execute concurrently. # # rpc_min_threads: 16 # rpc_max_threads: 2048 # uncomment to set socket buffer sizes on rpc connections # rpc_send_buff_size_in_bytes: # rpc_recv_buff_size_in_bytes: # Uncomment to set socket buffer size for internode communication # Note that when setting this, the buffer size is limited by net.core.wmem_max # and when not setting it it is defined by net.ipv4.tcp_wmem # See: # /proc/sys/net/core/wmem_max # /proc/sys/net/core/rmem_max # /proc/sys/net/ipv4/tcp_wmem # /proc/sys/net/ipv4/tcp_wmem # and: man tcp # internode_send_buff_size_in_bytes: # internode_recv_buff_size_in_bytes: # Frame size for thrift (maximum message length). thrift_framed_transport_size_in_mb: 15 # Set to true to have Cassandra create a hard link to each sstable # flushed or streamed locally in a backups/ subdirectory of the # keyspace data. Removing these links is the operator's # responsibility. incremental_backups: false # Whether or not to take a snapshot before each compaction. Be # careful using this option, since Cassandra won't clean up the # snapshots for you. Mostly useful if you're paranoid when there # is a data format change. snapshot_before_compaction: false # Whether or not a snapshot is taken of the data before keyspace truncation # or dropping of column families. The STRONGLY advised default of true # should be used to provide data safety. If you set this flag to false, you will # lose data on truncation or drop. auto_snapshot: true # When executing a scan, within or across a partition, we need to keep the # tombstones seen in memory so we can return them to the coordinator, which # will use them to make sure other replicas also know about the deleted rows. # With workloads that generate a lot of tombstones, this can cause performance # problems and even exaust the server heap. # (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) # Adjust the thresholds here if you understand the dangers and want to # scan more tombstones anyway. These thresholds may also be adjusted at runtime # using the StorageService mbean. 
tombstone_warn_threshold: 1000 tombstone_failure_threshold: 100000 # Granularity of the collation index of rows within a partition. # Increase if your rows are large, or if you have a very large # number of rows per partition. The competing goals are these: # 1) a smaller granularity means more index entries are generated # and looking up rows withing the partition by collation column # is faster # 2) but, Cassandra will keep the collation index in memory for hot # rows (as part of the key cache), so a larger granularity means # you can cache more hot rows column_index_size_in_kb: 64 # Log WARN on any batch size exceeding this value. 5kb per batch by default. # Caution should be taken on increasing the size of this threshold as it can lead to node instability. batch_size_warn_threshold_in_kb: 5 # Log WARN on any batches not of type LOGGED than span across more partitions than this limit unlogged_batch_across_partitions_warn_threshold: 10 # Number of simultaneous compactions to allow, NOT including # validation "compactions" for anti-entropy repair. Simultaneous # compactions can help preserve read performance in a mixed read/write # workload, by mitigating the tendency of small sstables to accumulate # during a single long running compactions. The default is usually # fine and if you experience problems with compaction running too # slowly or too fast, you should look at # compaction_throughput_mb_per_sec first. # # concurrent_compactors defaults to the smaller of (number of disks, # number of cores), with a minimum of 2 and a maximum of 8. # # If your data directories are backed by SSD, you should increase this # to the number of cores. #concurrent_compactors: 1 # Throttles compaction to the given total throughput across the entire # system. The faster you insert data, the faster you need to compact in # order to keep the sstable count down, but in general, setting this to # 16 to 32 times the rate you are inserting data is more than sufficient. # Setting this to 0 disables throttling. Note that this account for all types # of compaction, including validation compaction. compaction_throughput_mb_per_sec: 16 # Log a warning when compacting partitions larger than this value compaction_large_partition_warning_threshold_mb: 100 # When compacting, the replacement sstable(s) can be opened before they # are completely written, and used in place of the prior sstables for # any range that has been written. This helps to smoothly transfer reads # between the sstables, reducing page cache churn and keeping hot rows hot sstable_preemptive_open_interval_in_mb: 50 # Throttles all outbound streaming file transfers on this node to the # given total throughput in Mbps. This is necessary because Cassandra does # mostly sequential IO when streaming data during bootstrap or repair, which # can lead to saturating the network connection and degrading rpc performance. # When unset, the default is 200 Mbps or 25 MB/s. 
# stream_throughput_outbound_megabits_per_sec: 200 # Throttles all streaming file transfer between the datacenters, # this setting allows users to throttle inter dc output throughput in addition # to throttling all network output traffic as configured with # stream_throughput_outbound_megabits_per_sec # When unset, the default is 200 Mbps or 25 MB/s # inter_dc_stream_throughput_outbound_megabits_per_sec: 200 # How long the coordinator should wait for read operations to complete read_request_timeout_in_ms: 5000 # How long the coordinator should wait for seq or index scans to complete range_request_timeout_in_ms: 10000 # How long the coordinator should wait for writes to complete write_request_timeout_in_ms: 2000 # How long the coordinator should wait for counter writes to complete counter_write_request_timeout_in_ms: 5000 # How long a coordinator should continue to retry a CAS operation # that contends with other proposals for the same row cas_contention_timeout_in_ms: 1000 # How long the coordinator should wait for truncates to complete # (This can be much longer, because unless auto_snapshot is disabled # we need to flush first so we can snapshot before removing the data.) truncate_request_timeout_in_ms: 60000 # The default timeout for other, miscellaneous operations request_timeout_in_ms: 10000 # Enable operation timeout information exchange between nodes to accurately # measure request timeouts. If disabled, replicas will assume that requests # were forwarded to them instantly by the coordinator, which means that # under overload conditions we will waste that much extra time processing # already-timed-out requests. # # Warning: before enabling this property make sure to ntp is installed # and the times are synchronized between the nodes. cross_node_timeout: false # Set socket timeout for streaming operation. # The output session is failed if no data/ack is received by any of the participants # within that period, which means this should also be sufficient to output a large # sstable or rebuild table indexes. # Default value is 86400000ms, which means stale streams timeout after 24 hours. # A value of zero means output sockets should never time out. # streaming_socket_timeout_in_ms: 86400000 # phi value that must be reached for a host to be marked down. # most users should never need to adjust this. # phi_convict_threshold: 8 # endpoint_snitch -- Set this to a class that implements # IEndpointSnitch. The snitch has two functions: # - it teaches Cassandra enough about your network topology to route # requests efficiently # - it allows Cassandra to spread replicas around your cluster to avoid # correlated failures. It does this by grouping machines into # "datacenters" and "racks." Cassandra will do its best not to have # more than one replica on the same "rack" (which may not actually # be a physical location) # # CASSANDRA WILL NOT ALLOW YOU TO SWITCH TO AN INCOMPATIBLE SNITCH # ONCE DATA IS INSERTED INTO THE CLUSTER. This would cause data loss. # This means that if you start with the default SimpleSnitch, which # locates every node on "rack1" in "datacenter1", your only options # if you need to add another datacenter are GossipingPropertyFileSnitch # (and the older PFS). From there, if you want to migrate to an # incompatible snitch like Ec2Snitch you can do it by adding new nodes # under Ec2Snitch (which will locate them in a new "datacenter") and # decommissioning the old ones. # # Out of the box, Cassandra provides # - SimpleSnitch: # Treats Strategy order as proximity. 
This can improve cache # locality when disabling read repair. Only appropriate for # single-datacenter deployments. # - GossipingPropertyFileSnitch # This should be your go-to snitch for production use. The rack # and datacenter for the local node are defined in # cassandra-rackdc.properties and propagated to other nodes via # gossip. If cassandra-topology.properties exists, it is used as a # fallback, allowing migration from the PropertyFileSnitch. # - PropertyFileSnitch: # Proximity is determined by rack and data center, which are # explicitly configured in cassandra-topology.properties. # - Ec2Snitch: # Appropriate for EC2 deployments in a single Region. Loads Region # and Availability Zone information from the EC2 API. The Region is # treated as the datacenter, and the Availability Zone as the rack. # Only private IPs are used, so this will not work across multiple # Regions. # - Ec2MultiRegionSnitch: # Uses public IPs as broadcast_address to allow cross-region # connectivity. (Thus, you should set seed addresses to the public # IP as well.) You will need to open the storage_port or # ssl_storage_port on the public IP firewall. (For intra-Region # traffic, Cassandra will switch to the private IP after # establishing a connection.) # - RackInferringSnitch: # Proximity is determined by rack and data center, which are # assumed to correspond to the 3rd and 2nd octet of each node's IP # address, respectively. Unless this happens to match your # deployment conventions, this is best used as an example of # writing a custom Snitch class and is provided in that spirit. # # You can use a custom Snitch by setting this to the full class name # of the snitch, which will be assumed to be on your classpath. endpoint_snitch: SimpleSnitch # controls how often to perform the more expensive part of host score # calculation dynamic_snitch_update_interval_in_ms: 100 # controls how often to reset all host scores, allowing a bad host to # possibly recover dynamic_snitch_reset_interval_in_ms: 600000 # if set greater than zero and read_repair_chance is < 1.0, this will allow # 'pinning' of replicas to hosts in order to increase cache capacity. # The badness threshold will control how much worse the pinned host has to be # before the dynamic snitch will prefer other replicas over it. This is # expressed as a double which represents a percentage. Thus, a value of # 0.2 means Cassandra would continue to prefer the static snitch values # until the pinned host was 20% worse than the fastest. dynamic_snitch_badness_threshold: 0.1 # request_scheduler -- Set this to a class that implements # RequestScheduler, which will schedule incoming client requests # according to the specific policy. This is useful for multi-tenancy # with a single Cassandra cluster. # NOTE: This is specifically for requests from the client and does # not affect inter node communication. # org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place # org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of # client requests to a node with a separate queue for each # request_scheduler_id. The scheduler is further customized by # request_scheduler_options as described below. request_scheduler: org.apache.cassandra.scheduler.NoScheduler # Scheduler Options vary based on the type of scheduler # NoScheduler - Has no options # RoundRobin # - throttle_limit -- The throttle_limit is the number of in-flight # requests per client. Requests beyond # that limit are queued up until # running requests can complete. 
# The value of 80 here is twice the number of # concurrent_reads + concurrent_writes. # - default_weight -- default_weight is optional and allows for # overriding the default which is 1. # - weights -- Weights are optional and will default to 1 or the # overridden default_weight. The weight translates into how # many requests are handled during each turn of the # RoundRobin, based on the scheduler id. # # request_scheduler_options: # throttle_limit: 80 # default_weight: 5 # weights: # Keyspace1: 1 # Keyspace2: 5 # request_scheduler_id -- An identifier based on which to perform # the request scheduling. Currently the only valid option is keyspace. # request_scheduler_id: keyspace # Enable or disable inter-node encryption # Default settings are TLS v1, RSA 1024-bit keys (it is imperative that # users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher # suite for authentication, key exchange and encryption of the actual data transfers. # Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode. # NOTE: No custom encryption options are enabled at the moment # The available internode options are : all, none, dc, rack # # If set to dc cassandra will encrypt the traffic between the DCs # If set to rack cassandra will encrypt the traffic between the racks # # The passwords used in these options must match the passwords used when generating # the keystore and truststore. For instructions on generating these files, see: # http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore # server_encryption_options: internode_encryption: none keystore: conf/.keystore keystore_password: cassandra truststore: conf/.truststore truststore_password: cassandra # More advanced defaults below: # protocol: TLS # algorithm: SunX509 # store_type: JKS # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] # require_client_auth: false # enable or disable client/server encryption. client_encryption_options: enabled: false keystore: conf/.keystore keystore_password: cassandra # require_client_auth: false # Set trustore and truststore_password if require_client_auth is true # truststore: conf/.truststore # truststore_password: cassandra # More advanced defaults below: # protocol: TLS # algorithm: SunX509 # store_type: JKS # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA] # internode_compression controls whether traffic between nodes is # compressed. # can be: all - all traffic is compressed # dc - traffic between different datacenters is compressed # none - nothing is compressed. internode_compression: all # Enable or disable tcp_nodelay for inter-dc communication. # Disabling it will result in larger (but fewer) network packets being sent, # reducing overhead from the TCP protocol itself, at the cost of increasing # latency if you block for cross-datacenter responses. inter_dc_tcp_nodelay: false # GC Pauses greater than gc_warn_threshold_in_ms will be logged at WARN level # Adjust the threshold based on your application throughput requirement # By default, Cassandra logs GC Pauses greater than 200 ms at INFO level # gc_warn_threshold_in_ms: 1000
47.081114
216
0.779732
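The cassandra.yaml record above keeps the single-node defaults: `seeds: "127.0.0.1"` and `listen_address: localhost`. Its own comments warn that both must change before additional nodes can find each other. A minimal sketch of the multi-node form, assuming two hypothetical node addresses (10.0.0.1 and 10.0.0.2) that are not part of the record:

# Hedged sketch: seed list and bind address for a two-node ring.
# The 10.0.0.x addresses are placeholders, not values from the record.
seed_provider:
  - class_name: org.apache.cassandra.locator.SimpleSeedProvider
    parameters:
      # comma-delimited list of contact points; every node should use the same list
      - seeds: "10.0.0.1,10.0.0.2"

# each node binds a routable address of its own instead of localhost
listen_address: 10.0.0.1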
05da98b80277f94c4801bfe983751281bd649dd7
136
yml
YAML
.travis.yml
007gzs/X-UnionPay
82ee061655f050c28b1818cecafc2d5780eccc86
[ "MIT" ]
9
2019-09-06T01:35:44.000Z
2021-11-18T11:12:51.000Z
.travis.yml
napoleon02/demo-pay
dcd752e6c263f87176e53bd3656b4c37f79f773c
[ "MIT" ]
1
2021-09-01T00:55:27.000Z
2021-09-01T01:02:12.000Z
.travis.yml
napoleon02/demo-pay
dcd752e6c263f87176e53bd3656b4c37f79f773c
[ "MIT" ]
3
2019-10-28T10:07:23.000Z
2019-12-24T01:54:37.000Z
language: java sudo: false install: false jdk: - openjdk8 notifications: email: false cache: directories: - '$HOME/.m2'
8
17
0.654412
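The short Travis configuration above leans on the Java defaults; with a pom.xml present and no `script` key, Travis would normally run `mvn test -B`. A sketch of a variant that states the build command explicitly and tests a second JDK; both additions are illustrative and not part of the record:

# Hedged sketch: explicit Maven script plus a hypothetical second JDK.
language: java
sudo: false
jdk:
  - openjdk8
  - openjdk11            # hypothetical extra build target
script:
  - mvn test -B          # the command Travis would run by default for Maven projects
notifications:
  email: false
cache:
  directories:
    - '$HOME/.m2'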
bd7815716f272a58a559e868041cc148b74ab02a
3684
yml
YAML
.github/workflows/test.yml
007gzs/django-cool
3b4ed1a8ca020e6f798ca47e20169e5a854b4f24
[ "BSD-3-Clause" ]
11
2020-05-19T09:52:35.000Z
2022-02-25T10:39:56.000Z
.github/workflows/test.yml
007gzs/django-cool
3b4ed1a8ca020e6f798ca47e20169e5a854b4f24
[ "BSD-3-Clause" ]
null
null
null
.github/workflows/test.yml
007gzs/django-cool
3b4ed1a8ca020e6f798ca47e20169e5a854b4f24
[ "BSD-3-Clause" ]
1
2020-12-24T08:14:58.000Z
2020-12-24T08:14:58.000Z
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions name: test on: [push, pull_request] jobs: test: name: Test runs-on: ubuntu-latest strategy: matrix: python-version: [3.6, 3.7, 3.8, 3.9] django-version: [django-2.2, django-3.0, django-3.1, django-3.2] include: - python-version: 3.8 django-version: django-main - python-version: 3.9 django-version: django-main services: # oracle: # image: quay.io/maksymbilenko/oracle-12c # ports: # - 1521:1521 # options: --health-cmd "echo exit|sqlplus system/oracle@//localhost:1521/xe" --health-interval 10s --health-timeout 5s --health-retries 5 postgres: image: postgres env: POSTGRES_USER: django_cool POSTGRES_PASSWORD: django_cool POSTGRES_DB: django_cool_test ports: - 5432:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 mysql: image: mysql env: MYSQL_ALLOW_EMPTY_PASSWORD: yes MYSQL_USER: django_cool MYSQL_PASSWORD: django_cool MYSQL_DATABASE: django_cool_test ports: - 3306:3306 options: --health-cmd "mysqladmin ping" --health-interval 10s --health-timeout 5s --health-retries 5 steps: - name: Check Out uses: actions/checkout@v2 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} # - name: Install oracle instantclient # run: | # mkdir -p /opt/oracle # cd /opt/oracle # wget -q https://download.oracle.com/otn_software/linux/instantclient/211000/instantclient-basic-linux.x64-21.1.0.0.0.zip # unzip instantclient-basic-linux.x64-21.1.0.0.0.zip - name: Install dependencies run: | sudo apt-get install gettext libaio1 python -m pip install --upgrade pip pip install flake8 "isort>=5.1.0" pytest pip install mysqlclient psycopg2 cx_Oracle - name: Install Django 2.2 run: | pip install 'Django>=2.2,<3' if: matrix.django-version == 'django-2.2' - name: Install Django 3.0 run: | pip install 'Django>=3.0,<3.1' if: matrix.django-version == 'django-3.0' - name: Install Django 3.1 run: | pip install 'Django>=3.1,<3.2' if: matrix.django-version == 'django-3.1' - name: Install Django 3.2 run: | pip install 'Django>=3.2,<4' if: matrix.django-version == 'django-3.2' - name: Install Django main run: | pip install 'https://github.com/django/django/archive/main.tar.gz' if: matrix.django-version == 'django-main' - name: Install djangorestframework run: | pip install djangorestframework - name: Lint with flake8 run: | flake8 cool tests - name: Lint with isort run: | isort --check-only --diff cool tests - name: Test with pytest sqlite run: | pytest --db sqlite - name: Test with pytest postgresql run: | pytest --db postgresql - name: Test with pytest mysql run: | pytest --db mysql # - name: Test with pytest oracle # run: | # export LD_LIBRARY_PATH=/opt/oracle/instantclient_21_1:${LD_LIBRARY_PATH} # echo ${LD_LIBRARY_PATH} # pytest --db oracle
33.798165
145
0.607492
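The django-cool test workflow above installs each Django release in its own step and gates it with `if: matrix.django-version == ...`. Supporting another release only needs one more matrix value and one more guarded step; the Django 4.0 entries below are purely illustrative and are not tested by the record:

# Hedged sketch: excerpt of jobs.test extended with a hypothetical django-4.0 value.
strategy:
  matrix:
    django-version: [django-2.2, django-3.0, django-3.1, django-3.2, django-4.0]
steps:
  - name: Install Django 4.0
    run: |
      pip install 'Django>=4.0,<4.1'
    if: matrix.django-version == 'django-4.0'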
7aafcd3b79ba0e9c5d3cf57d2b4b80c743d8185e
890
yml
YAML
.github/workflows/release.yml
007gzs/oface
9e4ca8ee572783cff4417c58217cdcb8c7af07f4
[ "MIT" ]
2
2021-06-01T01:33:32.000Z
2021-11-23T10:39:35.000Z
.github/workflows/release.yml
007gzs/oface
9e4ca8ee572783cff4417c58217cdcb8c7af07f4
[ "MIT" ]
null
null
null
.github/workflows/release.yml
007gzs/oface
9e4ca8ee572783cff4417c58217cdcb8c7af07f4
[ "MIT" ]
1
2021-07-16T22:42:55.000Z
2021-07-16T22:42:55.000Z
# This workflows will upload a Python Package using Twine when a release is created # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries name: release on: push: tags: - v* jobs: deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Python uses: actions/setup-python@v1 with: python-version: '3.x' - name: Install dependencies run: | python -m pip install --upgrade pip pip install setuptools wheel twine - name: Build and publish env: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | python setup.py sdist bdist_wheel twine check dist/* twine upload --skip-existing dist/*
26.969697
158
0.650562
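The release workflow above feeds twine a username and password taken from the `PYPI_USERNAME` and `PYPI_PASSWORD` secrets. Projects that use a PyPI API token instead keep the same step shape and set the literal username `__token__`; the secret name `PYPI_API_TOKEN` below is a placeholder, not one defined by this repository:

# Hedged sketch: token-based upload step; PYPI_API_TOKEN is a hypothetical secret.
- name: Build and publish
  env:
    TWINE_USERNAME: __token__                       # literal value required for API tokens
    TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}   # the token itself
  run: |
    python setup.py sdist bdist_wheel
    twine check dist/*
    twine upload --skip-existing dist/*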
bb82ff5f453ec6d0745a7d23d5ca1e4d758d3674
894
yml
YAML
.github/workflows/release.yml
007gzs/xface
8432cc3f8c8296790e655d7f9353c87ca2a61036
[ "MIT" ]
null
null
null
.github/workflows/release.yml
007gzs/xface
8432cc3f8c8296790e655d7f9353c87ca2a61036
[ "MIT" ]
null
null
null
.github/workflows/release.yml
007gzs/xface
8432cc3f8c8296790e655d7f9353c87ca2a61036
[ "MIT" ]
null
null
null
# This workflows will upload a Python Package using Twine when a release is created # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries name: release on: push: tags: - v* jobs: deploy: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Set up Python uses: actions/setup-python@v1 with: python-version: '3.x' - name: Install dependencies run: | python -m pip install --upgrade pip pip install setuptools wheel twine - name: Build and publish env: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | python setup.py sdist bdist_wheel twine upload --skip-existing dist/*
27.9375
159
0.631991
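This second release workflow differs from the previous record only in dropping `twine check dist/*` before the upload. Its trigger, `tags: - v*`, fires on any tag that starts with "v". If a project wanted to restrict the run to three-part version tags, GitHub's glob filters allow character ranges; the pattern below is illustrative, not something the repository uses:

# Hedged sketch: stricter tag filter matching v1.2.3-style tags only.
on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'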
530f9842762702642a74ebdbb8bed4ab25b71efc
6413
yml
YAML
_config.yml
007herelsp/007herelsp.github.com
d7f2b67bbdbe8ec283bdfb4473f7df315761a92e
[ "MIT" ]
1
2019-06-17T00:01:33.000Z
2019-06-17T00:01:33.000Z
_config.yml
007herelsp/007herelsp.github.com
d7f2b67bbdbe8ec283bdfb4473f7df315761a92e
[ "MIT" ]
null
null
null
_config.yml
007herelsp/007herelsp.github.com
d7f2b67bbdbe8ec283bdfb4473f7df315761a92e
[ "MIT" ]
null
null
null
# Welcome to Jekyll! # # This config file is meant for settings that affect your whole blog, values # which you are expected to set up once and rarely need to edit after that. # For technical reasons, this file is *NOT* reloaded automatically when you use # 'jekyll serve'. If you change this file, please restart the server process. # # ,--------. ,--. ,--. ,--. # '--. .--',---. \ `.' /,-' '-. # | | | .-. : .' \ '-. .-' # | | \ --. / .'. \ | | # `--' `----''--' '--' `--' ## => Site Settings ############################## text_skin: default # "default" (default), "dark", "forest", "ocean", "chocolate", "orange" highlight_theme: tomorrow # "default" (default), "tomorrow", "tomorrow-night", "tomorrow-night-eighties", "tomorrow-night-blue", "tomorrow-night-bright" url : # the base hostname & protocol for your site e.g. https://www.someone.com baseurl : / title : 007herelsp's blog description: > # this means to ignore newlines until "Language & timezone" 007herelsp's Blog ## => Language and Timezone ############################## lang: # the language of your site, default as "en" timezone: # see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones for the available values ## => Author and Social ############################## author: type : # "person" (default), "organization" name : 007herelsp url : avatar : # path or url of avatar image (square) bio : I am an amazing person. email : [email protected] facebook : # "user_name" the last part of your profile url, e.g. https://www.facebook.com/user_name twitter : # "user_name" the last part of your profile url, e.g. https://twitter.com/user_name weibo : # "user_id" the last part of your profile url, e.g. https://www.weibo.com/user_id/profile?... googleplus: # "user_id" the last part of your profile url, e.g. https://plus.google.com/u/0/user_id telegram : # "user_name" the last part of your profile url, e.g. https://t.me/user_name medium : # "user_name" the last part of your profile url, e.g. https://medium.com/user_name zhihu : # "user_name" the last part of your profile url, e.g. https://www.zhihu.com/people/user_name douban : # "user_name" the last part of your profile url, e.g. https://www.douban.com/people/user_name linkedin : # "user_name" the last part of your profile url, e.g. https://www.linkedin.com/in/user_name github : 007herelsp # "user_name" the last part of your profile url, e.g. https://github.com/user_name npm : # "user_name" the last part of your profile url, e.g. 
https://www.npmjs.com/~user_name ## => GitHub Repository (if the site is hosted by GitHub) ############################## repository: 007herelsp/007herelsp.github.com repository_tree: master ## => Paths ############################## paths: root : # title link url, "/" (default) home : # home layout url, "/" (default) archive : # "/archive.html" (default) rss : # "/feed.xml" (default) ## => Post ############################## ## excerpt excerpt_separator: <!--more--> ## license license: CC-BY-NC-4.0 # "CC-BY-4.0", "CC-BY-SA-4.0", "CC-BY-NC-4.0", "CC-BY-ND-4.0" ## TOC toc: selectors: # "h1,h2,h3" (default) ## => Markdown Enhancements ############################## ## Mathjax mathjax: true # false (default), true mathjax_autoNumber: true # false (default), true ## Mermaid mermaid: true # false (default), true ## Chart chart: true # false (default), true ## => Paginate ############################## paginate: 8 paginate_path: /page:num # don't change this unless for special need ## => Sources ############################## sources: # bootcdn (default), unpkg ## => Sharing ############################## sharing: provider: false # false (default), "addtoany", "addthis", "custom" ## AddThis addthis: id: # AddThis pubid, e.g. ra-5xxxxxxxxxxx ## => Comments ############################## comments: provider: false # false (default), "disqus", "gitalk", "valine", "custom" ## Disqus disqus: shortname: # the Disqus shortname for the site ## Gitalk # please refer to https://github.com/gitalk/gitalk for more info. gitalk: clientID : # GitHub Application Client ID clientSecret: # GitHub Application Client Secret repository : # GitHub repo owner : # GitHub repo owner admin: # GitHub repo owner and collaborators, only these guys can initialize GitHub issues, IT IS A LIST. # - your GitHub Id ## Valine # please refer to https://valine.js.org/en/ for more info. valine: app_id : # LeanCloud App id app_key : # LeanCloud App key placeholder : # Prompt information visitor : # false (default) meta : # "[nick, mail, link]" (default) nickname, E-mail, Personal-site ## => Pageview ############################## pageview: provider: leancloud # false (default), "leancloud", "custom" ## Leancloud leancloud: app_id : RyMpl3U2lzrsjG1QCAVoXsRY-gzGzoHsz # LeanCloud App id app_key : uz3cyFllhCwrbQMJ53eIUPit # LeanCloud App key app_class : BlogsCount # LeanCloud App class ## => Search ############################## search: provider: default # "default" (default), false, "google", "custom" ## Google Custom Search Engine google: custom_search_engine_id: # Google Custom Search Engine ID ## => Analytics ############################## analytics: provider: false # false (default), "google", "custom" ## Google Analytics google: tracking_id : # Google Analytics id for the site anonymize_ip: false # Anonymize IP tracking for Analytics ## => Build ############################## markdown : kramdown highlighter : rouge permalink : date exclude: - CHANGELOG.md - HOW_TO_RELEASE.md - Gemfile - Gemfile.lock - LICENSE - README-*.md - README.md - gulpfile.js - jekyll-text-theme.gemspec - package-lock.json - package.json - /docs - /node_modules - /screenshots - /test - /vendor defaults: - scope: path: "" type: posts values: layout: article sharing: true license: true aside: toc: true show_edit_on_github: true show_subscribe: true pageview: true ## => Plugins ############################## plugins: - jekyll-feed - jekyll-paginate - jekyll-sitemap - jemoji
28.502222
152
0.598472
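The TeXt-theme _config.yml above ships with every comment provider switched off (`comments: provider: false`) and documents the Gitalk fields only as placeholders. A sketch of what the block looks like once Gitalk is enabled; every credential below is a placeholder, and only the owner name is taken from the record's author settings:

# Hedged sketch: Gitalk comments enabled with placeholder credentials.
comments:
  provider: gitalk
  gitalk:
    clientID: GITHUB_APP_CLIENT_ID          # placeholder
    clientSecret: GITHUB_APP_CLIENT_SECRET  # placeholder
    repository: blog-comments               # placeholder repo that stores the issue threads
    owner: 007herelsp
    admin:
      - 007herelsp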
c4f3330118328a2f72656f1b2f9ccd336a4a977c
3235
yml
YAML
_config.yml
007herelsp/jekyll-TeXt-theme
667b0e5c4622b7845749878008cd97a0efce3eb6
[ "MIT" ]
null
null
null
_config.yml
007herelsp/jekyll-TeXt-theme
667b0e5c4622b7845749878008cd97a0efce3eb6
[ "MIT" ]
null
null
null
_config.yml
007herelsp/jekyll-TeXt-theme
667b0e5c4622b7845749878008cd97a0efce3eb6
[ "MIT" ]
null
null
null
# Welcome to Jekyll! # # This config file is meant for settings that affect your whole blog, values # which you are expected to set up once and rarely need to edit after that. # For technical reasons, this file is *NOT* reloaded automatically when you use # 'jekyll serve'. If you change this file, please restart the server process. # # ,--------. ,--. ,--. ,--. # '--. .--',---. \ `.' /,-' '-. # | | | .-. : .' \ '-. .-' # | | \ --. / .'. \ | | # `--' `----''--' '--' `--' text_color_theme: default #eg: default | dark | forest | ocean | chocolate | orange ## Base settings ## # url: #the base hostname & protocol for your site e.g. https://www.007herelsp.com baseurl: / title: 007herelsp's blog description: > #this means to ignore newlines until "nav_lists:" 007herelsp's Blog nav_lists: - titles: en: About zh: 关于 zh-Hans: 关于 zh-Hant: 關於 url: /about.html # paths: # base: / # home: / # all: /all.html # rss: /feed.xml ## GitHub repository (if the site is hosted by GitHub) ## # repository: #e.g. 007herelsp/007herelsp.github.io # repository_tree: #e.g. master ## Language & timezone ## lang: en #the language of your site, eg: en(English), zh(简体中文), zh-Hans(简体中文), zh-Hant(繁體中文) timezone: Asia/Shanghai #see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones for the available values ## Author & social ## author: name: 007herelsp # email: #your Email address e.g. [email protected] # facebook: #your Facebook username # twitter: #your Twitter username github: 007herelsp #your GitHub username # googleplus: #your Google+ username # weibo: #your Weibo username # douban: #your Douban username # linkedin: #your Linkedin username ## TOC toc: # selectors: 'h1,h2,h3' ## Comment system (Disqus) ## disqus: # shortname: #the Disqus shortname for the site ## Comment system (gitalk) ## # please refer to https://github.com/gitalk/gitalk for more info. gitalk: # clientID: #Github Application Client ID # clientSecret: #Github Application Client Secret # repository: #Github repo # owner: #Github repo owner # admin: #Github repo owner and collaborators, only these guys can initialize github issues, IT IS A LIST. # - owner ## Google Analytics ## # ga_tracking_id: #Google analytics id for the site ## Reading quantity (LeanCloud) ## leancloud: # app_id: #LeanCloud App id # app_key: #LeanCloud App key # app_class: #LeanCloud App class # mathjax: true # mathjax_autoNumber: true # mermaid: true # chart: true ## Paginate ## paginate: 7 paginate_path: /blog/page:num ## Post excerpts ## excerpt_separator: <!--more--> excerpt_type: html # text | html ## Sources ## # sources: bootcss #eg: bootcss | unpkg ## Build ## markdown: kramdown highlighter: rouge permalink: date exclude: - CHANGELOG.md - CHECKLIST.md - Gemfile - Gemfile.lock - LICENSE - README-*.md - README.md - gulpfile.js - jekyll-text-theme.gemspec - package-lock.json - package.json - /docs - /node_modules - /screenshots - /test - /vendor ## Plugins ## plugins: - jekyll-sitemap - jekyll-feed - jekyll-paginate - jemoji ## Server ## server_url: "0.0.0.0"
24.884615
114
0.651314
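For the Jekyll TeXt theme _config.yml above, enabling the Gitalk comment system means filling in the keys the file leaves commented out. A minimal sketch follows the same top-level gitalk block; the clientID/clientSecret values are placeholders and none of these credentials come from the original config:

gitalk:
  clientID: xxxxxxxxxxxxxxxxxxxx          # placeholder GitHub OAuth App client ID
  clientSecret: xxxxxxxxxxxxxxxxxxxxxxxx  # placeholder GitHub OAuth App client secret
  repository: 007herelsp.github.com       # repo used to store comment issues (assumed)
  owner: 007herelsp
  admin:
    - 007herelsp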
44b0eb45e4e8b4727242bb789bdcf0056ca01e10
5,144
yml
YAML
vendor/zendframework/zend-validator/.travis.yml
007k/zf-tutorial
b25bae73f0d1533819cade575aeb34f35148496d
[ "BSD-3-Clause" ]
null
null
null
vendor/zendframework/zend-validator/.travis.yml
007k/zf-tutorial
b25bae73f0d1533819cade575aeb34f35148496d
[ "BSD-3-Clause" ]
null
null
null
vendor/zendframework/zend-validator/.travis.yml
007k/zf-tutorial
b25bae73f0d1533819cade575aeb34f35148496d
[ "BSD-3-Clause" ]
null
null
null
sudo: false language: php branches: except: - /^release-.*$/ - /^ghgfk-.*$/ cache: directories: - $HOME/.composer/cache - $HOME/.local - zf-mkdoc-theme env: global: - COMPOSER_ARGS="--no-interaction" - COVERAGE_DEPS="satooshi/php-coveralls" - LEGACY_DEPS="phpunit/phpunit" - TESTS_ZEND_VALIDATOR_ONLINE_ENABLED=true - SITE_URL=https://zendframework.github.io/zend-validator - GH_USER_NAME="Matthew Weier O'Phinney" - [email protected] - GH_REF=github.com/zendframework/zend-validator.git - secure="SoUsUxBFCuC0rVQyDJ/+IB38glC5WeWvg0XxtNj79di7wsQ92Jofp6Uu3NJBB8H1+at1pHetphRm4N+GPQmZGMFTG7LyF5u8duV8t4nDpAz5WfoP1y0IyacP6IrWzANeszOTZ04dlHu3dBdHusNpNxxUHl97bSx4XQUAm2GUTqNkuXNgQJFAAxx91jb5txG4W8KeMnfRm9jeDHP17BCnBMaSkYEXeLpHkYa9wA4lBJ7ZD6LuSC+MhrJCtREBTsWKLJY6xeBjRorUug+uCrNyArPtcOAaOLMSDJ1XIi3L5/Q7HdoldV7aC3V5HjNlpdIEFl33IGiCOyictFCpT1KaKx7TL8zDTMCiqe0cCyfTnq28lzULz2hXg0Kov7BFcRr2Ht/1f96RgrakWQiYTmk+C3YYYA16Fb+MndkMI3WH7WI0suC+5nhPdGl53MCWsd5x2+dDk/ifB/VvxHdGhhgxzAxsYJ41gV/LlzjbCQJNDCnTaL/GHCTUGJEPgwLrn2W52uZx6VggE9wl5z4XkiPqBy6zAAdwF55RRJgCxFttGOMVGdegFLHTf6+13S4sEImNmyVTeuJBZEHxaYRJ21wweOocjC2StKC9V54uPysDcEYwhu8WOsYU34fQdpMx3OHfPmXvhNGqoZ1rVsd5HM0QZZMT+7SI0r3UNKxrPC8LEAU=" matrix: include: - php: 5.6 env: - DEPS=lowest - php: 5.6 env: - DEPS=locked - EXECUTE_HOSTNAME_CHECK=true - TEST_COVERAGE=true - DEPLOY_DOCS="$(if [[ $TRAVIS_BRANCH == 'master' && $TRAVIS_PULL_REQUEST == 'false' ]]; then echo -n 'true' ; else echo -n 'false' ; fi)" - PATH="$HOME/.local/bin:$PATH" - php: 5.6 env: - DEPS=locked - SERVICE_MANAGER_VERSION="^2.7.5" - php: 5.6 env: - DEPS=latest - php: 7 env: - DEPS=lowest - php: 7 env: - DEPS=locked - CS_CHECK=true - php: 7 env: - DEPS=locked - SERVICE_MANAGER_VERSION="^2.7.5" - php: 7 env: - DEPS=latest - php: 7.1 env: - DEPS=lowest - php: 7.1 env: - DEPS=locked - php: 7.1 env: - DEPS=latest - php: hhvm env: - DEPS=lowest - php: hhvm env: - DEPS=locked - php: hhvm env: - DEPS=latest - php: hhvm env: - DEPS=locked - SERVICE_MANAGER_VERSION="^2.7.5" allow_failures: - php: hhvm before_install: - if [[ $TEST_COVERAGE != 'true' ]]; then phpenv config-rm xdebug.ini || return 0 ; fi - travis_retry composer self-update install: - travis_retry composer install $COMPOSER_ARGS --ignore-platform-reqs - if [[ $TRAVIS_PHP_VERSION =~ ^5.6 ]]; then travis_retry composer update $COMPOSER_ARGS --with-dependencies $LEGACY_DEPS ; fi - if [[ $DEPS == 'latest' ]]; then travis_retry composer update $COMPOSER_ARGS ; fi - if [[ $DEPS == 'lowest' ]]; then travis_retry composer update --prefer-lowest --prefer-stable $COMPOSER_ARGS ; fi - if [[ $SERVICE_MANAGER_VERSION != '' ]]; then travis_retry composer require --dev --no-update $COMPOSER_ARGS "zendframework/zend-servicemanager:$SERVICE_MANAGER_VERSION" ; fi - if [[ $SERVICE_MANAGER_VERSION == '' ]]; then travis_retry composer require --dev --no-update $COMPOSER_ARGS "zendframework/zend-servicemanager:^3.0.3" ; fi - if [[ $TEST_COVERAGE == 'true' ]]; then travis_retry composer require --dev $COMPOSER_ARGS $COVERAGE_DEPS ; fi - stty cols 120 - COLUMNS=120 composer show script: - if [[ $TEST_COVERAGE == 'true' ]]; then composer test-coverage ; else composer test ; fi - if [[ $CS_CHECK == 'true' ]]; then composer cs-check ; fi - if [[ $EXECUTE_HOSTNAME_CHECK == "true" && $TRAVIS_PULL_REQUEST == "false" ]]; then php bin/update_hostname_validator.php --check-only; fi - if [[ $DEPLOY_DOCS == "true" && "$TRAVIS_TEST_RESULT" == "0" ]]; then wget -O theme-installer.sh "https://raw.githubusercontent.com/zendframework/zf-mkdoc-theme/master/theme-installer.sh" ; chmod 
755 theme-installer.sh ; ./theme-installer.sh ; fi after_script: - if [[ $TEST_COVERAGE == 'true' ]]; then composer upload-coverage ; fi after_success: - if [[ $DEPLOY_DOCS == "true" ]]; then echo "Preparing to build and deploy documentation" ; ./zf-mkdoc-theme/deploy.sh ; echo "Completed deploying documentation" ; fi notifications: email: false slack: rooms: - secure: "ujQTv4jUDjnWAUME3w2VoPyKPBwbCGa7b1YMKxOt4PWxjWuyIUDY743fnmcQUqkX+CUXz9qJHG124RyZQ6UUowG/NZDttp7lppU/bIGJ/K0MuVmpBVwb8y/6rDoRj37V4n8WqAHGevjlRet3E5+gl91PFuSpN5JSj4efI8MlgUF6mqIIZUOifq2yNTZ9MXrG2qojIN4o6G4gttfwUR/3Ah1nD/ZtQBLA7pTd31/UNwtZMQ4IbGmcCMpdUADbQDr24VubjzTJfweSBoAu8Xf3IPPdR5AEfdRvuT1tGYPP4YxmvHxpTH1wF3mCX6b6ubUFMlpbqE50y/v4Mlva+2jXvcZ9Lt/Fs1Hz4pR3P3mM8EJjtj55cXWm+MSBqPBN7SX6AnkYB/OznuzqbCvt5Te09fm++1REYGnxkLxCnwI9GN2sKS7Tr8NxUCZyi9d4sVh7KUnrwFGVAGpViBTeglq+epoClcwupLK1E2m8IjrjUHTYG6NCE1QF/1NrrwuFBUIQuxPj/uE4oZcb8Tmiz9ilGFw/JbMR6WKzYzDSk2GrkJjUpa8Pn570kDL3otfJImPnEOxN73m9jle6P3laVJP9/A5Vm9C86S0aghAaswxCZB7Fql0Pl01WAB1kufqQBM/euFWGB0bQ9TFWoOENZDfD9zGyTkeZq3mgZ6DgU7ft6FA=" on_success: change on_failure: always
42.866667
702
0.714425
005a90d272cb3c6fe3785d8eed2ba364e0887742
1,337
yaml
YAML
package.yaml
007kevin/cf
10e17d9f11de508ecfa95790a93be4b794ac8850
[ "BSD-3-Clause" ]
null
null
null
package.yaml
007kevin/cf
10e17d9f11de508ecfa95790a93be4b794ac8850
[ "BSD-3-Clause" ]
null
null
null
package.yaml
007kevin/cf
10e17d9f11de508ecfa95790a93be4b794ac8850
[ "BSD-3-Clause" ]
null
null
null
name:                cf
version:             0.1.0.0
github:              "githubuser/cf"
license:             BSD3
author:              "Author name here"
maintainer:          "[email protected]"
copyright:           "2018 Author name here"

extra-source-files:
- README.md
- ChangeLog.md

# Metadata used when publishing your package
# synopsis:            Short description of your package
# category:            Web

# To avoid duplicated efforts in documentation and dealing with the
# complications of embedding Haddock markup inside cabal files, it is
# common to point users to the README.md file.
description:         Please see the README on GitHub at <https://github.com/githubuser/cf#readme>

dependencies:
- base >= 4.7 && < 5
- optparse-applicative
- wreq
- http-client
- lens
- lens-aeson
- regex-tdfa
- bytestring
- pretty-simple
- transformers
- aeson
- aeson-pretty
- monadplus
- text
- errors
- directory
- filepath
- async

library:
  source-dirs: src

executables:
  cf-exe:
    main:                Main.hs
    source-dirs:         app
    ghc-options:
    - -threaded
    - -rtsopts
    - -with-rtsopts=-N
    dependencies:
    - cf

tests:
  cf-test:
    main:                Spec.hs
    source-dirs:         test
    ghc-options:
    - -threaded
    - -rtsopts
    - -with-rtsopts=-N
    dependencies:
    - cf
20.569231
97
0.613313
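As a side note on the hpack spec above: additional build targets follow the same pattern as cf-exe. A minimal sketch of an extra executable stanza, added alongside cf-exe under the existing executables: map (the cf-bench name, Bench.hs entry point, and bench directory are hypothetical, not part of the original package):

executables:
  cf-bench:                       # hypothetical second executable
    main:                Bench.hs
    source-dirs:         bench
    dependencies:
    - cf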
7cad022105eb384b2e239c7a7c2065e0d7060557
2,168
yaml
YAML
stack.yaml
007kevin/cf
10e17d9f11de508ecfa95790a93be4b794ac8850
[ "BSD-3-Clause" ]
null
null
null
stack.yaml
007kevin/cf
10e17d9f11de508ecfa95790a93be4b794ac8850
[ "BSD-3-Clause" ]
null
null
null
stack.yaml
007kevin/cf
10e17d9f11de508ecfa95790a93be4b794ac8850
[ "BSD-3-Clause" ]
null
null
null
# This file was automatically generated by 'stack init'
#
# Some commonly used options have been documented as comments in this file.
# For advanced use and comprehensive documentation of the format, please see:
# https://docs.haskellstack.org/en/stable/yaml_configuration/

# Resolver to choose a 'specific' stackage snapshot or a compiler version.
# A snapshot resolver dictates the compiler version and the set of packages
# to be used for project dependencies. For example:
#
# resolver: lts-3.5
# resolver: nightly-2015-09-21
# resolver: ghc-7.10.2
# resolver: ghcjs-0.1.0_ghc-7.10.2
#
# The location of a snapshot can be provided as a file or url. Stack assumes
# a snapshot provided as a file might change, whereas a url resource does not.
#
# resolver: ./custom-snapshot.yaml
# resolver: https://example.com/snapshots/2018-01-01.yaml
resolver: lts-11.8

# User packages to be built.
# Various formats can be used as shown in the example below.
#
# packages:
# - some-directory
# - https://example.com/foo/bar/baz-0.0.2.tar.gz
# - location:
#    git: https://github.com/commercialhaskell/stack.git
#    commit: e7b331f14bcffb8367cd58fbfc8b40ec7642100a
# - location: https://github.com/commercialhaskell/stack/commit/e7b331f14bcffb8367cd58fbfc8b40ec7642100a
#  subdirs:
#  - auto-update
#  - wai
packages:
- .

# Dependency packages to be pulled from upstream that are not in the resolver
# using the same syntax as the packages field.
# (e.g., acme-missiles-0.3)
# extra-deps: []

# Override default flag values for local packages and extra-deps
# flags: {}

# Extra package databases containing global packages
# extra-package-dbs: []

# Control whether we use the GHC we find on the path
# system-ghc: true
#
# Require a specific version of stack, using version ranges
# require-stack-version: -any # Default
# require-stack-version: ">=1.7"
#
# Override the architecture used by stack, especially useful on Windows
# arch: i386
# arch: x86_64
#
# Extra directories used by stack for building
# extra-include-dirs: [/path/to/dir]
# extra-lib-dirs: [/path/to/dir]
#
# Allow a newer minor version of GHC than the snapshot specifies
# compiler-check: newer-minor
32.848485
104
0.748155
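Following the commented hints in the stack.yaml above, pinning a package that is not in the lts-11.8 snapshot would look roughly like this; acme-missiles-0.3 is the example name used in the file's own comments, not an actual dependency of this project:

# Hypothetical pin of a package missing from the resolver
extra-deps:
- acme-missiles-0.3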
3690ef9f981a58c91f968fd014028c837f08e6ab
151
yml
YAML
.travis.yml
007lihegong/AXWebViewController-master
f07e73a401d5f4701a3043d9040f8f4f8d00c213
[ "MIT" ]
6
2016-09-14T08:41:08.000Z
2021-06-22T02:40:08.000Z
.travis.yml
007lihegong/AXWebViewController-master
f07e73a401d5f4701a3043d9040f8f4f8d00c213
[ "MIT" ]
null
null
null
.travis.yml
007lihegong/AXWebViewController-master
f07e73a401d5f4701a3043d9040f8f4f8d00c213
[ "MIT" ]
1
2016-09-14T08:41:08.000Z
2016-09-14T08:41:08.000Z
osx_image: xcode7.3
language: objective-c
xcode_workspace: AXWebViewController.xcworkspace
xcode_scheme: AXWebViewController
xcode_sdk: iphonesimulator
30.2
48
0.887417
4b641c6dfb974c62ae39c9f5e392b36835c95523
149
yml
YAML
.travis.yml
007lihegong/WHC_ModelSqliteKit
d9c7666dd47b0d157be8838218352968ce9f4485
[ "MIT" ]
713
2016-05-29T07:54:15.000Z
2022-03-09T08:48:54.000Z
.travis.yml
originalix/WHC_ModelSqliteKit
b81c8bbf8dc2a1e0f693389dd4e468c673c18af2
[ "MIT" ]
65
2016-06-27T07:47:43.000Z
2021-06-08T09:02:41.000Z
.travis.yml
originalix/WHC_ModelSqliteKit
b81c8bbf8dc2a1e0f693389dd4e468c673c18af2
[ "MIT" ]
168
2016-05-30T01:28:33.000Z
2022-03-21T02:54:59.000Z
osx_image: xcode8
language: objective-c
xcode_project: WHC_ModelSqliteKit.xcodeproj # path to your xcodeproj folder
xcode_schemes: WHC_ModelSqliteKit
37.25
75
0.865772
d5337f8aab688d21193592fe49417538754c4600
4,064
yml
YAML
db/demo-site-data/settings.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/settings.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/settings.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- Setting_1: id: 1 name: admin_ip_list description: IP addresses allowed to access admin area (comma-separated) level: site locked: true created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2020-03-15 17:01:38.837487000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-03-15 17:01:38.837487000 Z zone: *2 time: *3 Setting_2: id: 2 name: all_comment_notifications_email description: Set this to an email address to receive a notification for every comment posted on the site level: site locked: true created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2020-03-15 17:01:39.076756000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2020-03-15 17:01:39.076756000 Z zone: *2 time: *5 Setting_3: id: 3 name: allowed_to_comment description: Lowest-ranking user-type (Anonymous/Pseudonymous/Authenticated/None) that is allowed to post comments level: site locked: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2020-03-15 17:01:39.116922000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2020-03-15 17:01:39.116922000 Z zone: *2 time: *7 Setting_4: id: 4 name: default_page description: Default top-level page (either its name or its slug) level: site locked: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2020-03-15 17:01:39.134764000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2020-03-15 17:01:39.134764000 Z zone: *2 time: *9 Setting_5: id: 5 name: default_section description: Default top-level section (either its name or its slug) level: site locked: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2020-03-15 17:01:39.152691000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2020-03-15 17:01:39.152691000 Z zone: *2 time: *11 Setting_6: id: 6 name: post_login_redirect description: Where people are redirected after login, if no referer header level: admin locked: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2020-03-15 17:01:39.169858000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2020-03-15 17:01:39.169858000 Z zone: *2 time: *13 Setting_7: id: 7 name: recaptcha_comment_score description: Minimum score for reCAPTCHA V3 on anon/pseudonymous comments level: admin locked: true created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &14 2020-03-15 17:01:39.187307000 Z zone: *2 time: *14 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &15 2020-03-15 17:01:39.187307000 Z zone: *2 time: *15 Setting_8: id: 8 name: recaptcha_registration_score description: Minimum score for reCAPTCHA V3 on user registration level: admin locked: true created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &16 2020-03-15 17:01:39.204632000 Z zone: *2 time: *16 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &17 2020-03-15 17:01:39.204632000 Z zone: *2 time: *17 Setting_9: id: 9 name: tag_view description: "('cloud' or 'list')" level: user locked: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &18 2020-03-15 17:01:39.221611000 Z zone: *2 time: *18 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &19 2020-03-15 17:01:39.221611000 Z zone: *2 time: *19 Setting_10: id: 10 name: theme_name description: '' level: site locked: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &20 2020-03-15 17:01:39.238997000 Z zone: *2 time: 
*20 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &21 2020-03-15 17:01:39.238997000 Z zone: *2 time: *21
28.027586
87
0.695374
38175caac8376dcfac305bdb733bd3fedd4515f7
1,352
yml
YAML
config/database.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/database.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/database.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
# Database Config

default: &default
  adapter: postgresql
  encoding: unicode
  # For details on connection pooling, see Rails configuration guide
  # https://guides.rubyonrails.org/configuring.html#database-pooling
  pool: <%= ENV.fetch('RAILS_MAX_THREADS') { 5 } %>

development:
  <<: *default
  database: shinycms_development

  # Minimum log levels, in increasing order:
  #   debug5, debug4, debug3, debug2, debug1,
  #   log, notice, warning, error, fatal, and panic
  # Defaults to warning.
  #min_messages: debug1

# Warning: The database defined as "test" will be erased and
# re-generated from your development database when you run "rake".
# Do not set this db to the same as development or production.
test:
  <<: *default
  database: shinycms_test

# For security reasons, you should never put your production database details,
# particularly the password, in this file (or anywhere else in your source code)
#
# Read https://guides.rubyonrails.org/configuring.html#configuring-a-database
# for how to pass these details to your app via environment variables instead.
#
# On Heroku and other platform providers, you may have a full connection URL
# available as an environment variable. For example:
#
#   DATABASE_URL="postgres://shinyuser:shinypass@localhost/shinycms"
production:
  <<: *default
  url: <%= ENV['DATABASE_URL'] %>
30.044444
80
0.741864
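The database.yml above keeps credentials out of source by reading the production connection URL from the environment. A hedged sketch of how another environment could reuse the same defaults and env-var pattern; the staging block and STAGING_DATABASE_URL variable name are hypothetical, not part of the original config:

staging:
  <<: *default
  # hypothetical extra environment, following the same ERB/env-var pattern
  url: <%= ENV['STAGING_DATABASE_URL'] %>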
383925f9475807a4997017a9d857d45847bb6936
3,125
yml
YAML
db/demo-site-data/setting_values.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/setting_values.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/setting_values.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- SettingValue_1: id: 1 setting_id: 1 value: '' created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2020-03-15 17:01:39.066643000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-03-15 17:01:39.066643000 Z zone: *2 time: *3 SettingValue_2: id: 2 setting_id: 2 value: '' created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2020-03-15 17:01:39.109539000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2020-03-15 17:01:39.109539000 Z zone: *2 time: *5 SettingValue_3: id: 3 setting_id: 3 value: Anonymous created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2020-03-15 17:01:39.127306000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2020-03-15 17:01:39.127306000 Z zone: *2 time: *7 SettingValue_4: id: 4 setting_id: 4 value: '' created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2020-03-15 17:01:39.145470000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2020-03-15 17:01:39.145470000 Z zone: *2 time: *9 SettingValue_5: id: 5 setting_id: 5 value: '' created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2020-03-15 17:01:39.163086000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2020-03-15 17:01:39.163086000 Z zone: *2 time: *11 SettingValue_6: id: 6 setting_id: 6 value: "/" created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2020-03-15 17:01:39.180033000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2020-03-15 17:01:39.180033000 Z zone: *2 time: *13 SettingValue_7: id: 7 setting_id: 7 value: '0.6' created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &14 2020-03-15 17:01:39.197894000 Z zone: *2 time: *14 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &15 2020-03-15 17:01:39.197894000 Z zone: *2 time: *15 SettingValue_8: id: 8 setting_id: 8 value: '0.4' created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &16 2020-03-15 17:01:39.215050000 Z zone: *2 time: *16 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &17 2020-03-15 17:01:39.215050000 Z zone: *2 time: *17 SettingValue_9: id: 9 setting_id: 9 value: cloud created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &18 2020-03-15 17:01:39.232125000 Z zone: *2 time: *18 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &19 2020-03-15 17:01:39.232125000 Z zone: *2 time: *19 SettingValue_10: id: 10 setting_id: 10 value: '' created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &20 2020-03-15 17:01:39.249697000 Z zone: *2 time: *20 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &21 2020-03-15 17:01:39.249697000 Z zone: *2 time: *21
25.406504
54
0.66432
01babdd7af116f6a4c6c521324df6e9c0b1d818c
464
yml
YAML
db/demo-site-data/blogs.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/blogs.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/blogs.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
Blog_1:
  id: 1
  name: ShinyBlog
  description: ''
  title: The ShinySite Blog
  slug: shinysite
  hidden_from_menu: false
  hidden: false
  user_id: 1
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2020-02-08 07:23:45.322143000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2020-02-08 07:23:45.322143000 Z
    zone: *2
    time: *3
23.2
54
0.676724
ed602263b3abfc1bc4f9bd82bf71366733324e89
370
yml
YAML
.codeclimate.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.codeclimate.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.codeclimate.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
version: "2"

plugins:
  brakeman:
    enabled: true
  bundler-audit:
    enabled: true
  csslint:
    enabled: true
  editorconfig:
    enabled: true
  fixme:
    enabled: true
  markdownlint:
    enabled: true
  nodesecurity:
    enabled: true
  sass-lint:
    enabled: true
  spellcheck:
    enabled: true
  scss-lint:
    enabled: true
  stylelint:
    enabled: true
14.8
17
0.645946
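For the .codeclimate.yml above, whole paths can also be skipped from analysis; a minimal sketch using Code Climate's version-2 exclude_patterns key (the listed paths are illustrative and not taken from this repository):

exclude_patterns:
  - "db/"            # hypothetical exclusions
  - "node_modules/"
  - "spec/"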
fe214e0f187a96054bcc38b6baa61c948dc6574e
11,703
yml
YAML
config/locales/en.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/locales/en.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/locales/en.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
# To use the locales # I18n.t 'hello' # <%= t('hello') %> # # To use a different locale, set it with `I18n.locale`: # I18n.locale = :es # # To learn more, please read the Rails Internationalization guide # available at https://guides.rubyonrails.org/i18n.html. en: site_name: ShinySite home: Home # Generic verbs, used by buttons etc, both on main site and in admin area view: View list: List add: Add edit: Edit post: Post hide: Hide lock: Lock unhide: Unhide unlock: Unlock update: Update delete: Delete destroy: Delete # Confirmation dialogue are_you_sure: Are you sure? # ========== ( Main Site ) ========== blogs: index: title: Blogs set_blog: failure: Blog not found show: reply: Reply to this post sidebar: links: Links links_blurb: Recommended reading from around the web recent_posts: Recent Posts discussions: comments: Comments comment: Comment zero_comments: There are no comments to display new_comment: Add a new comment reply: Reply to this comment hidden_comment: This comment was removed by a moderator. spam: Spam comment_form: anonymous: Anonymous pseudonymous: Pseudonymous log_in_to_post: Log in to post a comment name: Name email: Emailspam_ url: URL add_comment: success: New comment posted failure: Failed to post new comment add_reply: success: New comment posted failure: Failed to post new comment save_comment: success: New comment posted failure: Failed to post new comment errors: not_found: title: "%{resource_type} not found" explanation: The URL you have reached is for a %{resource_type} that does not exist. page: template_file_missing: Unable to display page; its template file is missing. news: index: title: Recent news zero_posts: There are no news posts to display here. sidebar: links: Links links_blurb: Recommended reading from around the web recent_posts: Recent Posts settings: # NB: These translations strings are all here to keep them in one place # together, but not all of them would make sense as user settings. # Particularly not the first one!! admin_ip_list: Admin IP list all_comment_notifications_email: Email to notify of ALL comments allowed_to_comment: Allowed to comment default_page: Default page default_section: Default section post_login_redirect: Post-login redirect recaptcha_comment_score: 'reCAPTCHA: Minimum score for comments' recaptcha_registration_score: 'reCAPTCHA: Minimum score for user registration' tag_view: Default tag view theme_name: Theme name site_settings: index: title: Site settings update: success: Settings updated unchanged: No settings were changed tags: index: title: Tags cloud: title: Tag Cloud list: title: Tag List user: view_profile: View your profile edit_profile: Edit your profile log_in: Log in log_out: Log out register: Register site_settings: Site settings # ========== ( Mailers ) ========== discussion_mailer: they_said: They said parent_comment_notification: subject: "[%{site_name}] %{reply_author_name} replied to your comment" you_said: You said discussion_notification: subject: "[%{site_name}] %{comment_author_name} commented on your %{content_type}" overview_notification: subject: "[%{site_name}] %{comment_author_name} commented on your site" user_mailer: registration: subject: "[%{site_name}] Please confirm your registration" content_types: blog_post: blog post # ========== ( Feature Flags ) ========== feature_flags: off_alert: Sorry, the '%{feature_name}' feature of this site is not available. 
akismet_on_comments: Detect spam comments with Akismet blogs: Blogs comments: Comments comment_notifications: Comment notifications news: News recaptcha_on_registration_form: Protect registration form with reCAPTCHA recaptcha_on_comment_form: Protect comment form with reCAPTCHA tags: Tags user_login: User Login user_profiles: User Profiles user_registration: User Registration # ========== ( User Capabilities ) ========== capability: capabilities: Admin Capabilities category: admin_users: Admin Users blogs: Blogs blog_posts: Blog Posts comments: Comments discussions: Discussions feature_flags: Feature Flags general: General inserts: Inserts pages: Pages page_sections: Page Sections page_templates: Page Templates settings: Site Settings spam_comments: Spam Comments users: Users web_stats: Web Stats blog_posts: change_author: Change author general: view_admin_area: View admin area view_admin_dashboard: View admin dashboard view_admin_toolbar: View admin toolbar users: view_admin_notes: View admin notes # ========== ( Admin Area ) ========== admin: title: Admin admin_area: Admin area elements: short_text: Short Text long_text: Long Text image: Image html: HTML blogs: title: Blogs index: title: List blogs empty: There are no blogs to display new: title: Create new blog edit: title: Edit blog details create: success: New blog created failure: Failed to create new blog update: success: Blog details updated failure: Failed to update blog details destroy: success: Blog deleted failure: Failed to delete blog set_blog: failure: Couldn't find specified blog blog: posts: title: Posts index: title: List blog posts empty: There are no blog posts to display new: title: Add new blog post edit: title: Edit blog post create: success: New blog post added failure: Failed to add new blog post update: success: Blog post updated failure: Failed to update blog post destroy: success: Blog post deleted failure: Failed to delete blog post set_post: failure: Couldn't find specified blog post comments: title: Spam comments index: title: Spam comment moderation empty: There are currently no comments marked as spam select_all: Select all confirm_spam: Confirm as spam (and delete) remove_flag: Remove spam flag (and publish) update: spam_or_ham: Please choose either 'confirm as spam' or 'remove spam flag' process_spam_comments: success: Spam comments deleted # failure: Failed to delete spam comments process_ham_comments: success: Spam flags removed (comments should now be visible on site) # failure: Failed to remove spam flags dashboard: title: Dashboard discussion: title: Discussion feature_flags: title: Feature Flags index: title: Feature Flags feature: Feature enabled_for: Enabled for... 
everybody: Everybody logged_in: Logged-in users admins: Admins update: success: Feature flags updated failure: Failed to update feature flags inserts: title: Inserts index: title: Inserts name_placeholder: Name content_placeholder: Content (can be blank) element: unknown_element_type: UNKNOWN ELEMENT TYPE IN INSERTS create: success: New insert added failure: Failed to add new insert update: success: Inserts updated # unchanged: Inserts unchanged failure: Failed to update inserts destroy: success: Insert deleted failure: Failed to delete insert news: title: News index: title: List news postsapp/models/comment.rb empty: There are no news posts to display new: title: Add news post edit: title: Edit news post create: success: News post added failure: Failed to add news post update: success: News post updated failure: Failed to update news post destroy: success: News post deleted failure: Failed to delete news post set_post: failure: Couldn't find specified blog post pages: title: Pages index: title: List pages new: title: Add new page edit: title: Edit page create: success: New page added failure: Failed to add new page update: success: Page details updated failure: Failed to update page details destroy: success: Page deleted failure: Failed to delete page sections: title: Page sections new: title: Add new section edit: title: Edit section create: success: New section added failure: Failed to add new section update: success: Section details updated failure: Failed to update section details destroy: success: Section deleted failure: Failed to section page templates: title: Page templates index: title: List templates new: title: Add new template edit: title: Edit template create: success: New template added failure: Failed to add new template update: success: Template details updated failure: Failed to update template details destroy: success: Template deleted failure: Failed to delete template other: title: Other site_settings: title: Site settings index: title: Site settings name: Name value: Value level: Level user: User admin: Admin site: Site update: success: Settings updated failure: Failed to update some settings unchanged: No settings were changed users: title: Users index: title: List users new: title: Add new user edit: title: Edit user create: success: New user added failure: Failed to add new user update: success: User details updated failure: Failed to update user details destroy: success: User deleted failure: Failed to delete user stats: title: Stats web_stats: title: Web stats index: title: Web stats empty: No stats to display models: page_template: template_file_must_exist: The template file must be in pages/templates setting_value: one_per_user: You can only have one setting per value per user concerns: name_title_slug: slug_must_be_unique: The slug must be unique within its section # Pagination blocks on admin list pages (generated by kaminari) views: pagination: first: "&laquo; First" last: "Last &raquo;" previous: "&lsaquo; Prev" next: "Next &rsaquo;" truncate: "&hellip;" # ckeditor: # page_title: 'CKEditor File Manager' # confirm_delete: 'Delete file?' # buttons: # upload: 'Upload' # cancel: 'Cancel' # delete: 'Delete' # next: 'Next'
25.608315
90
0.62924
0e7abbc1542c0002ee811522f0fd9237d6cf1071
332
yml
YAML
db/demo-site-data/insert_sets.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/insert_sets.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/insert_sets.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
InsertSet_1:
  id: 1
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2020-03-15 17:01:38.810306000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2020-03-15 17:01:38.810306000 Z
    zone: *2
    time: *3
25.538462
54
0.665663
1effb9abe5d1dbf172678fff6833d489b64b1c8f
2,310
yml
YAML
.rubocop.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.rubocop.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.rubocop.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
require: rubocop-rails

# Rails and ActiveRecord autogenerate a lot of code that fails Rubocop
# (I find this mildly confusing/annoying). Also, there's no point in
# having Rubocop look at JavaScript, images, shell scripts, etc.
AllCops:
  EnabledByDefault: true
  Exclude:
    - db/**/*
    - bin/*
    - app/assets/**/*
    - coverage/**/*
    - docs/*
    - log/*
    - node_modules/**/*
    - public/**/*
    - tmp/**/*
    - tools/**/*

# TODO: see if I can make this method happy at some point
Metrics/AbcSize:
  Exclude:
    - app/helpers/admin_area_helper.rb

# Spec files and the routes file are supposed to be one long block. Shush.
Metrics/BlockLength:
  Exclude:
    - Gemfile
    - spec/**/*.rb
    - config/routes.rb
    - config/environments/*
    - lib/tasks/*.rake

# Also, spec files with I18n strings in them are more readable with slightly
# long lines than they would be with lots of split lines. Again, shush.
Layout/LineLength:
  Exclude:
    - spec/**/*.rb

# This test is for a feature involving a list of IP addresses
Style/IpAddresses:
  Exclude:
    - spec/requests/admin_spec.rb

# Whitespace Considered Helpful
Layout/ExtraSpacing:
  Enabled: false
Layout/SpaceInsideArrayLiteralBrackets:
  Enabled: false
Layout/SpaceInsideBlockBraces:
  Enabled: false
Layout/SpaceInsideHashLiteralBraces:
  Enabled: false
Layout/SpaceInsideParens:
  Enabled: false
Layout/SpaceInsidePercentLiteralDelimiters:
  Enabled: false
Layout/SpaceInsideReferenceBrackets:
  Enabled: false

# Allow compact class names (e.g. Admin::Pages::TemplatesController)
Style/ClassAndModuleChildren:
  Enabled: false

# I'd rather use one style of regex everywhere, that is clearer for munging URLs
# as no need to escape '/': %r{/(path)/parts/} rather than /\/(path)\/parts\//
Style/RegexpLiteral:
  Enabled: false

# I have no idea what this is but it was annoying me.
Style/FrozenStringLiteralComment:
  Enabled: false

# ... nah
Bundler/GemComment:
  Enabled: false
Layout/MultilineMethodArgumentLineBreaks:
  Enabled: false
Lint/NumberConversion:
  Enabled: false
Style/Copyright:
  Enabled: false
Style/DocumentationMethod:
  Enabled: false
Style/InlineComment:
  Enabled: false
Style/MethodCallWithArgsParentheses:
  Enabled: false
Style/MissingElse:
  Enabled: false
Style/StringHashKeys:
  Enabled: false
24.83871
80
0.730736
64b1802febd2ddb69001ebc0e30ac5cdee1ca32e
852
yml
YAML
.overcommit.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.overcommit.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.overcommit.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
# Use this file to configure the Overcommit hooks you wish to use. This will
# extend the default configuration defined in:
# https://github.com/sds/overcommit/blob/master/config/default.yml
#
# For a complete list of hooks, see:
# https://github.com/sds/overcommit/tree/master/lib/overcommit/hook
#
# For a complete list of options that you can use to customize hooks, see:
# https://github.com/sds/overcommit#configuration

PreCommit:
  ALL:
    quiet: false

  Brakeman:
    enabled: true

  RuboCop:
    enabled: true
    on_warn: fail

  TrailingWhitespace:
    enabled: true
    exclude:
      - '**/*.md'

  CaseConflicts:
    enabled: false

PrePush:
  ALL:
    quiet: false

  RSpec:
    enabled: true

  BundleAudit:
    enabled: true

  Fasterer:
    enabled: true

PostMerge:
  ALL:
    quiet: false

  RuboCop:
    enabled: true
    on_warn: fail
20.285714
76
0.691315
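Extending the .overcommit.yml above with another built-in check follows the same shape; for example, Overcommit ships a YamlSyntax pre-commit hook. Shown here as an illustrative addition only, not part of the original file:

PreCommit:
  YamlSyntax:        # illustrative extra hook, assuming the built-in hook name
    enabled: true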
259c3fbeef7c5f6f3fa80f2d6098fc0660b8580b
4,896
yml
YAML
db/demo-site-data/capability_categories.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/capability_categories.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/capability_categories.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- CapabilityCategory_1: id: 1 name: general created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2020-03-17 12:57:25.944493000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-03-17 12:57:25.944493000 Z zone: *2 time: *3 CapabilityCategory_2: id: 2 name: blogs created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2020-03-17 12:57:25.953882000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2020-03-17 12:57:25.953882000 Z zone: *2 time: *5 CapabilityCategory_3: id: 3 name: blog_posts created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2020-03-17 12:57:25.962837000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2020-03-17 12:57:25.962837000 Z zone: *2 time: *7 CapabilityCategory_4: id: 4 name: discussions created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2020-03-17 12:57:25.969611000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2020-03-17 12:57:25.969611000 Z zone: *2 time: *9 CapabilityCategory_5: id: 5 name: comments created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2020-03-17 12:57:25.975568000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2020-03-17 12:57:25.975568000 Z zone: *2 time: *11 CapabilityCategory_6: id: 6 name: spam_comments created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2020-03-17 12:57:25.984965000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2020-03-17 12:57:25.984965000 Z zone: *2 time: *13 CapabilityCategory_7: id: 7 name: feature_flags created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &14 2020-03-17 12:57:25.994663000 Z zone: *2 time: *14 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &15 2020-03-17 12:57:25.994663000 Z zone: *2 time: *15 CapabilityCategory_8: id: 8 name: inserts created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &16 2020-03-17 12:57:26.001697000 Z zone: *2 time: *16 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &17 2020-03-17 12:57:26.001697000 Z zone: *2 time: *17 CapabilityCategory_9: id: 9 name: news_posts created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &18 2020-03-17 12:57:26.008290000 Z zone: *2 time: *18 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &19 2020-03-17 12:57:26.008290000 Z zone: *2 time: *19 CapabilityCategory_10: id: 10 name: pages created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &20 2020-03-17 12:57:26.016209000 Z zone: *2 time: *20 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &21 2020-03-17 12:57:26.016209000 Z zone: *2 time: *21 CapabilityCategory_11: id: 11 name: page_sections created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &22 2020-03-17 12:57:26.023354000 Z zone: *2 time: *22 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &23 2020-03-17 12:57:26.023354000 Z zone: *2 time: *23 CapabilityCategory_12: id: 12 name: page_templates created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &24 2020-03-17 12:57:26.030842000 Z zone: *2 time: *24 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &25 2020-03-17 12:57:26.030842000 Z zone: *2 time: *25 CapabilityCategory_13: id: 13 name: settings created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &26 2020-03-17 12:57:26.038595000 Z zone: *2 time: *26 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &27 2020-03-17 12:57:26.038595000 Z zone: *2 
time: *27 CapabilityCategory_14: id: 14 name: web_stats created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &28 2020-03-17 12:57:26.046210000 Z zone: *2 time: *28 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &29 2020-03-17 12:57:26.046210000 Z zone: *2 time: *29 CapabilityCategory_15: id: 15 name: users created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &30 2020-03-17 12:57:26.054550000 Z zone: *2 time: *30 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &31 2020-03-17 12:57:26.054550000 Z zone: *2 time: *31 CapabilityCategory_16: id: 16 name: admin_users created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &32 2020-03-17 12:57:26.062204000 Z zone: *2 time: *32 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &33 2020-03-17 12:57:26.062204000 Z zone: *2 time: *33
27.351955
54
0.68219
7d5cbd0a35205e582082722e136423f25d1b00f5
317
yml
YAML
.scss-lint.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.scss-lint.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.scss-lint.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
exclude:
  - app/assets/stylesheets/halcyonic/main.scss
  - node_modules/*

linters:
  ImportantRule:
    enabled: false
  LeadingZero:
    enabled: false
  MergeableSelector:
    enabled: false
  Shorthand:
    enabled: false
  SpaceAfterPropertyColon:
    enabled: false
  SpaceBetweenParens:
    enabled: false
17.611111
46
0.712934
99f1a06dfbd6c44f260f302eecb482d2a1525b0f
18,643
yml
YAML
db/demo-site-data/user_capabilities.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/user_capabilities.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/user_capabilities.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- UserCapability_1: id: 1 user_id: 1 capability_id: 1 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2020-03-17 13:06:45.997237000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-03-17 13:06:45.997237000 Z zone: *2 time: *3 UserCapability_2: id: 2 user_id: 1 capability_id: 2 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2020-03-17 13:06:46.002052000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2020-03-17 13:06:46.002052000 Z zone: *2 time: *5 UserCapability_3: id: 3 user_id: 1 capability_id: 3 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2020-03-17 13:06:46.005157000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2020-03-17 13:06:46.005157000 Z zone: *2 time: *7 UserCapability_4: id: 4 user_id: 1 capability_id: 4 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2020-03-17 13:06:46.008159000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2020-03-17 13:06:46.008159000 Z zone: *2 time: *9 UserCapability_5: id: 5 user_id: 1 capability_id: 5 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2020-03-17 13:06:46.011363000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2020-03-17 13:06:46.011363000 Z zone: *2 time: *11 UserCapability_6: id: 6 user_id: 1 capability_id: 6 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2020-03-17 13:06:46.014590000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2020-03-17 13:06:46.014590000 Z zone: *2 time: *13 UserCapability_7: id: 7 user_id: 1 capability_id: 7 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &14 2020-03-17 13:06:46.017718000 Z zone: *2 time: *14 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &15 2020-03-17 13:06:46.017718000 Z zone: *2 time: *15 UserCapability_8: id: 8 user_id: 1 capability_id: 8 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &16 2020-03-17 13:06:46.020740000 Z zone: *2 time: *16 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &17 2020-03-17 13:06:46.020740000 Z zone: *2 time: *17 UserCapability_9: id: 9 user_id: 1 capability_id: 9 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &18 2020-03-17 13:06:46.023522000 Z zone: *2 time: *18 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &19 2020-03-17 13:06:46.023522000 Z zone: *2 time: *19 UserCapability_10: id: 10 user_id: 1 capability_id: 10 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &20 2020-03-17 13:06:46.026380000 Z zone: *2 time: *20 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &21 2020-03-17 13:06:46.026380000 Z zone: *2 time: *21 UserCapability_11: id: 11 user_id: 1 capability_id: 11 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &22 2020-03-17 13:06:46.029344000 Z zone: *2 time: *22 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &23 2020-03-17 13:06:46.029344000 Z zone: *2 time: *23 UserCapability_12: id: 12 user_id: 1 capability_id: 12 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &24 2020-03-17 13:06:46.032203000 Z zone: *2 time: *24 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &25 2020-03-17 13:06:46.032203000 Z zone: *2 time: *25 UserCapability_13: id: 13 user_id: 1 capability_id: 13 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &26 2020-03-17 13:06:46.035089000 Z zone: *2 time: *26 
updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &27 2020-03-17 13:06:46.035089000 Z zone: *2 time: *27 UserCapability_14: id: 14 user_id: 1 capability_id: 14 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &28 2020-03-17 13:06:46.037972000 Z zone: *2 time: *28 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &29 2020-03-17 13:06:46.037972000 Z zone: *2 time: *29 UserCapability_15: id: 15 user_id: 1 capability_id: 15 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &30 2020-03-17 13:06:46.040805000 Z zone: *2 time: *30 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &31 2020-03-17 13:06:46.040805000 Z zone: *2 time: *31 UserCapability_16: id: 16 user_id: 1 capability_id: 16 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &32 2020-03-17 13:06:46.043567000 Z zone: *2 time: *32 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &33 2020-03-17 13:06:46.043567000 Z zone: *2 time: *33 UserCapability_17: id: 17 user_id: 1 capability_id: 17 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &34 2020-03-17 13:06:46.046384000 Z zone: *2 time: *34 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &35 2020-03-17 13:06:46.046384000 Z zone: *2 time: *35 UserCapability_18: id: 18 user_id: 1 capability_id: 18 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &36 2020-03-17 13:06:46.049256000 Z zone: *2 time: *36 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &37 2020-03-17 13:06:46.049256000 Z zone: *2 time: *37 UserCapability_19: id: 19 user_id: 1 capability_id: 19 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &38 2020-03-17 13:06:46.052076000 Z zone: *2 time: *38 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &39 2020-03-17 13:06:46.052076000 Z zone: *2 time: *39 UserCapability_20: id: 20 user_id: 1 capability_id: 20 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &40 2020-03-17 13:06:46.054880000 Z zone: *2 time: *40 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &41 2020-03-17 13:06:46.054880000 Z zone: *2 time: *41 UserCapability_21: id: 21 user_id: 1 capability_id: 21 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &42 2020-03-17 13:06:46.057673000 Z zone: *2 time: *42 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &43 2020-03-17 13:06:46.057673000 Z zone: *2 time: *43 UserCapability_22: id: 22 user_id: 1 capability_id: 22 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &44 2020-03-17 13:06:46.060550000 Z zone: *2 time: *44 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &45 2020-03-17 13:06:46.060550000 Z zone: *2 time: *45 UserCapability_23: id: 23 user_id: 1 capability_id: 23 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &46 2020-03-17 13:06:46.064340000 Z zone: *2 time: *46 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &47 2020-03-17 13:06:46.064340000 Z zone: *2 time: *47 UserCapability_24: id: 24 user_id: 1 capability_id: 24 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &48 2020-03-17 13:06:46.068224000 Z zone: *2 time: *48 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &49 2020-03-17 13:06:46.068224000 Z zone: *2 time: *49 UserCapability_25: id: 25 user_id: 1 capability_id: 25 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &50 2020-03-17 13:06:46.070861000 Z zone: *2 time: *50 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &51 2020-03-17 13:06:46.070861000 Z zone: *2 time: *51 UserCapability_26: id: 26 user_id: 1 capability_id: 26 created_at: 
!ruby/object:ActiveSupport::TimeWithZone utc: &52 2020-03-17 13:06:46.073339000 Z zone: *2 time: *52 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &53 2020-03-17 13:06:46.073339000 Z zone: *2 time: *53 UserCapability_27: id: 27 user_id: 1 capability_id: 27 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &54 2020-03-17 13:06:46.075745000 Z zone: *2 time: *54 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &55 2020-03-17 13:06:46.075745000 Z zone: *2 time: *55 UserCapability_28: id: 28 user_id: 1 capability_id: 28 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &56 2020-03-17 13:06:46.078383000 Z zone: *2 time: *56 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &57 2020-03-17 13:06:46.078383000 Z zone: *2 time: *57 UserCapability_29: id: 29 user_id: 1 capability_id: 29 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &58 2020-03-17 13:06:46.080943000 Z zone: *2 time: *58 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &59 2020-03-17 13:06:46.080943000 Z zone: *2 time: *59 UserCapability_30: id: 30 user_id: 1 capability_id: 30 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &60 2020-03-17 13:06:46.083301000 Z zone: *2 time: *60 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &61 2020-03-17 13:06:46.083301000 Z zone: *2 time: *61 UserCapability_31: id: 31 user_id: 1 capability_id: 31 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &62 2020-03-17 13:06:46.085709000 Z zone: *2 time: *62 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &63 2020-03-17 13:06:46.085709000 Z zone: *2 time: *63 UserCapability_32: id: 32 user_id: 1 capability_id: 32 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &64 2020-03-17 13:06:46.088099000 Z zone: *2 time: *64 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &65 2020-03-17 13:06:46.088099000 Z zone: *2 time: *65 UserCapability_33: id: 33 user_id: 1 capability_id: 33 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &66 2020-03-17 13:06:46.090741000 Z zone: *2 time: *66 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &67 2020-03-17 13:06:46.090741000 Z zone: *2 time: *67 UserCapability_34: id: 34 user_id: 1 capability_id: 34 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &68 2020-03-17 13:06:46.093477000 Z zone: *2 time: *68 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &69 2020-03-17 13:06:46.093477000 Z zone: *2 time: *69 UserCapability_35: id: 35 user_id: 1 capability_id: 35 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &70 2020-03-17 13:06:46.096494000 Z zone: *2 time: *70 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &71 2020-03-17 13:06:46.096494000 Z zone: *2 time: *71 UserCapability_36: id: 36 user_id: 1 capability_id: 36 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &72 2020-03-17 13:06:46.099390000 Z zone: *2 time: *72 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &73 2020-03-17 13:06:46.099390000 Z zone: *2 time: *73 UserCapability_37: id: 37 user_id: 1 capability_id: 37 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &74 2020-03-17 13:06:46.102219000 Z zone: *2 time: *74 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &75 2020-03-17 13:06:46.102219000 Z zone: *2 time: *75 UserCapability_38: id: 38 user_id: 1 capability_id: 38 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &76 2020-03-17 13:06:46.105338000 Z zone: *2 time: *76 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &77 2020-03-17 
13:06:46.105338000 Z zone: *2 time: *77 UserCapability_39: id: 39 user_id: 1 capability_id: 39 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &78 2020-03-17 13:06:46.108130000 Z zone: *2 time: *78 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &79 2020-03-17 13:06:46.108130000 Z zone: *2 time: *79 UserCapability_40: id: 40 user_id: 1 capability_id: 40 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &80 2020-03-17 13:06:46.110869000 Z zone: *2 time: *80 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &81 2020-03-17 13:06:46.110869000 Z zone: *2 time: *81 UserCapability_41: id: 41 user_id: 1 capability_id: 41 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &82 2020-03-17 13:06:46.113628000 Z zone: *2 time: *82 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &83 2020-03-17 13:06:46.113628000 Z zone: *2 time: *83 UserCapability_42: id: 42 user_id: 1 capability_id: 42 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &84 2020-03-17 13:06:46.116251000 Z zone: *2 time: *84 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &85 2020-03-17 13:06:46.116251000 Z zone: *2 time: *85 UserCapability_43: id: 43 user_id: 1 capability_id: 43 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &86 2020-03-17 13:06:46.119563000 Z zone: *2 time: *86 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &87 2020-03-17 13:06:46.119563000 Z zone: *2 time: *87 UserCapability_44: id: 44 user_id: 1 capability_id: 44 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &88 2020-03-17 13:06:46.122355000 Z zone: *2 time: *88 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &89 2020-03-17 13:06:46.122355000 Z zone: *2 time: *89 UserCapability_45: id: 45 user_id: 1 capability_id: 45 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &90 2020-03-17 13:06:46.124942000 Z zone: *2 time: *90 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &91 2020-03-17 13:06:46.124942000 Z zone: *2 time: *91 UserCapability_46: id: 46 user_id: 1 capability_id: 46 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &92 2020-03-17 13:06:46.127734000 Z zone: *2 time: *92 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &93 2020-03-17 13:06:46.127734000 Z zone: *2 time: *93 UserCapability_47: id: 47 user_id: 1 capability_id: 47 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &94 2020-03-17 13:06:46.132365000 Z zone: *2 time: *94 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &95 2020-03-17 13:06:46.132365000 Z zone: *2 time: *95 UserCapability_48: id: 48 user_id: 1 capability_id: 48 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &96 2020-03-17 13:06:46.135916000 Z zone: *2 time: *96 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &97 2020-03-17 13:06:46.135916000 Z zone: *2 time: *97 UserCapability_49: id: 49 user_id: 1 capability_id: 49 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &98 2020-03-17 13:06:46.138868000 Z zone: *2 time: *98 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &99 2020-03-17 13:06:46.138868000 Z zone: *2 time: *99 UserCapability_50: id: 50 user_id: 1 capability_id: 50 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &100 2020-03-17 13:06:46.141887000 Z zone: *2 time: *100 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &101 2020-03-17 13:06:46.141887000 Z zone: *2 time: *101 UserCapability_51: id: 51 user_id: 1 capability_id: 51 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &102 2020-03-17 
13:06:46.145021000 Z zone: *2 time: *102 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &103 2020-03-17 13:06:46.145021000 Z zone: *2 time: *103 UserCapability_52: id: 52 user_id: 1 capability_id: 52 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &104 2020-03-17 13:06:46.147930000 Z zone: *2 time: *104 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &105 2020-03-17 13:06:46.147930000 Z zone: *2 time: *105 UserCapability_53: id: 53 user_id: 1 capability_id: 53 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &106 2020-03-17 13:06:46.151168000 Z zone: *2 time: *106 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &107 2020-03-17 13:06:46.151168000 Z zone: *2 time: *107 UserCapability_54: id: 54 user_id: 1 capability_id: 54 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &108 2020-03-17 13:06:46.154222000 Z zone: *2 time: *108 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &109 2020-03-17 13:06:46.154222000 Z zone: *2 time: *109 UserCapability_55: id: 55 user_id: 1 capability_id: 55 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &110 2020-03-17 13:06:46.157064000 Z zone: *2 time: *110 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &111 2020-03-17 13:06:46.157064000 Z zone: *2 time: *111 UserCapability_56: id: 56 user_id: 1 capability_id: 56 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &112 2020-03-17 13:06:46.160039000 Z zone: *2 time: *112 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &113 2020-03-17 13:06:46.160039000 Z zone: *2 time: *113 UserCapability_57: id: 57 user_id: 1 capability_id: 57 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &114 2020-03-17 13:06:46.163283000 Z zone: *2 time: *114 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &115 2020-03-17 13:06:46.163283000 Z zone: *2 time: *115 UserCapability_58: id: 58 user_id: 1 capability_id: 58 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &116 2020-03-17 13:06:46.166266000 Z zone: *2 time: *116 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &117 2020-03-17 13:06:46.166266000 Z zone: *2 time: *117 UserCapability_59: id: 59 user_id: 1 capability_id: 59 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &118 2020-03-17 13:06:46.169290000 Z zone: *2 time: *118 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &119 2020-03-17 13:06:46.169290000 Z zone: *2 time: *119
26.220816
54
0.67929
92d4e61d076fdd346d40548dcc1f258120411550
312
yml
YAML
.travis.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.travis.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.travis.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
# Travis CI config, for https://travis-ci.org/denny/ShinyCMS-ruby

dist: bionic

language: ruby
rvm:
  - 2.6.6

cache: bundler

addons:
  postgresql: "10"

before_script:
  - bundle exec rails assets:precompile
  - bundle exec rails db:create
  - bundle exec rails db:schema:load

script:
  - bundle exec rspec
14.181818
65
0.714744
d15e28f501393c7566d166f2cb7b4f8f6edc0d0a
3,916
yml
YAML
.circleci/config.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.circleci/config.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
.circleci/config.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
# CircleCI config, for https://circleci.com/gh/denny/ShinyCMS-ruby

version: 2

references:
  app: &app
    image: circleci/ruby:2.6.6-node
    environment:
      BUNDLE_JOBS: 3
      BUNDLE_RETRY: 3
      BUNDLE_PATH: ~/ShinyCMS/vendor/bundle
      DATABASE_URL: postgres://shinyuser:[email protected]:5432/shinycms_test
      NODE_ENV: test
      RAILS_ENV: test
      SHINYCMS_THEME: TEST
  db: &db
    image: circleci/postgres:10-ram
    environment:
      POSTGRES_USER: shinyuser
      POSTGRES_PASSWORD: shinypass
      POSTGRES_DB: shinycms_test

jobs:
  build:
    # Setup phase
    # setup:
    docker:
      - *app
      - *db

    working_directory: ~/ShinyCMS

    steps:
      - checkout

      # Restore Ruby dependencies from cache, or install them
      - run:
          name: Install Bundler
          command: gem install bundler
      - restore_cache:
          keys:
            - bundler-cache-{{ checksum "Gemfile.lock" }}
            - bundler-cache-
      - run:
          name: Install gems
          command: bundle check || bundle install
      - save_cache:
          key: bundler-cache-{{ checksum "Gemfile.lock" }}
          paths:
            - ~/ShinyCMS/vendor/bundle

      # Restore Node dependencies from cache, or install them
      - restore_cache:
          keys:
            - yarn-cache-{{ checksum "yarn.lock" }}
            - yarn-cache-
      - run:
          name: Install Node modules
          command: yarn install --cache-folder ~/.cache/yarn
      - save_cache:
          key: yarn-cache-{{ checksum "yarn.lock" }}
          paths:
            - ~/.cache/yarn

      # Restore Webpacker assets from cache, or compile them
      - restore_cache:
          keys:
            - webpacker-cache-{{ .Revision }}
            - webpacker-cache-
      # run: bundle exec rails assets:precompile
      - run:
          name: Compile webpacker assets
          command: bundle exec rake webpacker:compile
      - save_cache:
          key: webpacker-cache-{{ .Revision }}
          paths:
            - ~/ShinyCMS/public/packs-test/

      # Testing phase
      # test:
      #   docker:
      #     - *app
      #     - *db
      #
      #   working_directory: ~/ShinyCMS
      #   parallelism: 4
      #
      #   steps:
      #     - checkout
      #
      #     - restore_cache:
      #         keys:
      #           - bundler-cache-{{ checksum "Gemfile.lock" }}
      #           - bundler-cache-
      #     - restore_cache:
      #         keys:
      #           - yarn-cache-{{ checksum "yarn.lock" }}
      #           - yarn-cache-
      #     - restore_cache:
      #         keys:
      #           - webpacker-cache-{{ .Revision }}
      #           - webpacker-cache-
      #
      #     # Install bundler (again?) and double-check the gems are all there
      #     - run:
      #         name: Install bundler
      #         command: |
      #           gem install bundler
      #           bundle check || bundle install

      # Install MJML binary
      - run:
          name: Install MJML
          command: npm install mjml

      # Database setup
      - run:
          name: Wait for database
          command: dockerize -wait tcp://localhost:5432 -timeout 1m
      - run:
          name: Load database schema
          command: bundle exec rails db:schema:load

      # Run tests!
      - run:
          name: Run tests
          command: |
            mkdir /tmp/test-results
            TEST_FILES=$( circleci tests glob "spec/**/*_spec.rb" | circleci tests split --split-by=timings )
            bundle exec rspec $TEST_FILES \
              --profile 10 \
              --format RspecJunitFormatter \
              --out /tmp/test-results/rspec.xml \
              --format progress

      # Store test results for CodeCov
      - store_test_results:
          path: /tmp/test-results
      - store_artifacts:
          path: /tmp/test-results
          destination: test-results

#workflows:
#  version: 2
#  setup_and_test:
#    jobs:
#      - setup
#      - test:
#          requires:
#            - setup
25.933775
109
0.547242
70fe6ca1028653dc313c74ebe0eea2cb060417d9
2,212
yml
YAML
db/demo-site-data/pages.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/pages.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/pages.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- Page_1: id: 1 name: Home page description: Homepage using Halcyonic index template title: Home slug: home template_id: 1 sort_order: 1 hidden_from_menu: true hidden: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2019-11-13 23:02:20.365551000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-02-08 07:08:56.729762000 Z zone: *2 time: *3 Page_2: id: 2 name: One column description: '' title: One Column slug: one template_id: 2 sort_order: 2 hidden_from_menu: false hidden: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2019-11-15 14:06:17.063354000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2020-02-08 07:12:43.153008000 Z zone: *2 time: *5 Page_3: id: 3 name: Right sidebar description: '' title: Right Sidebar slug: right-sidebar template_id: 3 section_id: 1 sort_order: 2 hidden_from_menu: false hidden: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2019-11-15 14:06:43.040014000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2020-02-08 07:08:42.065243000 Z zone: *2 time: *7 Page_4: id: 4 name: Left sidebar description: '' title: Left Sidebar slug: left-sidebar template_id: 4 section_id: 1 sort_order: 1 hidden_from_menu: false hidden: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2019-11-15 14:07:01.638743000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2020-02-08 07:08:33.101800000 Z zone: *2 time: *9 Page_5: id: 5 name: Three column description: '' title: Three Column slug: three template_id: 5 sort_order: 4 hidden_from_menu: false hidden: false created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2019-11-15 14:07:44.629697000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2020-02-08 07:12:52.697160000 Z zone: *2 time: *11
23.284211
54
0.679928
e3f276dde0a33a8e5172c2b728172f883a0803de
12,530
yml
YAML
db/demo-site-data/page_elements.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_elements.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_elements.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- PageElement_1: id: 1 page_id: 1 name: banner_text content: "<p><a href=\"https://shinycms.org/\">ShinyCMS</a> is a free and open source content-management system. This <a href=\"http://github.com/denny/ShinyCMS-ruby\">new version</a> is built with <a href=\"https://www.ruby-lang.org/\">Ruby</a> on <a href=\"https://rubyonrails.org/\">Rails</a> (the <a href=\"https://github.com/denny/ShinyCMS\">original version</a> is built with <a href=\"https://www.perl.org/\">Perl</a> and <a href=\"http://www.catalystframework.org/\">Catalyst</a>).</p>\r\n" content_type: HTML created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2019-11-13 23:02:20.390263000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-02-08 07:08:56.732754000 Z zone: *2 time: *3 PageElement_2: id: 2 page_id: 1 name: banner_button_url content: https://github.com/denny/ShinyCMS-ruby content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2019-11-13 23:02:20.394329000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2019-11-13 23:06:22.703356000 Z zone: *2 time: *5 PageElement_3: id: 3 page_id: 1 name: banner_button_text content: ShinyCMS on GitHub content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2019-11-13 23:02:20.397235000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2019-11-13 23:06:22.706076000 Z zone: *2 time: *7 PageElement_4: id: 4 page_id: 1 name: banner_image content: banner.png content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2019-11-13 23:02:20.400180000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2019-11-13 23:06:22.708392000 Z zone: *2 time: *9 PageElement_5: id: 5 page_id: 1 name: image1 content: pic01.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2019-11-13 23:02:20.403049000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2019-11-13 23:06:22.710747000 Z zone: *2 time: *11 PageElement_6: id: 6 page_id: 1 name: heading1 content: Pages content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2019-11-13 23:02:20.407188000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2019-11-13 23:06:22.713010000 Z zone: *2 time: *13 PageElement_7: id: 7 page_id: 1 name: paragraph1 content: The most basic requirement of any CMS, editable pages... about 3/4 working now! 
content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &14 2019-11-13 23:02:20.410240000 Z zone: *2 time: *14 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &15 2019-11-13 23:06:22.715328000 Z zone: *2 time: *15 PageElement_8: id: 8 page_id: 1 name: image2 content: pic02.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &16 2019-11-13 23:02:20.413991000 Z zone: *2 time: *16 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &17 2019-11-13 23:06:22.717591000 Z zone: *2 time: *17 PageElement_9: id: 9 page_id: 1 name: heading2 content: Sections content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &18 2019-11-13 23:02:20.416830000 Z zone: *2 time: *18 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &19 2019-11-13 23:06:22.719974000 Z zone: *2 time: *19 PageElement_10: id: 10 page_id: 1 name: paragraph2 content: You can put your pages into sections and subsections - as many as you need, nested to any depth. Or you can keep them all at the top level on smaller, simpler sites. content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &20 2019-11-13 23:02:20.419603000 Z zone: *2 time: *20 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &21 2019-11-13 23:06:22.722361000 Z zone: *2 time: *21 PageElement_11: id: 11 page_id: 1 name: image3 content: pic03.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &22 2019-11-13 23:02:20.423354000 Z zone: *2 time: *22 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &23 2019-11-13 23:06:22.724690000 Z zone: *2 time: *23 PageElement_12: id: 12 page_id: 1 name: heading3 content: Templates content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &24 2019-11-13 23:02:20.427672000 Z zone: *2 time: *24 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &25 2019-11-13 23:06:22.727074000 Z zone: *2 time: *25 PageElement_13: id: 13 page_id: 1 name: paragraph3 content: "Pages are based on templates, which define the way the page looks, and the areas\r\n that can and can't be edited. This means that non-technical users can update\r\n their website content easily and with confidence." content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &26 2019-11-13 23:02:20.430730000 Z zone: *2 time: *26 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &27 2019-11-13 23:06:22.729469000 Z zone: *2 time: *27 PageElement_14: id: 14 page_id: 1 name: image4 content: pic04.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &28 2019-11-13 23:02:20.434252000 Z zone: *2 time: *28 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &29 2019-11-13 23:06:22.731844000 Z zone: *2 time: *29 PageElement_15: id: 15 page_id: 1 name: heading4 content: Shared content content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &30 2019-11-13 23:02:20.437399000 Z zone: *2 time: *30 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &31 2019-11-13 23:06:22.734176000 Z zone: *2 time: *31 PageElement_16: id: 16 page_id: 1 name: paragraph4 content: Shared content lets you define editable fragments of text and HTML that you can re-use all through your site - for instance, your contact details at the bottom of every page. 
content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &32 2019-11-13 23:02:20.440446000 Z zone: *2 time: *32 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &33 2019-11-13 23:06:22.736447000 Z zone: *2 time: *33 PageElement_17: id: 17 page_id: 1 name: image5 content: pic05.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &34 2019-11-13 23:02:20.443198000 Z zone: *2 time: *34 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &35 2019-11-13 23:06:22.740258000 Z zone: *2 time: *35 PageElement_18: id: 18 page_id: 1 name: image6 content: pic06.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &36 2019-11-13 23:02:20.445939000 Z zone: *2 time: *36 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &37 2019-11-13 23:06:22.744609000 Z zone: *2 time: *37 PageElement_19: id: 19 page_id: 1 name: image7 content: pic07.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &38 2019-11-13 23:02:20.448600000 Z zone: *2 time: *38 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &39 2019-11-13 23:06:22.748091000 Z zone: *2 time: *39 PageElement_20: id: 20 page_id: 1 name: image8 content: pic08.jpg content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &40 2019-11-13 23:02:20.451349000 Z zone: *2 time: *40 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &41 2019-11-13 23:06:22.750630000 Z zone: *2 time: *41 PageElement_21: id: 21 page_id: 2 name: heading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &42 2019-11-15 14:06:17.069510000 Z zone: *2 time: *42 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &43 2020-02-08 07:09:21.689697000 Z zone: *2 time: *43 PageElement_22: id: 22 page_id: 2 name: subheading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &44 2019-11-15 14:06:17.072564000 Z zone: *2 time: *44 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &45 2020-02-08 07:09:21.692111000 Z zone: *2 time: *45 PageElement_23: id: 23 page_id: 2 name: text_content content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &46 2019-11-15 14:06:17.074819000 Z zone: *2 time: *46 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &47 2020-02-08 07:09:21.694530000 Z zone: *2 time: *47 PageElement_24: id: 24 page_id: 3 name: heading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &48 2019-11-15 14:06:43.055121000 Z zone: *2 time: *48 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &49 2020-02-08 07:08:10.974086000 Z zone: *2 time: *49 PageElement_25: id: 25 page_id: 3 name: subheading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &50 2019-11-15 14:06:43.060701000 Z zone: *2 time: *50 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &51 2020-02-08 07:08:10.976275000 Z zone: *2 time: *51 PageElement_26: id: 26 page_id: 3 name: text_content content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &52 2019-11-15 14:06:43.064361000 Z zone: *2 time: *52 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &53 2020-02-08 07:08:10.978344000 Z zone: *2 time: *53 PageElement_27: id: 27 page_id: 4 name: heading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &54 2019-11-15 14:07:01.646462000 
Z zone: *2 time: *54 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &55 2020-02-08 07:08:01.640729000 Z zone: *2 time: *55 PageElement_28: id: 28 page_id: 4 name: subheading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &56 2019-11-15 14:07:01.650915000 Z zone: *2 time: *56 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &57 2020-02-08 07:08:01.643046000 Z zone: *2 time: *57 PageElement_29: id: 29 page_id: 4 name: text_content content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &58 2019-11-15 14:07:01.654622000 Z zone: *2 time: *58 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &59 2020-02-08 07:08:01.646215000 Z zone: *2 time: *59 PageElement_30: id: 30 page_id: 5 name: heading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &60 2019-11-15 14:07:44.633708000 Z zone: *2 time: *60 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &61 2020-02-08 07:09:11.901622000 Z zone: *2 time: *61 PageElement_31: id: 31 page_id: 5 name: subheading content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &62 2019-11-15 14:07:44.635893000 Z zone: *2 time: *62 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &63 2020-02-08 07:09:11.904274000 Z zone: *2 time: *63 PageElement_32: id: 32 page_id: 5 name: text_content content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &64 2019-11-15 14:07:44.638145000 Z zone: *2 time: *64 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &65 2020-02-08 07:09:11.929044000 Z zone: *2 time: *65
27.121212
146
0.681006
a79abf86043abc6a5b603e0bbaae22af31b469cb
2,890
yml
YAML
db/demo-site-data/comments.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/comments.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/comments.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- Comment_1: id: 1 discussion_id: 1 number: 1 author_type: authenticated user_id: 1 author_name: '' author_email: '' author_url: '' title: Properly nested comments... body: Ask for them by name - do not accept inferior substitutes! locked: false hidden: false spam: false posted_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2020-02-28 18:56:25.288712000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-02-28 18:56:25.290397000 Z zone: *2 time: *3 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2020-02-28 18:56:25.290397000 Z zone: *2 time: *4 Comment_2: id: 2 discussion_id: 1 number: 2 author_type: pseudonymous user_id: 1 author_name: ShinyCMS author_email: '' author_url: https://shinycms.org title: '' body: "Yes, this is indeed a comment thread. The nested comments feature was added to ShinyCMS (Ruby version) in February 2020.\r\n\r\n(And to the Perl version in August 2010)" locked: false hidden: false spam: false posted_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2020-02-28 19:02:46.787922000 Z zone: *2 time: *5 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2020-02-28 19:02:46.791436000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2020-02-28 19:02:46.791436000 Z zone: *2 time: *7 Comment_3: id: 3 discussion_id: 1 number: 3 parent_id: 1 author_type: anonymous author_name: '' author_email: '' author_url: '' title: Nested comments FTW! body: '' locked: false hidden: false spam: false posted_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2020-02-28 19:03:55.624143000 Z zone: *2 time: *8 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2020-02-28 19:03:55.629818000 Z zone: *2 time: *9 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2020-02-28 19:03:55.629818000 Z zone: *2 time: *10 Comment_4: id: 4 discussion_id: 1 number: 4 parent_id: 3 author_type: authenticated user_id: 1 author_name: '' author_email: '' author_url: '' title: I agree with this mysterious stranger! ;) body: Nested comments are the only acceptable form of comment system in the 21st century. Or the 20th, for that matter. locked: false hidden: false spam: false posted_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2020-02-28 19:08:52.158849000 Z zone: *2 time: *11 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2020-02-28 19:08:52.162589000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2020-02-28 19:08:52.162589000 Z zone: *2 time: *13
26.036036
84
0.679239
f1117f2f72c6acac061094cae617450b46858ab4
11,422
yml
YAML
db/demo-site-data/page_template_elements.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_template_elements.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_template_elements.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- PageTemplateElement_1: id: 1 template_id: 1 name: banner_text content: '' content_type: HTML created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2019-11-13 22:58:01.395973000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2019-11-13 22:59:26.614888000 Z zone: *2 time: *3 PageTemplateElement_2: id: 2 template_id: 1 name: banner_button_url content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2019-11-13 22:58:01.400020000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2019-11-13 22:59:26.617915000 Z zone: *2 time: *5 PageTemplateElement_3: id: 3 template_id: 1 name: banner_button_text content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2019-11-13 22:58:01.402551000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2019-11-13 22:59:26.620389000 Z zone: *2 time: *7 PageTemplateElement_4: id: 4 template_id: 1 name: banner_image content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2019-11-13 22:58:01.405159000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2019-11-14 23:45:33.123865000 Z zone: *2 time: *9 PageTemplateElement_5: id: 5 template_id: 1 name: image1 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2019-11-13 22:58:01.408188000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2019-11-14 23:45:33.126110000 Z zone: *2 time: *11 PageTemplateElement_6: id: 6 template_id: 1 name: heading1 content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2019-11-13 22:58:01.411159000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2019-11-13 22:59:26.628387000 Z zone: *2 time: *13 PageTemplateElement_7: id: 7 template_id: 1 name: paragraph1 content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &14 2019-11-13 22:58:01.414027000 Z zone: *2 time: *14 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &15 2019-11-13 22:59:26.630866000 Z zone: *2 time: *15 PageTemplateElement_8: id: 8 template_id: 1 name: image2 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &16 2019-11-13 22:58:01.416734000 Z zone: *2 time: *16 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &17 2019-11-14 23:45:33.128252000 Z zone: *2 time: *17 PageTemplateElement_9: id: 9 template_id: 1 name: heading2 content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &18 2019-11-13 22:58:01.419425000 Z zone: *2 time: *18 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &19 2019-11-13 22:59:26.635848000 Z zone: *2 time: *19 PageTemplateElement_10: id: 10 template_id: 1 name: paragraph2 content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &20 2019-11-13 22:58:01.422095000 Z zone: *2 time: *20 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &21 2019-11-13 22:59:26.638331000 Z zone: *2 time: *21 PageTemplateElement_11: id: 11 template_id: 1 name: image3 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &22 2019-11-13 22:58:01.424837000 Z zone: *2 time: *22 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &23 2019-11-14 
23:45:33.130791000 Z zone: *2 time: *23 PageTemplateElement_12: id: 12 template_id: 1 name: heading3 content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &24 2019-11-13 22:58:01.428273000 Z zone: *2 time: *24 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &25 2019-11-13 22:59:26.643490000 Z zone: *2 time: *25 PageTemplateElement_13: id: 13 template_id: 1 name: paragraph3 content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &26 2019-11-13 22:58:01.431089000 Z zone: *2 time: *26 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &27 2019-11-13 22:59:26.646034000 Z zone: *2 time: *27 PageTemplateElement_14: id: 14 template_id: 1 name: image4 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &28 2019-11-13 22:58:01.433720000 Z zone: *2 time: *28 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &29 2019-11-14 23:45:33.133213000 Z zone: *2 time: *29 PageTemplateElement_15: id: 15 template_id: 1 name: heading4 content: '' content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &30 2019-11-13 22:58:01.436391000 Z zone: *2 time: *30 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &31 2019-11-13 22:59:26.651900000 Z zone: *2 time: *31 PageTemplateElement_16: id: 16 template_id: 1 name: paragraph4 content: '' content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &32 2019-11-13 22:58:01.439115000 Z zone: *2 time: *32 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &33 2019-11-13 22:59:26.654584000 Z zone: *2 time: *33 PageTemplateElement_17: id: 17 template_id: 1 name: image5 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &34 2019-11-13 22:58:01.442078000 Z zone: *2 time: *34 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &35 2019-11-14 23:45:33.135489000 Z zone: *2 time: *35 PageTemplateElement_18: id: 18 template_id: 1 name: image6 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &36 2019-11-13 22:58:01.444828000 Z zone: *2 time: *36 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &37 2019-11-14 23:45:33.137740000 Z zone: *2 time: *37 PageTemplateElement_19: id: 19 template_id: 1 name: image7 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &38 2019-11-13 22:58:01.447576000 Z zone: *2 time: *38 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &39 2019-11-14 23:45:33.140575000 Z zone: *2 time: *39 PageTemplateElement_20: id: 20 template_id: 1 name: image8 content: '' content_type: Image created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &40 2019-11-13 22:58:01.450396000 Z zone: *2 time: *40 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &41 2019-11-14 23:45:33.143046000 Z zone: *2 time: *41 PageTemplateElement_21: id: 21 template_id: 2 name: heading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &42 2019-11-15 13:58:03.818030000 Z zone: *2 time: *42 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &43 2019-11-15 13:58:03.818030000 Z zone: *2 time: *43 PageTemplateElement_22: id: 22 template_id: 2 name: subheading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &44 2019-11-15 13:58:03.821761000 Z zone: *2 time: *44 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &45 2019-11-15 13:58:03.821761000 Z zone: *2 time: *45 
PageTemplateElement_23: id: 23 template_id: 2 name: text_content content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &46 2019-11-15 13:58:03.825348000 Z zone: *2 time: *46 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &47 2019-11-15 13:58:03.825348000 Z zone: *2 time: *47 PageTemplateElement_24: id: 24 template_id: 3 name: heading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &48 2019-11-15 13:58:53.968125000 Z zone: *2 time: *48 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &49 2019-11-15 13:58:53.968125000 Z zone: *2 time: *49 PageTemplateElement_25: id: 25 template_id: 3 name: subheading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &50 2019-11-15 13:58:53.979030000 Z zone: *2 time: *50 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &51 2019-11-15 13:58:53.979030000 Z zone: *2 time: *51 PageTemplateElement_26: id: 26 template_id: 3 name: text_content content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &52 2019-11-15 13:58:53.984544000 Z zone: *2 time: *52 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &53 2019-11-15 13:58:53.984544000 Z zone: *2 time: *53 PageTemplateElement_27: id: 27 template_id: 4 name: heading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &54 2019-11-15 13:59:20.098229000 Z zone: *2 time: *54 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &55 2019-11-15 13:59:20.098229000 Z zone: *2 time: *55 PageTemplateElement_28: id: 28 template_id: 4 name: subheading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &56 2019-11-15 13:59:20.104701000 Z zone: *2 time: *56 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &57 2019-11-15 13:59:20.104701000 Z zone: *2 time: *57 PageTemplateElement_29: id: 29 template_id: 4 name: text_content content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &58 2019-11-15 13:59:20.109233000 Z zone: *2 time: *58 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &59 2019-11-15 13:59:20.109233000 Z zone: *2 time: *59 PageTemplateElement_30: id: 30 template_id: 5 name: heading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &60 2019-11-15 14:00:17.735230000 Z zone: *2 time: *60 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &61 2019-11-15 14:00:17.735230000 Z zone: *2 time: *61 PageTemplateElement_31: id: 31 template_id: 5 name: subheading content_type: Short Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &62 2019-11-15 14:00:17.738664000 Z zone: *2 time: *62 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &63 2019-11-15 14:00:17.738664000 Z zone: *2 time: *63 PageTemplateElement_32: id: 32 template_id: 5 name: text_content content_type: Long Text created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &64 2019-11-15 14:00:17.741474000 Z zone: *2 time: *64 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &65 2019-11-15 14:00:17.741474000 Z zone: *2 time: *65
26.018223
54
0.687095
f16ae90fca5c9acc1b91f1a6510521eedd36802d
18,365
yml
YAML
db/demo-site-data/capabilities.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/capabilities.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/capabilities.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
--- Capability_1: id: 1 name: view_admin_area category_id: 1 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &1 2020-03-17 12:57:26.086138000 Z zone: &2 !ruby/object:ActiveSupport::TimeZone name: Etc/UTC time: *1 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &3 2020-03-17 12:57:26.086138000 Z zone: *2 time: *3 Capability_2: id: 2 name: view_admin_dashboard category_id: 1 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &4 2020-03-17 12:57:26.094426000 Z zone: *2 time: *4 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &5 2020-03-17 12:57:26.094426000 Z zone: *2 time: *5 Capability_3: id: 3 name: view_admin_toolbar category_id: 1 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &6 2020-03-17 12:57:26.108761000 Z zone: *2 time: *6 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &7 2020-03-17 12:57:26.108761000 Z zone: *2 time: *7 Capability_4: id: 4 name: list category_id: 2 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &8 2020-03-17 12:57:26.125925000 Z zone: *2 time: *8 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &9 2020-03-17 12:57:26.125925000 Z zone: *2 time: *9 Capability_5: id: 5 name: add category_id: 2 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &10 2020-03-17 12:57:26.141852000 Z zone: *2 time: *10 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &11 2020-03-17 12:57:26.141852000 Z zone: *2 time: *11 Capability_6: id: 6 name: edit category_id: 2 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &12 2020-03-17 12:57:26.151117000 Z zone: *2 time: *12 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &13 2020-03-17 12:57:26.151117000 Z zone: *2 time: *13 Capability_7: id: 7 name: destroy category_id: 2 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &14 2020-03-17 12:57:26.173356000 Z zone: *2 time: *14 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &15 2020-03-17 12:57:26.173356000 Z zone: *2 time: *15 Capability_8: id: 8 name: list category_id: 3 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &16 2020-03-17 12:57:26.189042000 Z zone: *2 time: *16 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &17 2020-03-17 12:57:26.189042000 Z zone: *2 time: *17 Capability_9: id: 9 name: add category_id: 3 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &18 2020-03-17 12:57:26.198683000 Z zone: *2 time: *18 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &19 2020-03-17 12:57:26.198683000 Z zone: *2 time: *19 Capability_10: id: 10 name: edit category_id: 3 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &20 2020-03-17 12:57:26.218671000 Z zone: *2 time: *20 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &21 2020-03-17 12:57:26.218671000 Z zone: *2 time: *21 Capability_11: id: 11 name: destroy category_id: 3 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &22 2020-03-17 12:57:26.226454000 Z zone: *2 time: *22 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &23 2020-03-17 12:57:26.226454000 Z zone: *2 time: *23 Capability_12: id: 12 name: change_author category_id: 3 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &24 2020-03-17 12:57:26.239065000 Z zone: *2 time: *24 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &25 2020-03-17 12:57:26.239065000 Z zone: *2 time: *25 Capability_13: id: 13 name: hide category_id: 4 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &26 2020-03-17 12:57:26.248681000 Z zone: *2 time: *26 updated_at: 
!ruby/object:ActiveSupport::TimeWithZone utc: &27 2020-03-17 12:57:26.248681000 Z zone: *2 time: *27 Capability_14: id: 14 name: unhide category_id: 4 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &28 2020-03-17 12:57:26.256230000 Z zone: *2 time: *28 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &29 2020-03-17 12:57:26.256230000 Z zone: *2 time: *29 Capability_15: id: 15 name: lock category_id: 4 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &30 2020-03-17 12:57:26.263468000 Z zone: *2 time: *30 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &31 2020-03-17 12:57:26.263468000 Z zone: *2 time: *31 Capability_16: id: 16 name: unlock category_id: 4 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &32 2020-03-17 12:57:26.272054000 Z zone: *2 time: *32 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &33 2020-03-17 12:57:26.272054000 Z zone: *2 time: *33 Capability_17: id: 17 name: hide category_id: 5 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &34 2020-03-17 12:57:26.279920000 Z zone: *2 time: *34 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &35 2020-03-17 12:57:26.279920000 Z zone: *2 time: *35 Capability_18: id: 18 name: unhide category_id: 5 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &36 2020-03-17 12:57:26.287084000 Z zone: *2 time: *36 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &37 2020-03-17 12:57:26.287084000 Z zone: *2 time: *37 Capability_19: id: 19 name: lock category_id: 5 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &38 2020-03-17 12:57:26.309566000 Z zone: *2 time: *38 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &39 2020-03-17 12:57:26.309566000 Z zone: *2 time: *39 Capability_20: id: 20 name: unlock category_id: 5 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &40 2020-03-17 12:57:26.326839000 Z zone: *2 time: *40 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &41 2020-03-17 12:57:26.326839000 Z zone: *2 time: *41 Capability_21: id: 21 name: destroy category_id: 5 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &42 2020-03-17 12:57:26.343007000 Z zone: *2 time: *42 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &43 2020-03-17 12:57:26.343007000 Z zone: *2 time: *43 Capability_22: id: 22 name: list category_id: 6 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &44 2020-03-17 12:57:26.362741000 Z zone: *2 time: *44 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &45 2020-03-17 12:57:26.362741000 Z zone: *2 time: *45 Capability_23: id: 23 name: add category_id: 6 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &46 2020-03-17 12:57:26.376294000 Z zone: *2 time: *46 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &47 2020-03-17 12:57:26.376294000 Z zone: *2 time: *47 Capability_24: id: 24 name: destroy category_id: 6 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &48 2020-03-17 12:57:26.387498000 Z zone: *2 time: *48 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &49 2020-03-17 12:57:26.387498000 Z zone: *2 time: *49 Capability_25: id: 25 name: list category_id: 7 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &50 2020-03-17 12:57:26.398480000 Z zone: *2 time: *50 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &51 2020-03-17 12:57:26.398480000 Z zone: *2 time: *51 Capability_26: id: 26 name: edit category_id: 7 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &52 2020-03-17 12:57:26.410168000 Z zone: *2 
time: *52 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &53 2020-03-17 12:57:26.410168000 Z zone: *2 time: *53 Capability_27: id: 27 name: list category_id: 8 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &54 2020-03-17 12:57:26.421661000 Z zone: *2 time: *54 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &55 2020-03-17 12:57:26.421661000 Z zone: *2 time: *55 Capability_28: id: 28 name: add category_id: 8 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &56 2020-03-17 12:57:26.434042000 Z zone: *2 time: *56 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &57 2020-03-17 12:57:26.434042000 Z zone: *2 time: *57 Capability_29: id: 29 name: edit category_id: 8 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &58 2020-03-17 12:57:26.447543000 Z zone: *2 time: *58 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &59 2020-03-17 12:57:26.447543000 Z zone: *2 time: *59 Capability_30: id: 30 name: destroy category_id: 8 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &60 2020-03-17 12:57:26.467018000 Z zone: *2 time: *60 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &61 2020-03-17 12:57:26.467018000 Z zone: *2 time: *61 Capability_31: id: 31 name: list category_id: 9 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &62 2020-03-17 12:57:26.499379000 Z zone: *2 time: *62 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &63 2020-03-17 12:57:26.499379000 Z zone: *2 time: *63 Capability_32: id: 32 name: add category_id: 9 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &64 2020-03-17 12:57:26.507447000 Z zone: *2 time: *64 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &65 2020-03-17 12:57:26.507447000 Z zone: *2 time: *65 Capability_33: id: 33 name: edit category_id: 9 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &66 2020-03-17 12:57:26.515684000 Z zone: *2 time: *66 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &67 2020-03-17 12:57:26.515684000 Z zone: *2 time: *67 Capability_34: id: 34 name: destroy category_id: 9 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &68 2020-03-17 12:57:26.523054000 Z zone: *2 time: *68 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &69 2020-03-17 12:57:26.523054000 Z zone: *2 time: *69 Capability_35: id: 35 name: change_author category_id: 9 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &70 2020-03-17 12:57:26.543909000 Z zone: *2 time: *70 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &71 2020-03-17 12:57:26.543909000 Z zone: *2 time: *71 Capability_36: id: 36 name: list category_id: 10 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &72 2020-03-17 12:57:26.560097000 Z zone: *2 time: *72 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &73 2020-03-17 12:57:26.560097000 Z zone: *2 time: *73 Capability_37: id: 37 name: add category_id: 10 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &74 2020-03-17 12:57:26.573749000 Z zone: *2 time: *74 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &75 2020-03-17 12:57:26.573749000 Z zone: *2 time: *75 Capability_38: id: 38 name: edit category_id: 10 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &76 2020-03-17 12:57:26.583240000 Z zone: *2 time: *76 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &77 2020-03-17 12:57:26.583240000 Z zone: *2 time: *77 Capability_39: id: 39 name: destroy category_id: 10 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &78 2020-03-17 
12:57:26.591689000 Z zone: *2 time: *78 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &79 2020-03-17 12:57:26.591689000 Z zone: *2 time: *79 Capability_40: id: 40 name: list category_id: 11 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &80 2020-03-17 12:57:26.599324000 Z zone: *2 time: *80 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &81 2020-03-17 12:57:26.599324000 Z zone: *2 time: *81 Capability_41: id: 41 name: add category_id: 11 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &82 2020-03-17 12:57:26.607418000 Z zone: *2 time: *82 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &83 2020-03-17 12:57:26.607418000 Z zone: *2 time: *83 Capability_42: id: 42 name: edit category_id: 11 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &84 2020-03-17 12:57:26.615964000 Z zone: *2 time: *84 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &85 2020-03-17 12:57:26.615964000 Z zone: *2 time: *85 Capability_43: id: 43 name: destroy category_id: 11 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &86 2020-03-17 12:57:26.624752000 Z zone: *2 time: *86 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &87 2020-03-17 12:57:26.624752000 Z zone: *2 time: *87 Capability_44: id: 44 name: list category_id: 12 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &88 2020-03-17 12:57:26.632884000 Z zone: *2 time: *88 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &89 2020-03-17 12:57:26.632884000 Z zone: *2 time: *89 Capability_45: id: 45 name: add category_id: 12 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &90 2020-03-17 12:57:26.641334000 Z zone: *2 time: *90 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &91 2020-03-17 12:57:26.641334000 Z zone: *2 time: *91 Capability_46: id: 46 name: edit category_id: 12 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &92 2020-03-17 12:57:26.650670000 Z zone: *2 time: *92 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &93 2020-03-17 12:57:26.650670000 Z zone: *2 time: *93 Capability_47: id: 47 name: destroy category_id: 12 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &94 2020-03-17 12:57:26.659847000 Z zone: *2 time: *94 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &95 2020-03-17 12:57:26.659847000 Z zone: *2 time: *95 Capability_48: id: 48 name: list category_id: 13 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &96 2020-03-17 12:57:26.667945000 Z zone: *2 time: *96 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &97 2020-03-17 12:57:26.667945000 Z zone: *2 time: *97 Capability_49: id: 49 name: edit category_id: 13 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &98 2020-03-17 12:57:26.676828000 Z zone: *2 time: *98 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &99 2020-03-17 12:57:26.676828000 Z zone: *2 time: *99 Capability_50: id: 50 name: list category_id: 14 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &100 2020-03-17 12:57:26.685841000 Z zone: *2 time: *100 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &101 2020-03-17 12:57:26.685841000 Z zone: *2 time: *101 Capability_51: id: 51 name: list category_id: 15 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &102 2020-03-17 12:57:26.699293000 Z zone: *2 time: *102 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &103 2020-03-17 12:57:26.699293000 Z zone: *2 time: *103 Capability_52: id: 52 name: add category_id: 15 created_at: 
!ruby/object:ActiveSupport::TimeWithZone utc: &104 2020-03-17 12:57:26.709918000 Z zone: *2 time: *104 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &105 2020-03-17 12:57:26.709918000 Z zone: *2 time: *105 Capability_53: id: 53 name: edit category_id: 15 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &106 2020-03-17 12:57:26.727695000 Z zone: *2 time: *106 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &107 2020-03-17 12:57:26.727695000 Z zone: *2 time: *107 Capability_54: id: 54 name: destroy category_id: 15 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &108 2020-03-17 12:57:26.735437000 Z zone: *2 time: *108 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &109 2020-03-17 12:57:26.735437000 Z zone: *2 time: *109 Capability_55: id: 55 name: view_admin_notes category_id: 15 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &110 2020-03-17 12:57:26.743566000 Z zone: *2 time: *110 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &111 2020-03-17 12:57:26.743566000 Z zone: *2 time: *111 Capability_56: id: 56 name: list category_id: 16 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &112 2020-03-17 12:57:26.751472000 Z zone: *2 time: *112 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &113 2020-03-17 12:57:26.751472000 Z zone: *2 time: *113 Capability_57: id: 57 name: add category_id: 16 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &114 2020-03-17 12:57:26.759206000 Z zone: *2 time: *114 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &115 2020-03-17 12:57:26.759206000 Z zone: *2 time: *115 Capability_58: id: 58 name: edit category_id: 16 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &116 2020-03-17 12:57:26.768034000 Z zone: *2 time: *116 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &117 2020-03-17 12:57:26.768034000 Z zone: *2 time: *117 Capability_59: id: 59 name: destroy category_id: 16 created_at: !ruby/object:ActiveSupport::TimeWithZone utc: &118 2020-03-17 12:57:26.782245000 Z zone: *2 time: *118 updated_at: !ruby/object:ActiveSupport::TimeWithZone utc: &119 2020-03-17 12:57:26.782245000 Z zone: *2 time: *119
25.829817
54
0.674435
19a0045e852d20789eaa09b20f4c6a13cd5b4161
839
yml
YAML
db/demo-site-data/users.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/users.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/users.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
User_1:
  id: 1
  username: admin
  email: [email protected]
  canonical_email: [email protected]
  encrypted_password: "$2a$11$4NhY/QjSyayOMRgVyJgO4uWiL5MhZDjezOhihQXDQK5SkGM4zS1X2"
  sign_in_count: 0
  confirmation_token: zr1F-PYVWcY-zry8tqFy
  confirmed_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2019-12-14 03:29:42.578292000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  confirmation_sent_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2019-12-14 03:29:42.460851000 Z
    zone: *2
    time: *3
  failed_attempts: 0
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &4 2019-12-14 03:29:42.460691000 Z
    zone: *2
    time: *4
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &5 2019-12-14 03:37:16.988134000 Z
    zone: *2
    time: *5
29.964286
84
0.711561
6b4d793902524d46a29e0fc5fde1082a0f5aedab
4,928
yml
YAML
config/i18n-tasks.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/i18n-tasks.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/i18n-tasks.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
# i18n-tasks finds and manages missing and unused translations: https://github.com/glebm/i18n-tasks

# The "main" locale.
base_locale: en
## All available locales are inferred from the data by default. Alternatively, specify them explicitly:
# locales: [es, fr]
## Reporting locale, default: en. Available: en, ru.
# internal_locale: en

# Read and write translations.
data:
  ## Translations are read from the file system. Supported format: YAML, JSON.
  ## Provide a custom adapter:
  # adapter: I18n::Tasks::Data::FileSystem

  # Locale files or `File.find` patterns where translations are read from:
  read:
    ## Default:
    - config/locales/%{locale}.yml
    ## More files (e.g. include Devise translations):
    - config/locales/**/*.%{locale}.yml

  # Locale files to write new keys to, based on a list of key pattern => file rules. Matched from top to bottom:
  # `i18n-tasks normalize -p` will force move the keys according to these rules
  write:
    ## For example, write devise and simple form keys to their respective files:
    # - ['{devise, simple_form}.*', 'config/locales/\1.%{locale}.yml']
    ## Catch-all default:
    # - config/locales/%{locale}.yml

  # External locale data (e.g. gems).
  # This data is not considered unused and is never written to.
  external:
    ## Example (replace %#= with %=):
    # - "<%#= %x[bundle show vagrant].chomp %>/templates/locales/%{locale}.yml"

  ## Specify the router (see Readme for details). Valid values: conservative_router, pattern_router, or a custom class.
  # router: conservative_router

  yaml:
    write:
      # do not wrap lines at 80 characters
      line_width: -1

  ## Pretty-print JSON:
  # json:
  #   write:
  #     indent: '  '
  #     space: ' '
  #     object_nl: "\n"
  #     array_nl: "\n"

# Find translate calls
search:
  ## Paths or `File.find` patterns to search in:
  paths:
    - app/
    - spec/

  ## Root directories for relative keys resolution.
  relative_roots:
    - app/controllers
    - app/helpers
    - app/mailers
    - app/models
    # - app/presenters
    - app/views
    - app/views/shinycms

  ## Files or `File.fnmatch` patterns to exclude from search. Some files are always excluded regardless of this setting:
  ## %w(*.jpg *.png *.gif *.svg *.ico *.eot *.otf *.ttf *.woff *.woff2 *.pdf *.css *.sass *.scss *.less *.yml *.json)
  exclude:
    - app/assets/images
    - app/assets/fonts
    - app/assets/videos

  ## Alternatively, the only files or `File.fnmatch patterns` to search in `paths`:
  ## If specified, this settings takes priority over `exclude`, but `exclude` still applies.
  # only: ["*.rb", "*.html.slim"]

  ## If `strict` is `false`, guess usages such as t("categories.#{category}.title"). The default is `true`.
  # strict: false

  ## Multiple scanners can be used. Their results are merged.
  ## The options specified above are passed down to each scanner. Per-scanner options can be specified as well.
  ## See this example of a custom scanner: https://github.com/glebm/i18n-tasks/wiki/A-custom-scanner-example

## Translation Services
# translation:
#   # Google Translate
#   # Get an API key and set billing info at https://code.google.com/apis/console to use Google Translate
#   google_translate_api_key: "AbC-dEf5"
#   # DeepL Pro Translate
#   # Get an API key and subscription at https://www.deepl.com/pro to use DeepL Pro
#   deepl_api_key: "48E92789-57A3-466A-9959-1A1A1A1A1A1A"

## Do not consider these keys missing:
ignore_missing:
  # jQuery
  - date.month_names
  - error
  # - errors.messages.not_saved
  # - 'errors.messages.{accepted,blank,invalid,too_short,too_long}'
  # - '{devise,simple_form}.*'

## Consider these keys used:
ignore_unused:
  # Devise
  - devise.*
  - errors.messages.*
  - destroy
  # - 'activerecord.attributes.*'
  # - '{devise,kaminari,will_paginate}.*'
  # - 'simple_form.{yes,no}'
  # - 'simple_form.{placeholders,hints,labels}.*'
  # - 'simple_form.{error_notification,required}.:'

## Exclude these keys from the `i18n-tasks eq-base' report:
# ignore_eq_base:
#   all:
#     - common.ok
#   fr,es:
#     - common.brand

## Exclude these keys from the `i18n-tasks check-consistent-interpolations` report:
# ignore_inconsistent_interpolations:
# - 'activerecord.attributes.*'

## Ignore these keys completely:
# ignore:
#  - kaminari.*

## Sometimes, it isn't possible for i18n-tasks to match the key correctly,
## e.g. in case of a relative key defined in a helper method.
## In these cases you can use the built-in PatternMapper to map patterns to keys, e.g.:
#
# <%# I18n::Tasks.add_scanner 'I18n::Tasks::Scanners::PatternMapper',
#       only: %w(*.html.haml *.html.slim),
#       patterns: [['= title\b', '.page_title']] %>
#
# The PatternMapper can also match key literals via a special %{key} interpolation, e.g.:
#
# <%# I18n::Tasks.add_scanner 'I18n::Tasks::Scanners::PatternMapper',
#       patterns: [['\bSpree\.t[( ]\s*%{key}', 'spree.%{key}']] %>
33.986207
120
0.676339
6b7fddcf8bc1eb4898f8da327296e072aa450d0b
4,735
yml
YAML
db/demo-site-data/feature_flags.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/feature_flags.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/feature_flags.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
FeatureFlag_1:
  id: 1
  name: blogs
  description: Turn this on if you want a blog on your site
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2020-03-17 12:57:26.804332000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2020-03-17 12:57:26.804332000 Z
    zone: *2
    time: *3
FeatureFlag_2:
  id: 2
  name: comments
  description: Enable comment and discussion features site-wide
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &4 2020-03-17 12:57:26.817558000 Z
    zone: *2
    time: *4
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &5 2020-03-17 12:57:26.817558000 Z
    zone: *2
    time: *5
FeatureFlag_3:
  id: 3
  name: comment_notifications
  description: Send notification emails to people who get comments
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &6 2020-03-17 12:57:26.827522000 Z
    zone: *2
    time: *6
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &7 2020-03-17 12:57:26.827522000 Z
    zone: *2
    time: *7
FeatureFlag_4:
  id: 4
  name: news
  description: Add a news section to your site
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &8 2020-03-17 12:57:26.837352000 Z
    zone: *2
    time: *8
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &9 2020-03-17 12:57:26.837352000 Z
    zone: *2
    time: *9
FeatureFlag_5:
  id: 5
  name: akismet_on_comments
  description: Detect spam comments with Akismet
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &10 2020-03-17 12:57:26.844903000 Z
    zone: *2
    time: *10
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &11 2020-03-17 12:57:26.844903000 Z
    zone: *2
    time: *11
FeatureFlag_6:
  id: 6
  name: recaptcha_on_comment_form
  description: Protect comment forms with reCAPTCHA
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &12 2020-03-17 12:57:26.866563000 Z
    zone: *2
    time: *12
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &13 2020-03-17 12:57:26.866563000 Z
    zone: *2
    time: *13
FeatureFlag_7:
  id: 7
  name: recaptcha_on_registration_form
  description: Protect user registration form with reCAPTCHA
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &14 2020-03-17 12:57:26.877471000 Z
    zone: *2
    time: *14
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &15 2020-03-17 12:57:26.877471000 Z
    zone: *2
    time: *15
FeatureFlag_8:
  id: 8
  name: tags
  description: Turn on site-wide tag features
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &16 2020-03-17 12:57:26.885963000 Z
    zone: *2
    time: *16
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &17 2020-03-17 12:57:26.885963000 Z
    zone: *2
    time: *17
FeatureFlag_9:
  id: 9
  name: user_login
  description: Allow users to log in
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &18 2020-03-17 12:57:26.893734000 Z
    zone: *2
    time: *18
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &19 2020-03-17 13:09:17.178988000 Z
    zone: *2
    time: *19
FeatureFlag_10:
  id: 10
  name: user_profiles
  description: Allow viewing of user profiles
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &20 2020-03-17 12:57:26.903031000 Z
    zone: *2
    time: *20
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &21 2020-03-17 13:10:02.941913000 Z
    zone: *2
    time: *21
FeatureFlag_11:
  id: 11
  name: user_registration
  description: Allow new users to create an account
  enabled: true
  enabled_for_logged_in: true
  enabled_for_admins: true
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &22 2020-03-17 12:57:26.910223000 Z
    zone: *2
    time: *22
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &23 2020-03-17 13:10:20.904198000 Z
    zone: *2
    time: *23
28.184524
66
0.713411
86886f5a625ff23a844a0c8a6f7e165910fb48f7
2,019
yml
YAML
db/demo-site-data/page_templates.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_templates.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_templates.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
PageTemplate_1:
  id: 1
  name: Index
  description: Home-page layout from the Halcyonic theme
  filename: index
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2019-11-13 22:58:01.310912000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2019-11-15 13:57:31.807177000 Z
    zone: *2
    time: *3
PageTemplate_2:
  id: 2
  name: No sidebar
  description: Single-column layout from the Halcyonic theme
  filename: no-sidebar
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &4 2019-11-15 13:58:03.812220000 Z
    zone: *2
    time: *4
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &5 2019-11-15 13:58:03.812220000 Z
    zone: *2
    time: *5
PageTemplate_3:
  id: 3
  name: Right sidebar
  description: Two column layout (with sidebar on right) from the Halcyonic theme
  filename: right-sidebar
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &6 2019-11-15 13:58:53.945967000 Z
    zone: *2
    time: *6
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &7 2019-11-15 13:58:53.945967000 Z
    zone: *2
    time: *7
PageTemplate_4:
  id: 4
  name: Left sidebar
  description: Two column layout (with sidebar on left) from the Halcyonic theme
  filename: left-sidebar
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &8 2019-11-15 13:59:20.085489000 Z
    zone: *2
    time: *8
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &9 2019-11-15 13:59:20.085489000 Z
    zone: *2
    time: *9
PageTemplate_5:
  id: 5
  name: Double sidebar
  description: Three column layout (sidebars on both sides) from the Halcyonic theme
  filename: double-sidebar
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &10 2019-11-15 14:00:17.729663000 Z
    zone: *2
    time: *10
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &11 2019-11-15 14:00:17.729663000 Z
    zone: *2
    time: *11
29.691176
84
0.699851
84349379ffd7ca712589fe0c9c5a09c6e0fab6ea
953
yml
YAML
config/storage.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/storage.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
config/storage.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
test:
  service: Disk
  root: <%= Rails.root.join("tmp/storage") %>

local:
  service: Disk
  root: <%= Rails.root.join("storage") %>

amazon:
  service: S3
  bucket: <%= ENV['AWS_BUCKET'] %>
  region: <%= ENV['AWS_REGION'] %>
  access_key_id: <%= ENV['AWS_ACCESS_KEY_ID'] %>
  secret_access_key: <%= ENV['AWS_SECRET_ACCESS_KEY'] %>

# Remember not to checkin your GCS keyfile to a repository
# google:
#   service: GCS
#   project: your_project
#   credentials: <%= Rails.root.join("path/to/gcs.keyfile") %>
#   bucket: your_own_bucket

# Use rails credentials:edit to set the Azure Storage secret (as azure_storage:storage_access_key)
# microsoft:
#   service: AzureStorage
#   storage_account_name: your_account_name
#   storage_access_key: <%= Rails.application.credentials.dig(:azure_storage, :storage_access_key) %>
#   container: your_container_name

# mirror:
#   service: Mirror
#   primary: local
#   mirrors: [ amazon, google, microsoft ]
28.029412
101
0.697796
b50dc12a933ccae2ec2be8ad097cf6960a1b3d5e
408
yml
YAML
db/demo-site-data/discussions.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/discussions.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/discussions.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
Discussion_1:
  id: 1
  resource_type: BlogPost
  resource_id: 1
  locked: false
  hidden: false
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2020-02-19 20:22:30.021699000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2020-02-19 20:22:30.021699000 Z
    zone: *2
    time: *3
24
54
0.678922
9755d10709d8cdcdf30bc1cc19898604091fb6c4
1,015
yml
YAML
db/demo-site-data/blog_posts.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/blog_posts.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/blog_posts.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
BlogPost_1:
  id: 1
  title: Demo content
  slug: demo-content
  body: "<p>I&#39;m never sure what to do about demo content for ShinyCMS. The Perl
    version ended up with a weird mixture of content about the CMS, extracts from a
    book with suitably friendly licensing, and word salad from the Futurama Lorem
    Ipsum generator.</p>\r\n\r\n<p>Now here we are with the Ruby version, and apparently
    I haven&#39;t learned my lesson - so I&#39;m starting with content about the CMS
    again. Or in this case, meta-content.</p>\r\n"
  hidden: false
  blog_id: 1
  user_id: 1
  posted_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2020-02-08 07:24:27.000000000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2020-02-08 07:24:27.246059000 Z
    zone: *2
    time: *3
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &4 2020-02-19 18:43:39.829059000 Z
    zone: *2
    time: *4
36.25
88
0.692611
73688c1c86e88af2cd2663564a8b7e462b2edba7
461
yml
YAML
db/demo-site-data/page_sections.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_sections.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
db/demo-site-data/page_sections.yml
007lva/ShinyCMS-ruby
faf8d068dcbd42996cf7166e2ad64a5329f543ba
[ "Ruby" ]
null
null
null
---
PageSection_1:
  id: 1
  name: Two column
  description: ''
  title: Two Column
  slug: two
  sort_order: 3
  hidden_from_menu: false
  hidden: false
  created_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &1 2020-02-08 07:07:31.570282000 Z
    zone: &2 !ruby/object:ActiveSupport::TimeZone
      name: Etc/UTC
    time: *1
  updated_at: !ruby/object:ActiveSupport::TimeWithZone
    utc: &3 2020-02-08 07:07:31.570282000 Z
    zone: *2
    time: *3
23.05
54
0.67462
8a7c469d6bebfe32a3607ef408d722d2ac4c238f
259
yml
YAML
.ameba.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
.ameba.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
.ameba.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
# This configuration file was generated by `ameba --gen-config`
# on 2018-06-24 15:35:58 +02:00 using Ameba version 0.7.0.
# The point is for the user to remove these configuration records
# one by one as the reported problems are removed from the code base.
51.8
69
0.756757
a9f33a8389e4cbebc1fcb52df42576e318d1730b
545
yml
YAML
shard.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
shard.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
shard.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
name: clear
version: 0.2.0

authors:
  - Yacine Petitprez <[email protected]>

description: Clear ORM is an advanced ORM focusing on PostgreSQL

# scripts:
#   postinstall: "./bin/install-cli"

dependencies:
  admiral:
    github: jwaldrip/admiral.cr
  generate:
    github: anykeyh/generate.cr
  pg:
    github: will/crystal-pg
  inflector:
    github: phoffer/inflector.cr
    version: "~> 0.1.8"

development_dependencies:
  coverage:
    github: anykeyh/crystal-coverage
  ameba:
    github: veelenga/ameba
    version: 0.7.0

license: MIT
17.580645
64
0.700917
0fea1a8b6813a5b6738e12b785ec8fec44926de4
996
yml
YAML
.travis.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
.travis.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
.travis.yml
007lva/clear
66eeb784fbec4ecfe79b4fd43365a0d484af5722
[ "MIT" ]
null
null
null
language: crystal
script:
  - crystal spec
  - crystal docs
services:
  - postgresql
before_script:
  - psql -V
addons:
  postgresql: '9.6'
deploy:
  provider: pages
  skip_cleanup: true
  github_token: "$GITHUB_TOKEN"
  project_name: anykeyh/clear
  on:
    branch: master
  local_dir: docs
env:
  global:
    secure: FuELrhXQOsmvL1EDSA1z14vBg25MAhIVR/sPv9Zhd1R3wyMku6oG2DZ8/0tk1B818rH52Z54/vDV54hB1TBKfOYHSTu1Yq4Bp9HQIzXuLSKp0RquT9feNoNXBXtSI73kR7Ic4JOV3+lPj2wGpqp0rmvFR+gIo4A0yJuViWPQcsok7SDJULfsqdHGV9ffglm40bja6H9qJvlGuXZiSEpJdrjBpWprN3f6vtWaG/38FjgH8F1Pot5jCfLBuIOQuB59qxdBhgr6KYdrNLZGVXFhsBuUUvvBIliBGn0SKYeAwj1y0LudxqOXNXgNOJhNv7b2/ZdIpVpabMs3Yntn8JHuub1Uo6KdzTmpvUeJuIjrmXReXxo4/vkHYjs4gYUDUQ95p89wWP96M0TBincrSiPXiqiPC4I82EoyE3CRQ92Nh+udywuYRBWZ2+WIsT5RNPmW1WwhFdhqiOlYn2Sk8AwI95JcMQ5UsiXp6U2YL53ytgnmMCToYHbS+4Qkwu32iaix2MVd5/ufVmwCxT1QDdFsX8SmUpCOritjPhYI11qGeSlNynNR69nrLHCbbpLdHwrWWxu3yNOi2Gh6Khps2HOuVtw+H2RTwLi2Mi3BBEiw0Rc+hMtN1e4kE0e1ytMRo7u+6AUKWkZZAjXlo9bXE9A/DGJw9lYYIyLykiWxilFMbQ0=
45.272727
696
0.89257
a6b1a670d5c3900088e1f7f5c1a2ceaf4367f7fd
104
yml
YAML
shard.yml
007lva/halite
4e893bd3575182ed534bcd0076a942ccd7d068c1
[ "MIT" ]
null
null
null
shard.yml
007lva/halite
4e893bd3575182ed534bcd0076a942ccd7d068c1
[ "MIT" ]
null
null
null
shard.yml
007lva/halite
4e893bd3575182ed534bcd0076a942ccd7d068c1
[ "MIT" ]
null
null
null
name: halite
version: 0.3.2

authors:
  - icyleaf <[email protected]>

crystal: 0.25.0

license: MIT
10.4
34
0.692308
ad089a884d5c807ce029594f7d27c2e640263931
1,416
yml
YAML
.circleci/config.yml
007lva/halite
4e893bd3575182ed534bcd0076a942ccd7d068c1
[ "MIT" ]
null
null
null
.circleci/config.yml
007lva/halite
4e893bd3575182ed534bcd0076a942ccd7d068c1
[ "MIT" ]
null
null
null
.circleci/config.yml
007lva/halite
4e893bd3575182ed534bcd0076a942ccd7d068c1
[ "MIT" ]
null
null
null
version: 2
jobs:
  build:
    docker:
      - image: crystallang/crystal
    environment:
      DOCS_PATH: "docs"
      GIT_USER: "icyleaf"
      GIT_EMAIL: "[email protected]"
      GIT_REPO: "[email protected]:icyleaf/halite.git"
      GH_REF: "github.com/icyleaf/gitlab.cr"
    branches:
      ignore:
        - gh-pages
    working_directory: ~/halite
    steps:
      - checkout
      - run:
          name: "Crystal Version"
          command: crystal version
      - run:
          name: "Specs"
          command: crystal spec
      - run:
          name: "Generate API documents"
          command: |
            COMMIT_HASH=$(git rev-parse --short HEAD)
            COMMIT_DATE=$(git log -1 --format=%ci)
            COMMIT_STATUS="[${COMMIT_HASH}](https://${GH_REF}/commit/${COMMIT_HASH})"
            sed -i -e "s/latest commit/$(echo ${COMMIT_STATUS} | sed -e "s/\//\\\\\//g") (${COMMIT_DATE})/" README.md
            crystal doc
      - deploy:
          name: "Upload to gh-page"
          command: |
            git config --global user.name "$GIT_USER"
            git config --global user.email "$GIT_EMAIL"
            cd "${DOCS_PATH}"
            git init
            git remote add origin $GIT_REPO
            git fetch origin
            git reset origin/gh-pages
            git add -A .
            git commit --allow-empty -m "Updating documents"
            git push origin HEAD:gh-pages
31.466667
117
0.537429
75ef5c06f94ae2065edfc01cb8ad84edd76b2e4f
1,841
yml
YAML
.travis.yml
007lva/poltergeist
d2569f8f7b9f14dbfb0a9c7371a61ae0e3b8c11a
[ "MIT" ]
null
null
null
.travis.yml
007lva/poltergeist
d2569f8f7b9f14dbfb0a9c7371a61ae0e3b8c11a
[ "MIT" ]
null
null
null
.travis.yml
007lva/poltergeist
d2569f8f7b9f14dbfb0a9c7371a61ae0e3b8c11a
[ "MIT" ]
null
null
null
dist: trusty
sudo: false
script: bundle exec rake
before_script: ${PHANTOMJS:-phantomjs} --version
before_install:
  - mkdir -p travis-phantomjs2 travis-phantomjs21
  - if [ ! -f $PWD/travis-phantomjs2/phantomjs ]; then wget https://github.com/Pyppe/phantomjs2.0-ubuntu14.04x64/raw/master/bin/phantomjs -O $PWD/travis-phantomjs2/phantomjs; fi
  - if [ ! -f $PWD/travis-phantomjs21/phantomjs ]; then wget https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-2.1.1-linux-x86_64.tar.bz2 -O $PWD/phantomjs-2.1-linux-x86_64.tar.bz2; tar -xvf $PWD/phantomjs-2.1-linux-x86_64.tar.bz2 -C $PWD/travis-phantomjs21 --strip-components 2 phantomjs-2.1.1-linux-x86_64/bin/phantomjs; fi
  - chmod +x $PWD/travis-phantomjs2/phantomjs $PWD/travis-phantomjs21/phantomjs
  - gem update bundler
cache:
  directories:
    - $PWD/travis-phantomjs2
    - $PWD/travis-phantomjs21
rvm:
  - 2.3.3
  - 2.2.2
  - 2.1.6
  - rbx-3
gemfile:
  - Gemfile
env:
  global:
    - NOKOGIRI_USE_SYSTEM_LIBRARIES=true
    - POLTERGEIST_TEST_HOST=poltergeist.test.com
addons:
  hosts: # Used to test custom host driver feature
    - poltergeist.test.com
matrix:
  include:
    - rvm: 1.9.3
      gemfile: gemfiles/Gemfile.1_9_3
    - rvm: jruby-19mode
      gemfile: gemfiles/Gemfile.1_9_3
    - rvm: 2.0.0
      gemfile: gemfiles/Gemfile.2_0_0
    - rvm: 2.2.2
      gemfile: Gemfile
      env: PHANTOMJS=$PWD/travis-phantomjs2/phantomjs
    - rvm: 2.4.1
      gemfile: Gemfile
      env: PHANTOMJS=$PWD/travis-phantomjs21/phantomjs
    - rvm: 2.3.3
      gemfile: gemfiles/Gemfile.capybara_master
      env: PHANTOMJS=$PWD/travis-phantomjs21/phantomjs
    - rvm: jruby-9.1.7.0
      gemfile: Gemfile
      env: PHANTOMJS=$PWD/travis-phantomjs21/phantomjs
  allow_failures:
    - gemfile: gemfiles/Gemfile.capybara_master
    - rvm: jruby-9.1.7.0
    - rvm: rbx-3
32.298246
334
0.69799
be83367c664fcf42a93584188208f5f607c9211f
72
yml
YAML
.travis.yml
007lva/polyfill
da84be3c9d75708ecd7f41624a98919208805718
[ "MIT" ]
null
null
null
.travis.yml
007lva/polyfill
da84be3c9d75708ecd7f41624a98919208805718
[ "MIT" ]
null
null
null
.travis.yml
007lva/polyfill
da84be3c9d75708ecd7f41624a98919208805718
[ "MIT" ]
null
null
null
language: ruby
rvm:
  - 2.5.1
  - 2.4.4
  - 2.3.7
  - 2.2.10
  - 2.1.10
9
14
0.444444
9d2dddf38b656a96730eafe902e6089a27e5dc53
759
yml
YAML
.rubocop.yml
007lva/polyfill
da84be3c9d75708ecd7f41624a98919208805718
[ "MIT" ]
null
null
null
.rubocop.yml
007lva/polyfill
da84be3c9d75708ecd7f41624a98919208805718
[ "MIT" ]
null
null
null
.rubocop.yml
007lva/polyfill
da84be3c9d75708ecd7f41624a98919208805718
[ "MIT" ]
null
null
null
AllCops:
  TargetRubyVersion: 2.1
Metrics/AbcSize:
  Enabled: false
Metrics/BlockLength:
  Enabled: false
Metrics/CyclomaticComplexity:
  Enabled: false
Metrics/LineLength:
  Enabled: false
Metrics/MethodLength:
  Enabled: false
Metrics/PerceivedComplexity:
  Enabled: false
Style/AsciiComments:
  Enabled: false
Style/BarePercentLiterals:
  EnforcedStyle: percent_q
Style/ClassAndModuleCamelCase:
  Enabled: false
Style/Documentation:
  Enabled: false
Style/DoubleNegation:
  Enabled: false
Style/FrozenStringLiteralComment:
  Enabled: false
Style/MultilineMethodCallIndentation:
  EnforcedStyle: indented
Style/NumericPredicate:
  EnforcedStyle: comparison
Style/PercentLiteralDelimiters:
  PreferredDelimiters:
    '%W': '[]'
    '%i': '[]'
    '%w': '[]'
21.083333
37
0.777339
a4b140e3fa78598fcccc31ee0efa9d58e18bbdfc
344
yml
YAML
.travis.yml
007lva/savon
85104ae52ca3dfff4adebe32693fa4aa9833eed9
[ "MIT" ]
null
null
null
.travis.yml
007lva/savon
85104ae52ca3dfff4adebe32693fa4aa9833eed9
[ "MIT" ]
null
null
null
.travis.yml
007lva/savon
85104ae52ca3dfff4adebe32693fa4aa9833eed9
[ "MIT" ]
2
2018-09-12T07:45:08.000Z
2019-12-15T11:35:43.000Z
# https://github.com/travis-ci/travis-ci/wiki/.travis.yml-options
language: "ruby"
sudo: false
before_install:
  - gem install bundler
script: "bundle exec rake --trace"
rvm:
  - 2.2.4
  - 2.3.0
  - 2.4.1
  - jruby-9.1.13.0
  - rbx-2
matrix:
  allow_failures:
    - rvm: rbx-2
  fast_finish: true
notifications:
  irc: "irc.freenode.org#savon"
18.105263
65
0.668605
db0a0135c5b6cde627226ec5751a797f021208ed
965
yml
YAML
config/secrets.yml
007oscar/sample_app
29829496714c63649652f555a38f6092ab9db3c6
[ "Ruby", "Beerware", "MIT" ]
null
null
null
config/secrets.yml
007oscar/sample_app
29829496714c63649652f555a38f6092ab9db3c6
[ "Ruby", "Beerware", "MIT" ]
null
null
null
config/secrets.yml
007oscar/sample_app
29829496714c63649652f555a38f6092ab9db3c6
[ "Ruby", "Beerware", "MIT" ]
null
null
null
# Be sure to restart your server when you modify this file.

# Your secret key is used for verifying the integrity of signed cookies.
# If you change this key, all old signed cookies will become invalid!

# Make sure the secret is at least 30 characters and all random,
# no regular words or you'll be exposed to dictionary attacks.
# You can use `rails secret` to generate a secure secret key.

# Make sure the secrets in this file are kept private
# if you're sharing your code publicly.

development:
  secret_key_base: 29131c99d285517b983aa149d1e3c4c59c69694d49feb99974df5ab89cfe9915d55ab927bc69436655112d5defc8e585935a7b7ef225478fc7f2caa2546dfc5b

test:
  secret_key_base: 46037606e059462bf18da0897055d46fac25ce1b4c1d55842fee5f2746215e8146b5598c1b7aa77a1a87106c790abe0ac991f58ce584c2124c28b8c7585c25e9

# Do not keep production secrets in the repository,
# instead read values from the environment.
production:
  secret_key_base: <%= ENV["SECRET_KEY_BASE"] %>
41.956522
147
0.823834
4e0d21acd83258c39542ff330cc5132ed48c4d91
3,355
yml
YAML
_config.yml
007pig/rustabc
a5e387a930e99c9f028a103a189ff65b51709b4b
[ "MIT" ]
1
2021-04-25T01:58:55.000Z
2021-04-25T01:58:55.000Z
_config.yml
007pig/rustabc
a5e387a930e99c9f028a103a189ff65b51709b4b
[ "MIT" ]
null
null
null
_config.yml
007pig/rustabc
a5e387a930e99c9f028a103a189ff65b51709b4b
[ "MIT" ]
null
null
null
name: Twister
description: a powerful jekyll theme
url: #place url
baseurl: #place folder name if site is served in subfolder
permalink: /:title/
paginate: 8
paginate_path: /page/:num/
tag_dir: /tag
category_dir: /category
author_dir: /author
author_title_prefix: 'Posted by '
media_folder: /img
author_default_avatar: /profile-pic.jpg
tumblr: "#"
pinterest: "#"
instagram: "#"
twitter: "#"
facebook: "#"
youtube: "#"
vimeo: "#"
soundcloud: "#"
encoding: utf-8
defaults:
  - scope:
      path: ""
      type: "posts"
    values:
      layout: "post"
      type: "standard"
      homedisplay: "featimg"
markdown: kramdown
kramdown:
  input: GFM
  syntax_highlighter: rouge
relative_permalinks: false
exclude: [.git, .gitignore, sass, .sass-cache, package.json, gruntfile.js, node_modules, README.md]

# prose.io configurations
prose:
  media: "img"
  metadata:
    _posts:
      - name: "title"
        field:
          element: "text"
          label: "title"
      - name: "layout"
        field:
          element: "select"
          label: "Layout"
          help: "display post at full width or with sidebar"
          options:
            - name: "with sidebar"
              value: "post"
            - name: "full width"
              value: "post_full"
      - name: "author"
        field:
          element: "text"
          label: "Author"
          help: "author id as defined within _data/authors.yml"
          type: "text"
      - name: "featimg"
        field:
          element: "text"
          label: "Featured Image"
          help: "featured image within the img-folder"
          type: "text"
      - name: "type"
        field:
          element: "select"
          label: "Post Type"
          help: "each type has some special feature"
          options:
            - name: "Standard"
              value: "standard"
            - name: "Image"
              value: "image"
            - name: "Gallery"
              value: "gallery"
            - name: "Audio"
              value: "audio"
            - name: "YouTube"
              value: "youtube"
            - name: "Vimeo"
              value: "vimeo"
      - name: "homedisplay"
        field:
          element: "select"
          label: "Home Display"
          help: "decide whether to display embedded element (for post types: audio, youtube, vimeo) or Featured Image on home page"
          options:
            - name: "Featured Image"
              value: "featimg"
            - name: "Embedded Element"
              value: "iframe"
      - name: "vimeo-embed"
        field:
          element: "text"
          label: "Vimeo Embedding Code"
          help: "Set post type to vimeo in order for this to work"
      - name: "gallery-id"
        field:
          element: "text"
          label: "Gallery ID"
          help: "ID of the gallery defined within _data/galleries.yml - use include in content to use"
      - name: "audio-embed"
        field:
          element: "text"
          label: "Audio Embedding Code"
          help: "Set post type to audio in order for this to work"
      - name: "yt-video-id"
        field:
          element: "text"
          label: "YouTube Video ID"
          help: "Set post type to youtube in order for this to work. Do not set featured image, as the youtube video thumbnail will be used"
          type: "text"
27.727273
140
0.547243
277e6feb00a2e881a1c2f81ef551a356a559b121
6,845
yml
YAML
.travis.yml
007skyfall/vim
ee5aeeeadc192558e6c0926176e1e1ca05ba9635
[ "Vim" ]
null
null
null
.travis.yml
007skyfall/vim
ee5aeeeadc192558e6c0926176e1e1ca05ba9635
[ "Vim" ]
null
null
null
.travis.yml
007skyfall/vim
ee5aeeeadc192558e6c0926176e1e1ca05ba9635
[ "Vim" ]
null
null
null
language: c
dist: trusty

os:
  - osx
  - linux

compiler:
  - clang
  - gcc

env:
  - BUILD=yes TEST=test COVERAGE=no FEATURES=tiny "CONFOPT='--disable-gui'" SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no
  - BUILD=yes TEST=test COVERAGE=no FEATURES=tiny CONFOPT= SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no
  # ASAN build
  - BUILD=yes TEST=test SANITIZER_CFLAGS="-g -O1 -DABORT_ON_INTERNAL_ERROR -DEXITFREE -fsanitize=address -fno-omit-frame-pointer" FEATURES=huge SRCDIR=./src CHECK_AUTOCONF=no ASAN_OPTIONS="print_stacktrace=1 log_path=asan" LSAN_OPTIONS="suppressions=$TRAVIS_BUILD_DIR/src/testdir/lsan-suppress.txt" "CONFOPT='--enable-perlinterp --enable-pythoninterp --enable-rubyinterp --enable-luainterp --enable-tclinterp'"
  - BUILD=yes TEST="scripttests test_libvterm" COVERAGE=yes CFLAGS=--coverage LDFLAGS=--coverage FEATURES=huge SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no "CONFOPT='--enable-perlinterp --enable-pythoninterp --enable-python3interp --enable-rubyinterp --enable-luainterp --enable-tclinterp'"
  - BUILD=no TEST=unittests COVERAGE=yes CFLAGS=--coverage LDFLAGS=--coverage FEATURES=huge SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=yes
  - BUILD=yes TEST=test COVERAGE=no FEATURES=normal CONFOPT= SHADOWOPT="-C src/shadow" SRCDIR=./src/shadow CHECK_AUTOCONF=no
  - BUILD=yes TEST=test COVERAGE=no FEATURES=small CONFOPT= SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no
  # Mac OSX build
  - BUILD=yes TEST=test COVERAGE=no FEATURES=huge SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no "CONFOPT='--enable-perlinterp --enable-pythoninterp --enable-rubyinterp --enable-luainterp --enable-tclinterp'"

sudo: false

# instead of a 2*2*7 matrix (2*os + 2*compiler + 7*env),
# exclude some builds on mac os x and linux
# on mac os x "tiny" is always without GUI
# linux: 2*compiler + 5*env + mac: 2*compiler + 2*env
matrix:
  exclude:
    - os: osx
      env: BUILD=yes TEST=test COVERAGE=no FEATURES=tiny CONFOPT= SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no
    - os: osx
      env: BUILD=yes TEST=test COVERAGE=no FEATURES=normal CONFOPT= SHADOWOPT="-C src/shadow" SRCDIR=./src/shadow CHECK_AUTOCONF=no
    - os: osx
      env: BUILD=no TEST=unittests COVERAGE=yes CFLAGS=--coverage LDFLAGS=--coverage FEATURES=huge SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=yes
    - os: osx
      env: BUILD=yes TEST=test COVERAGE=no FEATURES=small CONFOPT= SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no
    - os: osx
      env: BUILD=yes TEST="scripttests test_libvterm" COVERAGE=yes CFLAGS=--coverage LDFLAGS=--coverage FEATURES=huge SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no "CONFOPT='--enable-perlinterp --enable-pythoninterp --enable-python3interp --enable-rubyinterp --enable-luainterp --enable-tclinterp'"
    - os: osx
      env: BUILD=yes TEST=test SANITIZER_CFLAGS="-g -O1 -DABORT_ON_INTERNAL_ERROR -DEXITFREE -fsanitize=address -fno-omit-frame-pointer" FEATURES=huge SRCDIR=./src CHECK_AUTOCONF=no ASAN_OPTIONS="print_stacktrace=1 log_path=asan" LSAN_OPTIONS="suppressions=$TRAVIS_BUILD_DIR/src/testdir/lsan-suppress.txt" "CONFOPT='--enable-perlinterp --enable-pythoninterp --enable-rubyinterp --enable-luainterp --enable-tclinterp'"
    - os: linux
      compiler: clang
      env: BUILD=yes TEST=test SANITIZER_CFLAGS="-g -O1 -DABORT_ON_INTERNAL_ERROR -DEXITFREE -fsanitize=address -fno-omit-frame-pointer" FEATURES=huge SRCDIR=./src CHECK_AUTOCONF=no ASAN_OPTIONS="print_stacktrace=1 log_path=asan" LSAN_OPTIONS="suppressions=$TRAVIS_BUILD_DIR/src/testdir/lsan-suppress.txt" "CONFOPT='--enable-perlinterp --enable-pythoninterp --enable-rubyinterp --enable-luainterp --enable-tclinterp'"
    - os: linux
      compiler: clang
      env: BUILD=no TEST=unittests COVERAGE=yes CFLAGS=--coverage LDFLAGS=--coverage FEATURES=huge SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=yes
    - os: linux
      compiler: clang
      env: BUILD=yes TEST=test COVERAGE=no FEATURES=small CONFOPT= SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no
    - os: linux
      env: BUILD=yes TEST=test COVERAGE=no FEATURES=huge SHADOWOPT= SRCDIR=./src CHECK_AUTOCONF=no "CONFOPT='--enable-perlinterp --enable-pythoninterp --enable-rubyinterp --enable-luainterp --enable-tclinterp'"

branches:
  except:
    - /^v[0-9]/

addons:
  apt:
    packages:
      - autoconf
      - clang
      - lcov
      - libperl-dev
      - python-dev
      - python3-dev
      - liblua5.2-dev
      - lua5.2
      - ruby-dev
      - tcl-dev
      - cscope
      - libgtk2.0-dev
  homebrew:
    packages:
      - lua
    update: true

before_install:
  - rvm reset
  # Remove /opt/python/3.x.x/bin from $PATH for using system python3.
  # ("pyenv global system" doesn't seem to work.)
  - if [ "$TRAVIS_OS_NAME" = "linux" ] && which python3 | grep '/opt/python/' > /dev/null; then export PATH=$(echo $PATH | sed -e "s#$(echo $(which python3) | sed -e 's#/python3$##'):##"); fi
  - if [ "$COVERAGE" = "yes" ]; then pip install --user cpp-coveralls; fi
  # needed for https support for coveralls
  # building cffi only works with gcc, not with clang
  - if [ "$COVERAGE" = "yes" ]; then CC=gcc pip install --user pyopenssl ndg-httpsclient pyasn1; fi
  # Lua is not installed on Travis OSX
  - if [ "$TRAVIS_OS_NAME" = "osx" ]; then export LUA_PREFIX=/usr/local; fi
  # Use llvm-cov instead of gcov when compiler is clang.
  - if [ "$TRAVIS_OS_NAME" = "linux" ] && [ "$CC" = "clang" ]; then ln -sf $(which llvm-cov) /home/travis/bin/gcov; fi

# Start virtual framebuffer to be able to test the GUI. Does not work on OS X.
before_script:
  - if [ "$TRAVIS_OS_NAME" = "linux" ]; then export DISPLAY=:99.0 && sh -e /etc/init.d/xvfb start && sleep 3; fi

script:
  - NPROC=$(getconf _NPROCESSORS_ONLN)
  - if [ "$CHECK_AUTOCONF" = "yes" -a "$CC" = "gcc" ]; then make -C src autoconf; fi
  - if [ "x$SHADOWOPT" != x ]; then make -C src shadow; fi
  - (cd ${SRCDIR} && ./configure --with-features=$FEATURES $CONFOPT --enable-fail-if-missing && if [ "$BUILD" = "yes" ]; then make -j$NPROC; fi)
  # Show Vim version and also if_xx versions.
  - if [ "$BUILD" = "yes" ]; then ${SRCDIR}/vim --version; ${SRCDIR}/vim --not-a-term -u NONE -S ${SRCDIR}/testdir/if_ver-1.vim -c quit > /dev/null; ${SRCDIR}/vim --not-a-term -u NONE -S ${SRCDIR}/testdir/if_ver-2.vim -c quit > /dev/null; cat if_ver.txt; fi
  - make $SHADOWOPT $TEST
  - if [ -n "$ASAN_OPTIONS" ]; then for log in $(find -type f -name 'asan.*' -size +0); do asan_symbolize < "$log"; err=1; done; fi
  - if [ -n "$err" ]; then exit 1; fi

after_success:
  - if [ "$COVERAGE" = "yes" ]; then ~/.local/bin/coveralls -b ${SRCDIR} -x .xs -e ${SRCDIR}/if_perl.c -e ${SRCDIR}/xxd -e ${SRCDIR}/libvterm --encodings utf-8 latin-1 EUC-KR; fi
  - if [ "$COVERAGE" = "yes" ]; then cd ${SRCDIR} && bash <(curl -s https://codecov.io/bash) ; fi

# vim:set sts=2 sw=2 tw=0 et:
55.201613
257
0.68824
72c0cc14d886ba1aaeee4f3e799e18f331020c9f
1,221
yml
YAML
{{cookiecutter.app_name}}/docker-compose.yml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
{{cookiecutter.app_name}}/docker-compose.yml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
{{cookiecutter.app_name}}/docker-compose.yml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
version: "3.6" x-build-args: &build_args INSTALL_PYTHON_VERSION: 3.7.5 x-default-volumes: &default_volumes volumes: - ./:/app - ./dev.db:/tmp/dev.db services: flask-dev: build: context: . target: development args: <<: *build_args image: "{{cookiecutter.app_name}}-development" environment: DD_ENV: local DD_SERVICE: {{cookiecutter.app_name}} DD_VERSION: 1.0 DD_LOGS_INJECTION: "true" LOG_LEVEL: info FLASK_ENV: development ports: - "5000:5000" - "2992:2992" <<: *default_volumes flask-prod: build: context: . target: production args: <<: *build_args image: "{{cookiecutter.app_name}}-production" ports: - "5000:5000" environment: FLASK_ENV: production FLASK_DEBUG: 0 LOG_LEVEL: info GUNICORN_WORKERS: 4 <<: *default_volumes manage: build: context: . target: development args: <<: *build_args entrypoint: pipenv run flask environment: FLASK_ENV: production FLASK_DEBUG: 0 image: "{{cookiecutter.app_name}}-manage" stdin_open: true tty: true <<: *default_volumes
19.693548
50
0.592957
ee8ab3300f7df02be807cfcc1ef0232d550c227e
550
yaml
YAML
{{cookiecutter.app_name}}/.github/workflows/lint.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
{{cookiecutter.app_name}}/.github/workflows/lint.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
{{cookiecutter.app_name}}/.github/workflows/lint.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
name: Build Status
on:
  - push
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: {{cookiecutter.python_version}}
      - name: Install Python dependencies
        run: |
          pip install pipenv
          pipenv install --dev
      - run: cp .env.example .env
      - name: Run Python lints
        run: pipenv run flask lint --check
      - name: Run Python tests
        run: pipenv run flask test
22.916667
57
0.583636
a9d532a8d3ff913cf85a83d9b0859c11275fc540
775
yaml
YAML
flask-template.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
flask-template.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
flask-template.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
apiVersion: backstage.io/v1alpha1
kind: Template
metadata:
  name: flask-template
  title: Flask Microservice Template
  description: Create a microservice based off the Python Flask Framework
  tags:
    - recommended
    - python
    - flask
spec:
  owner: [email protected]
  templater: cookiecutter
  type: service
  path: '.'
  schema:
    required:
      - component_id
      - description
    properties:
      component_id:
        title: Name
        type: string
        description: Unique name of the service
      description:
        title: Description
        type: string
        description: Help others understand what the domain of this service is.
      app_name:
        title: App name
        type: string
        description: Name of your app
23.484848
79
0.660645
0bf1822399e794032422beb3a5280239715a6c3b
216
yaml
YAML
catalog-info.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
catalog-info.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
catalog-info.yaml
007sunny/cookiecutter-flask
6168af92854e1fb725de9d97b29e5425044b78bf
[ "MIT" ]
null
null
null
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: cookiecutter-flask
  annotations:
    github.com/project-slug: 007sunny/cookiecutter-flask
spec:
  type: other
  lifecycle: unknown
  owner: team-a
19.636364
56
0.768519
be626785f2d45a8415cae85ba3aca3d353afa4c7
2,446
yml
YAML
.github/workflows/codeql-analysis.yml
007sya/Project2021
28c6a88f6b7b84f130c1930815230cc99bdc6d10
[ "Apache-2.0" ]
5
2021-03-22T12:40:15.000Z
2021-04-19T08:26:12.000Z
.github/workflows/codeql-analysis.yml
angel-langdon/Project2021
28c6a88f6b7b84f130c1930815230cc99bdc6d10
[ "Apache-2.0" ]
null
null
null
.github/workflows/codeql-analysis.yml
angel-langdon/Project2021
28c6a88f6b7b84f130c1930815230cc99bdc6d10
[ "Apache-2.0" ]
3
2021-05-26T10:55:17.000Z
2021-05-27T08:36:37.000Z
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
  push:
    branches: [ main ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ main ]
  schedule:
    - cron: '39 7 * * 2'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        language: [ 'javascript', 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    # and modify them (or add more) to build your code if your project
    # uses a compiled language

    #- run: |
    #  make bootstrap
    #  make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1
33.972222
188
0.666394
2db36fda53cb82b31a824802cb9011fb52133bb2
3,275
yml
YAML
_config.yml
007vasy/benFerrumBlog
907c274340141bf169f3b33afcd69230f70d5c90
[ "Apache-2.0" ]
null
null
null
_config.yml
007vasy/benFerrumBlog
907c274340141bf169f3b33afcd69230f70d5c90
[ "Apache-2.0" ]
4
2020-03-30T23:52:21.000Z
2022-02-26T06:59:41.000Z
_config.yml
007vasy/benFerrumBlog
907c274340141bf169f3b33afcd69230f70d5c90
[ "Apache-2.0" ]
null
null
null
# Welcome to Jekyll!
#
# This config file is meant for settings that affect your whole blog.
#
# If you need help with YAML syntax, here are some quick references for you:
# https://learn-the-web.algonquindesign.ca/topics/markdown-yaml-cheat-sheet/#yaml
# https://learnxinyminutes.com/docs/yaml/

title: fastpages
description: An easy to use blogging platform with support for Jupyter Notebooks. @7vasy

# you can comment the below line out if your repo name is not different than your baseurl
github_repo: "benFerrumBlog"

# OPTIONAL: override baseurl and url if using a custom domain
# Note: leave out the trailing / from this value. @7vasy.github.io" # the base hostname & protocol for your site, e.g. http://example.com

###########################################################
######### Special Instructions for baseurl ###############
#
#### Scenario One: If you do not have a Custom Domain #####
# - if you are not using a custom domain, the baseurl *must* be set to your repo name
#
#### Scenario Two: If you have a Custom Domain #####
# 1. If your domain does NOT have a subpath, this leave this value as ""
# 2. If your domain does have a subpath, you must preceed the value with a / and NOT have a / at the end.
#    For example:
#    "" is valid
#    "/blog" is valid
#    "/blog/site/" is invalid ( / at the end)
#    "/blog/site" is valid
#    "blog/site" is invalid ( because doesn't begin with a /)
#
# 3. You must replace the parameter `baseurl` in _action_files/settings.ini with the same value as you set here but WITHOUT QUOTES.
#
baseurl: "/benFerrumBlog" # the subpath of your site, e.g. "/blog".

# Github and twitter are optional:
minima:
  social_links:
    twitter: fastdotai
    github: fastai

# Set this to true to get LaTeX math equation support
use_math:

# Set this to true to display the summary of your blog post under your title on the Home page.
show_description: true

# Set this to true to display image previews on home page, if they exist
show_image: false

# Set this to true to display tags on each post
show_tags: true

# Add your Google Analytics ID here if you have one and want to use it
google_analytics:

exclude:
  - docker-compose.yml
  - action.yml
  - Makefile

# this setting allows you to keep pages organized in the _pages folder
include:
  - _pages

# This specifies what badges are turned on by default for notebook posts.
default_badges:
  github: true
  binder: true
  colab: true

# Everything below here should be left alone. Modifications may break fastpages
future: true
theme: minima
plugins:
  - jekyll-feed
  - jekyll-gist
  - jekyll-octicons
  - jekyll-toc
  - jekyll-twitter-plugin
  - jekyll-relative-links
  - jekyll-seo-tag
  - jekyll-remote-theme
  - jekyll-paginate

# See https://jekyllrb.com/docs/pagination/
# For pagination to work, you cannot have index.md at the root of your repo, instead you must rename this file to index.html
paginate: 15
paginate_path: /page:num/

remote_theme: jekyll/minima

titles_from_headings:
  enabled: true
  strip_title: true
  collections: true

highlighter: rouge
markdown: kramdown
kramdown:
  math_engine: katex
  input: GFM
  auto_ids: true
  hard_wrap: false
  syntax_highlighter: rouge
30.045872
131
0.701374
38d3140cc7758c1613706f861c3d208937935d2a
1,746
yml
YAML
.github/workflows/ci.yml
007vasy/pym-disk
d60c5965ec1fa6b00d5d493a9f5ed5b07b9cba7f
[ "Apache-2.0" ]
1
2020-12-18T08:05:13.000Z
2020-12-18T08:05:13.000Z
.github/workflows/ci.yml
007vasy/pym-disk
d60c5965ec1fa6b00d5d493a9f5ed5b07b9cba7f
[ "Apache-2.0" ]
null
null
null
.github/workflows/ci.yml
007vasy/pym-disk
d60c5965ec1fa6b00d5d493a9f5ed5b07b9cba7f
[ "Apache-2.0" ]
null
null
null
name: Continuous integration

on: [push]

jobs:
  check:
    name: Check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - uses: actions-rs/cargo@v1
        with:
          command: check

  test:
    name: Test Suite
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - uses: actions-rs/cargo@v1
        with:
          command: test

  fmt:
    name: Rustfmt
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - run: rustup component add rustfmt
      - uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - run: rustup component add clippy
      - uses: actions-rs/cargo@v1
        with:
          command: clippy
          args: -- -D clippy::all -D clippy::pedantic -D warnings

  build:
    name: Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions-rs/toolchain@v1
        with:
          profile: minimal
          toolchain: stable
          override: true
      - uses: actions-rs/cargo@v1
        with:
          command: build
22.675325
65
0.548683