diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 00000000..26533ea1
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,29 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+name: Release Charts
+
+on:
+ push:
+ branches:
+ - main
+
+jobs:
+ release:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0
+
+ - name: Configure Git
+ run: |
+ git config user.name "$GITHUB_ACTOR"
+ git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+ - name: Run chart-releaser
+ uses: helm/chart-releaser-action@v1.1.0
+ with:
+ charts_dir: charts
+ env:
+ CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/.gitignore b/.gitignore
index 663c67b1..3e6dd831 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
.DS_Store
####
@@ -54,4 +57,9 @@ terraform.rc
# Ignore util dir
logan/util/*
+# helm-chart
+charts/oci-onm/Chart.lock
+charts/oci-onm/charts/
+# zip artifacts
+releases/
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..4b542536
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,31 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v2.3.0
+ hooks:
+ - id: check-yaml
+ args: [--allow-multiple-documents]
+ exclude: '^charts/.*/templates/'
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ #- id: check-json
+ - id: check-merge-conflict
+ - id: check-symlinks
+ #- repo: https://github.com/Lucas-C/pre-commit-hooks
+ # rev: v1.5.1
+ # hooks:
+ # - id: insert-license
+ # exclude: '*.json|*.txt|.*.md|.*.txt|.pre-commit-config.yaml'
+ # args:
+ # - --license-filepath
+ # - ./docs/license-short.txt
+- repo: https://github.com/norwoodj/helm-docs
+ rev: v1.2.0
+ hooks:
+ - id: helm-docs
+ args:
+ - --chart-search-root=charts
+ # The `./` makes it relative to the chart-search-root set above
+ #- --template-files=./_templates.gotmpl
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 61f6639e..cf0f8b3d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,11 +1,20 @@
# Change Log
-## 2022-02-07
+## 2023-06-14
+### Added
+- Kubernetes Metrics Collection to OCI Monitoring using OCI Management Agent.
+- Support for Kubernetes Service and EndpointSlice Object logs collection.
+### Changed
+- Refactoring of helm chart, terraform and stack/market place app to support the consolidation of logs, objects and metrics collection.
+### Breaking Changes
+- The refactoring work done in this version, may cause issues if you upgrade to this version (v3.0.0) from previous versions. Refer [here](README.md#2x-to-3x) for further details.
+
+## 2023-02-07
### Added
- Create a new mount (rw) using the value provided for baseDir.
-- Expose "encoding" parameter of Fluentd's tail plugin as part of values.yaml, which allows users to override default encoding (ASCII-8BIT) for applicable logs/log types.
+- Expose "encoding" parameter of Fluentd's tail plugin as part of values.yaml, which allows users to override default encoding (ASCII-8BIT) for applicable logs/log types.
- Partial CRI logs handling.
-- Oracle Resource Manager / Terraform support for deploying the solution.
+- Oracle Resource Manager / Terraform support for deploying the solution.
### Changed
- Modified /var/log to mount as readonly by default, except when /var/log is set as baseDir (to store Fluentd state, buffer etc.,).
### Breaking Changes
@@ -13,8 +22,8 @@
## 2022-08-30
### Added
-- Helm chart templatisation/parameterisation to provide granular level control on the chart and its values.
-- Support for custom ServiceAccount.
+- Helm chart templatisation/parameterisation to provide granular level control on the chart and its values.
+- Support for custom ServiceAccount.
### Breaking Changes
- If you have not modified any of the templates values.yaml for any customisation including custom Fluentd configuration etc., then upgrading to this version is a non breaking change. In case, if you have any modifications or customisations, then you may need to adjust those according to the new templatisation format before upgrading to this version.
@@ -35,7 +44,7 @@
- Pod Annotations based customiation of configuration paremeters (oci_la_log_source_name, oci_la_log_group_id, oci_la_entity_id) for logs collected through "Kubernetes Container Generic Logs".
- README update for custom configuration documentation.
- Flush interval and timeout label configuration for Concat plugin section.
-
+
## 2022-02-24
### Added
- Oracle Linux 8 based Docker Image support.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..1abc8f8d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2023 Oracle and/or its affiliates. All rights reserved.
+
+The Universal Permissive License (UPL), Version 1.0
+
+Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
+software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
+all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
+hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
+(ii) the Larger Works (as defined below), to deal in both
+
+(a) the Software, and
+(b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
+(each a “Larger Work” to which the Software is contributed by such licensors),
+
+without restriction, including without limitation the rights to copy, create derivative works of, display,
+perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
+sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
+
+This license is subject to the following condition:
+The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
+be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/README.md b/README.md
index 013272e4..006d878f 100644
--- a/README.md
+++ b/README.md
@@ -1,519 +1,375 @@
-# Monitoring Solution for Kubernetes
+# OCI Kubernetes Monitoring Solution
-## About
+OCI Kubernetes Monitoring Solution is a turn-key Kubernetes monitoring and management package based on OCI Logging Analytics cloud service, OCI Monitoring, OCI Management Agent and Fluentd.
-This provides an end-to-end monitoring solution for Oracle Container Engine for Kubernetes (OKE) and other forms of Kubernetes Clusters using Logging Analytics, Monitoring and other Oracle Cloud Infrastructure (OCI) Services.
+It enables DevOps, Cloud Admins, Developers, and Sysadmins to
-![Kubernetes Cluster Summary Dashboard](logan/images/kubernetes-cluster-summary-dashboard.png)
-
-![Kubernetes Nodes Dashboard](logan/images/kubernetes-nodes-dashboard.png)
-
-![Kubernetes Workloads Dashboard](logan/images/kubernetes-workloads-dashboard.png)
-
-![Kubernetes Pods Dashboard](logan/images/kubernetes-pods-dashboard.png)
-
-## Logs
-
-This solutions offers collection of various logs of a Kubernetes cluster into OCI Logging Analytics and offer rich analytics on top of the collected logs. Users may choose to customise the log collection by modifying the out of the box configuration that it provides.
-
-### Kubernetes System/Service Logs
-
-OKE or Kubernetes comes up with some built-in services where each one has different responsibilities and they run on one or more nodes in the cluster either as Deployments or DaemonSets.
-
-The following service logs are configured to be collected out of the box:
-- Kube Proxy
-- Kube Flannel
-- Kubelet
-- CoreDNS
-- CSI Node Driver
-- DNS Autoscaler
-- Cluster Autoscaler
-- Proxymux Client
-
-### Linux System Logs
-
-The following Linux system logs are configured to be collected out of the box:
-- Syslog
-- Secure logs
-- Cron logs
-- Mail logs
-- Audit logs
-- Ksplice Uptrack logs
-- Yum logs
-
-### Control Plane Logs
-
-The following are various Control Plane components in OKE/Kubernetes.
-- Kube API Server
-- Kube Scheduler
-- Kube Controller Manager
-- Cloud Controller Manager
-- etcd
+* Continuously monitor health and performance
+* Troubleshoot issues and identify their root causes
+* Optimize IT environment based on long term data
+* Identify configuration, and security issues
-At present, control plane logs are not covered as part of out of the box collection, as these logs are not exposed to OKE customers.
-The out of the box collection for these logs will be available soon for generic Kubernetes clusters and for OKE (when OKE makes these logs accessible to end users).
+across their entire environment - using Logs, Metrics, and Object metadata.
-### Application Pod/Container Logs
+It does extensive enrichment of logs, metrics and object information to enable cross correlation across entities from different tiers in OCI Logging Analytics. A collection of dashboards is provided to get users started quickly.
-All the logs from application pods writing STDOUT/STDERR are typically available under /var/log/containers/.
-Application which are having custom log handlers (say log4j or similar) may route their logs differently but in general would be available on the node (through a volume).
+## Dashboards
-## Kubernetes Objects
-
-"Kubernetes objects are persistent entities in the Kubernetes system. Kubernetes uses these entities to represent the state of your cluster. Specifically, they can describe:
-- What containerized applications are running (and on which nodes)
-- The resources available to those applications
-- The policies around how those applications behave, such as restart policies, upgrades, and fault-tolerance"
-
-*Reference* : [Kubernetes Objects](https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/)
-
-The following are the list of objects supported at present:
-- Nodes
-- Namespaces
-- Pods
-- DaemonSets
-- Deployments
-- ReplicaSets
-- Events
-
-## Installation Instructions
-
-### Deploy using Oracle Resource Manager
-
-> **_NOTE:_** If you aren't already signed in, when prompted, enter the tenancy and user credentials. Review and accept the terms and conditions. If you aren't on-boarded to OCI Logging Analytics, refer to [Pre-requisites](#pre-requisites) section to enable Logging Analytics in the region where you want to deploy the stack. The default container image available through the deployment is only for demo/non-production use-cases, we recommend you to refer [Docker Image](#docker-image) section to build your own image.
+![Kubernetes Cluster Summary Dashboard](logan/images/kubernetes-cluster-summary-dashboard.png)
-- Click to deploy the stack
+
+ Expand for more dashboard screenshots
- [![Deploy to Oracle Cloud][orm_button]][oci_kubernetes_monitoring_stack]
+![Kubernetes Nodes Dashboard](logan/images/kubernetes-nodes-dashboard.png)
-- Select the region and compartment where you want to deploy the stack.
+![Kubernetes Workloads Dashboard](logan/images/kubernetes-workloads-dashboard.png)
-- Follow the on-screen prompts and instructions to create the stack.
+![Kubernetes Pods Dashboard](logan/images/kubernetes-pods-dashboard.png)
-- After creating the stack, click Terraform Actions, and select Plan.
+
-- Wait for the job to be completed, and review the plan.
-- To make any changes, return to the Stack Details page, click Edit Stack, and make the required changes. Then, run the Plan action again.
+## Get Started :rocket:
-- If no further changes are necessary, return to the Stack Details page, click Terraform Actions, and select Apply.
+:stop_sign: Upgrading to a major version (like 2.x to 3.x)? See [upgrade](#upgrading-to-a-major-version) section below for details. :warning:
### Pre-requisites
-- Logging Analytics Service must be enabled in the given OCI region before trying out the following Solution. Refer [Logging Analytics Quick Start](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/quick-start.html) for details.
-- Create a Logging Analytics LogGroup(s) if not have done already. Refer [Create Log Group](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/create-logging-analytics-resources.html#GUID-D1758CFB-861F-420D-B12F-34D1CC5E3E0E).
-- Enable access to the log group(s) to uploads logs from Kubernetes environment:
- - For InstancePrincipal based AuthZ (recommended for OKE and Kubernetes clusters running on OCI):
- - Create a dynamic group including relevant OCI Instances. Refer [this](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managingdynamicgroups.htm) for details about managing dynamic groups.
- - Add an IAM policy like,
- ```
- Allow dynamic-group to {LOG_ANALYTICS_LOG_GROUP_UPLOAD_LOGS} in compartment
- ```
- - For Config file based (user principal) AuthZ:
- - Add an IAM policy like,
- ```
- Allow group to {LOG_ANALYTICS_LOG_GROUP_UPLOAD_LOGS} in compartment
- ```
-
-### Docker Image
-
-We are in the process of building a docker image based off Oracle Linux 8 including Fluentd, OCI Logging Analytics Output Plugin and all the required dependencies.
-All the dependencies will be build from source and installed into the image. This image soon would be available to use as a pre-built image as is (OR) to create a custom image using this image as a base image.
-At present, follow the below mentioned steps to build an image either using Dockerfile off Oracle Linux 8 as base image (OR) Dockerfile off Fluentd base image from Dockerhub (off Debian).
-- Download all the files from one of the below mentioned dirs into a local machine having access to internet.
- - [OL8](logan/docker-images/v1.0/oraclelinux/8/)
- - [Debian](logan/docker-images/v1.0/debian/)
-- Run the following command to build the docker image.
- - *docker build -t fluentd_oci_la -f Dockerfile .*
-- The docker image built from the above step, can either be pushed to Docker Hub or OCI Container Registry (OCIR) or to a Local Docker Registry depending on the requirements.
- - [How to push the image to Docker Hub](https://docs.docker.com/docker-hub/repos/#pushing-a-docker-container-image-to-docker-hub)
- - [How to push the image to OCIR](https://www.oracle.com/webfolder/technetwork/tutorials/obe/oci/registry/index.html).
- - [How to push the image to Local Registry](https://docs.docker.com/registry/deploying/).
-
-### Deploying Kuberenetes resources using Kubectl
-
-#### Pre-requisites
-
-- A machine having kubectl installed and setup to point to your Kubernetes environment.
-
-#### To enable Logs collection
-
-Download all the yaml files from [this dir](logan/kubernetes-resources/logs-collection/).
-These yaml files needs to be applied using kubectl to create the necessary resources that enables the logs collection into Logging Analytics through a Fluentd based DaemonSet.
-
-##### configmap-docker.yaml | configmap-cri.yaml
-
-- This file contains the necessary out of the box fluentd configuration to collect Kubernetes System/Service Logs, Linux System Logs and Application Pod/Container Logs.
-- Some log locations may differ for Kubernetes clusters other than OKE, EKS and may need modifications accordingly.
-- Use configmap-docker.yaml for Kubernetes clusters based off Docker runtime (e.g., OKE < 1.20) and configmap-cri.yaml for Kubernetes clusters based off CRI-O.
-- Inline comments are available in the file for each of the source/filter/match blocks for easy reference for making any changes to the configuration.
-- Refer [this](https://docs.oracle.com/en/learn/oci_logging_analytics_fluentd/) to learn about each of the Logging Analytics Fluentd Output plugin configuration parameters.
-- **Note**: A generic source with time only parser is defined/configured for collecting all application pod logs from /var/log/containers/ out of the box.
- It is recommended to define and use a LogSource/LogParser at Logging Analytics for a given log type and then modify the configuration accordingly.
- When adding a configuration (Source, Filter section) for any new container log, also exclude the log path from generic log collection,
- by adding the log path to *exclude_path* field in *in_tail_containerlogs* source block. This is to avoid the duplicate collection of logs through generic log collection.
- Refer [this](#custom-configuration) section for further details.
-
-##### fluentd-daemonset.yaml
-
-- This file has all the necessary resources required to deploy and run the Fluentd docker image as Daemonset.
-- Inline comments are available in the file describing each of the fields/sections.
-- Make sure to replace the fields with actual values before deploying.
-- At minimum, , , needs to be updated.
-- It is recommended to update , too, to tag all the logs processed with corresponding Kubernetes cluster at Logging Analytics.
-
-##### secrets.yaml (Optional)
-
-- At present, InstancePrincipal and OCI Config File (UserPrincipal) based Auth/AuthZ are supported for Fluentd to talk to OCI Logging Analytics APIs.
-- We recommend to use InstancePrincipal based AuthZ for OKE and all clusters which are running on OCI VMs and that is the default auth type configured.
-- Applying this file is not required when using InstancePrincipal based auth type.
-- When config file based Authz is used, modify this file to fill out the values under config section with appropriate values.
-
-##### Commands Reference
-
-Apply the yaml files in the sequence of configmap-docker.yaml(or configmap-cri.yaml), secrets.yaml (not required for default auth type) and fluentd-daemonset.yaml.
-
-```
-$ kubectl apply -f configmap-docker.yaml
-configmap/oci-la-fluentd-logs-configmap created
-
-$ kubectl apply -f secrets.yaml
-secret/oci-la-credentials-secret created
-
-$ kubectl apply -f fluentd-daemonset.yaml
-serviceaccount/oci-la-fluentd-serviceaccount created
-clusterrole.rbac.authorization.k8s.io/oci-la-fluentd-logs-clusterrole created
-clusterrolebinding.rbac.authorization.k8s.io/oci-la-fluentd-logs-clusterrolebinding created
-daemonset.apps/oci-la-fluentd-daemonset created
-```
-
-Use the following command to restart DaemonSet after applying any modifications to configmap or secrets to reflect the changes into the Fluentd.
+* OCI Logging Analytics service must be onboarded with the minimum required policies, in the OCI region where you want to monitor. Refer [Logging Analytics Quick Start](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/quick-start.html) for details.
+* Create OCI Logging Analytics LogGroup(s) if not done already. Refer [Create Log Group](https://docs.oracle.com/en-us/iaas/logging-analytics/doc/create-logging-analytics-resources.html#GUID-D1758CFB-861F-420D-B12F-34D1CC5E3E0E) for details.
+* OCI Dynamic Groups, User Group and Policies.
+
+ Details
+
+ * Create a dynamic group with the following sample rule for OCI Management Agent. Refer [Managing Dynamic Groups](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managingdynamicgroups.htm) for details.
+ ```
+ ALL {resource.type='managementagent', resource.compartment.id='OCI Management Agent Compartment OCID'}
+ ```
+ * Create a dynamic group with following sample rule for OKE Instances.
+ ```
+ ALL {instance.compartment.id='OCI Management Agent Compartment OCID'}
+ ```
+ - **Note**: _This dynamic group is not required for non OKE or when you choose to use Config file based AuthZ for monitoring the logs._
+ * Create a user and user group using which the logs to be published to OCI Logging Analytics. Refer [Managing Users](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managingusers.htm) and [Managing User Groups](https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/managinggroups.htm) for details.
+ - **Note**: _This is not required for OKE when you choose to use the default (Instance principal) AuthZ mechanism._
+ * Create a policy with following statements.
+ * Policy Statement for providing necessary access to upload the metrics.
+ ```
+ Allow dynamic-group to use metrics in compartment WHERE target.metrics.namespace = 'mgmtagent_kubernetes_metrics'
+ ```
+ * Policy Statement for providing necessary access to upload the logs and objects data.
+ ```
+ Allow dynamic-group to {LOG_ANALYTICS_LOG_GROUP_UPLOAD_LOGS} in compartment
+ ```
+ OR
+ ```
+ Allow group to {LOG_ANALYTICS_LOG_GROUP_UPLOAD_LOGS} in compartment
+ ```
+
-```
-kubectl rollout restart daemonset oci-la-fluentd-daemonset -n=kube-system
-```
+### Installation instructions
-#### To enable Kubernetes Objects collection
+#### Multiple methods of installation are available, with following differences:
-Download all the yaml files from [this dir](logan/kubernetes-resources/objects-collection/).
-These yaml files needs to be applied using kubectl to create the necessary resources that enables the Kuberetes Objects collection into Logging Analytics.
+| Deployment Method | Supported Environments | Collection Automation | Dashboards | Customizations |
+| ----| :----:| :----:| :---: | ---|
+| Helm | All* | :heavy_check_mark: | Manual| Full Control (Recommended)
+| OCI Resource Manager | OKE | :heavy_check_mark:| :heavy_check_mark: | Partial Control
+| Terraform | OKE | :heavy_check_mark: | :heavy_check_mark: | Partial Control
+| kubectl | All* | Manual | Manual | Full Control (Not recommended)
-##### configMap-objects.yaml
+\* For some environments, modification of the configuration may be required.
-- This file contains the necessary out of the box fluentd configuration to collect Kubernetes Objects.
-- Refer [this](https://docs.oracle.com/en/learn/oci_logging_analytics_fluentd/) to learn about each of the Logging Analytics Fluentd Output plugin configuration parameters.
+#### Helm
-##### fluentd-deployment.yaml
+##### 0 Pre-requisites
-Refer [this](#fluentd-daemonsetyaml) section.
+* Workstation or OCI Cloud Shell with access configured to the target k8s cluster.
+* Helm ([Installation instructions](https://helm.sh/docs/intro/install/)).
-##### secrets.yaml (Optional)
+##### 1 Download helm chart
-Refer [this](#secretsyaml-optional) section.
+* [latest](https://github.com/oracle-quickstart/oci-kubernetes-monitoring/releases/latest/download/helm-chart.tgz)
+* Go to [releases](https://github.com/oracle-quickstart/oci-kubernetes-monitoring/releases) for a specific version.
-##### Commands Reference
+##### 2 Update values.yaml
-Apply the yaml files in the sequence of configmap-objects.yaml, secrets.yaml (not required for default auth type) and fluentd-deployment.yaml.
+* Create override_values.yaml, to override the minimum required variables in values.yaml.
+ - override_values.yaml
+ ```
+ global:
+ # -- OCID for OKE cluster or a unique ID for other Kubernetes clusters.
+ kubernetesClusterID:
+ # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively.
+ kubernetesClusterName:
+
+ oci-onm-logan:
+ # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value.
+ ociLANamespace:
+ # OCI Logging Analytics Log Group OCID
+ ociLALogGroupID:
+
+ oci-onm-mgmt-agent:
+ mgmtagent:
+ # Provide the base64 encoded content of the Management Agent Install Key file
+ installKeyFileContent:
+ ```
+* **Refer to the oci-onm chart and sub-charts values.yaml for customising or modifying any other configuration.** It is recommended to not modify the values.yaml provided with the charts, instead use override_values.yaml to achieve the same.
+
+##### 3.a Install helm release
+Use the following `helm install` command to the install the chart. Provide a desired release name, path to override_values.yaml and path to helm chart.
```
-$ kubectl apply -f configmap-objects.yaml
-configmap/oci-la-fluentd-objects-configmap configured
-
-$ kubectl apply -f fluentd-deployment.yaml
-serviceaccount/oci-la-fluentd-serviceaccount unchanged
-clusterrole.rbac.authorization.k8s.io/oci-la-fluentd-objects-clusterrole created
-clusterrolebinding.rbac.authorization.k8s.io/oci-la-fluentd-objects-clusterrolebinding created
-deployment.apps/oci-la-fluentd-deployment created
+helm install --values
```
+Refer [this](https://helm.sh/docs/helm/helm_install/) for further details on `helm install`.
-Use the following command to restart Deployment after applying any modifications to configmap or secrets to reflect the changes into the Fluentd.
+##### 3.b Upgrade helm release
+Use the following `helm upgrade` command if any further changes to override_values.yaml needs to be applied or a new chart version needs to be deployed.
```
-kubectl rollout restart deployment oci-la-fluentd-deployment -n=kube-system
+helm upgrade --values
```
+Refer [this](https://helm.sh/docs/helm/helm_upgrade/) for further details on `helm upgrade`.
-### Deploying Kuberenetes resources using Helm
-
-#### Pre-requisites
-
-- Install helm if not done already. Refer [this](https://helm.sh/docs/intro/install/).
-- Download the helm chart from [this dir](logan/helm-chart/).
-
-#### values.yaml
+##### 3.c Import Dashboards
-- This file contains all the default values possible to setup the logs and objects collection, but few values needs to be provided either through an external values.yaml file or by modifying this file. It is recommended to use external values.yaml to override any values.
-- Inline documentation has the description and possible values for each of the configuration parameters.
-- At minimum, the following needs to be set accordingly. image:url, ociLANamespace, ociLALogGroupID. It is recommended to set kubernetesClusterID and kubernetesClusterName too, to tag all the logs processed with corresponding Kubernetes cluster at Logging Analytics.
-- Use "docker" as runtime for Kubernetes clusters based off Docker runtime (e.g., OKE < 1.20) and "cri" for Kubernetes clusters based off CRI-O. The default is "cri".
-- Use "InstancePrincipal" as authtype for OKE and all clusters which are running on OCI VMs and "config" as authtype for OCI Config file based Auth/AuthZ. config under oci section needs to be updated with relevant info when authtype is chosen as "config". The default is "InstancePrincipal".
+Dashboards need to be imported manually. Below is an example for importing Dashboards using OCI CLI.
-#### Commands Reference
-
-It is recommended to validate the values using the following `helm template` command before actually installing. Provide path to exterval values.yaml and path to helm-chart.
-
-```
-helm template --values
-```
-
-Now, the chart can be installed using the following `helm install` command. Provide a desired release name, path to exterval values.yaml and path to helm-chart.
+1. Download and configure [OCI CLI](https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/cliinstall.htm) or open cloud-shell where OCI CLI is pre-installed. Alternative methods like REST API, SDK, Terraform etc can also be used.
+2. Find the **OCID** of the compartment, where the dashboards need to be imported.
+3. Download the dashboard JSONs from [here](logan/terraform/oke/modules/dashboards/dashboards_json/) (TBD).
+4. **Replace** all the instances of the keyword - "`${compartment_ocid}`" in the JSONs with the **Compartment OCID** identified in previous step.
+ * Following command is for quick reference that can be used in a linux/cloud-shell environment:
-```
-helm install --values
-```
+ ```
+ sed -i "s/\${compartment_ocid}//g" *.json
+ ```
-Use the following `helm upgrade` command if any further changes to values.yaml needs to be applied or a new chart version needs to be deployed. Refer [this](https://helm.sh/docs/helm/helm_upgrade/) for further details on `helm upgrade`.
+5. Run the following commands to import the dashboards.
-```
-helm upgrade --values
-```
+ ```
+ oci management-dashboard dashboard import --from-json file://cluster.json
+ oci management-dashboard dashboard import --from-json file://node.json
+ oci management-dashboard dashboard import --from-json file://workload.json
+ oci management-dashboard dashboard import --from-json file://pod.json
+ ```
-Use the following `helm uninstall` command to delete the chart. Provide the release name used when creating the chart.
+##### 4 Uninstall
+Use the following `helm uninstall` command to uninstall the chart. Provide the release name used when creating the chart.
```
-helm uninstall
+helm uninstall
```
-
-## Custom Configuration
+Refer [this](https://helm.sh/docs/helm/helm_uninstall/) for further details on `helm uninstall`.
+
+#### OCI Resource Manager
-### How to use custom logSource (oci_la_log_source_name) and/or other custom configuration for Pod/Container Logs collected through "Kubernetes Container Generic Logs" logSource ?
+Launch OCI Resource Manager Stack in OCI Tenancy and Region of the OKE Cluster, which you want to monitor.
-A generic source with time only parser is defined/configured for collecting all application pod logs from /var/log/containers/ out of the box.
-This is to ensure that all the logs generated by all pods are collected and pushed to Logging Analytics.
-Often you may need to configure a custom logSource for a particular pod log, either by using one of the existing OOB logSources at Logging Analytics or by defining one custom logSource matching to the requirements.
-Once you have defined/identified a logSource for a particular pod log, the following are couple of ways to get those pod logs associated to the logSource.
+[![Launch OCI Resource Manager Stack][orm_button]][oci_kubernetes_monitoring_stack]
-#### Through Pod Annotations
+
+ Instructions
+
+ * Select the region and compartment where you want to deploy the stack.
+ * Follow the on-screen prompts and instructions to create the stack.
+ * After creating the stack, click Terraform Actions, and select Plan.
+ * Wait for the job to be completed, and review the plan.
+ * To make any changes, return to the Stack Details page, click Edit Stack, and make the required changes. Then, run the Plan action again.
+ * If no further changes are necessary, return to the Stack Details page, click Terraform Actions, and select Apply.
+
+
+
+#### Kubectl
-In this approach, all that you need to do is add the following annotation, "oracle.com/oci_la_log_source_name" (with logSourceName as value) to all the pods of choice.
-This approach works for all the use-cases, except for multi-line plain text formatted logs.
+
+ While the recommended approach for installation is through helm, if you intend to use `kubectl` based installation, then the resource yaml files can still be generated through `helm` using the instructions provided below.
-- Refer [this doc](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to find how to add the annotation through Pod's metadata section. This is the recommended approach as it provides the persistent behavior.
-- Refer [this doc](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#annotate) to find how to add annotation through 'kubectl annotate' command. You may use this approach for quick testing.
+##### 0 Pre-requisites
-**Note** The following configuration parameters are supported for customisation through Pod Annotations in addition to logSource,
- - oracle.com/oci_la_log_group_id => to use custom logGroupId (oci_la_log_group_id)
- - oracle.com/oci_la_entity_id => to use custom entityId (oci_la_entity_id)
+* Workstation or OCI Cloud Shell with access configured to the target k8s cluster.
+* Helm ([Installation instructions](https://helm.sh/docs/intro/install/)).
+* Kubectl ([Installation instructions](https://kubernetes.io/docs/tasks/tools/#kubectl)).
-#### Through customLogs section
+##### 1 Download helm chart
-In this approach, all that you need to do is to provide the necessary configuration information like log file path, logSource, multiline start regular expression (in case of multi-line logs) in the customLogs section of values.yaml.
-Using this information the corresponding Fluentd configuration is generated automatically.
+Refer [here](#1-download-helm-chart).
-**Note** This approach is valid only when using helm chart based installation.
+##### 2 Update values.yaml
-The following example demonstrates a container customLogs configuration
-```
- #custom-id1:
- #path: /var/log/containers/custom*.log
- #ociLALogSourceName: "Custom1 Logs"
- #multilineStartRegExp:
- # Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true.
- #isContainerLog: true
-```
-
-The following example demonstrates a non container customLogs configuration
-```
- #custom-id2:
- #path: /var/log/custom/*.log
- #ociLALogSourceName: "Custom2 Logs"
- #multilineStartRegExp:
- # Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true.
- #isContainerLog: false
-```
-
-#### Through Custom Fluentd conf
-
-In this approach, a new set of Source, Filter sections have to be created in the customFluentdConf section of values.yaml.
-The following example demonstrates a custom fluentd config to tag /var/log/containers/frontend*.log with logSource "Guestbook Frontend Logs"
-(*to be added to helm-chart values.yaml, under customFluentdConf section if using helm chart OR to either of configmap-cri.yaml / configmap-docker.yaml if using kubectl approach).
+Refer [here](#2-update-valuesyaml).
+
+##### 3.a Generate yamls
+Use the following `helm template` command to generate the resource yaml files. Provide the path to the helm chart, the path to override_values.yaml, and the path to a directory where the yaml files are to be generated.
```
-
- @type tail
- @id in_tail_frontend
- path_key tailed_path
- path /var/log/containers/frontend-*.log
- pos_file /var/log/oci_la_fluentd_outplugin/pos/frontend.logs.pos
- tag oci.oke.frontend.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
-
- {{- if eq $runtime "docker" }}
- @type json
- {{- else}}
- @type cri
- {{- end }}
-
-
-
- # Record transformer filter to apply Logging Analytics configuration to each record.
-
- @type record_transformer
- enable_ruby true
-
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name": "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", "Kubernetes Cluster ID": "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"{{"}}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
- oci_la_log_source_name "Guestbook Frontend Logs"
- {{- if eq $runtime "docker" }}
- message "${record['log']}"
- {{- end }}
- tag ${tag}
-
-
+helm template <path-to-helm-chart> --values <path-to-override_values.yaml> --output-dir <path-to-output-dir>
```
-**Note**: The log path */var/log/containers/frontend-*.log* has to be excluded from the generic container logs to avoid duplicate log collection. Add the log path to *exclude_path* value under *in_tail_containerlogs* source section.
-
-In addition to the above, you may need to modify the source section to add *multiline* parser, if the logs are of plain text multi-line format (OR) add a concat plugin filter if the logs are of say multi-line but wrapped in json.
-Refer OOB fluentd config in the helm-chart values.yaml for examples.
-
+Refer [this](https://helm.sh/docs/helm/helm_template/) for further details on `helm template`.
+
+##### 3.b Install
-### How to use your own ServiceAccount ?
+Use `kubectl` tool to apply the yaml files generated in the previous step in the following order.
-**Note**: This is supported only through the helm chart based deployment.
+* oci-onm-common
+ ```
+ kubectl apply -f namespace.yaml
+ kubectl apply -f clusterrole.yaml
+ kubectl apply -f clusterrolebinding.yaml
+ kubectl apply -f serviceAccount.yaml
+ ```
+* oci-onm-logan
+ ```
+ kubectl apply -f logs-configmap.yaml
+ kubectl apply -f objects-configmap.yaml
+ kubectl apply -f fluentd-daemonset.yaml
+ kubectl apply -f fluentd-deployment.yaml
+ ```
+ _For non-OKE clusters, or when you choose to use Config file based AuthZ for monitoring the logs, you may need to apply oci-config-secret.yaml before applying fluentd-daemonset.yaml & fluentd-deployment.yaml. Refer [here](docs/FAQ.md#how-to-use-configfile-based-authz-user-principal-instead-of-default-authz-instance-principal-) for how to configure Config based AuthZ._
+* oci-onm-mgmt-agent
+ ```
+ kubectl apply -f mgmt-agent-secrets.yaml
+ kubectl apply -f metrics-configmap.yaml
+ kubectl apply -f mgmt-agent-statefulset.yaml
+ kubectl apply -f mgmt-agent-headless-service.yaml
+ kubectl apply -f metric_server.yaml
+ ```
-By default, a cluster role, cluster role binding and serviceaccount will be created for the Fluentd pods to access (readonly) various objects within the cluster for supporting logs and objects collection. However, if you want to use your own serviceaccount, you can do the same by setting the "createServiceAccount" variable to false and providing your own serviceaccount in the "serviceAccount" variable. Ensure that the serviceaccount should be in the same namespace as the namespace used for the whole deployment. The namespace for the whole deployment can be set using the "namespace" variable, whose default value is "kube-system".
+##### 3.c Import Dashboards
-The serviceaccount must be binded to a cluster role defined in your cluster, which allows access to various objects metadata. The following sample is a recommended minimalistic role definition as of chart version 2.0.0.
-
-```
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: oci-la-fluentd-generic-clusterrole
-rules:
- - apiGroups:
- - ""
- resources:
- - '*'
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - apps
- - batch
- resources:
- - '*'
- verbs:
- - get
- - list
- - watch
-```
-
-Once you have the cluster role defined, to bind the cluster role to your serviceaccount use the following cluster role binding definition.
-
-```
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: oci-la-fluentd-generic-clusterrolebinding
-roleRef:
- kind: ClusterRole
- name: oci-la-fluentd-generic-clusterrole
- apiGroup: rbac.authorization.k8s.io
-subjects:
- - kind: ServiceAccount
- name:
- namespace:
-```
+Refer [here](#3c-import-dashboards).
+
+
-### How to set encoding for logs ?
+### Upgrading to a major version
-**Note**: This is supported only through the helm chart based deployment.
+#### 2.x to 3.x
-By default Fluentd tail plugin that is being used to collect various logs has default encoding set to ASCII-8BIT. To overrided the default encoding, use one of the following approaches.
+One of the major changes introduced in 3.0.0 is refactoring of helm chart where major features of the solution got split into separate sub-charts. 2.x has only support for logs and objects collection using Fluentd and OCI Logging Analytics and this is now moved into a separate chart oci-onm-logan and included as a sub-chart to the main chart oci-onm. This is a breaking change w.r.t the values.yaml and any customisations that you might have done on top of it. There is no breaking change w.r.t functionality offered in 2.x. For full list of changes in 3.x, refer to [changelog](CHANGELOG.md).
-#### Global level
+You may fall into one of the below categories and may need to take actions accordingly.
+
+##### Have no customisations to the existing chart or values.yaml
-Set value for encoding under fluentd:tailPlugin section of values.yaml, which applies to all the logs being collected from the cluster.
+We recommend that you uninstall the release created using the 2.x chart and follow the installation instructions mentioned [here](#helm) for installing the release using the 3.x chart.
-```
-fluentd:
- ...
- ...
- tailPlugin:
+###### Sample 2.x values.yaml (external or override yaml to update the mandatory variables)
+
+ image:
+ url:
+ imagePullPolicy: Always
+ ociLANamespace:
+ ociLALogGroupID: ocid1.loganalyticsloggroup.oc1.phx.amaaaaaa......
+ kubernetesClusterID: ocid1.cluster.oc1.phx.aaaaaaaaa.......
+ kubernetesClusterName:
+
+###### Sample 3.x values.yaml
+
+ global:
+ # -- OCID for OKE cluster or a unique ID for other Kubernetes clusters.
+ kubernetesClusterID: ocid1.cluster.oc1.phx.aaaaaaaaa.......
+ # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively.
+ kubernetesClusterName:
+
+ oci-onm-logan:
+ # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value.
+ ociLANamespace:
+ # OCI Logging Analytics Log Group OCID
+ ociLALogGroupID: ocid1.loganalyticsloggroup.oc1.phx.amaaaaaa......
+
+##### Have customisations to the existing chart or values.yaml
+
+If you have modified the values.yaml provided in the helm chart directly, we recommend that you identify all the changes, move them to override_values.yaml, and follow the instructions provided in the install or upgrade sections under [this](#helm). We recommend using override_values.yaml for updating values of any variables or to incorporate any customisations on top of the existing values.yaml.
+
+If you are already using a separate values.yaml for your customisations, you still need to compare the 2.x vs 3.x variable hierarchy and make the necessary changes accordingly.
+
+
+ Examples
+
+##### Example 1: Using docker runtime instead of default runtime (cri)
+
+ **2.x**
+
+ runtime: docker
+ image:
+ url:
+ imagePullPolicy: Always
+ ociLANamespace:
+ ociLALogGroupID: ocid1.loganalyticsloggroup.oc1.phx.amaaaaaa......
+ kubernetesClusterID: ocid1.cluster.oc1.phx.aaaaaaaaa.......
+ kubernetesClusterName:
+
+ **3.x**
+
+ global:
+ # -- OCID for OKE cluster or a unique ID for other Kubernetes clusters.
+ kubernetesClusterID: ocid1.cluster.oc1.phx.aaaaaaaaa.......
+ # -- Provide a unique name for the cluster. This would help in uniquely identifying the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively.
+ kubernetesClusterName:
+
+ oci-onm-logan:
+ runtime: docker
+ # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value.
+ ociLANamespace:
+ # OCI Logging Analytics Log Group OCID
+ ociLALogGroupID: ocid1.loganalyticsloggroup.oc1.phx.amaaaaaa......
+
+ ##### Example 2: Customisation of a specific log
+
+ **2.x**
+
...
...
- encoding:
-```
-
-#### Specific log type level
-
-The encoding can be set at invidivual log types like kubernetesSystem, linuxSystem, genericContainerLogs, which applies to all the logs under the specific log type.
-
-```
-fluentd:
- ...
- ...
- kubernetesSystem:
+ custom-log1:
+ path: /var/log/containers/custom-1.log
+ ociLALogSourceName: "Custom1 Logs"
+ #multilineStartRegExp:
+ isContainerLog: true
...
...
- encoding:
-```
-```
-fluentd:
- ...
- ...
- genericContainerLogs:
+ **3.x**
+
...
...
- encoding:
-```
+ oci-onm-logan:
+ ...
+ ...
+ custom-log1:
+ path: /var/log/containers/custom-1.log
+ ociLALogSourceName: "Custom1 Logs"
+ #multilineStartRegExp:
+ isContainerLog: true
+ ...
+ ...
+ ...
+ ...
+
+ *The difference is all about moving the required configuration (variable definitions) under oci-onm-logan section appropriately.*
+
+
+
+## Getting Help
-#### Specific log level
+#### [Ask a question](https://github.com/oracle-quickstart/oci-kubernetes-monitoring/discussions/new?category=q-a)
-The encoding can be set at individual log level too, which takes precedence over all others.
+## Resources
-```
-fluentd:
- ...
- ...
- kubernetesSystem:
- ...
- ...
- logs:
- kube-proxy:
- encoding:
-```
+#### :question: [Frequently Asked Questions](./docs/FAQ.md)
-```
-fluentd:
- ...
- ...
- customLogs:
- custom-log1:
- ...
- ...
- encoding:
- ...
- ...
-```
+#### [Custom Logs Configuration](./docs/custom-logs.md)
-## Importing Logging Analytics Kubernetes Dashboards
+#### [Building Custom Container Images](./docs/custom-images.md)
-The Dashboards are imported as part of deploying the Kubernetes solution using [Oracle Resource Manager stack](#deploy-using-oracle-resource-manager). The following steps can be used to import the Dashboards manually to your tenancy.
+## License
-1. Download and configure [OCI CLI](https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/cliinstall.htm) or open cloud-shell where OCI CLI is pre-installed. Alternative methods like REST API, SDK, Terraform etc can also be used.
-1. Find the **OCID** of compartment, where the dashboards need to be imported.
-1. Download the dashboard JSONs from [here](logan/terraform/oke/modules/dashboards/dashboards_json/).
-1. **Replace** all the instances of the keyword - "`${compartment_ocid}`" in the JSONs with the **Compartment OCID** identified in STEP 2.
- - Following are the set of commands for quick reference that can be used in a linux/cloud-shell envirnment :
+Copyright (c) 2023, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
- ```
- sed -i "s/\${compartment_ocid}//g" file://cluster.json
- sed -i "s/\${compartment_ocid}//g" file://node.json
- sed -i "s/\${compartment_ocid}//g" file://workload.json
- sed -i "s/\${compartment_ocid}//g" file://pod.json
- ```
-1. Run the following commands to import the dashboards.
+## [Contributors][def]
- ```
- oci management-dashboard dashboard import --from-json file://cluster.json
- oci management-dashboard dashboard import --from-json file://node.json
- oci management-dashboard dashboard import --from-json file://workload.json
- oci management-dashboard dashboard import --from-json file://pod.json
- ```
+[def]: https://github.com/oracle-quickstart/oci-kubernetes-monitoring/graphs/contributors
[orm_button]: https://oci-resourcemanager-plugin.plugins.oci.oraclecloud.com/latest/deploy-to-oracle-cloud.svg
diff --git a/charts/common/Chart.yaml b/charts/common/Chart.yaml
new file mode 100644
index 00000000..f969495b
--- /dev/null
+++ b/charts/common/Chart.yaml
@@ -0,0 +1,9 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+apiVersion: v2
+name: oci-onm-common
+description: Common resources for OCI Kubernetes Monitoring solution helm charts
+type: application
+version: 3.0.0
+appVersion: "3.0.0"
diff --git a/charts/common/README.md b/charts/common/README.md
new file mode 100644
index 00000000..310eaa02
--- /dev/null
+++ b/charts/common/README.md
@@ -0,0 +1,17 @@
+# oci-onm-common
+
+![Version: 3.0.0](https://img.shields.io/badge/Version-3.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square)
+
+Common resources for OCI Kubernetes Monitoring solution helm charts
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| createNamespace | bool | `true` | Automatically create namespace for all resources (namespaced) used by OCI Kubernetes Monitoring Solution. |
+| createServiceAccount | bool | `true` | Automatically create a readonly cluster role, cluster role binding and serviceaccount, which are required to read various cluster objects for monitoring. If set to false, the serviceaccount value must be provided in the parent chart. Refer to the README for the cluster role definition and other details. |
+| namespace | string | `"oci-onm"` | Kubernetes Namespace for creating serviceaccount. Default: oci-onm |
+| resourceNamePrefix | string | `"oci-onm"` | Resource Name Prefix: Wherever allowed, this prefix will be used with all resources used by this chart |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
diff --git a/charts/common/templates/_helpers.tpl b/charts/common/templates/_helpers.tpl
new file mode 100644
index 00000000..ef8035e6
--- /dev/null
+++ b/charts/common/templates/_helpers.tpl
@@ -0,0 +1,49 @@
+
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# tpl render function
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
+
+# Prefix for all resources created using this chart.
+{{- define "common.resourceNamePrefix" -}}
+ {{- if .Values.resourceNamePrefix -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.resourceNamePrefix "context" .) | trunc 63 | trimSuffix "-" }}
+ {{- else -}}
+ {{- "oci-onm" -}}
+ {{- end -}}
+{{- end -}}
+
+#createNamespace
+{{- define "common.createNamespace" -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.createNamespace "context" .) }}
+{{- end -}}
+
+# namespace
+{{- define "common.namespace" -}}
+ {{- if .Values.namespace -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.namespace "context" .) }}
+ {{- else -}}
+ {{- "oci-onm" -}}
+ {{- end -}}
+{{- end -}}
+
+#createServiceAccount
+{{- define "common.createServiceAccount" -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.createServiceAccount "context" .) }}
+{{- end -}}
+
+#serviceAccount
+{{- define "common.serviceAccount" -}}
+ {{- if .Values.serviceAccount -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.serviceAccount "context" .) }}
+ {{- else -}}
+ {{ include "common.resourceNamePrefix" . }}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/common/templates/clusterrole.yaml b/charts/common/templates/clusterrole.yaml
new file mode 100644
index 00000000..7489230e
--- /dev/null
+++ b/charts/common/templates/clusterrole.yaml
@@ -0,0 +1,32 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+{{- if eq (include "common.createServiceAccount" .) "true" }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: {{ include "common.resourceNamePrefix" . }}
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+ - nonResourceURLs: ["/metrics"]
+ verbs: ["get"]
+ - apiGroups:
+ - apps
+ - batch
+ - discovery.k8s.io
+ - metrics.k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+{{- end }}
diff --git a/charts/common/templates/clusterrolebinding.yaml b/charts/common/templates/clusterrolebinding.yaml
new file mode 100644
index 00000000..f720358f
--- /dev/null
+++ b/charts/common/templates/clusterrolebinding.yaml
@@ -0,0 +1,18 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+{{- if eq (include "common.createServiceAccount" .) "true" }}
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: {{ include "common.resourceNamePrefix" . }}
+roleRef:
+ kind: ClusterRole
+ name: {{ include "common.resourceNamePrefix" . }}
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+ name: {{ include "common.resourceNamePrefix" . }}
+ namespace: {{ include "common.namespace" . }}
+{{- end }}
diff --git a/charts/common/templates/namespace.yaml b/charts/common/templates/namespace.yaml
new file mode 100644
index 00000000..f3e0f150
--- /dev/null
+++ b/charts/common/templates/namespace.yaml
@@ -0,0 +1,10 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+{{- if eq (include "common.createNamespace" .) "true" }}
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: {{ include "common.namespace" . }}
+{{- end }}
diff --git a/charts/common/templates/serviceAccount.yaml b/charts/common/templates/serviceAccount.yaml
new file mode 100644
index 00000000..5ca351e1
--- /dev/null
+++ b/charts/common/templates/serviceAccount.yaml
@@ -0,0 +1,11 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+{{- if eq (include "common.createServiceAccount" .) "true" }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "common.serviceAccount" . }}
+ namespace: {{ include "common.namespace" . }}
+{{- end }}
diff --git a/charts/common/values.yaml b/charts/common/values.yaml
new file mode 100644
index 00000000..5ab34a6d
--- /dev/null
+++ b/charts/common/values.yaml
@@ -0,0 +1,17 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# -- Automatically create a readonly cluster role, cluster role binding and
+# serviceaccount, which are required to read various cluster objects for monitoring.
+# If set to false, the serviceaccount value must be provided in the parent chart.
+# Refer to the README for the cluster role definition and other details.
+createServiceAccount: true
+
+# -- Automatically create namespace for all resources (namespaced) used by OCI Kubernetes Monitoring Solution.
+createNamespace: true
+
+# -- Kubernetes Namespace for creating serviceaccount. Default: oci-onm
+namespace: oci-onm
+
+# -- Resource Name Prefix: Wherever allowed, this prefix will be used with all resources used by this chart
+resourceNamePrefix: oci-onm
diff --git a/charts/logan/Chart.yaml b/charts/logan/Chart.yaml
new file mode 100644
index 00000000..0cb5e02a
--- /dev/null
+++ b/charts/logan/Chart.yaml
@@ -0,0 +1,15 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+apiVersion: v2
+name: oci-onm-logan
+description: Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Objects information to OCI Logging Analytics.
+type: application
+version: 3.0.0
+appVersion: "3.0.0"
+
+dependencies:
+- name: oci-onm-common
+ version: "3.0.0"
+ repository: "file://../common"
+ condition: oci-onm-common.enabled
diff --git a/charts/logan/README.md b/charts/logan/README.md
new file mode 100644
index 00000000..6e1d6f3b
--- /dev/null
+++ b/charts/logan/README.md
@@ -0,0 +1,89 @@
+# oci-onm-logan
+
+![Version: 3.0.0](https://img.shields.io/badge/Version-3.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square)
+
+Charts for sending Kubernetes platform logs, compute logs, and Kubernetes Objects information to OCI Logging Analytics.
+
+## Requirements
+
+| Repository | Name | Version |
+|------------|------|---------|
+| file://../common | oci-onm-common | 3.0.0 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| authtype | string | `"InstancePrincipal"` | Allowed values: InstancePrincipal, config |
+| extraEnv | list | `[]` | Logging Analytics OCID for OKE Cluster ociLAEntityID: Logging Analytics additional metadata. Use this to tag all the collected logs with one or more key:value pairs. Key must be a valid field in Logging Analytics metadata: "Client Host Region": "PCT" "Environment": "Production" "Third key": "Third Value" @param extra environment variables. Example name: ENV_VARIABLE_NAME value: ENV_VARIABLE_VALUE |
+| extraVolumeMounts | list | `[]` | @param extraVolumeMounts Mount extra volume(s). Example: - name: tmpDir mountPath: /tmp |
+| extraVolumes | list | `[]` | @param extraVolumes Extra volumes. Example: - name: tmpDir hostPath: path: /tmp log |
+| fluentd.baseDir | string | `"/var/log"` | Base directory on the node (with read write permission) for storing fluentd plugins related data. |
+| fluentd.customFluentdConf | string | `""` | |
+| fluentd.customLogs | string | `nil` | Configuration for any custom logs which are not part of the default configuration defined in this file. All the pod/container logs will be collected as per "genericContainerLogs" section. Use this section to create a custom configuration for any of the container logs. Also, you can use this section to define configuration for any other log path existing on a Kubernetes worker node custom-id1: path: /var/log/containers/custom*.log Logging Analytics log source to use for parsing and processing the logs: ociLALogSourceName: "Custom1 Logs" The regular expression pattern for the starting line in case of multi-line logs. multilineStartRegExp: Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true. isContainerLog: true |
+| fluentd.file | string | `"fluent.conf"` | Fluentd config file name |
+| fluentd.genericContainerLogs.exclude_path | list | `["\"/var/log/containers/kube-proxy-*.log\"","\"/var/log/containers/kube-flannel-*.log\"","\"/var/log/containers/kube-dns-autoscaler-*.log\"","\"/var/log/containers/coredns-*.log\"","\"/var/log/containers/csi-oci-node-*.log\"","\"/var/log/containers/proxymux-client-*.log\"","\"/var/log/containers/cluster-autoscaler-*.log\""]` | List of log paths to exclude that are already part of other specific configurations defined (like Kube Proxy, Kube Flannel) If you want to create a custom configuration for any of the container logs using the customLogs section, then exclude the corresponding log path here. |
+| fluentd.genericContainerLogs.ociLALogSourceName | string | `"Kubernetes Container Generic Logs"` | Default Logging Analytics log source to use for parsing and processing the logs: Kubernetes Container Generic Logs. |
+| fluentd.genericContainerLogs.path | string | `"/var/log/containers/*.log"` | |
+| fluentd.kubernetesMetadataFilter.ca_file | string | `nil` | Path to CA file for Kubernetes server certificate validation |
+| fluentd.kubernetesMetadataFilter.kubernetes_url | string | `nil` | Kubernetes API server URL. Alternatively, environment variables KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT can be used Environment variable are given preference. |
+| fluentd.kubernetesMetadataFilter.skip_container_metadata | bool | `false` | Skip the container fields container_image and container_image_id in the metadata. |
+| fluentd.kubernetesMetadataFilter.skip_labels | bool | `false` | Skip all label fields from the metadata. |
+| fluentd.kubernetesMetadataFilter.skip_master_url | bool | `false` | Skip the master_url field from the metadata. |
+| fluentd.kubernetesMetadataFilter.skip_namespace_metadata | bool | `false` | Skip the namespace_id field from the metadata. The fetch_namespace_metadata function will be skipped. The plugin will be faster and cpu consumption will be less. |
+| fluentd.kubernetesMetadataFilter.verify_ssl | bool | `true` | Validate SSL certificates |
+| fluentd.kubernetesMetadataFilter.watch | bool | `true` | Set up a watch on the pods on the API server for updates to metadata. By default, true. |
+| fluentd.kubernetesObjects | object | `{"objectsList":{"cron_jobs":{"api_endpoint":"apis/batch"},"daemon_sets":{"api_endpoint":"apis/apps"},"deployments":{"api_endpoint":"apis/apps"},"events":{"api_endpoint":""},"jobs":{"api_endpoint":"apis/batch"},"namespaces":{"api_endpoint":""},"nodes":{"api_endpoint":""},"pods":{"api_endpoint":""},"replica_sets":{"api_endpoint":"apis/apps"},"stateful_sets":{"api_endpoint":"apis/apps"}}}` | Configuration for collecting Kubernetes Object information. Supported objects are Node, Pod, Namespace, Event, DaemonSet, ReplicaSet, Deployment, StatefulSet, Job, CronJob |
+| fluentd.kubernetesSystem.logs.cluster-autoscaler | object | `{"multilineStartRegExp":"/^\\S\\d{2}\\d{2}\\s+[^\\:]+:[^\\:]+:[^\\.]+\\.\\d{0,3}/","ociLALogSourceName":"Kubernetes Autoscaler Logs","path":"/var/log/containers/cluster-autoscaler-*.log"}` | Kubernetes Autoscaler Logs collection configuration |
+| fluentd.kubernetesSystem.logs.coredns | object | `{"multilineStartRegExp":"/^\\[[^\\]]+\\]\\s+/","ociLALogSourceName":"Kubernetes Core DNS Logs","path":"/var/log/containers/coredns-*.log"}` | Kubernetes Core DNS Logs collection configuration |
+| fluentd.kubernetesSystem.logs.csinode | object | `{"ociLALogSourceName":"Kubernetes CSI Node Driver Logs","path":"/var/log/containers/csi-oci-node-*.log"}` | Kubernetes CSI Node Driver Logs collection configuration |
+| fluentd.kubernetesSystem.logs.kube-dns-autoscaler | object | `{"multilineStartRegExp":"/^\\S\\d{2}\\d{2}\\s+[^\\:]+:[^\\:]+:[^\\.]+\\.\\d{0,3}/","ociLALogSourceName":"Kubernetes DNS Autoscaler Logs","path":"/var/log/containers/kube-dns-autoscaler-*.log"}` | Kubernetes DNS Autoscaler Logs collection configuration |
+| fluentd.kubernetesSystem.logs.kube-flannel | object | `{"multilineStartRegExp":"/^\\S\\d{2}\\d{2}\\s+[^\\:]+:[^\\:]+:[^\\.]+\\.\\d{0,3}/","ociLALogSourceName":"Kubernetes Flannel Logs","path":"/var/log/containers/kube-flannel-*.log"}` | Kube Flannel logs collection configuration |
+| fluentd.kubernetesSystem.logs.kube-proxy | object | `{"multilineStartRegExp":"/^\\S\\d{2}\\d{2}\\s+[^\\:]+:[^\\:]+:[^\\.]+\\.\\d{0,3}/","ociLALogSourceName":"Kubernetes Proxy Logs","path":"/var/log/containers/kube-proxy-*.log"}` | Kube Proxy logs collection configuration |
+| fluentd.kubernetesSystem.logs.proxymux | object | `{"ociLALogSourceName":"OKE Proxymux Client Logs","path":"/var/log/containers/proxymux-client-*.log"}` | Proxymux Client Logs collection configuration |
+| fluentd.linuxSystem.logs.cronlog | object | `{"multilineStartRegExp":"/^(?:(?:\\d+\\s+)?<([^>]*)>(?:\\d+\\s+)?)?\\S+\\s+\\d{1,2}\\s+\\d{1,2}:\\d{1,2}:\\d{1,2}\\s+/","ociLALogSourceName":"Linux Cron Logs","path":"/var/log/cron*"}` | Linux CRON logs collection configuration |
+| fluentd.linuxSystem.logs.kubeletlog | object | `{"ociLALogSourceName":"Kubernetes Kubelet Logs"}` | kubelet logs collection configuration |
+| fluentd.linuxSystem.logs.linuxauditlog | object | `{"ociLALogSourceName":"Linux Audit Logs","path":"/var/log/audit/audit*"}` | Linux audit logs collection configuration |
+| fluentd.linuxSystem.logs.maillog | object | `{"multilineStartRegExp":"/^(?:(?:\\d+\\s+)?<([^>]*)>(?:\\d+\\s+)?)?\\S+\\s+\\d{1,2}\\s+\\d{1,2}:\\d{1,2}:\\d{1,2}\\s+/","ociLALogSourceName":"Linux Mail Delivery Logs","path":"/var/log/maillog*"}` | Linux maillog collection configuration |
+| fluentd.linuxSystem.logs.securelog | object | `{"multilineStartRegExp":"/^(?:(?:\\d+\\s+)?<([^>]*)>(?:\\d+\\s+)?)?\\S+\\s+\\d{1,2}\\s+\\d{1,2}:\\d{1,2}:\\d{1,2}\\s+/","ociLALogSourceName":"Linux Secure Logs","path":"/var/log/secure*"}` | Linux Secure Logs collection configuration |
+| fluentd.linuxSystem.logs.syslog | object | `{"multilineStartRegExp":"/^(?:(?:\\d+\\s+)?<([^>]*)>(?:\\d+\\s+)?)?\\S+\\s+\\d{1,2}\\s+\\d{1,2}:\\d{1,2}:\\d{1,2}\\s+/","ociLALogSourceName":"Linux Syslog Logs","path":"/var/log/messages*"}` | Linux syslog collection configuration |
+| fluentd.linuxSystem.logs.uptracklog | object | `{"multilineStartRegExp":"/^\\d{4}-\\d{2}-\\d{2}\\s+\\d{2}:\\d{2}:\\d{2}/","ociLALogSourceName":"Ksplice Logs","path":"/var/log/uptrack*"}` | Linux uptrack logs collection configuration |
+| fluentd.linuxSystem.logs.yum | object | `{"ociLALogSourceName":"Linux YUM Logs","path":"/var/log/yum.log*"}` | Linux yum logs collection configuration |
+| fluentd.ociLoggingAnalyticsOutputPlugin.buffer | object | `{"disable_chunk_backup":true,"flush_interval":30,"flush_thread_burst_interval":0.05,"flush_thread_count":1,"flush_thread_interval":0.5,"retry_exponential_backoff_base":2,"retry_forever":true,"retry_max_times":17,"retry_wait":2,"total_limit_size":"5368709120"}` | Fluentd Buffer Configuration |
+| fluentd.ociLoggingAnalyticsOutputPlugin.plugin_log_file_count | int | `10` | The number of archived or rotated log files to keep, must be non-zero. |
+| fluentd.ociLoggingAnalyticsOutputPlugin.plugin_log_file_size | string | `"10MB"` | The maximum log file size at which point the log file to be rotated, for example, 1KB, 1MB, etc. |
+| fluentd.ociLoggingAnalyticsOutputPlugin.plugin_log_level | string | `"info"` | Output plugin logging level: DEBUG < INFO < WARN < ERROR < FATAL < UNKNOWN |
+| fluentd.ociLoggingAnalyticsOutputPlugin.profile_name | string | `"DEFAULT"` | OCI API Key profile to use, if multiple profiles are found in the OCI API config file. |
+| fluentd.path | string | `"/var/opt/conf"` | Path to the fluentd config file |
+| fluentd.tailPlugin | object | `{"flushInterval":60,"readFromHead":true}` | Config for Logs Collection using fluentd tail plugin |
+| global.namespace | string | `"oci-onm"` | Kubernetes Namespace for creating monitoring resources. Ignored if oci-onm-common.createNamespace is set to false. |
+| global.resourceNamePrefix | string | `"oci-onm"` | Resource names prefix used, where allowed. |
+| image.imagePullPolicy | string | `"Always"` | Image pull policy |
+| image.imagePullSecrets | string | `nil` | |
+| image.url | string | `"container-registry.oracle.com/oci_observability_management/oci-la-fluentd-collector:1.0.0"` | Replace this value with actual docker image url |
+| kubernetesClusterID | string | `nil` | OKE Cluster OCID e.g. ocid1.cluster.oc1.phx.aaaaaaaahhbadf3rxa62faaeixanvr7vftmkg6hupycbf4qszctf2wbmqqxq |
+| kubernetesClusterName | string | `nil` | Kubernetes Cluster name. Need not be the OKE Cluster display name. e.g. production-cluster |
+| namespace | string | `"{{ .Values.global.namespace }}"` | Kubernetes Namespace for deploying monitoring resources deployed by this chart. |
+| objectsPollingFrequency | string | `"5m"` | Collection frequency (in minutes) for Kubernetes Objects |
+| oci-onm-common.createNamespace | bool | `true` | Automatically create namespace for all resources (namespaced) used by OCI Kubernetes Monitoring Solution. |
+| oci-onm-common.createServiceAccount | bool | `true` | Automatically create a readonly cluster role, cluster role binding and serviceaccount, which are required to read various cluster objects for monitoring. If set to false, the serviceaccount value must be provided in the parent chart. Refer to the README for the cluster role definition and other details. |
+| oci-onm-common.namespace | string | `"{{ .Values.global.namespace }}"` | Kubernetes Namespace for creating serviceaccount. Default: oci-onm |
+| oci-onm-common.resourceNamePrefix | string | `"{{ .Values.global.resourceNamePrefix }}"` | Resource Name Prefix: wherever allowed, this prefix will be used with all resources used by this chart |
+| oci-onm-common.serviceAccount | string | `"{{ .Values.global.resourceNamePrefix }}"` | Kubernetes ServiceAccount name |
+| oci.configFiles."private.pem" | string | `""` | Private key file data -----BEGIN RSA PRIVATE KEY----- XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -----END RSA PRIVATE KEY----- |
+| oci.configFiles.config | string | `"# Replace each of the below fields with actual values.\n[DEFAULT]\nuser=\nfingerprint=\nkey_file=\ntenancy=\nregion="` | config file [data](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm) Replace each of the below fields with actual values. [DEFAULT] user= fingerprint= key_file= tenancy= region= |
+| oci.file | string | `"config"` | Config file name |
+| oci.path | string | `"/var/opt/.oci"` | Path to the OCI API config file |
+| ociLALogGroupID | string | `nil` | OCID of Logging Analytics Log Group to send logs to. Can be overridden for individual log types. e.g. ocid1.loganalyticsloggroup.oc1.phx.amaaaaasdfaskriauucc55rlwlxe4ahe2vfmtuoqa6qsgu7mb6jugxacsk6a |
+| ociLANamespace | string | `nil` | |
+| resourceNamePrefix | string | `"{{ .Values.global.resourceNamePrefix }}"` | Resource Name Prefix: wherever allowed, this prefix will be used with all resources used by this chart |
+| resources.limits | object | `{"memory":"500Mi"}` | Limits |
+| resources.requests | object | `{"cpu":"100m","memory":"250Mi"}` | Resource requests |
+| runtime | string | `"cri"` | Container runtime for Kubernetes Cluster. Requires fluentd configuration changes accordingly Allowed values: docker, cri(for OKE 1.20 and above) |
+| serviceAccount | string | `"{{ .Values.global.resourceNamePrefix }}"` | Kubernetes ServiceAccount |
+| volumes | object | `{"containerdataHostPath":"/u01/data/docker/containers","podsHostPath":"/var/log/pods"}` | Log volumes for pod logs and container logs |
+| volumes.containerdataHostPath | string | `"/u01/data/docker/containers"` | Path to the container data logs on Kubernetes Nodes |
+| volumes.podsHostPath | string | `"/var/log/pods"` | Path to the pod logs on Kubernetes Nodes |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
diff --git a/charts/logan/templates/_helpers.tpl b/charts/logan/templates/_helpers.tpl
new file mode 100644
index 00000000..974684b5
--- /dev/null
+++ b/charts/logan/templates/_helpers.tpl
@@ -0,0 +1,53 @@
+
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# tpl render function
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
+
+# Prefix for all resources created using this chart.
+{{- define "logan.resourceNamePrefix" -}}
+ {{- if .Values.resourceNamePrefix -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.resourceNamePrefix "context" .) | trunc 63 | trimSuffix "-" }}
+ {{- else -}}
+ {{- "oci-onm" -}}
+ {{- end -}}
+{{- end -}}
+
+# namespace
+{{- define "logan.namespace" -}}
+ {{- if .Values.namespace -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.namespace "context" .) }}
+ {{- else -}}
+ {{- "oci-onm" -}}
+ {{- end -}}
+{{- end -}}
+
+#serviceAccount
+{{- define "logan.serviceAccount" -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.serviceAccount "context" .) }}
+{{- end -}}
+
+#kubernetesClusterId
+{{- define "logan.kubernetesClusterId" -}}
+ {{- if .Values.kubernetesClusterID -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.kubernetesClusterID "context" .) }}
+ {{- else -}}
+ {{- "UNDEFINED" -}}
+ {{- end -}}
+{{- end -}}
+
+#kubernetesClusterName
+{{- define "logan.kubernetesClusterName" -}}
+ {{- if .Values.kubernetesClusterName -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.kubernetesClusterName "context" .) }}
+ {{- else -}}
+ {{- "UNDEFINED" -}}
+ {{- end -}}
+{{- end -}}
diff --git a/logan/helm-chart/templates/fluentd-daemonset.yaml b/charts/logan/templates/fluentd-daemonset.yaml
similarity index 79%
rename from logan/helm-chart/templates/fluentd-daemonset.yaml
rename to charts/logan/templates/fluentd-daemonset.yaml
index d20c6807..276f2d04 100644
--- a/logan/helm-chart/templates/fluentd-daemonset.yaml
+++ b/charts/logan/templates/fluentd-daemonset.yaml
@@ -1,44 +1,44 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
---
{{- $authtype := .Values.authtype | lower }}
{{- $imagePullSecrets := .Values.image.imagePullSecrets }}
+{{- $resourceNamePrefix := (include "logan.resourceNamePrefix" .) }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
- name: {{ include "oci-la-fluentd.name" . }}-daemonset
- namespace: {{ default "kube-system" .Values.namespace }}
+ name: {{ $resourceNamePrefix }}-logan
+ namespace: {{ include "logan.namespace" . }}
labels:
- app: {{ include "oci-la-fluentd.name" . }}-logs
+ app: {{ $resourceNamePrefix }}-logan
version: v1
spec:
selector:
matchLabels:
- app: {{ include "oci-la-fluentd.name" . }}-logs
+ app: {{ $resourceNamePrefix }}-logan
version: v1
template:
metadata:
annotations:
{{- if eq $authtype "config" }}
- checksum/secrets: {{ include (print $.Template.BasePath "/oci-config-secrets.yaml") . | sha256sum }}
+ checksum/secrets: {{ include (print $.Template.BasePath "/oci-config-secret.yaml") . | sha256sum }}
{{- end}}
- checksum/configmap: {{ include (print $.Template.BasePath "/configmap-logs.yaml") . | sha256sum }}
+ checksum/configmap: {{ include (print $.Template.BasePath "/logs-configmap.yaml") . | sha256sum }}
labels:
- app: {{ include "oci-la-fluentd.name" . }}-logs
+ app: {{ $resourceNamePrefix }}-logan
version: v1
spec:
- {{- if .Values.createServiceAccount }}
- serviceAccountName: {{ include "oci-la-fluentd.name" . }}-serviceaccount
- {{- else }}
- serviceAccountName: "{{ .Values.serviceAccount | required (printf "serviceAccount is required when createServiceAccount is false") }}"
- {{- end }}
+ serviceAccountName: {{ include "logan.serviceAccount" . }}
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- {{- if $imagePullSecrets }}
- imagePullSecrets:
+ {{- if $imagePullSecrets }}
+ imagePullSecrets:
- name: {{ .Values.image.imagePullSecrets }}
{{- end}}
containers:
- - name: {{ include "oci-la-fluentd.name" . }}-logs
+ - name: {{ $resourceNamePrefix }}-fluentd
image: {{ .Values.image.url }}
imagePullPolicy: {{ default "IfNotPresent" .Values.image.imagePullPolicy }}
env:
@@ -53,13 +53,13 @@ spec:
- name: FLUENT_OCI_NAMESPACE
value: {{ .Values.ociLANamespace }}
- name: FLUENT_OCI_KUBERNETES_CLUSTER_ID
- value: {{ .Values.kubernetesClusterID }}
+ value: {{ include "logan.kubernetesClusterId" . }}
- name: FLUENT_OCI_KUBERNETES_CLUSTER_NAME
- value: {{ .Values.kubernetesClusterName }}
+ value: {{ include "logan.kubernetesClusterName" . }}
{{- if eq $authtype "config" }}
- name: FLUENT_OCI_CONFIG_LOCATION
value: {{ .Values.oci.path }}/{{ .Values.oci.file }}
- {{- end }}
+ {{- end }}
{{- if .Values.extraEnv }}
{{- toYaml .Values.extraEnv | nindent 10 }}
{{- end }}
@@ -117,17 +117,17 @@ spec:
- name: basedir
hostPath:
path: {{ .Values.fluentd.baseDir }}
- {{- end }}
+ {{- end }}
{{- if eq $authtype "config" }}
# Mount directory where oci config exists
- name: ociconfigdir
projected:
sources:
- secret:
- name: {{ include "oci-la-fluentd.name" . }}-credentials-secret
+ name: {{ $resourceNamePrefix }}-oci-config
{{- end }}
# Mount directory where fluentd config exists
- name: fluentdconfigdir
configMap:
# Provide the name of the ConfigMap to mount.
- name: {{ include "oci-la-fluentd.name" . }}-logs-configmap
+ name: {{ $resourceNamePrefix }}-logs
diff --git a/logan/helm-chart/templates/fluentd-deployment.yaml b/charts/logan/templates/fluentd-deployment.yaml
similarity index 73%
rename from logan/helm-chart/templates/fluentd-deployment.yaml
rename to charts/logan/templates/fluentd-deployment.yaml
index ebbf4f5b..d9d5c38d 100644
--- a/logan/helm-chart/templates/fluentd-deployment.yaml
+++ b/charts/logan/templates/fluentd-deployment.yaml
@@ -1,41 +1,41 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
---
{{- $authtype := .Values.authtype | lower }}
{{- $imagePullSecrets := .Values.image.imagePullSecrets }}
+{{- $resourceNamePrefix := (include "logan.resourceNamePrefix" .) }}
apiVersion: apps/v1
kind: Deployment
metadata:
- name: {{ include "oci-la-fluentd.name" . }}-deployment
- namespace: {{ default "kube-system" .Values.namespace }}
+ name: {{ $resourceNamePrefix }}-logan
+ namespace: {{ include "logan.namespace" . }}
labels:
- app: {{ include "oci-la-fluentd.name" . }}-objects
+ app: {{ $resourceNamePrefix }}-logan
version: v1
spec:
selector:
matchLabels:
- app: {{ include "oci-la-fluentd.name" . }}-objects
+ app: {{ $resourceNamePrefix }}-logan
version: v1
template:
metadata:
annotations:
{{- if eq $authtype "config" }}
- checksum/secrets: {{ include (print $.Template.BasePath "/oci-config-secrets.yaml") . | sha256sum }}
+ checksum/secrets: {{ include (print $.Template.BasePath "/oci-config-secret.yaml") . | sha256sum }}
{{- end}}
- checksum/configmap: {{ include (print $.Template.BasePath "/configmap-objects.yaml") . | sha256sum }}
+ checksum/configmap: {{ include (print $.Template.BasePath "/objects-configmap.yaml") . | sha256sum }}
labels:
- app: {{ include "oci-la-fluentd.name" . }}-objects
+ app: {{ $resourceNamePrefix }}-logan
version: v1
spec:
- {{- if .Values.createServiceAccount }}
- serviceAccountName: {{ include "oci-la-fluentd.name" . }}-serviceaccount
- {{- else }}
- serviceAccountName: "{{ .Values.serviceAccount | required (printf "serviceAccount is required when createServiceAccount is false") }}"
- {{- end }}
- {{- if $imagePullSecrets }}
- imagePullSecrets:
+ serviceAccountName: {{ include "logan.serviceAccount" . }}
+ {{- if $imagePullSecrets }}
+ imagePullSecrets:
- name: {{ .Values.image.imagePullSecrets }}
{{- end}}
containers:
- - name: {{ include "oci-la-fluentd.name" . }}-objects
+ - name: {{ $resourceNamePrefix }}-fluentd
image: {{ .Values.image.url }}
imagePullPolicy: {{ default "IfNotPresent" .Values.image.imagePullPolicy }}
env:
@@ -50,13 +50,13 @@ spec:
- name: FLUENT_OCI_NAMESPACE
value: {{ .Values.ociLANamespace }}
- name: FLUENT_OCI_KUBERNETES_CLUSTER_ID
- value: {{ .Values.kubernetesClusterID }}
+ value: {{ include "logan.kubernetesClusterId" . }}
- name: FLUENT_OCI_KUBERNETES_CLUSTER_NAME
- value: {{ .Values.kubernetesClusterName }}
+ value: {{ include "logan.kubernetesClusterName" . }}
{{- if eq $authtype "config" }}
- name: FLUENT_OCI_CONFIG_LOCATION
value: {{ .Values.oci.path }}/{{ .Values.oci.file }}
- {{- end }}
+ {{- end }}
{{- if .Values.extraEnv }}
{{- toYaml .Values.extraEnv | nindent 10 }}
{{- end }}
@@ -95,10 +95,10 @@ spec:
projected:
sources:
- secret:
- name: {{ include "oci-la-fluentd.name" . }}-credentials-secret
+ name: {{ $resourceNamePrefix }}-oci-config
{{- end }}
# Mount directory where fluentd config exists
- name: fluentdconfigdir
configMap:
# Provide the name of the ConfigMap to mount.
- name: {{ include "oci-la-fluentd.name" . }}-objects-configmap
+ name: {{ $resourceNamePrefix }}-objects
diff --git a/logan/helm-chart/templates/configmap-logs.yaml b/charts/logan/templates/logs-configmap.yaml
similarity index 80%
rename from logan/helm-chart/templates/configmap-logs.yaml
rename to charts/logan/templates/logs-configmap.yaml
index 89286305..4c3adcc0 100644
--- a/logan/helm-chart/templates/configmap-logs.yaml
+++ b/charts/logan/templates/logs-configmap.yaml
@@ -1,14 +1,19 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+{{- $kubernetesClusterName := (include "logan.kubernetesClusterName" .) }}
+{{- $kubernetesClusterId := (include "logan.kubernetesClusterId" .) }}
apiVersion: v1
kind: ConfigMap
metadata:
- name: {{ include "oci-la-fluentd.name" . }}-logs-configmap
- namespace: {{ default "kube-system" .Values.namespace }}
+ name: {{ include "logan.resourceNamePrefix" . }}-logs
+ namespace: {{ include "logan.namespace" . }}
data:
# file-like keys
fluent.conf: |
{{- $authtype := .Values.authtype | lower }}
@include kubernetes.conf
-
+
# Filter to add kubernetes metadata
@type kubernetes_metadata
@@ -28,13 +33,13 @@ data:
de_dot false
annotation_match [ ".*" ]
-
+
# Match block to ensure all the logs including concat plugin timeout logs will have same label
@type relabel
@label @NORMAL
-
+
# Match block to set info required for oci-logging-analytics fluentd output plugin
@@ -89,7 +94,7 @@ data:
path {{ $logDefinition.path | required (printf "fluentd.kubernetesSystem.logs.%s.path is required" $name) }}
pos_file {{ $.Values.fluentd.baseDir }}/oci_la_fluentd_outplugin/pos/{{ $name }}.logs.pos
tag oci.oke.{{ $name }}.*
- read_from_head "{{ $.Values.fluentd.tailPlugin.readFromHead }}"
+ read_from_head "{{ $.Values.fluentd.tailPlugin.readFromHead }}"
{{- if $logDefinition.encoding }}
encoding {{ $logDefinition.encoding }}
{{- else if $.Values.fluentd.kubernetesSystem.encoding }}
@@ -110,11 +115,11 @@ data:
enable_ruby true
{{- if $logDefinition.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := $logDefinition.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := $logDefinition.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else if $.Values.fluentd.kubernetesSystem.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := $.Values.fluentd.kubernetesSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := $.Values.fluentd.kubernetesSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := $.Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := $.Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- end }}
{{- if $logDefinition.ociLALogGroupID }}
oci_la_log_group_id "{{ $logDefinition.ociLALogGroupID }}"
@@ -123,7 +128,7 @@ data:
{{- else }}
oci_la_log_group_id "{{ required "ociLALogGroupID is required" $.Values.ociLALogGroupID }}"
{{- end }}
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "{{ $logDefinition.ociLALogSourceName | required (printf "fluentd.kubernetesSystem.logs.%s.ociLALogSourceName is required" $name) }}"
{{- if $logDefinition.ociLAEntityID }}
oci_la_entity_id "{{ $logDefinition.ociLAEntityID }}"
@@ -136,7 +141,7 @@ data:
oci_la_log_set "{{ $.Values.fluentd.kubernetesSystem.ociLALogSet | default $.Values.ociLALogSet }}"
{{- end }}
{{- if eq $runtime "docker" }}
- message "${record['log']}"
+ message "${record['log']}"
{{- end }}
tag ${tag}
@@ -168,7 +173,7 @@ data:
{{- end }}
{{- end }}
{{- end }}
-
+
{{- if .Values.fluentd.linuxSystem }}
{{- range $name, $logDefinition := .Values.fluentd.linuxSystem.logs }}
{{- if and (ne $name "syslog") (ne $name "kubeletlog") }}
@@ -202,11 +207,11 @@ data:
enable_ruby true
{{- if $logDefinition.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := $logDefinition.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := $logDefinition.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else if $.Values.fluentd.linuxSystem.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := $.Values.fluentd.linuxSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := $.Values.fluentd.linuxSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := $.Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := $.Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- end }}
{{- if $logDefinition.ociLALogGroupID }}
oci_la_log_group_id "{{ $logDefinition.ociLALogGroupID }}"
@@ -215,7 +220,7 @@ data:
{{- else }}
oci_la_log_group_id "{{ required "ociLALogGroupID is required" $.Values.ociLALogGroupID }}"
{{- end }}
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "{{ $logDefinition.ociLALogSourceName | required (printf "fluentd.linuxSystem.logs.%s.ociLALogSourceName is required" $name) }}"
{{- if $logDefinition.ociLAEntityID }}
oci_la_entity_id "{{ $logDefinition.ociLAEntityID }}"
@@ -244,7 +249,7 @@ data:
path {{ required "fluentd.linuxSystem.logs.syslog.path is required" .Values.fluentd.linuxSystem.logs.syslog.path }}
pos_file {{ .Values.fluentd.baseDir }}/oci_la_fluentd_outplugin/pos/syslog.logs.pos
tag oci.oke.syslog.messages.**
- read_from_head "{{ .Values.fluentd.tailPlugin.readFromHead }}"
+ read_from_head "{{ .Values.fluentd.tailPlugin.readFromHead }}"
{{- if .Values.fluentd.linuxSystem.logs.syslog.encoding }}
encoding {{ .Values.fluentd.linuxSystem.logs.syslog.encoding }}
{{- else if .Values.fluentd.linuxSystem.encoding }}
@@ -258,7 +263,7 @@ data:
format1 /^(?.*)/
-
+
# Match block to filter kubelet logs from syslogs
@type rewrite_tag_filter
@@ -274,18 +279,18 @@ data:
tag oci.oke.syslog.syslog.*
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
{{- if .Values.fluentd.linuxSystem.logs.kubeletlog.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.logs.kubeletlog.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.logs.kubeletlog.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else if .Values.fluentd.linuxSystem.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- end }}
{{- if .Values.fluentd.linuxSystem.logs.kubeletlog.ociLALogGroupID }}
oci_la_log_group_id "{{ .Values.fluentd.linuxSystem.logs.kubeletlog.ociLALogGroupID }}"
@@ -294,7 +299,7 @@ data:
{{- else }}
oci_la_log_group_id "{{ required "ociLALogGroupID is required" .Values.ociLALogGroupID }}"
{{- end }}
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "{{ required "fluentd.linuxSystem.logs.kubeletlog.ociLALogSourceName is required" .Values.fluentd.linuxSystem.logs.kubeletlog.ociLALogSourceName }}"
{{- if .Values.fluentd.linuxSystem.logs.kubeletlog.ociLAEntityID }}
oci_la_entity_id "{{ .Values.fluentd.linuxSystem.logs.kubeletlog.ociLAEntityID }}"
@@ -309,18 +314,18 @@ data:
tag ${tag}
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
{{- if .Values.fluentd.linuxSystem.logs.syslog.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.logs.syslog.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.logs.syslog.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else if .Values.fluentd.linuxSystem.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.fluentd.linuxSystem.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}", "Node": "#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- end }}
{{- if .Values.fluentd.linuxSystem.logs.syslog.ociLALogGroupID }}
oci_la_log_group_id "{{ .Values.fluentd.linuxSystem.logs.syslog.ociLALogGroupID }}"
@@ -329,7 +334,7 @@ data:
{{- else }}
oci_la_log_group_id "{{ required "ociLALogGroupID is required" .Values.ociLALogGroupID }}"
{{- end }}
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "{{ required "fluentd.linuxSystem.logs.syslog.ociLALogSourceName is required" .Values.fluentd.linuxSystem.logs.syslog.ociLALogSourceName }}"
{{- if .Values.fluentd.linuxSystem.logs.syslog.ociLAEntityID }}
oci_la_entity_id "{{ .Values.fluentd.linuxSystem.logs.syslog.ociLAEntityID }}"
@@ -390,9 +395,9 @@ data:
enable_ruby true
{{- if $logDefinition.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := $logDefinition.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := $logDefinition.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $.Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ $.Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := $.Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := $.Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- end }}
{{- if $logDefinition.ociLALogGroupID }}
oci_la_log_group_id "{{ $logDefinition.ociLALogGroupID }}"
@@ -461,10 +466,10 @@ data:
@type json
{{- else}}
@type cri
- {{- end }}
+ {{- end }}
-
+
# Filter to add kubernetes metadata
@type kubernetes_metadata
@@ -484,22 +489,22 @@ data:
de_dot false
annotation_match [ ".*" ]
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
{{- if .Values.fluentd.genericContainerLogs.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := .Values.fluentd.genericContainerLogs.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := .Values.fluentd.genericContainerLogs.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- end }}
oci_la_log_group_id ${record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_group_id") ? record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_group_id") : "{{ .Values.fluentd.genericContainerLogs.ociLALogGroupID | default .Values.ociLALogGroupID }}"}
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name ${record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_source_name") ? record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_source_name") : "{{ .Values.fluentd.genericContainerLogs.ociLALogSourceName | default "Kubernetes Container Generic Logs" }}"}
oci_la_entity_id ${record.dig("kubernetes", "annotations", "oracle.com/oci_la_entity_id") ? record.dig("kubernetes", "annotations", "oracle.com/oci_la_entity_id") : "{{ .Values.fluentd.genericContainerLogs.ociLAEntityID | default .Values.ociLAEntityID }}"}
- oci_la_log_set ${record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_set") ? record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_set") : "{{ .Values.fluentd.genericContainerLogs.ociLALogSet | default .Values.ociLALogSet }}"}
+ oci_la_log_set ${record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_set") ? record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_set") : "{{ .Values.fluentd.genericContainerLogs.ociLALogSet | default .Values.ociLALogSet }}"}
{{- if eq $runtime "docker" }}
message "${record['log']}"
{{- end }}
@@ -507,7 +512,7 @@ data:
{{- end }}
-
+
# Concat filter to handle partial logs in CRI/ContainerD
# Docker can also have partial logs but handling is different for different docker versions. Considering Kubernetes/OKE moved to ContainerD/CRI since last 4-5 releases, ignoring docker handling.
# This filter can not be clubbed with concat filter for multiline as both are mutually exclusive.
@@ -522,9 +527,8 @@ data:
timeout_label "@NORMAL"
{{- end }}
-
+
#customFluentd config
- {{- if .Values.fluentd.customFluentdConf }}
+ {{- if .Values.fluentd.customFluentdConf }}
{{- include "common.tplvalues.render" (dict "value" .Values.fluentd.customFluentdConf "context" $) | nindent 4 }}
{{- end }}
-
diff --git a/logan/helm-chart/templates/configmap-objects.yaml b/charts/logan/templates/objects-configmap.yaml
similarity index 83%
rename from logan/helm-chart/templates/configmap-objects.yaml
rename to charts/logan/templates/objects-configmap.yaml
index ffa3bf46..91687cb7 100644
--- a/logan/helm-chart/templates/configmap-objects.yaml
+++ b/charts/logan/templates/objects-configmap.yaml
@@ -1,8 +1,13 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+{{- $kubernetesClusterName := (include "logan.kubernetesClusterName" .) }}
+{{- $kubernetesClusterId := (include "logan.kubernetesClusterId" .) }}
apiVersion: v1
kind: ConfigMap
metadata:
- name: {{ include "oci-la-fluentd.name" . }}-objects-configmap
- namespace: {{ default "kube-system" .Values.namespace }}
+ name: {{ include "logan.resourceNamePrefix" . }}-objects
+ namespace: {{ include "logan.namespace" . }}
data:
# fluentd config file data.
fluent.conf: |
@@ -17,8 +22,8 @@ data:
{{- if eq $authtype "config" }}
config_file_location {{ .Values.oci.path }}/{{ .Values.oci.file }}
profile_name "{{ .Values.fluentd.ociLoggingAnalyticsOutputPlugin.profile_name }}"
- {{- end }}
- plugin_log_location "{{ .Values.fluentd.baseDir }}"
+ {{- end }}
+ plugin_log_location "{{ .Values.fluentd.baseDir }}"
plugin_log_level "{{ .Values.fluentd.ociLoggingAnalyticsOutputPlugin.plugin_log_level }}"
plugin_log_file_size "{{ .Values.fluentd.ociLoggingAnalyticsOutputPlugin.plugin_log_file_size }}"
plugin_log_file_count "{{ .Values.fluentd.ociLoggingAnalyticsOutputPlugin.plugin_log_file_count }}"
@@ -81,7 +86,7 @@ data:
resource_name cron_jobs
interval {{ $.Values.objectsPollingFrequency }}
-
+
{{- end }}
{{- end }}
@@ -90,9 +95,9 @@ data:
enable_ruby true
{{- if .Values.fluentd.kubernetesObjects.metadata }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := .Values.fluentd.kubernetesObjects.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := .Values.fluentd.kubernetesObjects.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- else }}
- oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ .Values.kubernetesClusterName | default "UNDEFINED" }}", "Kubernetes Cluster ID": "{{ .Values.kubernetesClusterID | default "UNDEFINED" }}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name":"{{ $kubernetesClusterName }}", "Kubernetes Cluster ID": "{{ $kubernetesClusterId }}" {{- range $k, $v := .Values.metadata }},{{ $k | quote }}: {{ $v | quote -}} {{- end }}{{"}}"}}
{{- end }}
oci_la_log_group_id "{{ .Values.fluentd.kubernetesObjects.ociLALogGroupID | default .Values.ociLALogGroupID }}"
oci_la_entity_id "{{ .Values.fluentd.kubernetesObjects.ociLAEntityID | default .Values.ociLAEntityID }}"
diff --git a/logan/helm-chart/templates/oci-config-secrets.yaml b/charts/logan/templates/oci-config-secret.yaml
similarity index 53%
rename from logan/helm-chart/templates/oci-config-secrets.yaml
rename to charts/logan/templates/oci-config-secret.yaml
index 15290203..6f291d2e 100644
--- a/logan/helm-chart/templates/oci-config-secrets.yaml
+++ b/charts/logan/templates/oci-config-secret.yaml
@@ -1,14 +1,17 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
{{- $authtype := .Values.authtype | lower }}
{{- if eq $authtype "config" }}
apiVersion: v1
kind: Secret
type: Opaque
metadata:
- name: {{ include "oci-la-fluentd.name" . }}-credentials-secret
- namespace: {{ default "kube-system" .Values.namespace }}
+ name: {{ include "logan.resourceNamePrefix" . }}-oci-config
+ namespace: {{ include "logan.namespace" . }}
stringData:
{{- range $key, $value := .Values.oci.configFiles }}
{{ $key }}: |
{{- include "common.tplvalues.render" (dict "value" $value "context" $) | nindent 4 }}
{{- end }}
-{{- end}}
\ No newline at end of file
+{{- end}}
diff --git a/logan/helm-chart/values.schema.json b/charts/logan/values.schema.json
similarity index 93%
rename from logan/helm-chart/values.schema.json
rename to charts/logan/values.schema.json
index 42275ec3..27e9f3c3 100644
--- a/logan/helm-chart/values.schema.json
+++ b/charts/logan/values.schema.json
@@ -18,7 +18,7 @@
"properties": {
"url": {
"type": "string"
- },
+ },
"pullPolicy": {
"type": "string",
"pattern": "^(Always|Never|IfNotPresent)$"
@@ -37,10 +37,10 @@
"type": "string"
},
"ociLANamespace": {
- "type": "string"
+ "type": "string"
},
"ociLALogGroupID": {
- "type": "string"
+ "type": "string"
},
"fluentd": {
"type": "object",
diff --git a/charts/logan/values.yaml b/charts/logan/values.yaml
new file mode 100644
index 00000000..33951228
--- /dev/null
+++ b/charts/logan/values.yaml
@@ -0,0 +1,452 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+global:
+ # -- Kubernetes Namespace for creating monitoring resources.
+ # Ignored if oci-onm-common.createNamespace is set to false.
+ namespace: oci-onm
+ # -- Resource names prefix used, where allowed.
+ resourceNamePrefix: oci-onm
+
+oci-onm-common:
+# -- Automatically create a readonly cluster role, cluster role binding and
+# serviceaccount, which are required to read various cluster objects for monitoring.
+# If set to false serviceaccount value must be provided in the parent chart.
+# Refer, README for the cluster role definition and other details.
+ createServiceAccount: true
+
+ # -- Automatically create namespace for all resources (namespaced) used by OCI Kubernetes Monitoring Solution.
+ createNamespace: true
+
+ # -- Kubernetes Namespace for creating serviceaccount. Default: oci-onm
+ namespace: "{{ .Values.global.namespace }}"
+
+ # -- Resource Name Prefix: Wherever allowed, this prefix will be used with all resources used by this chart
+ resourceNamePrefix: "{{ .Values.global.resourceNamePrefix }}"
+
+ # -- Kubernetes ServiceAccount name
+ serviceAccount: "{{ .Values.global.resourceNamePrefix }}"
+
+# -- Container runtime for Kubernetes Cluster. Requires fluentd configuration changes accordingly
+# Allowed values: docker, cri(for OKE 1.20 and above)
+runtime: cri
+
+# -- Authentication type for authenticating with OCI Logging Analytics service
+# -- Allowed values: InstancePrincipal, config
+authtype: InstancePrincipal
+
+# -- Kubernetes Namespace for the monitoring resources deployed by this chart.
+namespace: "{{ .Values.global.namespace }}"
+
+
+# -- Resource Name Prefix: Wherever allowed, this prefix will be used with all resources used by this chart
+resourceNamePrefix: "{{ .Values.global.resourceNamePrefix }}"
+
+# -- Kubernetes ServiceAccount
+serviceAccount: "{{ .Values.global.resourceNamePrefix }}"
+image:
+ # Image pull secrets. Secret must be in the namespace defined by namespace
+ imagePullSecrets:
+ # -- Replace this value with actual docker image url
+ url: container-registry.oracle.com/oci_observability_management/oci-la-fluentd-collector:1.0.0
+ # -- Image pull policy
+ imagePullPolicy: Always
+
+# -- Logging Analytics namespace. Can be found in OCI console --> Logging Analytics --> Administration --> Service
+ociLANamespace:
+# -- OCID of Logging Analytics Log Group to send logs to.
+# Can be overridden for individual log types.
+# e.g. ocid1.loganalyticsloggroup.oc1.phx.amaaaaasdfaskriauucc55rlwlxe4ahe2vfmtuoqa6qsgu7mb6jugxacsk6a
+ociLALogGroupID:
+
+# -- OKE Cluster OCID
+# e.g. ocid1.cluster.oc1.phx.aaaaaaaahhbadf3rxa62faaeixanvr7vftmkg6hupycbf4qszctf2wbmqqxq
+kubernetesClusterID:
+
+# -- Kubernetes Cluster name. Need not be the OKE Cluster display name.
+# e.g. production-cluster
+kubernetesClusterName:
+
+# -- Logging Analytics OCID for OKE Cluster
+#ociLAEntityID:
+
+# Logging Analytics additional metadata. Use this to tag all the collected logs with one or more key:value pairs.
+# Key must be a valid field in Logging Analytics
+#metadata:
+ #"Client Host Region": "PCT"
+ #"Environment": "Production"
+ #"Third key": "Third Value"
+
+# @param extra environment variables. Example
+# name: ENV_VARIABLE_NAME
+# value: ENV_VARIABLE_VALUE
+extraEnv: []
+
+# Requests and limits for Memory and CPU
+resources:
+ # -- Limits
+ limits:
+ memory: 500Mi
+ # -- Resource requests
+ requests:
+ cpu: 100m
+ memory: 250Mi
+
+# -- @param extraVolumes Extra volumes.
+# Example:
+# - name: tmpDir
+# hostPath:
+# path: /tmp log
+extraVolumes: []
+
+# -- @param extraVolumeMounts Mount extra volume(s). Example:
+# - name: tmpDir
+# mountPath: /tmp
+extraVolumeMounts: []
+
+# -- Log logvolumes for pod logs and container logs
+volumes:
+ # -- Path to the pod logs on Kubernetes Nodes
+ podsHostPath: /var/log/pods
+ # -- Path to the container data logs on Kubernetes Nodes
+ containerdataHostPath: /u01/data/docker/containers
+
+## -- OCI API Key Based authentication details. Required when authtype set to config
+oci:
+ # -- Path to the OCI API config file
+ path: /var/opt/.oci
+ # -- Config file name
+ file: config
+ configFiles:
+ # -- config file [data](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm)
+ # Replace each of the below fields with actual values.
+ # [DEFAULT]
+ # user=
+ # fingerprint=
+ # key_file=
+ # tenancy=
+ # region=
+ config: |-
+ # Replace each of the below fields with actual values.
+ [DEFAULT]
+ user=
+ fingerprint=
+ key_file=
+ tenancy=
+ region=
+ # -- Private key file data
+ # -----BEGIN RSA PRIVATE KEY-----
+ # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ # -----END RSA PRIVATE KEY-----
+ private.pem: |-
+
+# -- Collection frequency (in minutes) for Kubernetes Objects
+objectsPollingFrequency: 5m
+
+# Fluentd configuration.
+fluentd:
+ # -- Path to the fluentd config file
+ path: /var/opt/conf
+ # -- Fluentd config file name
+ file: fluent.conf
+ # -- Base directory on the node (with read write permission) for storing fluentd plugins related data.
+ baseDir: /var/log
+ # Configuration for oci-logging-analytics fluentd output plugin
+ ociLoggingAnalyticsOutputPlugin:
+ # -- OCI API Key profile to use, if multiple profiles are found in the OCI API config file.
+ profile_name: 'DEFAULT'
+ # -- Output plugin logging level: DEBUG < INFO < WARN < ERROR < FATAL < UNKNOWN
+ plugin_log_level: 'info'
+ # -- The maximum log file size at which point the log file to be rotated, for example, 1KB, 1MB, etc.
+ plugin_log_file_size: '10MB'
+ # -- The number of archived or rotated log files to keep, must be non-zero.
+ plugin_log_file_count: 10
+ # -- Fluentd Buffer Configuration
+ buffer:
+ # Max number of threads to flush or write chunks in parallel.
+ flush_thread_count: 1
+ # Max stored buffer size.
+ # all append operations will fail with error, and data will be lost after total size of the stored buffer reaches this limit.
+ total_limit_size: '5368709120' # 5GB
+ # Frequency of flushing the chunks to output plugin.
+ flush_interval: 30 # seconds
+ # Flush thread interval
+ flush_thread_interval: 0.5 # seconds
+ # Flush thread burst interval
+ flush_thread_burst_interval: 0.05 # seconds
+ # Wait in seconds before the next retry to flush.
+ retry_wait: 2 # seconds
+ # Max number of times to retry. Mandatory when retry_forever is set to false.
+ retry_max_times: 17
+ # Wait in seconds before the next constant factor of exponential backoff.
+ retry_exponential_backoff_base: 2
+ # If true, plugin will ignore retry_max_times option and retry flushing forever.
+ retry_forever: true
+ # Disable chunk backup
+ disable_chunk_backup: true
+
+ # Configuration for kubernetes_metadata filter [plugin](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter)
+ kubernetesMetadataFilter:
+ # -- Kubernetes API server URL.
+ # Alternatively, environment variables KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT can be used
+ # Environment variable are given preference.
+ kubernetes_url:
+ # -- Validate SSL certificates
+ verify_ssl: true
+ # -- Path to CA file for Kubernetes server certificate validation
+ ca_file:
+ # -- Skip all label fields from the metadata.
+ skip_labels: false
+ # -- Skip the container fields container_image and container_image_id in the metadata.
+ skip_container_metadata: false
+ # -- Skip the master_url field from the metadata.
+ skip_master_url: false
+ # -- Skip the namespace_id field from the metadata. The fetch_namespace_metadata function will be skipped. The plugin will be faster and cpu consumption will be less.
+ skip_namespace_metadata: false
+ # -- Set up a watch on the pods on the API server for updates to metadata. By default, true.
+ watch: true
+ # -- Config for Logs Collection using fluentd tail plugin
+ tailPlugin:
+ # If true, starts to read the logs from the head of the file or the last read position recorded in pos_file
+ readFromHead: true
+ # frequency of flushing the chunks to output plugin.
+ flushInterval: 60 # seconds
+ # Specifies the encoding of logs. By default, in_tail emits string value as ASCII-8BIT encoding. If encoding is specified, in_tail changes string to given encoding.
+ # When encoding is set at this level, it gets applied to all the logs being collected. Instead, it can also be set at individual logs under sections like kubernetesSystem, genericContainerLogs, customLogs etc.
+ # encoding:
+
+ # Configuration for rewrite_tag plugin
+ rewriteTagPlugin:
+ hostname_command: "cat /etc/hostname"
+
+ # Configuration for Kubernetes System specific logs like Kube Flannel, Kube Proxy etc.
+ kubernetesSystem:
+ # Setting the following properties will override the default/generic configuration and applies to all Kubernetes system logs
+ #ociLALogGroupID:
+ #metadata:
+ #"Client Host Region": "America"
+ #"Environment": "Production"
+ #"Third Key": "Third Value"
+ #ociLAEntityID:
+ #encoding:
+ logs:
+ # -- Kube Proxy logs collection configuration
+ kube-proxy:
+ # kube-proxy Log file location.
+ path: /var/log/containers/kube-proxy-*.log
+ # Logging Analytics log source to use for parsing and processing Kubernetes Proxy Logs.
+ ociLALogSourceName: "Kubernetes Proxy Logs"
+ # Regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
+ #metadata:
+ #"Client Host Region": "America"
+ #"Environment": "Production"
+ #"Third Key": "Third Value"
+ #ociLAEntityID:
+ #ociLALogGroupID:
+ #encoding:
+
+ # -- Kube Flannel logs collection configuration
+ kube-flannel:
+ # kube-flannel log files location.
+ path: /var/log/containers/kube-flannel-*.log
+ # Logging Analytics log source to use for parsing and processing Kubernetes Flannel Logs.
+ ociLALogSourceName: "Kubernetes Flannel Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
+
+ # -- Kubernetes DNS Autoscaler Logs collection configuration
+ kube-dns-autoscaler:
+ path: /var/log/containers/kube-dns-autoscaler-*.log
+ # Logging Analytics log source to use for parsing and processing Kubernetes DNS Autoscaler Logs.
+ ociLALogSourceName: "Kubernetes DNS Autoscaler Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
+
+ # -- Kubernetes Core DNS Logs collection configuration
+ coredns:
+ # coredns log files location.
+ path: /var/log/containers/coredns-*.log
+ # Logging Analytics log source to use for parsing and processing Kubernetes Core DNS Logs.
+ ociLALogSourceName: "Kubernetes Core DNS Logs"
+ # Regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^\[[^\]]+\]\s+/
+
+ # -- Kubernetes CSI Node Driver Logs collection configuration
+ csinode:
+ # csinode log files location.
+ path: /var/log/containers/csi-oci-node-*.log
+ # Logging Analytics log source to use for parsing and processing Kubernetes CSI Node Driver Logs.
+ ociLALogSourceName: "Kubernetes CSI Node Driver Logs"
+
+ # -- Proxymux Client Logs collection configuration
+ proxymux:
+ # proxymux log files location.
+ path: /var/log/containers/proxymux-client-*.log
+ # Logging Analytics log source to use for parsing and processing OKE Proxymux Client Logs.
+ ociLALogSourceName: "OKE Proxymux Client Logs"
+
+ # -- Kubernetes Autoscaler Logs collection configuration
+ cluster-autoscaler:
+ # cluster autoscaler log files location.
+ path: /var/log/containers/cluster-autoscaler-*.log
+ # Logging Analytics log source to use for parsing and processing Kubernetes Autoscaler Logs.
+ ociLALogSourceName: "Kubernetes Autoscaler Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
+ # Configuration for Linux System specific logs like CronLogs and SecureLogs
+ linuxSystem:
+ logs:
+ # -- Linux CRON logs collection configuration
+ cronlog:
+ # cron log file path
+ path: /var/log/cron*
+ # Logging Analytics log source to use for parsing and processing Linux Cron Logs.
+ ociLALogSourceName: "Linux Cron Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
+
+ # -- Linux secure logs collection configuration
+ securelog:
+ # linux secure logs file path
+ path: /var/log/secure*
+ # Logging Analytics log source to use for parsing and processing Linux Secure Logs.
+ ociLALogSourceName: "Linux Secure Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
+
+ # -- kubelet logs collection configuration
+ kubeletlog:
+ # Logging Analytics log source to use for parsing and processing Kubernetes Kubelet Logs.
+ ociLALogSourceName: "Kubernetes Kubelet Logs"
+
+ # -- Linux syslog collection configuration
+ syslog:
+ # syslog file path
+ path: /var/log/messages*
+ # Logging Analytics log source to use for parsing and processing Linux Syslog Logs.
+ ociLALogSourceName: "Linux Syslog Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
+ # -- Linux maillog collection configuration
+ maillog:
+ # maillog file path
+ path: /var/log/maillog*
+ # Logging Analytics log source to use for parsing and processing Linux Mail Delivery Logs.
+ ociLALogSourceName: "Linux Mail Delivery Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
+
+ # -- Linux audit logs collection configuration
+ linuxauditlog:
+ # audit log file path
+ path: /var/log/audit/audit*
+ # Logging Analytics log source to use for parsing and processing Linux Audit Logs.
+ ociLALogSourceName: "Linux Audit Logs"
+
+ # -- Linux uptrack logs collection configuration
+ uptracklog:
+ # uptrack log files path.
+ path: /var/log/uptrack*
+ # Logging Analytics log source to use for parsing and processing ksplice Logs.
+ ociLALogSourceName: "Ksplice Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ multilineStartRegExp: /^\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}/
+
+ # -- Linux yum logs collection configuration
+ yum:
+ # yum log files path
+ path: /var/log/yum.log*
+ # Logging Analytics log source to use for parsing and processing Linux YUM Logs.
+ ociLALogSourceName: "Linux YUM Logs"
+
+ # Generic configuration for all container/pod logs
+ genericContainerLogs:
+ # -- Default Logging Analytics log source to use for parsing and processing the logs: Kubernetes Container Generic Logs.
+ ociLALogSourceName: "Kubernetes Container Generic Logs"
+ path: /var/log/containers/*.log
+ # -- List of log paths to exclude that are already part of other specific configurations defined (like Kube Proxy, Kube Flannel)
+ # If you want to create a custom configuration for any of the container logs using the customLogs section, then exclude the corresponding log path here.
+ exclude_path:
+ - '"/var/log/containers/kube-proxy-*.log"'
+ - '"/var/log/containers/kube-flannel-*.log"'
+ - '"/var/log/containers/kube-dns-autoscaler-*.log"'
+ - '"/var/log/containers/coredns-*.log"'
+ - '"/var/log/containers/csi-oci-node-*.log"'
+ - '"/var/log/containers/proxymux-client-*.log"'
+ - '"/var/log/containers/cluster-autoscaler-*.log"'
+
+ # -- Configuration for any custom logs which are not part of the default configuration defined in this file.
+ # All the pod/container logs will be collected as per "genericContainerLogs" section.
+ # Use this section to create a custom configuration for any of the container logs.
+ # Also, you can use this section to define configuration for any other log path existing on a Kubernetes worker node
+ #custom-id1:
+ #path: /var/log/containers/custom*.log
+ # Logging Analytics log source to use for parsing and processing the logs:
+ #ociLALogSourceName: "Custom1 Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ #multilineStartRegExp:
+ # Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true.
+ #isContainerLog: true
+ customLogs:
+ # A unique identifier to represent the configuration for a single log path
+ #custom-id1:
+ #path: /var/log/containers/custom*.log
+ # Logging Analytics log source to use for parsing and processing the logs:
+ #ociLALogSourceName: "Custom1 Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ #multilineStartRegExp:
+ # Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true.
+ #isContainerLog: true
+ #custom-id2:
+ #path: /var/log/custom/*.log
+ # Logging Analytics log source to use for parsing and processing the logs:
+ #ociLALogSourceName: "Custom2 Logs"
+ # The regular expression pattern for the starting line in case of multi-line logs.
+ #multilineStartRegExp:
+ # Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true.
+ #isContainerLog: false
+
+ # -- Alternative approach to define the configuration for any custom logs which are not part of the default configuration defined in this file.
+ # Provide the Fluentd configuration with the source and filter sections for your custom logs in this section. Exclude the match section. It would be used without any modification.
+ # Notes:
+ # Ensure that @id in the source section is unique and does not collide with any default configuration defined in this file
+ # Tag must start with "oci." and must be unique.
+ # In case of container log (/var/log/containers/*.log), exclude the corresponding log path in "genericContainerLogs" section.
+ customFluentdConf: |
+
+ # -- Configuration for collecting Kubernetes Object information.
+ # Supported objects are Node, Pod, Namespace, Event, DaemonSet, ReplicaSet, Deployment, StatefulSet, Job, CronJob
+ kubernetesObjects:
+ #metadata:
+ #"Client Host Region": "America"
+ #"Environment": "Production"
+ #"Third Key": "Third Value"
+ #ociLAEntityID:
+ #ociLALogGroupID:
+ objectsList:
+ nodes:
+ #api_version: v1
+ api_endpoint: ""
+ pods:
+ api_endpoint: ""
+ namespaces:
+ api_endpoint: ""
+ services:
+ api_endpoint: ""
+ events:
+ api_endpoint: ""
+ daemon_sets:
+ api_endpoint: apis/apps
+ replica_sets:
+ api_endpoint: apis/apps
+ deployments:
+ api_endpoint: apis/apps
+ stateful_sets:
+ api_endpoint: apis/apps
+ jobs:
+ api_endpoint: apis/batch
+ cron_jobs:
+ api_endpoint: apis/batch
+ endpoint_slices:
+ api_endpoint: apis/discovery.k8s.io
diff --git a/charts/mgmt-agent/.helmignore b/charts/mgmt-agent/.helmignore
new file mode 100644
index 00000000..a23e8f3a
--- /dev/null
+++ b/charts/mgmt-agent/.helmignore
@@ -0,0 +1,26 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/logan/helm-chart/Chart.yaml b/charts/mgmt-agent/Chart.yaml
similarity index 69%
rename from logan/helm-chart/Chart.yaml
rename to charts/mgmt-agent/Chart.yaml
index d7e9f64f..b6ba1397 100644
--- a/logan/helm-chart/Chart.yaml
+++ b/charts/mgmt-agent/Chart.yaml
@@ -1,6 +1,9 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
apiVersion: v2
-name: oci-la-fluentd
-description: Helm chart for collecting Kubernetes logs and objects using Fluentd into OCI Logging Analytics.
+name: oci-onm-mgmt-agent
+description: A Helm chart for collecting Kubernetes Metrics using OCI Management Agent into OCI Monitoring.
# A chart can be either an 'application' or a 'library' chart.
#
@@ -15,11 +18,16 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 2.0.1
+version: 3.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-# appVersion here reflects OCI Logging Analytics Fluentd Output Plugin version
-appVersion: "2.0.3"
+appVersion: "1.16.0"
+
+dependencies:
+- name: oci-onm-common
+ version: "3.0.0"
+ repository: "file://../common"
+ condition: oci-onm-common.enabled
diff --git a/charts/mgmt-agent/README.md b/charts/mgmt-agent/README.md
new file mode 100644
index 00000000..bb1e6ee0
--- /dev/null
+++ b/charts/mgmt-agent/README.md
@@ -0,0 +1,36 @@
+# oci-onm-mgmt-agent
+
+![Version: 3.0.0](https://img.shields.io/badge/Version-3.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square)
+
+A Helm chart for collecting Kubernetes Metrics using OCI Management Agent into OCI Monitoring.
+
+## Requirements
+
+| Repository | Name | Version |
+|------------|------|---------|
+| file://../common | oci-onm-common | 3.0.0 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| deployMetricServer | bool | `true` | By default, metric server will be deployed and used by Management Agent to collect metrics. You can set this to false if you already have metric server installed on your cluster |
+| global.namespace | string | `"oci-onm"` | Kubernetes Namespace in which the resources to be created. Set oci-kubernetes-monitoring-common:createNamespace set to true, if the namespace doesn't exist. |
+| global.resourceNamePrefix | string | `"oci-onm"` | Prefix to be attached to resources created through this chart. Not all resources may have this prefix. |
+| kubernetesCluster.compartmentId | string | `nil` | OCI Compartment Id to push Kubernetes Monitoring metrics. If not specified default is same as Agent compartment |
+| kubernetesCluster.name | string | `nil` | Kubernetes cluster name |
+| kubernetesCluster.namespace | string | `"*"` | Kubernetes cluster namespace(s) to monitor. This can be a comma-separated list of namespaces or '*' to monitor all the namespaces |
+| mgmtagent.image.secret | string | `nil` | Image secrets to use for pulling container image (base64 encoded content of ~/.docker/config.json file) |
+| mgmtagent.image.url | string | `nil` | Replace this value with actual docker image URL for Management Agent |
+| mgmtagent.installKey | string | `"resources/input.rsp"` | Copy the downloaded Management Agent Install Key file under root helm directory as resources/input.rsp |
+| mgmtagent.installKeyFileContent | string | `nil` | Provide the base64 encoded content of the Management Agent Install Key file |
+| namespace | string | `"{{ .Values.global.namespace }}"` | Kubernetes namespace to create and install this helm chart in |
+| oci-onm-common.createNamespace | bool | `true` | If createNamespace is set to true, it tries to create the namespace defined in 'namespace' variable. |
+| oci-onm-common.createServiceAccount | bool | `true` | By default, a cluster role, cluster role binding and serviceaccount will be created for the monitoring pods to be able to (readonly) access various objects within the cluster, to support collection of various telemetry data. You may set this to false and provide your own serviceaccount (in the parent chart(s)) which has the necessary cluster role(s) binded to it. Refer, README for the cluster role definition and other details. |
+| oci-onm-common.namespace | string | `"{{ .Values.global.namespace }}"` | Kubernetes Namespace in which the serviceaccount to be created. |
+| oci-onm-common.resourceNamePrefix | string | `"{{ .Values.global.resourceNamePrefix }}"` | Prefix to be attached to resources created through this chart. Not all resources may have this prefix. |
+| oci-onm-common.serviceAccount | string | `"{{ .Values.global.resourceNamePrefix }}"` | Name of the Kubernetes ServiceAccount |
+| serviceAccount | string | `"{{ .Values.global.resourceNamePrefix }}"` | Name of the Kubernetes ServiceAccount |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
diff --git a/charts/mgmt-agent/resources/input.rsp b/charts/mgmt-agent/resources/input.rsp
new file mode 100644
index 00000000..abc612c1
--- /dev/null
+++ b/charts/mgmt-agent/resources/input.rsp
@@ -0,0 +1,2 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
diff --git a/charts/mgmt-agent/resources/mgmtagent_kubernetes_dashboard.json b/charts/mgmt-agent/resources/mgmtagent_kubernetes_dashboard.json
new file mode 100644
index 00000000..fdfd317a
--- /dev/null
+++ b/charts/mgmt-agent/resources/mgmtagent_kubernetes_dashboard.json
@@ -0,0 +1,445 @@
+{
+ "dashboards": [
+ {
+ "dashboardId": "",
+ "displayName": "Kubernetes Monitoring Sample Dashboard",
+ "description": "Sample Dashboard",
+ "compartmentId": "",
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "tiles": [
+ {
+ "displayName": "Node Count",
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "row": 0,
+ "column": 0,
+ "height": 3,
+ "width": 3,
+ "nls": {},
+ "uiConfig": {
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/nodeCount",
+ "internalKey": "OOBSS-management-dashboard-123",
+ "singleValueInfo": {
+ "default": "0",
+ "maxFontSize": 60,
+ "val": "aggregatedDatapoints.value"
+ },
+ "vizType": "singleValue"
+ },
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/nodeCount",
+ "parameters": {
+ "compartmentId": "$(params.compartmentIdParam)",
+ "endTime": "$(params.time.end)",
+ "mql": "nodeCount[auto].grouping().sum()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "state": "DEFAULT",
+ "drilldownConfig": [],
+ "parametersMap": {
+ "compartmentIdParam": "$(dashboard.params.compartmentId)",
+ "time": "$(dashboard.params.time)"
+ }
+ },
+ {
+ "displayName": "Pod Count",
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "row": 0,
+ "column": 3,
+ "height": 3,
+ "width": 3,
+ "nls": {},
+ "uiConfig": {
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/podCount",
+ "internalKey": "OOBSS-management-dashboard-123",
+ "singleValueInfo": {
+ "default": "0",
+ "maxFontSize": 60,
+ "val": "aggregatedDatapoints.value"
+ },
+ "vizType": "singleValue"
+ },
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/podCount",
+ "parameters": {
+ "compartmentId": "$(params.compartmentIdParam)",
+ "endTime": "$(params.time.end)",
+ "mql": "podCount[auto]{nodeName=$(dashboard.params.k8sNodeFilter)}.grouping().sum()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "state": "DEFAULT",
+ "drilldownConfig": [],
+ "parametersMap": {
+ "compartmentIdParam": "$(dashboard.params.compartmentId)",
+ "time": "$(dashboard.params.time)"
+ }
+ },
+ {
+ "displayName": "Pod Distribution",
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "row": 0,
+ "column": 6,
+ "height": 3,
+ "width": 6,
+ "nls": {
+ "NODE": {
+ "key": "Node: "
+ },
+ "PODS": {
+ "key": "Pods: "
+ }
+ },
+ "uiConfig": {
+ "internalKey": "OOBSS-management-dashboard-123",
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/podCount2",
+ "chartInfo": {
+ "colorBy": "dimensions.nodeName",
+ "value": "aggregatedDatapoints.value",
+ "group": "",
+ "series": "dimensions.nodeName"
+ },
+ "jetConfig": {
+ "type": "pie",
+ "timeAxisType": "auto",
+ "styleDefaults": {
+ "pieInnerRadius": 0.7,
+ "dataLabelPosition": "auto",
+ "dataLabelCollision": "fitInBounds"
+ },
+ "selectionMode": "none",
+ "orientation": "vertical",
+ "coordinateSystem": "cartesian",
+ "sorting": "off",
+ "stack": "off",
+ "stackLabel": "off",
+ "dataCursor": "off",
+ "legend": {
+ "rendered": true,
+ "position": "end"
+ },
+ "valueFormats": {
+ "value": {
+ "converterName": "numberConverter",
+ "converterOptions": {
+ "style": "decimal",
+ "decimalFormat": "short",
+ "maximumFractionDigits": 2
+ },
+ "tooltipLabel": "Pods",
+ "tooltipDisplay": "auto"
+ },
+ "series": {
+ "tooltipLabel": "Node",
+ "tooltipDisplay": "auto"
+ },
+ "group": {
+ "tooltipLabel": "",
+ "tooltipDisplay": "off"
+ }
+ }
+ }
+ },
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/podCount2",
+ "parameters": {
+ "compartmentId": "$(params.compartmentIdParam)",
+ "endTime": "$(params.time.end)",
+ "mql": "podCount[auto].groupBy(nodeName).sum()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "state": "DEFAULT",
+ "drilldownConfig": [],
+ "parametersMap": {
+ "compartmentIdParam": "$(dashboard.params.compartmentId)",
+ "time": "$(dashboard.params.time)"
+ }
+ },
+ {
+ "displayName": "Node CPU Usage",
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "row": 3,
+ "column": 0,
+ "height": 3,
+ "width": 6,
+ "nls": {},
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "dimensions.nodeName",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "series": "dimensions.nodeName",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/nodeCpuPercentUsage",
+ "internalKey": "OOBSS-management-dashboard-123",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "stack": "off",
+ "styleDefaults": {
+ "lineWidth": 2
+ },
+ "timeAxisType": "enabled",
+ "type": "lineWithArea",
+ "xAxis": {
+ "viewportMax": "$(context.time.end)",
+ "viewportMin": "$(context.time.start)"
+ },
+ "yAxis": {
+ "title": "Utilization (%)"
+ }
+ }
+ },
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/nodeCpuPercentUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentIdParam)",
+ "endTime": "$(params.time.end)",
+ "mql": "nodeCpuUsage[auto].groupBy(nodeName).mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "state": "DEFAULT",
+ "drilldownConfig": [],
+ "parametersMap": {
+ "compartmentIdParam": "$(dashboard.params.compartmentId)",
+ "time": "$(dashboard.params.time)"
+ }
+ },
+ {
+ "displayName": "Node Memory Usage",
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "row": 3,
+ "column": 6,
+ "height": 3,
+ "width": 6,
+ "nls": {},
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "dimensions.nodeName",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "series": "dimensions.nodeName",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/nodeMemoryPercentUsage",
+ "internalKey": "OOBSS-management-dashboard-123",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "stack": "off",
+ "styleDefaults": {
+ "lineWidth": 2
+ },
+ "timeAxisType": "enabled",
+ "type": "lineWithArea",
+ "xAxis": {
+ "viewportMax": "$(context.time.end)",
+ "viewportMin": "$(context.time.start)"
+ },
+ "yAxis": {}
+ }
+ },
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/nodeMemoryPercentUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentIdParam)",
+ "endTime": "$(params.time.end)",
+ "mql": "nodeMemoryUsage[auto].groupBy(nodeName).mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "state": "DEFAULT",
+ "drilldownConfig": [],
+ "parametersMap": {
+ "compartmentIdParam": "$(dashboard.params.compartmentId)",
+ "time": "$(dashboard.params.time)"
+ }
+ },
+ {
+ "displayName": "Pod CPU Usage",
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "row": 6,
+ "column": 0,
+ "height": 3,
+ "width": 6,
+ "nls": {},
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "dimensions.containerName",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "series": "dimensions.containerName",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/podCpuUsage",
+ "internalKey": "OOBSS-management-dashboard-123",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "stack": "off",
+ "styleDefaults": {
+ "lineWidth": 2
+ },
+ "timeAxisType": "enabled",
+ "type": "lineWithArea",
+ "xAxis": {
+ "viewportMax": "$(context.time.end)",
+ "viewportMin": "$(context.time.start)"
+ },
+ "yAxis": {
+ "title": "nanocores"
+ }
+ }
+ },
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/podCpuUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentIdParam)",
+ "endTime": "$(context.time.end)",
+ "mql": "podCpuUsage[auto].groupBy(containerName).mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "startTime": "$(context.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "state": "DEFAULT",
+ "drilldownConfig": [],
+ "parametersMap": {
+ "compartmentIdParam": "$(dashboard.params.compartmentId)",
+ "time": "$(dashboard.params.time)"
+ }
+ },
+ {
+ "displayName": "Pod Memory Usage",
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "row": 6,
+ "column": 6,
+ "height": 3,
+ "width": 6,
+ "nls": {},
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "dimensions.containerName",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "series": "dimensions.containerName",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/podMemoryUsage",
+ "internalKey": "OOBSS-management-dashboard-123",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "stack": "off",
+ "styleDefaults": {
+ "lineWidth": 2
+ },
+ "timeAxisType": "enabled",
+ "type": "lineWithArea",
+ "xAxis": {
+ "viewportMax": "$(context.time.end)",
+ "viewportMin": "$(context.time.start)"
+ },
+ "yAxis": {
+ "title": "KibiBytes"
+ }
+ }
+ },
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/podMemoryUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentIdParam)",
+ "endTime": "$(params.time.end)",
+ "mql": "podMemoryUsage[auto].groupBy(containerName).mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "state": "DEFAULT",
+ "drilldownConfig": [],
+ "parametersMap": {
+ "compartmentIdParam": "$(dashboard.params.compartmentId)",
+ "time": "$(dashboard.params.time)"
+ }
+ }
+ ],
+ "isOobDashboard": false,
+ "isShowInHome": false,
+ "metadataVersion": "2.0",
+ "isShowDescription": true,
+ "screenImage": "todo: provide value[mandatory]",
+ "nls": {},
+ "uiConfig": {
+ "isFilteringEnabled": false,
+ "isRefreshEnabled": true,
+ "isTimeRangeEnabled": true
+ },
+ "dataConfig": [],
+ "type": "normal",
+ "isFavorite": false,
+ "savedSearches": [],
+ "parametersConfig": [
+ {
+ "displayName": "Compartment",
+ "localStorageKey": "compartmentId",
+ "name": "compartmentId",
+ "parametersMap": {
+ "isActiveCompartment": "true",
+ "isStoreInLocalStorage": false
+ },
+ "savedSearchId": "OOBSS-management-dashboard-compartment-filter",
+ "state": "DEFAULT"
+ },
+ {
+ "name": "time",
+ "src": "$(context.time)"
+ }
+ ],
+ "drilldownConfig": [],
+ "freeformTags": {},
+ "definedTags": {}
+ }
+ ]
+}
diff --git a/charts/mgmt-agent/resources/sample_mgmtagent_kubernetes_dashboard.png b/charts/mgmt-agent/resources/sample_mgmtagent_kubernetes_dashboard.png
new file mode 100644
index 00000000..5f1c7a55
Binary files /dev/null and b/charts/mgmt-agent/resources/sample_mgmtagent_kubernetes_dashboard.png differ
diff --git a/charts/mgmt-agent/templates/_helpers.tpl b/charts/mgmt-agent/templates/_helpers.tpl
new file mode 100644
index 00000000..3fe0694e
--- /dev/null
+++ b/charts/mgmt-agent/templates/_helpers.tpl
@@ -0,0 +1,43 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# tpl render function
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
+
+# Prefix for all resources created using this chart.
+{{- define "mgmt-agent.resourceNamePrefix" -}}
+ {{- if .Values.resourceNamePrefix -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.resourceNamePrefix "context" .) | trunc 63 | trimSuffix "-" }}
+ {{- else -}}
+ {{- "oci-onm" -}}
+ {{- end -}}
+{{- end -}}
+
+# namespace
+{{- define "mgmt-agent.namespace" -}}
+ {{- if .Values.namespace -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.namespace "context" .) }}
+ {{- else -}}
+ {{- "oci-onm" -}}
+ {{- end -}}
+{{- end -}}
+
+#serviceAccount
+{{- define "mgmt-agent.serviceAccount" -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.serviceAccount "context" .) }}
+{{- end -}}
+
+#kubernetesClusterName
+{{- define "mgmt-agent.kubernetesClusterName" -}}
+ {{- if .Values.kubernetesCluster.name -}}
+ {{ include "common.tplvalues.render" ( dict "value" .Values.kubernetesCluster.name "context" .) }}
+ {{- else -}}
+ {{- "UNDEFINED" -}}
+ {{- end -}}
+{{- end -}}
diff --git a/charts/mgmt-agent/templates/metric_server.yaml b/charts/mgmt-agent/templates/metric_server.yaml
new file mode 100644
index 00000000..0cb8db16
--- /dev/null
+++ b/charts/mgmt-agent/templates/metric_server.yaml
@@ -0,0 +1,187 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+{{- if .Values.deployMetricServer }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ rbac.authorization.k8s.io/aggregate-to-admin: "true"
+ rbac.authorization.k8s.io/aggregate-to-edit: "true"
+ rbac.authorization.k8s.io/aggregate-to-view: "true"
+ name: system:aggregated-metrics-reader
+rules:
+- apiGroups:
+ - metrics.k8s.io
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - nodes/metrics
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server-auth-reader
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server:system:auth-delegator
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: system:metrics-server
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+ name: metrics-server
+ namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ ports:
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: https
+ selector:
+ k8s-app: metrics-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: metrics-server
+ namespace: kube-system
+spec:
+ selector:
+ matchLabels:
+ k8s-app: metrics-server
+ strategy:
+ rollingUpdate:
+ maxUnavailable: 0
+ template:
+ metadata:
+ labels:
+ k8s-app: metrics-server
+ spec:
+ containers:
+ - args:
+ - --cert-dir=/tmp
+ - --secure-port=4443
+ - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+ - --kubelet-use-node-status-port
+ - --metric-resolution=15s
+ image: registry.k8s.io/metrics-server/metrics-server:v0.6.3
+ imagePullPolicy: IfNotPresent
+ name: metrics-server
+ ports:
+ - containerPort: 4443
+ name: https
+ protocol: TCP
+ resources:
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ runAsNonRoot: true
+ runAsUser: 1000
+ volumeMounts:
+ - mountPath: /tmp
+ name: tmp-dir
+ nodeSelector:
+ kubernetes.io/os: linux
+ priorityClassName: system-cluster-critical
+ serviceAccountName: metrics-server
+ volumes:
+ - emptyDir: {}
+ name: tmp-dir
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+ labels:
+ k8s-app: metrics-server
+ name: v1beta1.metrics.k8s.io
+spec:
+ group: metrics.k8s.io
+ groupPriorityMinimum: 100
+ insecureSkipTLSVerify: true
+ service:
+ name: metrics-server
+ namespace: kube-system
+ version: v1beta1
+ versionPriority: 100
+{{- end }}
diff --git a/charts/mgmt-agent/templates/metrics-configmap.yaml b/charts/mgmt-agent/templates/metrics-configmap.yaml
new file mode 100644
index 00000000..258b53cd
--- /dev/null
+++ b/charts/mgmt-agent/templates/metrics-configmap.yaml
@@ -0,0 +1,16 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "mgmt-agent.resourceNamePrefix" . }}-metrics
+ namespace: {{ include "mgmt-agent.namespace" . }}
+data:
+ monitoring.properties: |
+ # compartmentId to push Monitoring metrics
+ compartmentId={{ .Values.kubernetesCluster.compartmentId }}
+ # Kubernetes cluster name
+ clusterName={{ include "mgmt-agent.kubernetesClusterName" . }}
+ # Kubernetes Namespace to monitor
+ kubernetesNamespace={{ .Values.kubernetesCluster.namespace }}
diff --git a/charts/mgmt-agent/templates/mgmt-agent-headless-service.yaml b/charts/mgmt-agent/templates/mgmt-agent-headless-service.yaml
new file mode 100644
index 00000000..9dd63879
--- /dev/null
+++ b/charts/mgmt-agent/templates/mgmt-agent-headless-service.yaml
@@ -0,0 +1,14 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+ namespace: {{ include "mgmt-agent.namespace" . }}
+ labels:
+ app: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+spec:
+ clusterIP: None
+ selector:
+ app: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
diff --git a/charts/mgmt-agent/templates/mgmt-agent-secrets.yaml b/charts/mgmt-agent/templates/mgmt-agent-secrets.yaml
new file mode 100644
index 00000000..b8545b9d
--- /dev/null
+++ b/charts/mgmt-agent/templates/mgmt-agent-secrets.yaml
@@ -0,0 +1,30 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent-rsp
+ namespace: {{ include "mgmt-agent.namespace" . }}
+type: Opaque
+data:
+ input.rsp: |
+ {{- if .Values.mgmtagent.installKeyFileContent }}
+ {{ .Values.mgmtagent.installKeyFileContent }}
+ {{ else }}
+ {{ .Files.Get .Values.mgmtagent.installKey | b64enc }}
+ {{- end }}
+
+---
+{{- if .Values.mgmtagent.image.secret }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent-container-registry-key
+ namespace: {{ include "mgmt-agent.namespace" . }}
+type: kubernetes.io/dockerconfigjson
+data:
+ .dockerconfigjson: |
+ {{ .Values.mgmtagent.image.secret }}
+{{- end }}
diff --git a/charts/mgmt-agent/templates/mgmt-agent-statefulset.yaml b/charts/mgmt-agent/templates/mgmt-agent-statefulset.yaml
new file mode 100644
index 00000000..239c5f9b
--- /dev/null
+++ b/charts/mgmt-agent/templates/mgmt-agent-statefulset.yaml
@@ -0,0 +1,72 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+ namespace: {{ include "mgmt-agent.namespace" . }}
+ labels:
+ app: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+spec:
+ serviceName: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+ replicas: 1
+ selector:
+ matchLabels:
+ app: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+ template:
+ metadata:
+ labels:
+ app: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+ spec:
+ securityContext:
+ runAsUser: {{ default 0 .Values.deployment.security.runAsUser }}
+ runAsGroup: {{ default 0 .Values.deployment.security.runAsGroup }}
+ fsGroup: {{ default 0 .Values.deployment.security.fsGroup }}
+ serviceAccountName: {{ include "mgmt-agent.serviceAccount" . }}
+ imagePullSecrets:
+ - name: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent-container-registry-key
+ restartPolicy: Always
+ containers:
+ - name: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent
+ image: {{ .Values.mgmtagent.image.url }}
+ resources:
+ requests:
+ cpu: {{ .Values.deployment.resource.request.cpuCore }}
+ memory: {{ .Values.deployment.resource.request.memory }}
+ limits:
+ cpu: {{ .Values.deployment.resource.limit.cpuCore }}
+ memory: {{ .Values.deployment.resource.limit.memory }}
+ volumeMounts:
+ - name: mgmtagent-secret
+ mountPath: /opt/oracle/mgmtagent_secret
+ readOnly: true
+ - name: mgmtagent-pvc
+ mountPath: /opt/oracle
+ - name: mgmtagent-config
+ mountPath: /opt/oracle/mgmtagent_config
+ - mountPath: /tmp
+ name: tmp
+ securityContext:
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
+ volumes:
+ - name: mgmtagent-secret
+ secret:
+ secretName: {{ include "mgmt-agent.resourceNamePrefix" . }}-mgmt-agent-rsp
+ - name: mgmtagent-config
+ configMap:
+ name: {{ include "mgmt-agent.resourceNamePrefix" . }}-metrics
+ - emptyDir: {}
+ name: tmp
+ volumeClaimTemplates:
+ - metadata:
+ name: mgmtagent-pvc
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ {{- if .Values.deployment.storageClass }}
+ storageClassName: {{ .Values.deployment.storageClass }}
+ {{- end }}
+ resources:
+ requests:
+ storage: {{ .Values.deployment.resource.request.storage }}
diff --git a/charts/mgmt-agent/values.schema.json b/charts/mgmt-agent/values.schema.json
new file mode 100644
index 00000000..701acfad
--- /dev/null
+++ b/charts/mgmt-agent/values.schema.json
@@ -0,0 +1,217 @@
+{
+ "$schema": "https://json-schema.org/draft-07/schema#",
+ "properties":
+ {
+ "global":
+ {
+ "properties":
+ {
+ "namespace":
+ {
+ "type": "string"
+ },
+ "resourceNamePrefix":
+ {
+ "type": "string"
+ }
+ },
+ "required":
+ [
+ "namespace",
+ "resourceNamePrefix"
+ ]
+ },
+ "mgmtagent":
+ {
+ "properties":
+ {
+ "installKeyFileContent":
+ {
+ "type":
+ [
+ "string",
+ "null"
+ ]
+ },
+ "installKey":
+ {
+ "type":
+ [
+ "string",
+ "null"
+ ]
+ },
+ "image":
+ {
+ "properties":
+ {
+ "url":
+ {
+ "type": "string"
+ },
+ "secret":
+ {
+ "type":
+ [
+ "string",
+ "null"
+ ]
+ }
+ },
+ "required":
+ [
+ "url"
+ ]
+ }
+ },
+ "anyOf":
+ [
+ {
+ "properties":
+ {
+ "installKeyFileContent":
+ {
+ "minLength": 0
+ }
+ },
+ "required":
+ [
+ "installKey"
+ ]
+ },
+ {
+ "properties":
+ {
+ "installKey":
+ {
+ "minLength": 0
+ }
+ },
+ "required":
+ [
+ "installKeyFileContent"
+ ]
+ }
+ ],
+ "type": "object"
+ },
+ "namespace":
+ {
+ "type": "string"
+ },
+ "deployMetricServer":
+ {
+ "type": "boolean"
+ },
+ "kubernetesCluster":
+ {
+ "properties":
+ {
+ "name":
+ {
+ "type": "string"
+ },
+ "namespace":
+ {
+ "type": "string"
+ }
+ },
+ "required":
+ [
+ "name",
+ "namespace"
+ ]
+ },
+ "deployment":
+ {
+ "properties":
+ {
+ "security":
+ {
+ "properties":
+ {
+ "runAsUser":
+ {
+ "type":
+ [
+ "integer",
+ "null"
+ ]
+ },
+ "runAsGroup":
+ {
+ "type":
+ [
+ "integer",
+ "null"
+ ]
+ },
+ "fsGroup":
+ {
+ "type":
+ [
+ "integer",
+ "null"
+ ]
+ }
+ }
+ },
+ "resource":
+ {
+ "properties":
+ {
+ "request":
+ {
+ "properties":
+ {
+ "cpuCore":
+ {
+ "type": "string"
+ },
+ "memory":
+ {
+ "type": "string"
+ },
+ "storage":
+ {
+ "type": "string"
+ }
+ },
+ "required":
+ [
+ "cpuCore",
+ "memory",
+ "storage"
+ ]
+ },
+ "limit":
+ {
+ "properties":
+ {
+ "cpuCore":
+ {
+ "type": "string"
+ },
+ "memory":
+ {
+ "type": "string"
+ }
+ },
+ "required":
+ [
+ "cpuCore",
+ "memory"
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "required":
+ [
+ "namespace"
+ ],
+ "title": "Values",
+ "type": "object"
+}
\ No newline at end of file
diff --git a/charts/mgmt-agent/values.yaml b/charts/mgmt-agent/values.yaml
new file mode 100644
index 00000000..56b5c372
--- /dev/null
+++ b/charts/mgmt-agent/values.yaml
@@ -0,0 +1,81 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+global:
+  # -- Kubernetes Namespace in which the resources are to be created. Set oci-onm-common:createNamespace to true, if the namespace doesn't exist.
+ namespace: oci-onm
+ # -- Prefix to be attached to resources created through this chart. Not all resources may have this prefix.
+ resourceNamePrefix: oci-onm
+
+oci-onm-common:
+  # -- By default, a cluster role, cluster role binding and serviceaccount will be created for the monitoring pods to be able to (readonly) access various objects within the cluster, to support collection of various telemetry data. You may set this to false and provide your own serviceaccount (in the parent chart(s)) which has the necessary cluster role(s) bound to it. Refer to the README for the cluster role definition and other details.
+ createServiceAccount: true
+ # -- If createNamespace is set to true, it tries to create the namespace defined in 'namespace' variable.
+ createNamespace: true
+  # -- Kubernetes Namespace in which the serviceaccount is to be created.
+ namespace: "{{ .Values.global.namespace }}"
+ # -- Prefix to be attached to resources created through this chart. Not all resources may have this prefix.
+ resourceNamePrefix: "{{ .Values.global.resourceNamePrefix }}"
+ # -- Name of the Kubernetes ServiceAccount
+ serviceAccount: "{{ .Values.global.resourceNamePrefix }}"
+
+mgmtagent:
+  # Provide either installKeyFileContent or installKey as an install key. If both are provided, installKeyFileContent takes precedence.
+
+ # -- Provide the base64 encoded content of the Management Agent Install Key file
+ installKeyFileContent:
+ # -- Copy the downloaded Management Agent Install Key file under root helm directory as resources/input.rsp
+ installKey: resources/input.rsp
+ # Follow steps documented at https://github.com/oracle/docker-images/tree/main/OracleManagementAgent to build docker image.
+ image:
+ # -- Replace this value with actual docker image URL for Management Agent
+ url:
+ # -- Image secrets to use for pulling container image (base64 encoded content of ~/.docker/config.json file)
+ secret:
+
+# -- Kubernetes namespace to create and install this helm chart in
+namespace: "{{ .Values.global.namespace }}"
+
+# -- Name of the Kubernetes ServiceAccount
+serviceAccount: "{{ .Values.global.resourceNamePrefix }}"
+
+# -- By default, metric server will be deployed and used by Management Agent to collect metrics. You can set this to false if you already have metric server installed on your cluster
+deployMetricServer: true
+
+# Kubernetes Cluster details to monitor
+kubernetesCluster:
+ # -- OCI Compartment Id to push Kubernetes Monitoring metrics. If not specified default is same as Agent compartment
+ compartmentId:
+ # -- Kubernetes cluster name
+ name:
+ # -- Kubernetes cluster namespace(s) to monitor. This can be a comma-separated list of namespaces or '*' to monitor all the namespaces
+ namespace: '*'
+
+deployment:
+ security:
+ # Processes in the Container will run as user ID 1000, replace it with a different value if desired
+ runAsUser: 1000
+ # Processes in the Container will use group ID 2000, replace it with a different value if desired
+ runAsGroup: 2000
+ # Files created in the Container will use group ID 2000, replace it with a different value if desired
+ fsGroup: 2000
+
+ # Provide the agent resources as per Kubernetes resource quantity
+ resource:
+ # Provide the minimum required resources
+ request:
+ # specify the cpu cores
+ cpuCore: 200m
+ # specify the memory
+ memory: 500Mi
+ # specify the storage capacity for StatefulSet's PVC
+ storage: 2Gi
+ # Provide the maximum limit for resources
+ limit:
+ # specify the cpu cores
+ cpuCore: 500m
+ # specify the memory
+ memory: 1Gi
+
+ # Provide the storage class for StatefulSet's PVC. If not provided then the Cluster's default storage class will be used.
+ storageClass:
diff --git a/charts/oci-onm/Chart.yaml b/charts/oci-onm/Chart.yaml
new file mode 100644
index 00000000..4d692282
--- /dev/null
+++ b/charts/oci-onm/Chart.yaml
@@ -0,0 +1,41 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+apiVersion: v2
+name: oci-onm
+description: Helm chart for collecting Kubernetes logs & objects and metrics using Fluentd and ManagementAgent into OCI Logging Analytics and OCI Monitoring respectively.
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 3.0.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "3.0.0"
+
+dependencies:
+- name: oci-onm-common
+ version: "3.0.0"
+ repository: "file://../common"
+ condition: oci-onm-common.enabled
+- name: oci-onm-logan
+ version: "3.0.0"
+ repository: "file://../logan"
+ condition: oci-onm-logan.enabled
+- name: oci-onm-mgmt-agent
+ version: "3.0.0"
+ repository: "file://../mgmt-agent"
+ condition: oci-onm-mgmt-agent.enabled
diff --git a/charts/oci-onm/README.md b/charts/oci-onm/README.md
new file mode 100644
index 00000000..8184698c
--- /dev/null
+++ b/charts/oci-onm/README.md
@@ -0,0 +1,46 @@
+# oci-onm
+
+![Version: 3.0.0](https://img.shields.io/badge/Version-3.0.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square)
+
+Helm chart for collecting Kubernetes logs & objects and metrics using Fluentd and ManagementAgent into OCI Logging Analytics and OCI Monitoring respectively.
+
+## Requirements
+
+| Repository | Name | Version |
+|------------|------|---------|
+| file://../common | oci-onm-common | 3.0.0 |
+| file://../logan | oci-onm-logan | 3.0.0 |
+| file://../mgmt-agent | oci-onm-mgmt-agent | 3.0.0 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| global.kubernetesClusterID | string | `nil` | OKE OCID for an OKE cluster or an unique ID for other Kubernetes clusters. |
+| global.kubernetesClusterName | string | `nil` | Provide a unique name for the cluster. This would help uniquely identify the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively, when monitoring multiple clusters. |
+| global.namespace | string | `"oci-onm"` | Kubernetes Namespace in which the resources are to be created. Set oci-kubernetes-monitoring-common:createNamespace to true, if the namespace doesn't exist. |
+| global.resourceNamePrefix | string | `"oci-onm"` | Prefix to be attached to resources created through this chart. Not all resources may have this prefix. |
+| oci-onm-common.createNamespace | bool | `true` | If createNamespace is set to true, it tries to create the namespace defined in 'namespace' variable. |
+| oci-onm-common.createServiceAccount | bool | `true` | By default, a cluster role, cluster role binding and serviceaccount will be created for the monitoring pods to be able to (readonly) access various objects within the cluster, to support collection of various telemetry data. You may set this to false and provide your own serviceaccount which has the necessary cluster role(s) bound to it. Refer to the README for the cluster role definition and other details. |
+| oci-onm-common.namespace | string | `"{{ .Values.global.namespace }}"` | Kubernetes Namespace in which the serviceaccount to be created. |
+| oci-onm-common.resourceNamePrefix | string | `"{{ .Values.global.resourceNamePrefix }}"` | Prefix to be attached to resources created through this chart. Not all resources may have this prefix. |
+| oci-onm-common.serviceAccount | string | `"{{ .Values.global.resourceNamePrefix }}"` | Name of the Kubernetes ServiceAccount |
+| oci-onm-logan.image.url | string | `"container-registry.oracle.com/oci_observability_management/oci-la-fluentd-collector:1.0.0"` | |
+| oci-onm-logan.kubernetesClusterID | string | `"{{ .Values.global.kubernetesClusterID }}"` | |
+| oci-onm-logan.kubernetesClusterName | string | `"{{ .Values.global.kubernetesClusterName }}"` | |
+| oci-onm-logan.namespace | string | `"{{ .Values.global.namespace }}"` | |
+| oci-onm-logan.oci-onm-common.enabled | bool | `false` | |
+| oci-onm-logan.ociLALogGroupID | string | `nil` | |
+| oci-onm-logan.ociLANamespace | string | `nil` | |
+| oci-onm-logan.serviceAccount | string | `"{{ .Values.global.resourceNamePrefix }}"` | |
+| oci-onm-mgmt-agent.kubernetesCluster.name | string | `"{{ .Values.global.kubernetesClusterName }}"` | |
+| oci-onm-mgmt-agent.mgmtagent.image.secret | string | `nil` | |
+| oci-onm-mgmt-agent.mgmtagent.image.url | string | `nil` | |
+| oci-onm-mgmt-agent.mgmtagent.installKey | string | `"resources/input.rsp"` | |
+| oci-onm-mgmt-agent.mgmtagent.installKeyFileContent | string | `nil` | |
+| oci-onm-mgmt-agent.namespace | string | `"{{ .Values.global.namespace }}"` | |
+| oci-onm-mgmt-agent.oci-onm-common.enabled | bool | `false` | |
+| oci-onm-mgmt-agent.serviceAccount | string | `"{{ .Values.global.resourceNamePrefix }}"` | |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
diff --git a/charts/oci-onm/templates/_helpers.tpl b/charts/oci-onm/templates/_helpers.tpl
new file mode 100644
index 00000000..9600d4db
--- /dev/null
+++ b/charts/oci-onm/templates/_helpers.tpl
@@ -0,0 +1,11 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# tpl render function
+{{- define "common.tplvalues.render" -}}
+ {{- if typeIs "string" .value }}
+ {{- tpl .value .context }}
+ {{- else }}
+ {{- tpl (.value | toYaml) .context }}
+ {{- end }}
+{{- end -}}
diff --git a/charts/oci-onm/values.yaml b/charts/oci-onm/values.yaml
new file mode 100644
index 00000000..dcd56c10
--- /dev/null
+++ b/charts/oci-onm/values.yaml
@@ -0,0 +1,55 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+global:
+ # -- Kubernetes Namespace in which the resources are to be created. Set oci-kubernetes-monitoring-common:createNamespace to true, if the namespace doesn't exist.
+ namespace: oci-onm
+ # -- Prefix to be attached to resources created through this chart. Not all resources may have this prefix.
+ resourceNamePrefix: oci-onm
+ # -- OKE OCID for an OKE cluster or an unique ID for other Kubernetes clusters.
+ kubernetesClusterID:
+ # -- Provide a unique name for the cluster. This would help uniquely identify the logs and metrics data at OCI Logging Analytics and OCI Monitoring respectively, when monitoring multiple clusters.
+ kubernetesClusterName:
+
+oci-onm-common:
+ # -- By default, a cluster role, cluster role binding and serviceaccount will be created for the monitoring pods to be able to (readonly) access various objects within the cluster, to support collection of various telemetry data. You may set this to false and provide your own serviceaccount which has the necessary cluster role(s) bound to it. Refer to the README for the cluster role definition and other details.
+ createServiceAccount: true
+ # -- If createNamespace is set to true, it tries to create the namespace defined in 'namespace' variable.
+ createNamespace: true
+ # -- Kubernetes Namespace in which the serviceaccount to be created.
+ namespace: "{{ .Values.global.namespace }}"
+ # -- Prefix to be attached to resources created through this chart. Not all resources may have this prefix.
+ resourceNamePrefix: "{{ .Values.global.resourceNamePrefix }}"
+ # -- Name of the Kubernetes ServiceAccount
+ serviceAccount: "{{ .Values.global.resourceNamePrefix }}"
+
+oci-onm-logan:
+ oci-onm-common:
+ enabled: false
+ namespace: "{{ .Values.global.namespace }}"
+ serviceAccount: "{{ .Values.global.resourceNamePrefix }}"
+ kubernetesClusterID: "{{ .Values.global.kubernetesClusterID }}"
+ kubernetesClusterName: "{{ .Values.global.kubernetesClusterName }}"
+ image:
+ url: container-registry.oracle.com/oci_observability_management/oci-la-fluentd-collector:1.0.0
+ # Go to OCI Logging Analytics Administration, click Service Details, and note the namespace value.
+ ociLANamespace:
+ # OCI Logging Analytics Default Log Group OCID
+ ociLALogGroupID:
+
+oci-onm-mgmt-agent:
+ oci-onm-common:
+ enabled: false
+ namespace: "{{ .Values.global.namespace }}"
+ serviceAccount: "{{ .Values.global.resourceNamePrefix }}"
+ kubernetesCluster:
+ name: "{{ .Values.global.kubernetesClusterName }}"
+ mgmtagent:
+ # Provide the base64 encoded content of the Management Agent Install Key file
+ installKeyFileContent:
+ # Follow steps documented at https://github.com/oracle/docker-images/tree/main/OracleManagementAgent to build docker image.
+ image:
+ # Replace this value with actual docker image URL for Management Agent
+ url: container-registry.oracle.com/oci_observability_management/oci-management-agent:1.0.0
+ # Image secrets to use for pulling container image (base64 encoded content of ~/.docker/config.json file)
+ secret:
\ No newline at end of file
diff --git a/docs/FAQ.md b/docs/FAQ.md
new file mode 100644
index 00000000..9ed21f8e
--- /dev/null
+++ b/docs/FAQ.md
@@ -0,0 +1,198 @@
+## FAQ
+
+### Can I use kubectl to deploy the solution?
+
+Helm is the recommended method of deployment. kubectl based deployment can be done by generating individual templates using helm. Refer [this](README.md#kubectl) for details.
+
+### Can I use my own ServiceAccount ?
+
+**Note**: This is supported only through the helm chart based deployment.
+
+By default, a cluster role, cluster role binding and serviceaccount will be created for the Fluentd and Management Agent pods to access (readonly) various Kubernetes Objects within the cluster for supporting logs, objects and metrics collection. However, if you want to use your own serviceaccount, you can do the same by setting the "oci-onm-common.createServiceAccount" variable to false and providing your own serviceaccount in the "oci-onm-common.serviceAccount" variable. Ensure that the serviceaccount is in the same namespace as the namespace used for the whole deployment. The namespace for the whole deployment can be set using the "oci-onm-common.namespace" variable, whose default value is "oci-onm".
+
+The serviceaccount must be bound to a cluster role defined in your cluster, which allows access to various objects metadata. The following sample is a recommended minimalistic role definition as of chart version 3.0.0.
+
+```
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: oci-onm
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+ - nonResourceURLs: ["/metrics"]
+ verbs: ["get"]
+ - apiGroups:
+ - apps
+ - batch
+ - discovery.k8s.io
+ - metrics.k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+```
+
+Once you have the cluster role defined, to bind the cluster role to your serviceaccount use the following cluster role binding definition.
+
+```
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: oci-onm
+roleRef:
+ kind: ClusterRole
+ name: oci-onm
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+ - kind: ServiceAccount
+ name:
+ namespace:
+```
+
+### How to set encoding for logs ?
+
+**Note**: This is supported only through the helm chart based deployment.
+
+By default, the Fluentd tail plugin that is being used to collect various logs has default encoding set to ASCII-8BIT. To override the default encoding, use one of the following approaches.
+
+#### Global level
+
+Set value for encoding under fluentd:tailPlugin section of values.yaml, which applies to all the logs being collected from the cluster.
+
+```
+..
+..
+oci-onm-logan:
+ ..
+ ..
+ fluentd:
+ ...
+ ...
+ tailPlugin:
+ ...
+ ...
+ encoding:
+```
+
+#### Specific log type level
+
+The encoding can be set at individual log types like kubernetesSystem, linuxSystem, genericContainerLogs, which applies to all the logs under the specific log type.
+
+```
+..
+..
+oci-onm-logan:
+ ..
+ ..
+ fluentd:
+ ...
+ ...
+ kubernetesSystem:
+ ...
+ ...
+ encoding:
+```
+
+```
+..
+..
+oci-onm-logan:
+ ..
+ ..
+ fluentd:
+ ...
+ ...
+ genericContainerLogs:
+ ...
+ ...
+ encoding:
+```
+
+#### Specific log level
+
+The encoding can be set at individual log level too, which takes precedence over all others.
+
+```
+..
+..
+oci-onm-logan:
+ ..
+ ..
+ fluentd:
+ ...
+ ...
+ kubernetesSystem:
+ ...
+ ...
+ logs:
+ kube-proxy:
+ encoding:
+```
+
+```
+..
+..
+oci-onm-logan:
+ ..
+ ..
+ fluentd:
+ ...
+ ...
+ customLogs:
+ custom-log1:
+ ...
+ ...
+ encoding:
+```
+
+### How to use Configfile based AuthZ (User Principal) instead of default AuthZ (Instance Principal) ?
+
+**Note**: This is supported only through the helm chart based deployment.
+
+The default AuthZ configuration for connecting to OCI Services from the monitoring pods running in the Kubernetes clusters is `InstancePrincipal` and it is the recommended approach for OKE. If you are trying to monitor Kubernetes clusters other than OKE, you need to use `config` file based AuthZ instead.
+
+First you need to have an OCI local user (preferably a dedicated user created only for this use-case so that you can restrict the policies accordingly) and OCI user group. Then you need to generate API Signing key and policies.
+
+ * Refer [OCI API Signing Key](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/apisigningkey.htm) for instructions on how to generate API Signing key for a given user.
+ * Refer [this](README.md#pre-requisites) for creating required policies.
+
+#### Helm configuration
+
+Modify your override_values.yaml to add the following.
+
+```
+...
+...
+oci-onm-logan:
+ ...
+ ...
+ authtype: config
+ ## -- OCI API Key Based authentication details. Required when authtype set to config
+ oci:
+ # -- Path to the OCI API config file
+ path: /var/opt/.oci
+ # -- Config file name
+ file: config
+ configFiles:
+ config: |-
+ # Replace each of the below fields with actual values.
+ [DEFAULT]
+ user=
+ fingerprint=
+ key_file=/var/opt/.oci/private.pem
+ tenancy=
+ region=
+ private.pem: |-
+ # -----BEGIN RSA PRIVATE KEY-----
+ # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ # -----END RSA PRIVATE KEY-----
+```
diff --git a/docs/custom-images.md b/docs/custom-images.md
new file mode 100644
index 00000000..bf5da24e
--- /dev/null
+++ b/docs/custom-images.md
@@ -0,0 +1,25 @@
+### Container Images
+
+By default, pre-built images by Oracle are used.
+
+#### Pre-built images
+
+* [Fluentd Container Image](https://container-registry.oracle.com/ords/f?p=113:4:13515970073310:::4:P4_REPOSITORY,AI_REPOSITORY,AI_REPOSITORY_NAME,P4_REPOSITORY_NAME,P4_EULA_ID,P4_BUSINESS_AREA_ID:1843,1843,OCI%20Logging%20Analytics%20Fluentd%20based%20Collector,OCI%20Logging%20Analytics%20Fluentd%20based%20Collector,1,0&cs=3UtJ-CmXRZ5iKQ-QrQfja1Mxp3EIiFQ7TwBty97eqA8LmTyZtsiaFZgLmGu-qD28SwH3RIUZVXxYevRBNBR5yng)
+* [Management Agent Container Image](https://container-registry.oracle.com/ords/f?p=113:4:13515970073310:::4:P4_REPOSITORY,AI_REPOSITORY,AI_REPOSITORY_NAME,P4_REPOSITORY_NAME,P4_EULA_ID,P4_BUSINESS_AREA_ID:2004,2004,OCI%20Management%20Agent%20Container%20Image,OCI%20Management%20Agent%20Container%20Image,1,0&cs=35eEP-Hh_4zhB7KLZ1uShwA7SEd5xmbYo-gwkV-TJaxhVB25CIxgQN7EfUbBlUcZQHiX-peQRtm7MAGxO-hEjTA)
+
+#### Building images
+
+##### Fluentd Container Image
+
+- Download all the files from the below mentioned dir into a local machine having access to internet and docker installed.
+ - [OL8](logan/docker-images/v1.0/oraclelinux/8/)
+- Run the following command to build the image.
+ - `docker build -t oci-la-fluentd-collector-custom -f Dockerfile .`
+- The docker image built from the above step, can either be pushed to Docker Hub or OCI Container Registry (OCIR) or to a Local Docker Registry depending on the requirements.
+ - [How to push the image to Docker Hub](https://docs.docker.com/docker-hub/repos/#pushing-a-docker-container-image-to-docker-hub)
+ - [How to push the image to OCIR](https://www.oracle.com/webfolder/technetwork/tutorials/obe/oci/registry/index.html).
+ - [How to push the image to Local Registry](https://docs.docker.com/registry/deploying/).
+
+##### Management Agent Container Image
+Instructions to build the container image for Management Agent are available in the Oracle's Docker Images repository on [Github](https://github.com/oracle/docker-images/tree/main/OracleManagementAgent)
+
diff --git a/docs/custom-logs.md b/docs/custom-logs.md
new file mode 100644
index 00000000..44bb1606
--- /dev/null
+++ b/docs/custom-logs.md
@@ -0,0 +1,135 @@
+## Custom Logs Configuration
+
+### How to use custom logSource (oci_la_log_source_name) and/or other custom configuration for Pod/Container Logs collected through "Kubernetes Container Generic Logs" logSource ?
+
+A generic source with time only parser is defined/configured for collecting all application pod logs from /var/log/containers/ out of the box.
+This is to ensure that all the logs generated by all pods are collected and pushed to Logging Analytics.
+Often you may need to configure a custom logSource for a particular pod log, either by using one of the existing OOB logSources at Logging Analytics or by defining one custom logSource matching to the requirements.
+Once you have defined/identified a logSource for a particular pod log, the following are couple of ways to get those pod logs associated to the logSource.
+
+#### Use Pod Annotations
+
+In this approach, all that you need to do is add the following annotation, `oracle.com/oci_la_log_source_name` (with logSourceName as value) to all the pods of choice.
+This approach works for all the use-cases, except for multi-line plain text formatted logs.
+
+* Refer [this doc](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) to find how to add the annotation through Pod's metadata section. This is the recommended approach as it provides the persistent behavior.
+* Refer [this doc](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#annotate) to find how to add annotation through 'kubectl annotate' command. You may use this approach for quick testing.
+
+**Note** The following configuration parameters are supported for customisation through Pod Annotations in addition to logSource,
+
+* oracle.com/oci_la_log_group_id => to use custom logGroupId (oci_la_log_group_id)
+* oracle.com/oci_la_entity_id => to use custom entityId (oci_la_entity_id)
+
+#### customLogs section in helm chart values.yaml
+
+In this approach, all that you need to do is to provide the necessary configuration information like log file path, logSource, multiline start regular expression (in case of multi-line logs) in the customLogs section of override_values.yaml.
+Using this information the corresponding Fluentd configuration is generated automatically.
+
+**Note** This approach is valid only when using helm chart based installation.
+
+The following example demonstrates a container customLogs configuration
+
+```
+...
+...
+oci-onm-logan:
+ ...
+ ...
+ fluentd:
+ ...
+ ...
+ custom-log1:
+ path: /var/log/containers/custom-1.log
+ ociLALogSourceName: "Custom1 Logs"
+ multilineStartRegExp:
+ isContainerLog: true
+```
+
+The following example demonstrates a non container customLogs configuration
+
+```
+...
+...
+oci-onm-logan:
+ ...
+ ...
+ fluentd:
+ ...
+ ...
+ custom-log2:
+ path: /var/log/custom/custom-2.log
+ ociLALogSourceName: "Custom2 Logs"
+ multilineStartRegExp:
+ isContainerLog: false
+```
+
+#### Use Fluentd conf
+
+In this approach, a new set of Source, Filter sections have to be created in the customFluentdConf section of values.yaml.
+The following example demonstrates a custom fluentd config to tag `/var/log/containers/frontend*.log` with logSource "Guestbook Frontend Logs"
+(*To be added to helm-chart override_values.yaml, under customFluentdConf section*).
+
+```
+...
+...
+oci-onm-logan:
+ ...
+ ...
+ fluentd:
+ ...
+ ...
+ customFluentdConf: |
+
+ @type tail
+ @id in_tail_frontend
+ path_key tailed_path
+ path /var/log/containers/frontend-*.log
+ pos_file /var/log/oci_la_fluentd_outplugin/pos/frontend.logs.pos
+ tag oci.oke.frontend.*
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+
+ {{- if eq $runtime "docker" }}
+ @type json
+ {{- else}}
+ @type cri
+ {{- end }}
+
+
+
+ # Record transformer filter to apply Logging Analytics configuration to each record.
+
+ @type record_transformer
+ enable_ruby true
+
+ oci_la_metadata ${{"{{"}}"Kubernetes Cluster Name": "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", "Kubernetes Cluster ID": "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"{{"}}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_source_name "Guestbook Frontend Logs"
+ {{- if eq $runtime "docker" }}
+ message "${record['log']}"
+ {{- end }}
+ tag ${tag}
+
+
+```
+
+**Note**: The log path `/var/log/containers/frontend-*.log` has to be excluded from the generic container logs to avoid duplicate log collection. Add the log path to *exclude_path* value under the *in_tail_containerlogs* source section.
+
+```
+...
+...
+oci-onm-logan:
+ ...
+ ...
+ fluentd:
+ ...
+ ...
+ genericContainerLogs:
+ exclude_path:
+ - '"/var/log/containers/kube-proxy-*.log"'
+ ...
+ ...
+ - '"/var/log/containers/frontend-*.log"'
+```
+
+In addition to the above, you may need to modify the source section to add `multiline parser`, if the logs are of plain text multi-line format (OR) add a concat plugin filter if the logs are of say multi-line but wrapped in json. Refer oci-onm-logan chart logs-configmap template for examples.
diff --git a/docs/license-short.txt b/docs/license-short.txt
new file mode 100644
index 00000000..a57d7295
--- /dev/null
+++ b/docs/license-short.txt
@@ -0,0 +1,2 @@
+Copyright (c) 2023, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
diff --git a/logan/docker-images/v1.0/debian/Dockerfile b/logan/docker-images/v1.0/debian/Dockerfile
index 3ab3961c..d7b74e46 100644
--- a/logan/docker-images/v1.0/debian/Dockerfile
+++ b/logan/docker-images/v1.0/debian/Dockerfile
@@ -1,6 +1,9 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
### Referred version from fluentd-kubernetes-daemonset gitgub repo ###
-FROM fluent/fluentd:v1.14.3-debian-1.0
+FROM fluent/fluentd:v1.14.3-debian-1.0
USER root
WORKDIR /home/fluent
diff --git a/logan/docker-images/v1.0/debian/Gemfile b/logan/docker-images/v1.0/debian/Gemfile
index d379bd31..ca3fd5b0 100644
--- a/logan/docker-images/v1.0/debian/Gemfile
+++ b/logan/docker-images/v1.0/debian/Gemfile
@@ -1,9 +1,12 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
source "https://rubygems.org"
gem "fluentd", "1.14.3"
gem "fluent-plugin-oci-logging-analytics", "2.0.3"
gem "fluent-plugin-concat", "~> 2.5.0"
-gem "fluent-plugin-rewrite-tag-filter", "~> 2.4.0"
+gem "fluent-plugin-rewrite-tag-filter", "~> 2.4.0"
gem "fluent-plugin-parser-cri", "~> 0.1.1"
gem "fluent-plugin-kubernetes_metadata_filter", "2.9.5"
gem "fluent-plugin-kubernetes-objects", "1.1.12"
diff --git a/logan/docker-images/v1.0/debian/entrypoint.sh b/logan/docker-images/v1.0/debian/entrypoint.sh
index 43dad7bd..b62287b1 100644
--- a/logan/docker-images/v1.0/debian/entrypoint.sh
+++ b/logan/docker-images/v1.0/debian/entrypoint.sh
@@ -1,3 +1,6 @@
#!/usr/bin/env sh
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
exec fluentd -c ${FLUENTD_CONF} -p /fluentd/plugins --gemfile /fluentd/Gemfile ${FLUENTD_OPT}
diff --git a/logan/docker-images/v1.0/oraclelinux/8/Dockerfile b/logan/docker-images/v1.0/oraclelinux/8/Dockerfile
index 36c064d2..fcdc7973 100644
--- a/logan/docker-images/v1.0/oraclelinux/8/Dockerfile
+++ b/logan/docker-images/v1.0/oraclelinux/8/Dockerfile
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
FROM container-registry.oracle.com/os/oraclelinux:8
USER root
@@ -15,7 +18,7 @@ COPY Gemfile /fluentd/
# Install ruby 2.7 along with rubygems and bundler.
RUN dnf -y module enable ruby:2.7 \
- && dnf -y install --nodocs ruby ruby-libs \
+ && dnf -y install --nodocs ruby ruby-libs \
&& dnf -y install --nodocs rubygems rubygem-openssl rubygem-psych \
&& dnf -y install --nodocs rubygem-bundler rubygem-io-console \
# Install development dependent packages for gems native installation
@@ -35,7 +38,7 @@ RUN dnf -y module enable ruby:2.7 \
&& curl -L -o /tmp/jemalloc-4.5.0.tar.bz2 https://github.com/jemalloc/jemalloc/releases/download/4.5.0/jemalloc-4.5.0.tar.bz2 \
&& cd /tmp && tar -xjf jemalloc-4.5.0.tar.bz2 && cd jemalloc-4.5.0/ \
&& ./configure && make \
- && mv lib/libjemalloc.so.2 /usr/lib \
+ && mv lib/libjemalloc.so.2 /usr/lib \
# Install hostname, required by fluent-plugin-rewrite-tag-filter
&& dnf -y install hostname \
# Remove all the development dependent packages
diff --git a/logan/docker-images/v1.0/oraclelinux/8/Gemfile b/logan/docker-images/v1.0/oraclelinux/8/Gemfile
index 10908808..0df012dc 100644
--- a/logan/docker-images/v1.0/oraclelinux/8/Gemfile
+++ b/logan/docker-images/v1.0/oraclelinux/8/Gemfile
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
source "https://rubygems.org"
gem "oj", "3.10.18"
diff --git a/logan/docker-images/v1.0/oraclelinux/8/entrypoint.sh b/logan/docker-images/v1.0/oraclelinux/8/entrypoint.sh
index 43dad7bd..b62287b1 100644
--- a/logan/docker-images/v1.0/oraclelinux/8/entrypoint.sh
+++ b/logan/docker-images/v1.0/oraclelinux/8/entrypoint.sh
@@ -1,3 +1,6 @@
#!/usr/bin/env sh
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
exec fluentd -c ${FLUENTD_CONF} -p /fluentd/plugins --gemfile /fluentd/Gemfile ${FLUENTD_OPT}
diff --git a/logan/helm-chart/templates/NOTES.txt b/logan/helm-chart/templates/NOTES.txt
deleted file mode 100644
index e69de29b..00000000
diff --git a/logan/helm-chart/templates/_helpers.tpl b/logan/helm-chart/templates/_helpers.tpl
deleted file mode 100644
index 5ff69cb2..00000000
--- a/logan/helm-chart/templates/_helpers.tpl
+++ /dev/null
@@ -1,14 +0,0 @@
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "oci-la-fluentd.name" -}}
-{{- default .Chart.Name .Values.name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{- define "common.tplvalues.render" -}}
- {{- if typeIs "string" .value }}
- {{- tpl .value .context }}
- {{- else }}
- {{- tpl (.value | toYaml) .context }}
- {{- end }}
-{{- end -}}
diff --git a/logan/helm-chart/templates/clusterrole-logs.yaml b/logan/helm-chart/templates/clusterrole-logs.yaml
deleted file mode 100644
index fa2b78cd..00000000
--- a/logan/helm-chart/templates/clusterrole-logs.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-{{- if .Values.createServiceAccount }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: {{ include "oci-la-fluentd.name" . }}-logs-clusterrole
- namespace: {{ default "kube-system" .Values.namespace }}
-rules:
-- apiGroups:
- - ""
- resources:
- - '*'
- verbs:
- - get
- - list
- - watch
-{{- end }}
diff --git a/logan/helm-chart/templates/clusterrole-objects.yaml b/logan/helm-chart/templates/clusterrole-objects.yaml
deleted file mode 100644
index e31527a0..00000000
--- a/logan/helm-chart/templates/clusterrole-objects.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-{{- if .Values.createServiceAccount }}
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: {{ include "oci-la-fluentd.name" . }}-objects-clusterrole
- namespace: {{ default "kube-system" .Values.namespace }}
-rules:
- - apiGroups:
- - ""
- resources:
- - '*'
- verbs:
- - get
- - list
- - watch
- - apiGroups:
- - apps
- - batch
- resources:
- - '*'
- verbs:
- - get
- - list
- - watch
-{{- end }}
diff --git a/logan/helm-chart/templates/clusterrolebinding-logs.yaml b/logan/helm-chart/templates/clusterrolebinding-logs.yaml
deleted file mode 100644
index dde1e606..00000000
--- a/logan/helm-chart/templates/clusterrolebinding-logs.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{{- if .Values.createServiceAccount }}
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: {{ include "oci-la-fluentd.name" . }}-logs-clusterrolebinding
-roleRef:
- kind: ClusterRole
- name: {{ include "oci-la-fluentd.name" . }}-logs-clusterrole
- apiGroup: rbac.authorization.k8s.io
-subjects:
-- kind: ServiceAccount
- name: {{ include "oci-la-fluentd.name" . }}-serviceaccount
- namespace: {{ default "kube-system" .Values.namespace }}
-{{- end }}
diff --git a/logan/helm-chart/templates/clusterrolebinding-objects.yaml b/logan/helm-chart/templates/clusterrolebinding-objects.yaml
deleted file mode 100644
index fa61c482..00000000
--- a/logan/helm-chart/templates/clusterrolebinding-objects.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-{{- if .Values.createServiceAccount }}
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
- name: {{ include "oci-la-fluentd.name" . }}-objects-clusterrolebinding
-roleRef:
- kind: ClusterRole
- name: {{ include "oci-la-fluentd.name" . }}-objects-clusterrole
- apiGroup: rbac.authorization.k8s.io
-subjects:
-- kind: ServiceAccount
- name: {{ include "oci-la-fluentd.name" . }}-serviceaccount
- namespace: {{ default "kube-system" .Values.namespace }}
-{{- end }}
diff --git a/logan/helm-chart/templates/serviceAccount.yaml b/logan/helm-chart/templates/serviceAccount.yaml
deleted file mode 100644
index ee405fd2..00000000
--- a/logan/helm-chart/templates/serviceAccount.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-{{- if .Values.createServiceAccount }}
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{ include "oci-la-fluentd.name" . }}-serviceaccount
- namespace: {{ default "kube-system" .Values.namespace }}
-{{- end }}
diff --git a/logan/helm-chart/values.yaml b/logan/helm-chart/values.yaml
deleted file mode 100644
index 0e30dd4e..00000000
--- a/logan/helm-chart/values.yaml
+++ /dev/null
@@ -1,369 +0,0 @@
-# This value is to determine the runtime of k8s and change the configuration of fluentd accordingly
-# Possible values are docker and cri(for OKE 1.20 and above)
-runtime: cri
-# Auth type to be used by oci fluentd output plugin to upload logs into logging analytics
-# Possible values are InstancePrincipal and config
-authtype: InstancePrincipal
-# namespace of k8s in which this helm chart needs to be installed
-namespace: kube-system
-# By default, a cluster role, cluster role binding and serviceaccount will be created for the Fluentd pods to access (readonly) various objects within the cluster for supporting logs and objects collection. You can set this to false and provide your own serviceaccount which has the necessary role(s) binded to it. Refer, README for the cluster role definition and other details.
-createServiceAccount: true
-# Name of the ServiceAccount to be used. Valid only when createServiceAccount is set to false.
-serviceAccount:
-image:
- # Replace this value with actual image pull secrets.
- # Make sure the secret is in the same namespace as specified above.
- imagePullSecrets:
- # Replace this value with actual docker image url
- url:
- # Replace this value with desired value for image pull policy
- imagePullPolicy: Always
-# Go to Logging Analytics Administration, click Service Details, and note the namespace value.
-ociLANamespace:
-# Logging Analytics Default Log Group OCID
-ociLALogGroupID:
-# Kubernetes Cluster OCID
-kubernetesClusterID:
-# Kubernetes Cluster NAME
-kubernetesClusterName:
-
-# Logging Analytics Default Entity OCID
-#ociLAEntityID:
-
-# Logging Analytics additional metadata. Use this to tag all the collected logs with one or more key:value pairs.
-# Key must be a valid field in Logging Analytics
-#metadata:
- #"Client Host Region": "PCT"
- #"Environment": "Production"
- #"Third key": "Third Value"
-
-## @param extraEnv extra env variables. Below is an example env variable
-## - name: ENV_VARIABLE_NAME
-## value: ENV_VARIABLE_VALUE
-extraEnv: []
-## parameters to set requests and limits for memory and cpu
-resources:
- limits:
- memory: 500Mi
- requests:
- cpu: 100m
- memory: 250Mi
-## @param extraVolumes Extra volumes. Below is an example extra volume
-## - name: tmpDir
-## hostPath:
-## path: /tmp log
-extraVolumes: []
-## @param extraVolumeMounts Mount extra volume(s). Below is an example extra volume mount
-## - name: tmpDir
-## mountPath: /tmp
-##
-extraVolumeMounts: []
-volumes:
- # This value is path to the pod logs in the host machine.
- # Replace this value with the actual path in your environment.
- podsHostPath: /var/log/pods
- # This value is path to the container data logs in the host machine.
- # Replace this value with the actual path in your environment.
- containerdataHostPath: /u01/data/docker/containers
-## oci section is only required when config file based auth is used.
-oci:
- # path to the config file
- path: /var/opt/.oci
- # config file name
- file: config
- configFiles:
- # config file data (https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm)
- config: |-
- # Replace each of the below fields with actual values.
- [DEFAULT]
- user=
- fingerprint=
- key_file=
- tenancy=
- region=
- # private key file data
- private.pem: |-
- # Replace this private key with actual value.
- -----BEGIN RSA PRIVATE KEY-----
- XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- -----END RSA PRIVATE KEY-----
-# The collection frequency (in minutes) for Kubernetes Objects
-objectsPollingFrequency: 5m
-# The following section represents the Fluentd configuration.
-fluentd:
- # path to the fluentd config file
- path: /var/opt/conf
- # fluentd config file name
- file: fluent.conf
- # Base directory on the node (with read write permission) to store fluentd plugins related data.
- baseDir: /var/log
- # Configuration for oci-logging-analytics output plugin
- ociLoggingAnalyticsOutputPlugin:
- profile_name: 'DEFAULT'
- # Output plugin logging level: DEBUG < INFO < WARN < ERROR < FATAL < UNKNOWN
- plugin_log_level: 'info'
- # The maximum log file size at which point the log file to be rotated, for example, 1KB, 1MB, etc.
- plugin_log_file_size: '10MB'
- # The number of archived or rotated log files to keep, must be non-zero.
- plugin_log_file_count: 10
- # Fluentd Buffer Configuration
- buffer:
- # The number of threads to flush or write chunks in parallel.
- flush_thread_count: 1
- # Once the total size of the stored buffer reaches this threshold, all the append operations will fail with error, and data will be lost.
- total_limit_size: '5368709120' # 5GB
- # The frequency of flushing the chunks to output plugin.
- flush_interval: 30 # seconds
- flush_thread_interval: 0.5 # seconds
- flush_thread_burst_interval: 0.05 # seconds
- # Wait in seconds before the next retry to flush.
- retry_wait: 2 # seconds
- # This is mandatory only when retry_forever field is false.
- retry_max_times: 17
- # Wait in seconds before the next constant factor of exponential backoff.
- retry_exponential_backoff_base: 2
- # If true, plugin will ignore retry_max_times option and retry flushing forever.
- retry_forever: true
- disable_chunk_backup: true
-
- # Configuration for kubernetes_metadata filter plugin (https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter)
- kubernetesMetadataFilter:
- # URL to the API server. Set this to retrieve further kubernetes metadata for logs from kubernetes API server. If not specified, environment variables KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT will be used if both are present which is typically true when running fluentd in a pod.
- kubernetes_url:
- # Validate SSL certificates (default: true)
- verify_ssl: true
- # Path to CA file for Kubernetes server certificate validation
- ca_file:
- # Skip all label fields from the metadata.
- skip_labels: false
- # Skip the container fields container_image and container_image_id in the metadata.
- skip_container_metadata: false
- # Skip the master_url field from the metadata.
- skip_master_url: false
- # Skip the namespace_id field from the metadata. The fetch_namespace_metadata function will be skipped. The plugin will be faster and cpu consumption will be less.
- skip_namespace_metadata: false
- # Set up a watch on the pods on the API server for updates to metadata. By default, true.
- watch: true
- # Config for Logs Collection using tail plugin
- tailPlugin:
- # If true, starts to read the logs from the head of the file or the last read position recorded in pos_file
- readFromHead: true
- # Frequency of flushing the chunks to output plugin.
- flushInterval: 60 # seconds
- # Specifies the encoding of logs. By default, in_tail emits string value as ASCII-8BIT encoding. If encoding is specified, in_tail changes string to given encoding.
- # When encoding is set at this level, it gets applied to all the logs being collected. Instead, it can also be set at individual logs under sections like kubernetesSystem, genericContainerLogs, customLogs etc.
- # encoding:
-
- # Configuration for rewrite_tag plugin
- rewriteTagPlugin:
- hostname_command: "cat /etc/hostname"
-
- # Configuration for Kubernetes System specific logs like Kube Flannel, Kube Proxy etc.
- kubernetesSystem:
- # Setting the following properties will override the default/generic configuration and applies to all Kubernetes system logs
- #ociLALogGroupID:
- #metadata:
- #"Client Host Region": "America"
- #"Environment": "Production"
- #"Third Key": "Third Value"
- #ociLAEntityID:
- #encoding:
- logs:
- # Configuration specific to Kube Proxy logs
- kube-proxy:
- # Log file location.
- path: /var/log/containers/kube-proxy-*.log
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Proxy Logs.
- ociLALogSourceName: "Kubernetes Proxy Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
- #metadata:
- #"Client Host Region": "America"
- #"Environment": "Production"
- #"Third Key": "Third Value"
- #ociLAEntityID:
- #ociLALogGroupID:
- #encoding:
-
- # Configuration specific to Kube Flannel logs
- kube-flannel:
- # The path to the source files.
- path: /var/log/containers/kube-flannel-*.log
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Flannel Logs.
- ociLALogSourceName: "Kubernetes Flannel Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
- # Configuration specific to Kubernetes DNS Autoscaler Logs
- kube-dns-autoscaler:
- path: /var/log/containers/kube-dns-autoscaler-*.log
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes DNS Autoscaler Logs.
- ociLALogSourceName: "Kubernetes DNS Autoscaler Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
- # Configuration specific to Kubernetes Core DNS Logs
- coredns:
- # The path to the source files.
- path: /var/log/containers/coredns-*.log
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Core DNS Logs.
- ociLALogSourceName: "Kubernetes Core DNS Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^\[[^\]]+\]\s+/
-
- # Configuration specific to Kubernetes CSI Node Driver Logs
- csinode:
- # The path to the source files.
- path: /var/log/containers/csi-oci-node-*.log
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes CSI Node Driver Logs.
- ociLALogSourceName: "Kubernetes CSI Node Driver Logs"
-
- # Configuration specific to Proxymux Client Logs
- proxymux:
- # The path to the source files.
- path: /var/log/containers/proxymux-client-*.log
- # Logging Analytics log source to use for parsing and processing the logs: OKE Proxymux Client Logs.
- ociLALogSourceName: "OKE Proxymux Client Logs"
-
- # Configuration specific to Kubernetes Autoscaler Logs
- cluster-autoscaler:
- # The path to the source files.
- path: /var/log/containers/cluster-autoscaler-*.log
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Autoscaler Logs.
- ociLALogSourceName: "Kubernetes Autoscaler Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
- # Configuration for Linux System specific logs like CronLogs and SecureLogs
- linuxSystem:
- logs:
- cronlog:
- # The path to the source files.
- path: /var/log/cron*
- # Logging Analytics log source to use for parsing and processing the logs: Linux Cron Logs.
- ociLALogSourceName: "Linux Cron Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
-
- securelog:
- # The path to the source files.
- path: /var/log/secure*
- # Logging Analytics log source to use for parsing and processing the logs: Linux Secure Logs.
- ociLALogSourceName: "Linux Secure Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
-
- kubeletlog:
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Kubelet Logs.
- ociLALogSourceName: "Kubernetes Kubelet Logs"
-
- syslog:
- # The path to the source files.
- path: /var/log/messages*
- # Logging Analytics log source to use for parsing and processing the logs: Linux Syslog Logs.
- ociLALogSourceName: "Linux Syslog Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
-
- maillog:
- # The path to the source files.
- path: /var/log/maillog*
- # Logging Analytics log source to use for parsing and processing the logs: Linux Mail Delivery Logs.
- ociLALogSourceName: "Linux Mail Delivery Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
-
- linuxauditlog:
- # The path to the source files.
- path: /var/log/audit/audit*
- # Logging Analytics log source to use for parsing and processing the logs: Linux Audit Logs.
- ociLALogSourceName: "Linux Audit Logs"
-
- uptracklog:
- # The path to the source files.
- path: /var/log/uptrack*
- # Logging Analytics log source to use for parsing and processing the logs: ksplice Logs.
- ociLALogSourceName: "Ksplice Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- multilineStartRegExp: /^\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}/
-
- yum:
- # The path to the source files.
- path: /var/log/yum.log*
- # Logging Analytics log source to use for parsing and processing the logs: Linux YUM Logs.
- ociLALogSourceName: "Linux YUM Logs"
-
- # Generic configuration for all container/pod logs
- genericContainerLogs:
- # Logging Analytics log source to use for parsing and processing the logs: Kubernetes Container Generic Logs.
- ociLALogSourceName: "Kubernetes Container Generic Logs"
- path: /var/log/containers/*.log
- # List of log paths to exclude that are already part of other specific configurations defined (like Kube Proxy, Kube Flannel)
- # If you want to create a custom configuration for any of the container logs using the customLogs section, then exclude the corresponding log path here.
- exclude_path:
- - '"/var/log/containers/kube-proxy-*.log"'
- - '"/var/log/containers/kube-flannel-*.log"'
- - '"/var/log/containers/kube-dns-autoscaler-*.log"'
- - '"/var/log/containers/coredns-*.log"'
- - '"/var/log/containers/csi-oci-node-*.log"'
- - '"/var/log/containers/proxymux-client-*.log"'
- - '"/var/log/containers/cluster-autoscaler-*.log"'
-
- # Configuration for any custom logs which are not part of the default configuration defined in this file.
- # All the pod/container logs will be collected using "genericContainerLogs" section.
- # Use this section to create a custom configuration for any of the container logs.
- # Also, you can use this section to define configuration for any other log path existing on a Kubernetes worker node
- customLogs:
- # A unique identifier to represent the configuration for a single log path
- #custom-id1:
- #path: /var/log/containers/custom*.log
- # Logging Analytics log source to use for parsing and processing the logs:
- #ociLALogSourceName: "Custom1 Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- #multilineStartRegExp:
- # Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true.
- #isContainerLog: true
- #custom-id2:
- #path: /var/log/custom/*.log
- # Logging Analytics log source to use for parsing and processing the logs:
- #ociLALogSourceName: "Custom2 Logs"
- # The regular expression pattern for the starting line in case of multi-line logs.
- #multilineStartRegExp:
- # Set isContainerLog to false if the log is not a container log (/var/log/containers/*.log). Default value is true.
- #isContainerLog: false
-
- # Alternative approach to define the configuration for any custom logs which are not part of the default configuration defined in this file.
- # Provide the Fluentd configuration with the source and filter sections for your custom logs in this section. Exclude the match section. It would be used without any modification.
- # Notes:
- # Ensure that @id in the source section is unique and does not collide with any default configuration defined in this file
- # Tag must start with "oci." and must be unique.
- # In case of container log (/var/log/containers/*.log), exclude the corresponding log path in "genericContainerLogs" section.
- customFluentdConf: |
-
- # Configuration for collecting Kubernetes Object information.
- # Supported objects are Node, Pod, Namespace, Event, DaemonSet, ReplicaSet, Deployment, StatefulSet, Job, CronJob
- kubernetesObjects:
- #metadata:
- #"Client Host Region": "America"
- #"Environment": "Production"
- #"Third Key": "Third Value"
- #ociLAEntityID:
- #ociLALogGroupID:
- objectsList:
- nodes:
- #api_version: v1 (default)
- #api_endpoint: "" (default)
- pods:
- namespaces:
- events:
- daemon_sets:
- api_endpoint: apis/apps
- replica_sets:
- api_endpoint: apis/apps
- deployments:
- api_endpoint: apis/apps
- stateful_sets:
- api_endpoint: apis/apps
- jobs:
- api_endpoint: apis/batch
- cron_jobs:
- api_endpoint: apis/batch
diff --git a/logan/kubernetes-resources/logs-collection/configmap-cri.yaml b/logan/kubernetes-resources/logs-collection/configmap-cri.yaml
index 92ac9c65..4b05b80e 100644
--- a/logan/kubernetes-resources/logs-collection/configmap-cri.yaml
+++ b/logan/kubernetes-resources/logs-collection/configmap-cri.yaml
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
apiVersion: v1
kind: ConfigMap
metadata:
@@ -29,7 +32,7 @@ data:
@type relabel
@label @NORMAL
-
+
# Match block to set info required for oci-logging-analytics fluentd outplugin
@@ -70,7 +73,7 @@ data:
# Config for Kube Proxy Logs Collection
- # Source config section to collect Kube Proxy logs from /var/log/containers/kube-proxy-*.log using Fluentd tail plugin.
+ # Source config section to collect Kube Proxy logs from /var/log/containers/kube-proxy-*.log using Fluentd tail plugin.
@type tail
@id in_tail_kube_proxy
@@ -78,25 +81,25 @@ data:
path /var/log/containers/kube-proxy-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/kube-proxy.logs.pos
tag oci.oke.kube-proxy.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type cri
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Proxy Logs"
tag ${tag}
-
+
# Concat filter to handle partial logs in CRI/ContainerD
# This filter can not be clubbed with concat filter for multiline as both are mutually exclusive.
@@ -118,7 +121,7 @@ data:
timeout_label "@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Kube Flannel Logs Collection
# Source config section to collect Kube Flannel logs from /var/log/containers/kube-flannel-*.log using Fluentd tail plugin.
@@ -128,20 +131,20 @@ data:
path /var/log/containers/kube-flannel-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/kube-flannel.logs.pos
tag oci.oke.kube-flannel.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type cri
+ @type cri
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Flannel Logs"
tag ${tag}
@@ -158,7 +161,7 @@ data:
# timeout scenario should not occur in general for partial logs handling
timeout_label "@NORMAL"
-
+
# Concat filter to handle multi-line log records.
@type concat
@@ -168,7 +171,7 @@ data:
timeout_label "@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Kube DNS Autoscalar Logs Collection
# Source config section to collect Kube DNS Autoscalar logs from /var/log/containers/kube-dns-autoscaler-*.log using Fluentd tail plugin.
@@ -178,20 +181,20 @@ data:
path /var/log/containers/kube-dns-autoscaler-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/kube-dns-autoscaler.logs.pos
tag oci.oke.kube-dns-autoscaler.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type cri
+ @type cri
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes DNS Autoscaler Logs"
tag ${tag}
@@ -218,7 +221,7 @@ data:
timeout_label "@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Coredns Logs Collection
# Source config section to collect Coredns logs from /var/log/containers/coredns-*.log using Fluentd tail plugin.
@@ -228,18 +231,18 @@ data:
path /var/log/containers/coredns-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/coredns.logs.pos
tag oci.oke.kube.coredns.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type cri
+ @type cri
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Core DNS Logs"
@@ -268,7 +271,7 @@ data:
timeout_label "#@NORMAL"
multiline_start_regexp /^\[[^\]]+\]\s+/
-
+
# Config for CSI Node Logs Collection
# Source config section to collect CSI Node logs from /var/log/containers/csi-oci-node-*.log using Fluentd tail plugin.
@@ -278,25 +281,25 @@ data:
path /var/log/containers/csi-oci-node-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/csinode.logs.pos
tag oci.oke.csinode.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type cri
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes CSI Node Driver Logs"
tag ${tag}
-
+
# Concat filter to handle partial logs in CRI/ContainerD
# This filter can not be clubbed with concat filter for multiline as both are mutually exclusive.
@@ -308,7 +311,7 @@ data:
# timeout scenario should not occur in general for partial logs handling
timeout_label "@NORMAL"
-
+
# Config for Proxymux Logs Collection
# Source config section to collect Proxymux logs from /var/log/containers/proxymux-client-*.log using Fluentd tail plugin.
@@ -318,20 +321,20 @@ data:
path /var/log/containers/proxymux-client-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/proxymux.logs.pos
tag oci.oke.proxymux-client.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type cri
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "OKE Proxymux Client Logs"
tag ${tag}
@@ -348,7 +351,7 @@ data:
# timeout scenario should not occur in general for partial logs handling
timeout_label "@NORMAL"
-
+
# Config for Cluster Autoscalar Logs Collection
# Source config section to collect Cluster Autoscalar logs from /var/log/containers/cluster-autoscaler-*.log using Fluentd tail plugin.
@@ -358,19 +361,19 @@ data:
path /var/log/containers/cluster-autoscaler-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/cluster-autoscaler.logs.pos
tag oci.oke.cluster-autoscaler.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type cri
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Autoscaler Logs"
tag ${tag}
@@ -398,7 +401,7 @@ data:
timeout_label "@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Cronlog Logs Collection
# Source config section to collect Cronlog logs from /var/log/cron* using Fluentd tail plugin.
@@ -408,21 +411,21 @@ data:
path /var/log/cron*
pos_file /var/log/oci_la_fluentd_outplugin/pos/cronlog.logs.pos
tag oci.oke.syslog.cronlog.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type multiline
format_firstline /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
format1 /^(?.*)/
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Linux Cron Logs"
tag ${tag}
@@ -438,22 +441,22 @@ data:
path /var/log/secure*
pos_file /var/log/oci_la_fluentd_outplugin/pos/securelog.logs.pos
tag oci.oke.syslog.securelog.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type multiline
format_firstline /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
format1 /^(?.*)/
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Linux Secure Logs"
tag ${tag}
@@ -468,14 +471,14 @@ data:
path /var/log/messages*
pos_file /var/log/oci_la_fluentd_outplugin/pos/syslog.logs.pos
tag oci.oke.syslog.messages.**
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type multiline
format_firstline /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
format1 /^(?.*)/
-
+
# Match block to filter kubelet logs from syslogs
@type rewrite_tag_filter
@@ -491,14 +494,14 @@ data:
tag oci.oke.syslog.syslog.*
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Kubelet Logs"
tag ${tag}
@@ -511,13 +514,13 @@ data:
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Linux Syslog Logs"
tag ${tag}
-
+
# Config for Mail Delivery Logs Collection
# Source config section to collect Mail Delivery Logs from /var/log/maillog* using Fluentd tail plugin.
@@ -547,7 +550,7 @@ data:
tag ${tag}
-
+
# Config for Linux Audit Logs Collection
# Source config section to collect Linux Audit Logs from /var/log/audit/audit* using Fluentd tail plugin.
@@ -575,7 +578,7 @@ data:
tag ${tag}
-
+
# Config for Ksplice Logs Collection
# Source config section to collect Ksplice Logs from /var/log/uptrack* using Fluentd tail plugin.
@@ -693,4 +696,3 @@ data:
# timeout scenario should not occur in general for partial logs handling
timeout_label "@NORMAL"
-
diff --git a/logan/kubernetes-resources/logs-collection/configmap-docker.yaml b/logan/kubernetes-resources/logs-collection/configmap-docker.yaml
index c8849c2b..1917ec42 100644
--- a/logan/kubernetes-resources/logs-collection/configmap-docker.yaml
+++ b/logan/kubernetes-resources/logs-collection/configmap-docker.yaml
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
apiVersion: v1
kind: ConfigMap
metadata:
@@ -29,7 +32,7 @@ data:
@type relabel
@label @NORMAL
-
+
# Match block to set info required for oci-logging-analytics fluentd outplugin
@@ -70,7 +73,7 @@ data:
# Config for Kube Proxy Logs Collection
- # Source config section to collect Kube Proxy logs from /var/log/containers/kube-proxy-*.log using Fluentd tail plugin.
+ # Source config section to collect Kube Proxy logs from /var/log/containers/kube-proxy-*.log using Fluentd tail plugin.
@type tail
@id in_tail_kube_proxy
@@ -78,26 +81,26 @@ data:
path /var/log/containers/kube-proxy-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/kube-proxy.logs.pos
tag oci.oke.kube-proxy.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type json
+ @type json
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Proxy Logs"
message "${record['log']}"
tag ${tag}
-
+
# Concat filter to handle multi-line log records.
@type concat
@@ -107,7 +110,7 @@ data:
timeout_label "#@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Kube Flannel Logs Collection
# Source config section to collect Kube Flannel logs from /var/log/containers/kube-flannel-*.log using Fluentd tail plugin.
@@ -117,20 +120,20 @@ data:
path /var/log/containers/kube-flannel-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/kube-flannel.logs.pos
tag oci.oke.kube-flannel.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type json
+ @type json
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Flannel Logs"
message "${record['log']}"
tag ${tag}
@@ -146,7 +149,7 @@ data:
timeout_label "#@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Kube DNS Autoscalar Logs Collection
# Source config section to collect Kube DNS Autoscalar logs from /var/log/containers/kube-dns-autoscaler-*.log using Fluentd tail plugin.
@@ -156,20 +159,20 @@ data:
path /var/log/containers/kube-dns-autoscaler-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/kube-dns-autoscaler.logs.pos
tag oci.oke.kube-dns-autoscaler.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type json
+ @type json
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes DNS Autoscaler Logs"
message "${record['log']}"
tag ${tag}
@@ -185,7 +188,7 @@ data:
timeout_label "#@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Coredns Logs Collection
# Source config section to collect Coredns logs from /var/log/containers/coredns-*.log using Fluentd tail plugin.
@@ -195,18 +198,18 @@ data:
path /var/log/containers/coredns-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/coredns.logs.pos
tag oci.oke.kube.coredns.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type json
+ @type json
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Core DNS Logs"
@@ -224,7 +227,7 @@ data:
timeout_label "#@NORMAL"
multiline_start_regexp /^\[[^\]]+\]\s+/
-
+
# Config for CSI Node Logs Collection
# Source config section to collect CSI Node logs from /var/log/containers/csi-oci-node-*.log using Fluentd tail plugin.
@@ -234,26 +237,26 @@ data:
path /var/log/containers/csi-oci-node-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/csinode.logs.pos
tag oci.oke.csinode.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type json
+ @type json
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes CSI Node Driver Logs"
message "${record['log']}"
tag ${tag}
-
+
# Config for Proxymux Logs Collection
# Source config section to collect Proxymux logs from /var/log/containers/proxymux-client-*.log using Fluentd tail plugin.
@@ -263,26 +266,26 @@ data:
path /var/log/containers/proxymux-client-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/proxymux.logs.pos
tag oci.oke.proxymux-client.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type json
+ @type json
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "OKE Proxymux Client Logs"
message "${record['log']}"
tag ${tag}
-
+
# Config for Cluster Autoscalar Logs Collection
# Source config section to collect Cluster Autoscalar logs from /var/log/containers/cluster-autoscaler-*.log using Fluentd tail plugin.
@@ -292,19 +295,19 @@ data:
path /var/log/containers/cluster-autoscaler-*.log
pos_file /var/log/oci_la_fluentd_outplugin/pos/cluster-autoscaler.logs.pos
tag oci.oke.cluster-autoscaler.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
- @type json
+ @type json
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Autoscaler Logs"
message "${record['log']}"
@@ -321,7 +324,7 @@ data:
timeout_label "#@NORMAL"
multiline_start_regexp /^\S\d{2}\d{2}\s+[^\:]+:[^\:]+:[^\.]+\.\d{0,3}/
-
+
# Config for Cronlog Logs Collection
# Source config section to collect Cronlog logs from /var/log/cron* using Fluentd tail plugin.
@@ -331,21 +334,21 @@ data:
path /var/log/cron*
pos_file /var/log/oci_la_fluentd_outplugin/pos/cronlog.logs.pos
tag oci.oke.syslog.cronlog.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type multiline
format_firstline /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
format1 /^(?.*)/
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Linux Cron Logs"
tag ${tag}
@@ -361,22 +364,22 @@ data:
path /var/log/secure*
pos_file /var/log/oci_la_fluentd_outplugin/pos/securelog.logs.pos
tag oci.oke.syslog.securelog.*
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type multiline
format_firstline /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
format1 /^(?.*)/
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Linux Secure Logs"
tag ${tag}
@@ -391,14 +394,14 @@ data:
path /var/log/messages*
pos_file /var/log/oci_la_fluentd_outplugin/pos/syslog.logs.pos
tag oci.oke.syslog.messages.**
- read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
+ read_from_head "#{ENV['FLUENT_OCI_READ_FROM_HEAD'] || true}"
@type multiline
format_firstline /^(?:(?:\d+\s+)?<([^>]*)>(?:\d+\s+)?)?\S+\s+\d{1,2}\s+\d{1,2}:\d{1,2}:\d{1,2}\s+/
format1 /^(?.*)/
-
+
# Match block to filter kubelet logs from syslogs
@type rewrite_tag_filter
@@ -414,14 +417,14 @@ data:
tag oci.oke.syslog.syslog.*
-
+
# Record transformer filter to apply Logging Analytics configuration to each record.
@type record_transformer
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Kubernetes Kubelet Logs"
tag ${tag}
@@ -434,13 +437,13 @@ data:
enable_ruby true
oci_la_metadata ${{'Node':"#{ENV['K8S_NODE_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
- oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
- oci_la_log_path "${record['tailed_path']}"
+ oci_la_log_group_id "#{ENV['FLUENT_OCI_SYSLOG_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"
+ oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name "Linux Syslog Logs"
tag ${tag}
-
+
# Config for Mail Delivery Logs Collection
# Source config section to collect Mail Delivery Logs from /var/log/maillog* using Fluentd tail plugin.
@@ -470,7 +473,7 @@ data:
tag ${tag}
-
+
# Config for Linux Audit Logs Collection
# Source config section to collect Linux Audit Logs from /var/log/audit/audit* using Fluentd tail plugin.
@@ -498,7 +501,7 @@ data:
tag ${tag}
-
+
# Config for Ksplice Logs Collection
# Source config section to collect Ksplice Logs from /var/log/uptrack* using Fluentd tail plugin.
@@ -595,7 +598,7 @@ data:
@type record_transformer
enable_ruby true
- oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
+ oci_la_metadata ${{'Kubernetes Cluster Name': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_NAME'] || 'UNDEFINED'}", 'Kubernetes Cluster ID': "#{ENV['FLUENT_OCI_KUBERNETES_CLUSTER_ID'] || 'UNDEFINED'}"}}
oci_la_log_group_id ${record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_group_id") ? record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_group_id") : "#{ENV['FLUENT_OCI_KUBERNETES_LOGGROUP_ID'] || ENV['FLUENT_OCI_DEFAULT_LOGGROUP_ID']}"}
oci_la_log_path "${record['tailed_path']}"
oci_la_log_source_name ${record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_source_name") ? record.dig("kubernetes", "annotations", "oracle.com/oci_la_log_source_name") : "Kubernetes Container Generic Logs"}
diff --git a/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml b/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml
index caed2a30..dcf20a2b 100644
--- a/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml
+++ b/logan/kubernetes-resources/logs-collection/fluentd-daemonset.yaml
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
---
apiVersion: v1
kind: ServiceAccount
diff --git a/logan/kubernetes-resources/logs-collection/secrets.yaml b/logan/kubernetes-resources/logs-collection/secrets.yaml
index 6fb37bcc..f76435c7 100644
--- a/logan/kubernetes-resources/logs-collection/secrets.yaml
+++ b/logan/kubernetes-resources/logs-collection/secrets.yaml
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
apiVersion: v1
kind: Secret
type: Opaque
@@ -15,4 +18,4 @@ stringData:
private.pem: |-
-----BEGIN RSA PRIVATE KEY-----
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- -----END RSA PRIVATE KEY-----
\ No newline at end of file
+ -----END RSA PRIVATE KEY-----
diff --git a/logan/kubernetes-resources/objects-collection/configmap-objects.yaml b/logan/kubernetes-resources/objects-collection/configmap-objects.yaml
index 76ee525f..a0b141c7 100644
--- a/logan/kubernetes-resources/objects-collection/configmap-objects.yaml
+++ b/logan/kubernetes-resources/objects-collection/configmap-objects.yaml
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
apiVersion: v1
kind: ConfigMap
metadata:
@@ -19,7 +22,7 @@ data:
plugin_log_location "#{ENV['FLUENT_OCI_LOG_LOCATION'] || '/var/log/'}"
plugin_log_level "#{ENV['FLUENT_OCI_LOG_LEVEL'] || 'info'}"
plugin_log_file_size "#{ENV['FLUENT_OCI_LOG_FILE_SIZE'] || '10MB'}"
- plugin_log_file_count "#{ENV['FLUENT_OCI_LOG_FILE_COUNT'] || 10}"
+ plugin_log_file_count "#{ENV['FLUENT_OCI_LOG_FILE_COUNT'] || 10}"
@type file
path "#{ENV['FLUENT_OCI_BUFFER_PATH'] || '/var/log/oci_la_fluentd_outplugin/objects/buffer/'}"
@@ -71,7 +74,7 @@ data:
@type kubernetes_objects
- tag k8s.*
+ tag k8s.*
api_version v1
api_endpoint apis/apps
@@ -111,7 +114,7 @@ data:
-
+
# To support cronJob Object collection for Kubernetes versions <= 1.19 where cronJob is available under v1beta1 api version.
diff --git a/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml b/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml
index afdb803c..57dfdc9d 100644
--- a/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml
+++ b/logan/kubernetes-resources/objects-collection/fluentd-deployment.yaml
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
---
apiVersion: v1
kind: ServiceAccount
@@ -81,7 +84,7 @@ spec:
value: "/var/opt/conf/fluent.conf" # change as required
- name: FLUENT_OCI_DEFAULT_LOGGROUP_ID
# Replace this value with actual logging analytics log group
- value:
+ value:
- name: FLUENT_OCI_NAMESPACE
# Replace this value with actual namespace of logging analytics
value:
diff --git a/logan/kubernetes-resources/objects-collection/secrets.yaml b/logan/kubernetes-resources/objects-collection/secrets.yaml
index 6fb37bcc..f76435c7 100644
--- a/logan/kubernetes-resources/objects-collection/secrets.yaml
+++ b/logan/kubernetes-resources/objects-collection/secrets.yaml
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
apiVersion: v1
kind: Secret
type: Opaque
@@ -15,4 +18,4 @@ stringData:
private.pem: |-
-----BEGIN RSA PRIVATE KEY-----
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
- -----END RSA PRIVATE KEY-----
\ No newline at end of file
+ -----END RSA PRIVATE KEY-----
diff --git a/logan/terraform/oke/helm-chart b/logan/terraform/oke/helm-chart
deleted file mode 120000
index e42d5f72..00000000
--- a/logan/terraform/oke/helm-chart
+++ /dev/null
@@ -1 +0,0 @@
-../../helm-chart
\ No newline at end of file
diff --git a/logan/terraform/oke/helm.tf b/logan/terraform/oke/helm.tf
deleted file mode 100644
index ec1b1ed9..00000000
--- a/logan/terraform/oke/helm.tf
+++ /dev/null
@@ -1,45 +0,0 @@
-
-resource "helm_release" "oci-kubernetes-monitoring" {
- name = "oci-kubernetes-monitoring"
- chart = "${path.module}/../../helm-chart"
-
- set {
- name = "image.url"
- value = var.container_image_url
- }
-
- set {
- name = "kubernetesClusterName"
- value = var.oke_cluster_name
- }
-
- set {
- name = "kubernetesClusterID"
- value = var.oke_cluster_ocid
- }
-
- set {
- name = "namespace"
- value = var.kubernetes_namespace
- }
-
- set {
- name = "ociLANamespace"
- value = var.oci_la_namespace
- }
-
- set {
- name = "ociLALogGroupID"
- value = var.oci_la_logGroup_id
- }
-
- set {
- name = "ociCompartmentID"
- value = var.oke_cluster_compartment
- }
-
- set {
- name = "fluentd.baseDir"
- value = var.fluentd_baseDir_path
- }
-}
\ No newline at end of file
diff --git a/logan/terraform/oke/main.tf b/logan/terraform/oke/main.tf
deleted file mode 100644
index a727d66d..00000000
--- a/logan/terraform/oke/main.tf
+++ /dev/null
@@ -1,57 +0,0 @@
-// Import Kubernetes Dashboards
-module "import_kubernetes_dashbords" {
- source = "./modules/dashboards"
- compartment_ocid = var.oci_la_compartment_ocid
-
- count = var.enable_dashboard_import ? 1 : 0
-}
-
-// Create Required Polcies and Dynamic Group
-// Needs to be called with OCI Home Region Provider
-module "policy_and_dynamic-group" {
- source = "./modules/iam"
- root_compartment_ocid = var.tenancy_ocid
- oci_la_logGroup_compartment_ocid = var.oci_la_compartment_ocid
- oke_compartment_ocid = var.oke_compartment_ocid
- oke_cluster_ocid = var.oke_cluster_ocid
-
- count = var.opt_create_dynamicGroup_and_policies ? 1 : 0
-
- providers = {
- oci = oci.home_region
- }
-}
-
-// Create Logging Analytics Resorces
-module "loggingAnalytics" {
- source = "./modules/logan"
- tenancy_ocid = var.tenancy_ocid
- create_new_logGroup = var.opt_create_new_la_logGroup
- new_logGroup_name = var.oci_la_logGroup_name
- compartment_ocid = var.oci_la_compartment_ocid
- existing_logGroup_id = var.oci_la_logGroup_id
-}
-
-
-// deploy oke-monitoring solution (helm release)
-// always call this module
-// - if enable_helm_release is set to false, helm release won't be deployed
-// - We still need to call this, for the stack to avoid errors when enable_helm_release is set as false
-module "helm_release" {
- source = "./modules/helm"
-
- enable_helm_release = var.enable_helm_release
- enable_helm_debugging = var.enable_helm_debugging
-
- opt_create_kubernetes_namespace = var.opt_create_kubernetes_namespace
- oke_compartment_ocid = var.oke_compartment_ocid
- oke_cluster_ocid = var.oke_cluster_ocid
- container_image_url = var.container_image_url
- kubernetes_namespace = var.kubernetes_namespace
-
- oci_la_logGroup_id = module.loggingAnalytics.oci_la_logGroup_ocid
- oci_la_namespace = module.loggingAnalytics.oci_la_namespace
-
- fluentd_baseDir_path = var.fluentd_baseDir_path
-
-}
\ No newline at end of file
diff --git a/logan/terraform/oke/modules/dashboards/inputs.tf b/logan/terraform/oke/modules/dashboards/inputs.tf
deleted file mode 100644
index ef8c19aa..00000000
--- a/logan/terraform/oke/modules/dashboards/inputs.tf
+++ /dev/null
@@ -1,4 +0,0 @@
-# Compartment for creating dashboards and it's associated saved-searches
-variable "compartment_ocid" {
- type = string
-}
diff --git a/logan/terraform/oke/modules/helm/helm.tf b/logan/terraform/oke/modules/helm/helm.tf
deleted file mode 100644
index 6f8fa088..00000000
--- a/logan/terraform/oke/modules/helm/helm.tf
+++ /dev/null
@@ -1,124 +0,0 @@
-
-data "oci_containerengine_clusters" "oke_clusters_list" {
- compartment_id = var.oke_compartment_ocid
-}
-
-locals {
- oke_clusters_list = data.oci_containerengine_clusters.oke_clusters_list.clusters
- oke_cluster_name = var.enable_helm_release ? [for c in local.oke_clusters_list : c.name if c.id == var.oke_cluster_ocid][0] : "place-holder"
-}
-
-resource "helm_release" "oci-kubernetes-monitoring" {
- name = "oci-kubernetes-monitoring"
- chart = "${path.root}/helm-chart"
- namespace = var.kubernetes_namespace
- create_namespace = var.opt_create_kubernetes_namespace
- wait = true
-
- count = var.enable_helm_release ? 1 : 0
-
- set {
- name = "image.url"
- value = var.container_image_url
- }
-
- set {
- name = "kubernetesClusterName"
- value = local.oke_cluster_name
- }
-
- set {
- name = "kubernetesClusterID"
- value = var.oke_cluster_ocid
- }
-
- set {
- name = "namespace"
- value = var.kubernetes_namespace
- }
-
- set {
- name = "ociLANamespace"
- value = var.oci_la_namespace
- }
-
- set {
- name = "ociLALogGroupID"
- value = var.oci_la_logGroup_id
- }
-
- set {
- name = "ociCompartmentID"
- value = var.oke_compartment_ocid
- }
-
- set {
- name = "fluentd.baseDir"
- value = var.fluentd_baseDir_path
- }
-}
-
-# helm template for release artifacts testing and validation
-# this resouece is not used by helm release
-data "helm_template" "oci-kubernetes-monitoring" {
- name = "oci-kubernetes-monitoring"
- chart = "${path.root}/helm-chart"
- namespace = var.kubernetes_namespace
- create_namespace = var.opt_create_kubernetes_namespace
-
- count = var.enable_helm_debugging ? 1 : 0
-
- set {
- name = "image.url"
- value = var.container_image_url
- }
-
- set {
- name = "kubernetesClusterName"
- value = local.oke_cluster_name
- }
-
- set {
- name = "kubernetesClusterID"
- value = var.oke_cluster_ocid
- }
-
- set {
- name = "namespace"
- value = var.kubernetes_namespace
- }
-
- set {
- name = "ociLANamespace"
- value = var.oci_la_namespace
- }
-
- set {
- name = "ociLALogGroupID"
- value = var.oci_la_logGroup_id
- }
-
- set {
- name = "ociCompartmentID"
- value = var.oke_compartment_ocid
- }
-
- set {
- name = "fluentd.baseDir"
- value = var.fluentd_baseDir_path
- }
-}
-
-# Helm release artifacts for local testing and validation. Not used by helm resource.
-resource "local_file" "helm_release" {
- content = tostring(data.helm_template.oci-kubernetes-monitoring[0].manifest)
- filename = "${path.module}/local/helmrelease.yaml"
- count = var.enable_helm_debugging ? 1 : 0
-}
-
-# kubeconfig when using Terraform locally. Not used by Oracle Resource Manager
-resource "local_file" "oke_kubeconfig" {
- content = data.oci_containerengine_cluster_kube_config.oke[0].content
- filename = "${path.module}/local/kubeconfig"
- count = var.enable_helm_debugging && var.enable_helm_release ? 1 : 0
-}
diff --git a/logan/terraform/oke/modules/helm/inputs.tf b/logan/terraform/oke/modules/helm/inputs.tf
deleted file mode 100644
index 2259db0d..00000000
--- a/logan/terraform/oke/modules/helm/inputs.tf
+++ /dev/null
@@ -1,69 +0,0 @@
-####
-## Switches
-####
-
-variable "enable_helm_release" {
- type = bool
- default = true
-}
-
-variable "enable_helm_debugging" {
- type = bool
- default = false
-}
-
-
-####
-## OKE Cluster Information
-####
-
-# OKE Cluster Compartment
-variable "oke_compartment_ocid" {
- type = string
-}
-
-# OKE Cluster OCID
-variable "oke_cluster_ocid" {
- type = string
-}
-
-# OCI LA Fluentd Container Image
-variable "container_image_url" {
- type = string
-}
-
-# Kubernetes Namespace
-variable "kubernetes_namespace" {
- type = string
-}
-
-# Option to create Kubernetes Namespace
-variable "opt_create_kubernetes_namespace" {
- type = bool
- default = true
-}
-
-####
-## OCI Logging Analytics Information
-####
-
-# OCI Logging Analytics LogGroup OCID
-variable "oci_la_logGroup_id" {
- type = string
- default = ""
-}
-
-# Log Analytics Namespace
-variable "oci_la_namespace" {
- type = string
-}
-
-####
-## Fluentd Configuration
-####
-
-# Fluentd Base Directory
-variable "fluentd_baseDir_path" {
- type = string
- default = "/var/log"
-}
\ No newline at end of file
diff --git a/logan/terraform/oke/modules/helm/local/.gitignore b/logan/terraform/oke/modules/helm/local/.gitignore
deleted file mode 100644
index 94548af5..00000000
--- a/logan/terraform/oke/modules/helm/local/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*
-*/
-!.gitignore
diff --git a/logan/terraform/oke/modules/helm/provider.tf b/logan/terraform/oke/modules/helm/provider.tf
deleted file mode 100644
index b1ac13a5..00000000
--- a/logan/terraform/oke/modules/helm/provider.tf
+++ /dev/null
@@ -1,36 +0,0 @@
-terraform {
- required_version = ">= 1.0"
- required_providers {
- helm = {
- source = "hashicorp/helm"
- version = "2.7.1"
- # https://registry.terraform.io/providers/hashicorp/helm/2.1.0
- }
- }
-}
-
-data "oci_containerengine_cluster_kube_config" "oke" {
- cluster_id = var.oke_cluster_ocid
- count = var.enable_helm_release ? 1 : 0
-}
-
-locals {
- // following locals are set as "place-holder" when user opts out of helm release
- cluster_endpoint = var.enable_helm_release ? yamldecode(data.oci_containerengine_cluster_kube_config.oke[0].content)["clusters"][0]["cluster"]["server"] : "place-holder"
- cluster_ca_certificate = var.enable_helm_release ? base64decode(yamldecode(data.oci_containerengine_cluster_kube_config.oke[0].content)["clusters"][0]["cluster"]["certificate-authority-data"]) : "place-holder"
- cluster_id = var.enable_helm_release ? yamldecode(data.oci_containerengine_cluster_kube_config.oke[0].content)["users"][0]["user"]["exec"]["args"][4] : "place-holder"
- cluster_region = var.enable_helm_release ? yamldecode(data.oci_containerengine_cluster_kube_config.oke[0].content)["users"][0]["user"]["exec"]["args"][6] : "place-holder"
-}
-
-# https://docs.cloud.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengdownloadkubeconfigfile.htm#notes
-provider "helm" {
- kubernetes {
- host = local.cluster_endpoint
- cluster_ca_certificate = local.cluster_ca_certificate
- exec {
- api_version = "client.authentication.k8s.io/v1beta1"
- args = ["ce", "cluster", "generate-token", "--cluster-id", local.cluster_id, "--region", local.cluster_region]
- command = "oci"
- }
- }
-}
\ No newline at end of file
diff --git a/logan/terraform/oke/modules/iam/iam.tf b/logan/terraform/oke/modules/iam/iam.tf
deleted file mode 100644
index 52228358..00000000
--- a/logan/terraform/oke/modules/iam/iam.tf
+++ /dev/null
@@ -1,52 +0,0 @@
-locals {
- # Compartments
- la_compartment_name = data.oci_identity_compartment.oci_la_compartment.name
- oke_compartment_name = data.oci_identity_compartment.oke_compartment.name
-
- # Dynmaic Group
- uuid_dynamic_group = md5(var.oke_cluster_ocid)
- dynamic_group_name = "oci-kubernetes-monitoring-${local.uuid_dynamic_group}"
- dynamic_group_desc = "Auto generated by Resource Manager Stack - oci-kubernetes-monitoring. Required for monitoring OKE Cluster - ${var.oke_cluster_ocid}"
- instances_in_compartment_rule = ["ALL {instance.compartment.id = '${var.oke_compartment_ocid}'}"]
- clusters_in_compartment_rule = ["ALL {resource.type = 'cluster', resource.compartment.id = '${var.oke_compartment_ocid}'}"]
- dynamic_group_matching_rules = concat(local.instances_in_compartment_rule, local.clusters_in_compartment_rule)
- complied_dynamic_group_rules = "ANY {${join(",", local.dynamic_group_matching_rules)}}"
-
- # Policy
- uuid_policy = md5("${local.dynamic_group_name}${local.la_compartment_name}")
- policy_name = "oci-kubernetes-monitoring-${local.uuid_policy}"
- policy_desc = "Auto generated by Resource Manager Stack - oci-kubernetes-monitoring. Allows OKE Dynamic Group - ${local.dynamic_group_name} to upload data to Logging Analytics Service in ${local.la_compartment_name} compartment."
- policy_scope = var.root_compartment_ocid == var.oci_la_logGroup_compartment_ocid ? "tenancy" : "compartment ${local.la_compartment_name}"
- policy_statements = ["Allow dynamic-group ${local.dynamic_group_name} to {LOG_ANALYTICS_LOG_GROUP_UPLOAD_LOGS} in ${local.policy_scope}"]
-
-}
-
-# Logging Analytics Compartment
-data "oci_identity_compartment" "oci_la_compartment" {
- id = var.oci_la_logGroup_compartment_ocid
-}
-
-# OKE Compartment
-data "oci_identity_compartment" "oke_compartment" {
- id = var.oke_compartment_ocid
-}
-
-# Dynmaic Group
-resource "oci_identity_dynamic_group" "oke_dynamic_group" {
- name = local.dynamic_group_name
- description = local.dynamic_group_desc
- compartment_id = var.root_compartment_ocid
- matching_rule = local.complied_dynamic_group_rules
- #provider = oci.home_region
-}
-
-# Policy
-resource "oci_identity_policy" "oke_monitoring_policy" {
- name = local.policy_name
- description = local.policy_desc
- compartment_id = var.oci_la_logGroup_compartment_ocid
- statements = local.policy_statements
- #provider = oci.home_region
-
- depends_on = [oci_identity_dynamic_group.oke_dynamic_group]
-}
\ No newline at end of file
diff --git a/logan/terraform/oke/modules/iam/inputs.tf b/logan/terraform/oke/modules/iam/inputs.tf
deleted file mode 100644
index 013017b7..00000000
--- a/logan/terraform/oke/modules/iam/inputs.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-# tenancy ocid
-variable "root_compartment_ocid" {
- type = string
-}
-
-# Compartment of OCI Logging Analytics LogGroup
-variable "oci_la_logGroup_compartment_ocid" {
- type = string
-}
-
-# OKE Cluster Compartment
-variable "oke_compartment_ocid" {
- type = string
-}
-
-# OKE Cluster OCID
-variable "oke_cluster_ocid" {
- type = string
-}
diff --git a/logan/terraform/oke/modules/logan/outputs.tf b/logan/terraform/oke/modules/logan/outputs.tf
deleted file mode 100644
index b7d1c632..00000000
--- a/logan/terraform/oke/modules/logan/outputs.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-output "oci_la_namespace" {
- value = local.oci_la_namespace
-}
-
-output "oci_la_logGroup_ocid" {
- value = local.final_oci_la_logGroup_id
-}
\ No newline at end of file
diff --git a/logan/terraform/oke/oci_images.tf b/logan/terraform/oke/oci_images.tf
deleted file mode 100644
index e69de29b..00000000
diff --git a/logan/terraform/oke/oke.tf b/logan/terraform/oke/oke.tf
deleted file mode 100644
index 7fb8ae47..00000000
--- a/logan/terraform/oke/oke.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-data "oci_containerengine_cluster_kube_config" "oke" {
- cluster_id = var.oke_cluster_ocid
-}
-
-# kubeconfig when using Terraform locally. Not used by Oracle Resource Manager
-resource "local_file" "oke_kubeconfig" {
- content = data.oci_containerengine_cluster_kube_config.oke.content
- filename = "${path.module}/kubeconfig"
-}
\ No newline at end of file
diff --git a/logan/terraform/oke/outputs.tf b/logan/terraform/oke/outputs.tf
deleted file mode 100644
index e69de29b..00000000
diff --git a/logan/terraform/oke/providers.tf b/logan/terraform/oke/providers.tf
deleted file mode 100644
index 6b797e24..00000000
--- a/logan/terraform/oke/providers.tf
+++ /dev/null
@@ -1,72 +0,0 @@
-terraform {
- required_version = "~> 1.0.0, < 1.1"
- required_providers {
- oci = {
- source = "oracle/oci"
- version = ">= 4.96.0"
- # https://registry.terraform.io/providers/hashicorp/oci/4.85.0
- }
- helm = {
- source = "hashicorp/helm"
- version = "2.7.1"
- # https://registry.terraform.io/providers/hashicorp/helm/2.1.0
- }
- local = {
- source = "hashicorp/local"
- version = "2.2.3"
- # https://registry.terraform.io/providers/hashicorp/local/2.1.0
- }
- }
-}
-
-# https://docs.oracle.com/en-us/iaas/Content/API/SDKDocs/terraformproviderconfiguration.htm
-provider "oci" {
- tenancy_ocid = var.boat_auth ? var.boat_tenancy_ocid : var.tenancy_ocid
- region = var.region
-
- private_key_path = var.private_key_path
- fingerprint = var.fingerprint
- user_ocid = var.user_ocid
-}
-
-data "oci_identity_region_subscriptions" "regions" {
- tenancy_id = var.tenancy_ocid
-}
-
-locals {
- home_region = [for s in data.oci_identity_region_subscriptions.regions.region_subscriptions : s.region_name if s.is_home_region == true][0]
-}
-
-provider "oci" {
- alias = "home_region"
- tenancy_ocid = var.boat_auth ? var.boat_tenancy_ocid : var.tenancy_ocid
- region = local.home_region
-
- private_key_path = var.private_key_path
- fingerprint = var.fingerprint
- user_ocid = var.user_ocid
-}
-
-# data "oci_containerengine_cluster_kube_config" "oke" {
-# cluster_id = var.oke_cluster_ocid
-# }
-
-# locals {
-# cluster_endpoint = yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["clusters"][0]["cluster"]["server"]
-# cluster_ca_certificate = base64decode(yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["clusters"][0]["cluster"]["certificate-authority-data"])
-# cluster_id = yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["users"][0]["user"]["exec"]["args"][4]
-# cluster_region = yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["users"][0]["user"]["exec"]["args"][6]
-# }
-
-# # https://docs.cloud.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengdownloadkubeconfigfile.htm#notes
-# provider "helm" {
-# kubernetes {
-# host = local.cluster_endpoint
-# cluster_ca_certificate = local.cluster_ca_certificate
-# exec {
-# api_version = "client.authentication.k8s.io/v1beta1"
-# args = ["ce", "cluster", "generate-token", "--cluster-id", local.cluster_id, "--region", local.cluster_region]
-# command = "oci"
-# }
-# }
-# }
\ No newline at end of file
diff --git a/logan/terraform/oke/schema.yaml b/logan/terraform/oke/schema.yaml
deleted file mode 100644
index ab1de97d..00000000
--- a/logan/terraform/oke/schema.yaml
+++ /dev/null
@@ -1,200 +0,0 @@
-title: OCI Kubernetes Monitoring Solution
-description: Monitoring Solution for Kubernetes offered by OCI Logging Analytics
-informationalText: Monitoring Solution for Kubernetes offered by OCI Logging Analytics
-schemaVersion: 1.1.0
-version: "20221004"
-
-# URL of Logo Icon used on Application Information tab. Logo must be 130x130 pixels.
-# (Optional)
-#logoUrl: https://cloudmarketplace.oracle.com/marketplace/content?contentId=53066708
-
-source:
- type: marketplace # enum - marketplace, quickstart or web
-
-locale: "en"
-
-variableGroups:
- - title: "configuration inputs"
- variables:
- - auth_tenancy_ocid
- - tenancy_ocid
- - region
- - user_ocid
- - private_key_path
- - fingerprint
- - enable_helm_debugging
- - enable_dashboard_import
- - enable_helm_release
- - boat_auth
- - boat_tenancy_ocid
- - compartment_ocid
- visible: false
-
- - title: "Pre-requisites"
- variables:
- - opt_create_dynamicGroup_and_policies
- visible:
- and:
- - enable_helm_release
-
- - title: "OKE Cluster Information"
- variables:
- - oke_compartment_ocid
- - oke_cluster_ocid
- - kubernetes_namespace
- - opt_create_kubernetes_namespace
- visible:
- and:
- - enable_helm_release
-
- - title: "OCI Logging Analytics Information"
- variables:
- - oci_la_compartment_ocid
- - opt_create_new_la_logGroup
- - oci_la_logGroup_id
- - oci_la_logGroup_name
- visible:
- and:
- - enable_helm_release
-
- - title: "Fluentd Configuration"
- variables:
- - container_image_url
- - fluentd_baseDir_path
- visible:
- and:
- - enable_helm_release
-
-variables:
-
- ####
- ## Deployment Options
- ####
-
- # Option to install helm chart
- enable_helm_release:
- type: boolean
- title: Deploy Kubernetes Monitoring Solution
- description: "Ref: https://github.com/oracle-quickstart/oci-kubernetes-monitoring"
- default: true
- required: true
-
- ####
- ## Pre-requisites
- ####
-
- # Option to create Dynamic Group and Policies
- opt_create_dynamicGroup_and_policies:
- type: boolean
- title: Create Dynamic Group and Policies required for Kubernetes Monitoring Solution (Un-check if created manually)
- #description: "Ref: https://github.com/oracle-quickstart/oci-kubernetes-monitoring#pre-requisites"
- description: "Note: The auto-created dynamic group may need changes, if node pool(s) compartment is different than cluster compartment."
- default: true
- required: true
-
- ####
- ## OKE Cluster Information
- ####
-
- # OKE Cluster Compartment
- oke_compartment_ocid:
- type: oci:identity:compartment:id
- required: true
- title: OKE Cluster Compartment
- default: compartment_ocid
-
- # OKE Cluster OCID
- oke_cluster_ocid:
- type: oci:container:cluster:id
- dependsOn:
- compartmentId: ${oke_compartment_ocid}
- title: OKE Cluster
- required: true
-
- # Kubernetes Namespace
- kubernetes_namespace:
- type: string
- minLength: 1
- maxLength: 63
- title: Kubernetes Namespace
- description: Kubernetes Namespace in which the monitoring solution to be deployed
- default: kube-system
- pattern: '^([a-z0-9]|[a-z][a-z\-0-9]*[a-z0-9])$' #Ref - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
- required: true
-
- # Option to create Kubernetes Namespace
- opt_create_kubernetes_namespace:
- type: boolean
- title: Create namespace, if doesn't exist
- description: "Note: Un-checking this option can lead to failure when namespace doesn't exist in cluster"
- default: true
- required: true
-
- ####
- ## OCI Logging Analytics Information
- ####
-
- # Compartment for creating dashboards and logGroup
- oci_la_compartment_ocid:
- type: oci:identity:compartment:id
- required: true
- title: OCI Logging Analytics Compartment
- description: Compartment to store Logging Analytics dashboards and logGroup
- default: compartment_ocid
-
- # Option to create Logging Analytics
- opt_create_new_la_logGroup: # change this to create new log group
- type: boolean
- title: Create a new LogGroup
- default: false
-
- # OCI Logging Analytics LogGroup OCID of existing LogGroup
- oci_la_logGroup_id:
- type: oci:logan:loggroup:id
- dependsOn:
- compartmentId: ${oci_la_compartment_ocid}
- title: OCI Logging Analytics LogGroup
- description: LogGroup to store collected logs
- required: true
- visible:
- not:
- - opt_create_new_la_logGroup
-
- # New Log Group to collect Kubernetes data
- oci_la_logGroup_name:
- type: string
- maxLength: 255
- minLength: 1
- required: true
- title: "OCI Logging Analytics LogGroup Name"
- description: "Note: LogGroup name must be unique in a tenant"
- visible:
- and:
- - opt_create_new_la_logGroup
- pattern: '^([a-zA-Z0-9]|[a-zA-Z0-9][\\ a-zA-Z0-9_\-]*[\\a-zA-Z\-0-9_])$'
-
- ####
- ## Fluentd Configuration
- ####
-
- # OCI LA Fluentd Container Image
- container_image_url:
- type: string
- minLength: 1
- maxLength: 255
- title: OCI Logging Analytics fluentd container image
- default: iad.ocir.io/ax1wgjs6b2vc/oci_la_fluentd:ol8-1.1
- pattern: '^[\S]+$'
- required: true
- description: "Note: Default value is not recommended for production usage. You may need to build your own image using https://github.com/oracle-quickstart/oci-kubernetes-monitoring#docker-image"
-
- # Fluentd Base Directory
- fluentd_baseDir_path:
- type: string
- maxLength: 255
- minLength: 1
- title: Fluentd Base Directory
- description: Base directory on the node (with read & write permission) to store fluentd plugin's related data
- default: /var/log
- required: true
- pattern: '^/[\w- /]*$'
\ No newline at end of file
diff --git a/logan/terraform/oke/terraform-sample.tfvars b/logan/terraform/oke/terraform-sample.tfvars
deleted file mode 100644
index 24a9749f..00000000
--- a/logan/terraform/oke/terraform-sample.tfvars
+++ /dev/null
@@ -1,47 +0,0 @@
-
-### Configure Boat Authentication for OCI; leave unchaged if not using boat authentication
-boat_auth = false # set true to use BOAT Authentication
-boat_tenancy_ocid = "" # ; leave uncganged if boat_auth=false
-
-### OCI Provider inputs
-tenancy_ocid = "" # Use Boat tenancy OCID if boat_auth=true
-region = "" # add target region - ex: "us-phoenix-1"
-user_ocid = "" # ; leave it empty for cloud-shell
-private_key_path = "" # ; leave it empty for cloud-shell
-fingerprint = "" # ; leave it empty for cloud-shell
-
-### Stack inputs
-
-# Option to create Dynamic Group and Policies
-opt_create_dynamicGroup_and_policies = true # changes as required
-
-# OKE Cluster Compartment
-oke_compartment_ocid = ""
-
-# OKE Cluster OCID
-oke_cluster_ocid = ""
-
-# Kubernetes Namespace in which the monitoring solution to be deployed
-kubernetes_namespace = "kube-system" # can change if want to deploy in a custom namespace
-
-# Option to create Kubernetes Namespace
-opt_create_kubernetes_namespace = true # If true, kubernetes_namespace will be created if does not exist already
-
-# Compartment for creating dashboards and saved-searches and logGroup
-oci_la_compartment_ocid = ""
-
-# Option to create Logging Analytics
-opt_create_new_la_logGroup = false # if ture, oci_la_logGroup_name must be set
-
-# OCI Logging Analytics LogGroup
-oci_la_logGroup_id = "" # Add OCID of logGroup if opt_use_existing_la_logGroup=true, leave it empty otherwise
-
-# OCI Logging Analytics LogGroup Name
-oci_la_logGroup_name = "NewLogGroupName" # leave it unchanged, if opt_use_existing_la_logGroup=false
-
-# Image URL of OCI LA Fluentd Container
-# Reference - https://github.com/oracle-quickstart/oci-kubernetes-monitoring#docker-image
-container_image_url = ""
-
-# Base directory on the node (with read & write permission) to store fluentd plugin's related data
-fluentd_baseDir_path = "/var/log" # change as required
\ No newline at end of file
diff --git a/logan/terraform/oke/modules/dashboards/dashboard.tf b/terraform/modules/dashboards/dashboard.tf
similarity index 66%
rename from logan/terraform/oke/modules/dashboards/dashboard.tf
rename to terraform/modules/dashboards/dashboard.tf
index 56965339..6a5da5ec 100644
--- a/logan/terraform/oke/modules/dashboards/dashboard.tf
+++ b/terraform/modules/dashboards/dashboard.tf
@@ -1,8 +1,11 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
locals {
- dashboards = ["cluster.json", "node.json", "pod.json", "workload.json"]
+ dashboards = ["cluster.json", "node.json", "pod.json", "workload.json", "service-type-lb.json"]
}
resource "oci_management_dashboard_management_dashboards_import" "multi_management_dashboards_import" {
for_each = toset(local.dashboards)
import_details = templatefile(format("%s/%s/%s", "${path.module}", "dashboards_json", each.value), { "compartment_ocid" : "${var.compartment_ocid}" })
-}
\ No newline at end of file
+}
diff --git a/logan/terraform/oke/modules/dashboards/dashboards_json/cluster.json b/terraform/modules/dashboards/dashboards_json/cluster.json
similarity index 99%
rename from logan/terraform/oke/modules/dashboards/dashboards_json/cluster.json
rename to terraform/modules/dashboards/dashboards_json/cluster.json
index 52dcf371..a0cb9d02 100644
--- a/logan/terraform/oke/modules/dashboards/dashboards_json/cluster.json
+++ b/terraform/modules/dashboards/dashboards_json/cluster.json
@@ -3998,4 +3998,4 @@
"definedTags": {}
}
]
-}
\ No newline at end of file
+}
diff --git a/logan/terraform/oke/modules/dashboards/dashboards_json/node.json b/terraform/modules/dashboards/dashboards_json/node.json
similarity index 99%
rename from logan/terraform/oke/modules/dashboards/dashboards_json/node.json
rename to terraform/modules/dashboards/dashboards_json/node.json
index 117a914a..37db3276 100644
--- a/logan/terraform/oke/modules/dashboards/dashboards_json/node.json
+++ b/terraform/modules/dashboards/dashboards_json/node.json
@@ -1790,4 +1790,4 @@
"definedTags": {}
}
]
-}
\ No newline at end of file
+}
diff --git a/logan/terraform/oke/modules/dashboards/dashboards_json/pod.json b/terraform/modules/dashboards/dashboards_json/pod.json
similarity index 99%
rename from logan/terraform/oke/modules/dashboards/dashboards_json/pod.json
rename to terraform/modules/dashboards/dashboards_json/pod.json
index 795eac7d..ed90b4f3 100644
--- a/logan/terraform/oke/modules/dashboards/dashboards_json/pod.json
+++ b/terraform/modules/dashboards/dashboards_json/pod.json
@@ -1818,4 +1818,4 @@
"definedTags": {}
}
]
-}
\ No newline at end of file
+}
diff --git a/terraform/modules/dashboards/dashboards_json/service-type-lb.json b/terraform/modules/dashboards/dashboards_json/service-type-lb.json
new file mode 100644
index 00000000..a6227f3c
--- /dev/null
+++ b/terraform/modules/dashboards/dashboards_json/service-type-lb.json
@@ -0,0 +1,3082 @@
+{
+ "dashboards": [
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dashboardId": "ocid1.managementdashboard.oc1..aaaaaaaa7tmp6wlxgpkjavvyeuvdn3eevm32pmski2z4m22w3jxhtq32h6rq",
+ "dataConfig": [],
+ "description": "Kubernetes Services (Type: Load Balancer)",
+ "displayName": "Kubernetes Services (Type: Load Balancer)",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementdashboard.oc1..aaaaaaaa7tmp6wlxgpkjavvyeuvdn3eevm32pmski2z4m22w3jxhtq32h6rq",
+ "isFavorite": false,
+ "isOobDashboard": false,
+ "isShowDescription": true,
+ "isShowInHome": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "displayName": "Log Group Compartment",
+ "localStorageKey": "log-analytics-loggroup-filter",
+ "name": "log-analytics-loggroup-filter",
+ "parametersMap": {
+ "isStoreInLocalStorage": true
+ },
+ "savedSearchId": "OOBSS-management-dashboard-filter-4a",
+ "state": "DEFAULT",
+ "uiConfig": {
+ "defaultWidth": 3,
+ "filterName": "log-analytics-loggroup-filter",
+ "internalKey": "OOBSS-management-dashboard-filter-4a",
+ "minWidth": 3,
+ "vizFilterType": "lxLogGroupDashFilterType"
+ }
+ },
+ {
+ "displayName": "Cluster Name",
+ "localStorageKey": "logField_Kubernetes Cluster Name",
+ "name": "log-analytics-log-field-filter2",
+ "parametersMap": {
+ "isStoreInLocalStorage": true,
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "logFieldName": "Kubernetes Cluster Name"
+ },
+ "savedSearchId": "OOBSS-management-dashboard-filter-xlog",
+ "state": "DEFAULT"
+ },
+ {
+ "displayName": "Service (Type: LB)",
+ "localStorageKey": "logField_Service",
+ "name": "log-analytics-log-field-filter1",
+ "parametersMap": {
+ "flex": {
+ "Kubernetes Cluster Name": "$(dashboard.params.log-analytics-log-field-filter2)",
+ "Log Source": "Kubernetes Service Object Logs",
+ "Type": "loadbalancer"
+ },
+ "isStoreInLocalStorage": true,
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "logFieldName": "Service"
+ },
+ "savedSearchId": "OOBSS-management-dashboard-filter-xlog",
+ "state": "DEFAULT"
+ },
+ {
+ "displayName": "LBaaS IP",
+ "localStorageKey": "logField_Load Balancer IP",
+ "name": "log-analytics-log-field-filter3",
+ "parametersMap": {
+ "flex": {
+ "Service": "$(dashboard.params.log-analytics-log-field-filter1)"
+ },
+ "isStoreInLocalStorage": true,
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "logFieldName": "Load Balancer IP"
+ },
+ "savedSearchId": "OOBSS-management-dashboard-filter-xlog",
+ "state": "DEFAULT"
+ },
+ {
+ "displayName": "Load Balancer",
+ "localStorageKey": "logField_OCI Resource Name",
+ "name": "log-analytics-log-field-filter",
+ "parametersMap": {
+ "flex": {
+ "Load Balancer IP": "$(dashboard.params.log-analytics-log-field-filter3)",
+ "Type": "com.oraclecloud.loadbalancer.access"
+ },
+ "isStoreInLocalStorage": true,
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "logFieldName": "OCI Resource Name"
+ },
+ "savedSearchId": "OOBSS-management-dashboard-filter-xlog",
+ "state": "DEFAULT"
+ },
+ {
+ "displayName": "Kubernetes Node",
+ "localStorageKey": "k8sNodeFilter",
+ "name": "k8sNodeFilter",
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "isStoreInLocalStorage": true
+ },
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaaijqkcnwvt4fpevll6y5aa5xkhz27suolsc35t5m7ri4a2sth6cqq",
+ "state": "DEFAULT"
+ },
+ {
+ "displayName": "Entity",
+ "localStorageKey": "log-analytics-entity-filter",
+ "name": "log-analytics-entity-filter",
+ "parametersMap": {
+ "isStoreInLocalStorage": true
+ },
+ "savedSearchId": "OOBSS-management-dashboard-filter-2a",
+ "state": "DEFAULT",
+ "uiConfig": {
+ "defaultWidth": 6,
+ "filterName": "log-analytics-entity-filter",
+ "internalKey": "OOBSS-management-dashboard-filter-2a",
+ "minWidth": 6,
+ "vizFilterType": "lxEntityDashFilterType"
+ },
+ "width": 6
+ },
+ {
+ "displayName": "Log Set",
+ "localStorageKey": "log-analytics-logset-filter",
+ "name": "log-analytics-logset-filter1",
+ "parametersMap": {},
+ "savedSearchId": "OOBSS-management-dashboard-filter-3a",
+ "state": "DEFAULT",
+ "width": 6
+ },
+ {
+ "name": "time",
+ "src": "$(context.time)"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "savedSearches": [
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "Count of load balancers being monitored",
+ "displayName": "Load Balancers Count",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaa76smhssp5m56wt4eehroipemhm2tzhwtg7znrsjt43vhqrbwsaia",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-11T21:06:02.108Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "queryString": "'Log Source' in ('OCI Load Balancer Access Logs', 'OCI Load Balancer Error Logs') | stats distinctcount('OCI Resource Name'), trend(distinctcount('OCI Resource Name'))",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogFields": {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ },
+ "MetricCompartment": {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ "ResourceCompartment": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ },
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "timePeriod": "l60min"
+ },
+ "visualizationOptions": {
+ "changeLabel": "",
+ "formatNumber": false,
+ "hideLabel": true,
+ "showTrend": true
+ },
+ "visualizationType": "tile",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "Service count",
+ "displayName": "Services",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaa7zwdwtcluaapibq7pkw3d5mbrnvkulkz5ovgb2bs6ntxfqd7fj7a",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-15T19:36:44.924Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "internalKey": "ocid1.managementsavedsearch.oc1..aaaaaaaa7zwdwtcluaapibq7pkw3d5mbrnvkulkz5ovgb2bs6ntxfqd7fj7a",
+ "queryString": "'Log Source' = 'Kubernetes Service Object Logs' and Service != null and Type = loadbalancer | stats latest(Service) by Service",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogFields": {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "LogAnalytics",
+ "value": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": []
+ },
+ "MetricCompartment": {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ "Region": {
+ "flags": {},
+ "type": "Region",
+ "values": [
+ {
+ "label": "US West (Phoenix)",
+ "value": "us-phoenix-1"
+ }
+ ]
+ },
+ "ResourceCompartment": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "emdemo (root)",
+ "value": "ocid1.tenancy.oc1..aaaaaaaa5s2vdjjrydixjulorcwozffbpna37w5a35p3jhgpyshlkmio6oiq"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "LogAnalytics",
+ "value": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": []
+ },
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "emdemo (root)",
+ "value": "ocid1.tenancy.oc1..aaaaaaaa5s2vdjjrydixjulorcwozffbpna37w5a35p3jhgpyshlkmio6oiq"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "Region",
+ "values": [
+ {
+ "label": "US West (Phoenix)",
+ "value": "us-phoenix-1"
+ }
+ ]
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "timePeriod": "l60min"
+ },
+ "visualizationOptions": {
+ "changeLabel": "",
+ "customVizOpt": {
+ "primaryFieldDname": "Original Log Content",
+ "primaryFieldIname": "mbody"
+ },
+ "formatNumber": false,
+ "hideLabel": true,
+ "inputTextEnabled": true
+ },
+ "visualizationType": "tile",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "",
+ "displayName": "SD: Load Balancer to Node and Pods",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaa3pqjk76dbx4sa2633flzikbg76bjuhaxrcmgy2ul6ayznkr7uxma",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-11T21:06:02.108Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "internalKey": "ocid1.managementsavedsearch.oc1..aaaaaaaafzbpju6atzfofmtc2oge4t5774jium35cd5z36gkuaofm3npysfq",
+ "queryString": "'Log Source' = 'Kubernetes Endpoint Logs' or ('Log Source' in ('OCI Load Balancer Access Logs', 'Kubernetes Service Object Logs')) | link includenulls = true 'Load Balancer IP', Service, 'Destination IP', 'Destination Port' | stats unique('Log Source') as 'Log Source', unique('Kubernetes Cluster Name') as Cluster, unique(Node) as Node, unique(Pod) as Pod | createview [ * | where 'Log Source' = 'Kubernetes Service Object Logs' | rename 'Load Balancer IP' as LB, Service as S ] as 'KSS Logs' | map [ * | where 'Log Source' = 'Kubernetes Endpoint Logs' and Service = S | eval 'Load Balancer' = LB ] using 'KSS Logs' | where 'Load Balancer' != null | fields -'Load Balancer IP', -'Destination IP', -'Destination Port', -'Log Source'",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "timePeriod": "l14day"
+ },
+ "visualizationOptions": {
+ "customVizOpt": {
+ "LINK_SEARCH_SETTINGS": {
+ "chartHeightVal": 200,
+ "chartOptions": "bar",
+ "chartType": "bar",
+ "chartWidthVal": 60,
+ "columnAliases": {},
+ "dashboardOptions": {
+ "showAnalyzeTab": [],
+ "showChartsTab": [],
+ "showSummary": [],
+ "showTSCharts": [],
+ "showTable": [
+ "on"
+ ],
+ "showTabs": []
+ },
+ "groupAliasP": "Groups",
+ "groupAliasS": "Group",
+ "hiddenCharts": {
+ "groupColumn": true
+ },
+ "hiddenClassifyCharts": {},
+ "hiddenColumns": {
+ "g_count": true,
+ "g_duration": true,
+ "g_endepoch": true,
+ "g_startepoch": true,
+ "query_end_time": true,
+ "query_start_time": true,
+ "trend_interval": true,
+ "trend_interval_unit": true
+ },
+ "highlightColumnStatus": {},
+ "linkSummaryInput": "",
+ "logAliasP": "Log Records",
+ "mergeHighlightColumns": [
+ "off"
+ ],
+ "ms": [
+ "on"
+ ],
+ "showAllRegions": [],
+ "showCombinedCharts": [
+ "off"
+ ],
+ "showNonUnitRawData": [
+ "off"
+ ],
+ "showStack": [
+ "off"
+ ],
+ "showToolTips": [
+ "on"
+ ],
+ "showUnitRawData": [],
+ "smartGroup": [
+ "off"
+ ],
+ "styleDefaults": {
+ "lineType": "curved",
+ "markerDisplayed": "on"
+ },
+ "tableColumns": [
+ "Load Balancer",
+ "Service",
+ "Cluster",
+ "Node",
+ "Pod",
+ "Count",
+ "Start Time (UTC-08:00)",
+ "End Time (UTC-08:00)"
+ ],
+ "timeseries": {
+ "timestats1": {
+ "chartHeightVal": 200,
+ "chartOptions": "lineWithMarker",
+ "chartType": "line",
+ "chartWidthVal": 60,
+ "colorColumn": 0,
+ "hiddenTSCharts": {},
+ "showCombinedCharts": [
+ "on"
+ ],
+ "showLegend": [
+ "off"
+ ],
+ "showStack": [
+ "off"
+ ],
+ "showToolTips": [
+ "off"
+ ],
+ "smartGroup": [
+ "on"
+ ],
+ "timeSeriesColorPalette": {
+ "0": "udfs27",
+ "1": "map4_evalVirtualField2"
+ },
+ "timeSeriesColorPaletteCustom": {
+ "0": {},
+ "1": {}
+ },
+ "tsFilters": {
+ "selectAllFilters": [
+ "off"
+ ],
+ "selectedTSFilters": [
+ "udfs27",
+ "map4_evalVirtualField2"
+ ],
+ "showTSFilters": [
+ "on"
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "visualizationType": "link",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "Problem labels by lb name",
+ "displayName": "LB Problem Labels",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaakhx6idmt4r6tkl4ev5fijc5ojr7mgpgc3cluggh7oarmennj63zq",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-11T21:06:02.108Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "queryString": "'Log Source' in ('OCI Load Balancer Access Logs', 'OCI Load Balancer Error Logs') and Label != null | stats count by Label, 'OCI Resource Name'",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogFields": {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ },
+ "MetricCompartment": {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ "ResourceCompartment": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "eightxeightmain (root)",
+ "value": "ocid1.tenancy.oc1..aaaaaaaax7tm7jtfarexna447cmubjxwou6lug42jss2ddyis63wqo3lrpda"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ },
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "eightxeightmain (root)",
+ "value": "ocid1.tenancy.oc1..aaaaaaaax7tm7jtfarexna447cmubjxwou6lug42jss2ddyis63wqo3lrpda"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "timePeriod": "l60min"
+ },
+ "visualizationOptions": {
+ "customVizOpt": {
+ "primaryFieldDname": "Original Log Content",
+ "primaryFieldIname": "mbody"
+ }
+ },
+ "visualizationType": "treemap",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/nodeMemoryUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "maxDataPoints": "useIntervalExact",
+ "mql": "nodeMemoryUsage[1m]{clusterName = \"onm3h-demo\"}.mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "description": "Node Memory Usage",
+ "displayName": "Node Memory Usage",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaa2ctxqgooubu2oqvz6p2u7tnrlbr6davpwt5fxdmqg3hafktdtk4a",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-time-selector-filter"
+ ],
+ "displayName": "Time",
+ "editUi": {
+ "filterTile": {
+ "filterId": "OOBSS-management-dashboard-time-selector-filter"
+ },
+ "inputType": "savedSearch"
+ },
+ "name": "time",
+ "required": true
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-compartment-filter"
+ ],
+ "displayName": "Compartment",
+ "editUi": {
+ "inputType": "compartmentSelect"
+ },
+ "name": "compartmentId",
+ "required": true
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-region-filter"
+ ],
+ "displayName": "Region",
+ "editUi": {
+ "filterTile": {
+ "filterId": "OOBSS-management-dashboard-region-filter"
+ },
+ "inputType": "savedSearch"
+ },
+ "name": "regionName",
+ "required": false
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": "...",
+ "timeCreated": "2023-05-15T20:29:55.991Z",
+ "timeUpdated": "2023-05-15T20:31:56.808Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "dimensions.nodeName",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "end",
+ "rendered": true
+ },
+ "timeAxisType": "enabled",
+ "type": "line",
+ "xAxis": {
+ "viewportMax": "$(params.time.end)",
+ "viewportMin": "$(params.time.start)"
+ }
+ },
+ "series": "dimensions.nodeName",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/nodeMemoryUsage",
+ "vizType": "chart"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "visualizations/chartWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/podCpuUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "maxDataPoints": "useIntervalExact",
+ "mql": "podCpuUsage[1m].mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "description": "Pods CPU Usage",
+ "displayName": "Pods CPU Usage",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaao66kcpdxkzv7euzz2nolnheoztvhjj4ireqfkalgzvg27xtb3wqa",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-time-selector-filter"
+ ],
+ "displayName": "Time",
+ "editUi": {
+ "filterTile": {
+ "filterId": "OOBSS-management-dashboard-time-selector-filter"
+ },
+ "inputType": "savedSearch"
+ },
+ "name": "time",
+ "required": true
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-compartment-filter"
+ ],
+ "displayName": "Compartment",
+ "editUi": {
+ "inputType": "compartmentSelect"
+ },
+ "name": "compartmentId",
+ "required": true
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-region-filter"
+ ],
+ "displayName": "Region",
+ "editUi": {
+ "filterTile": {
+ "filterId": "OOBSS-management-dashboard-region-filter"
+ },
+ "inputType": "savedSearch"
+ },
+ "name": "regionName",
+ "required": false
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": "...",
+ "timeCreated": "2023-05-15T20:21:35.816Z",
+ "timeUpdated": "2023-05-15T20:23:19.906Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "dimensions.containerName",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "end",
+ "rendered": true
+ },
+ "timeAxisType": "enabled",
+ "type": "line",
+ "xAxis": {
+ "viewportMax": "$(params.time.end)",
+ "viewportMin": "$(params.time.start)"
+ }
+ },
+ "series": "dimensions.containerName",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "mgmtagent_kubernetes_metrics/podCpuUsage",
+ "vizType": "chart"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "visualizations/chartWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "",
+ "displayName": "Backend Connect Times",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": true
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaamzjz6demzwue2rr2whhgiml3wrf3oe5vlln6avfwdtjjjfwarjba",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-15T21:05:04.920Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "internalKey": "ocid1.managementsavedsearch.oc1..aaaaaaaamzjz6demzwue2rr2whhgiml3wrf3oe5vlln6avfwdtjjjfwarjba",
+ "queryString": "'Log Source' in ('OCI Load Balancer Access Logs', 'OCI Load Balancer Error Logs') | rename 'Backend Connect Time' as tar | stats min(tar) as Min, avg(tar) as Mean, median(tar) as P50, pct(tar, 75) as P75, max(tar) as Max by 'OCI Resource Name' | sort -Max",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogFields": {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "LogAnalytics",
+ "value": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": []
+ },
+ "MetricCompartment": {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ "Region": {
+ "flags": {},
+ "type": "Region",
+ "values": [
+ {
+ "label": "US West (Phoenix)",
+ "value": "us-phoenix-1"
+ }
+ ]
+ },
+ "ResourceCompartment": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "emdemo (root)",
+ "value": "ocid1.tenancy.oc1..aaaaaaaa5s2vdjjrydixjulorcwozffbpna37w5a35p3jhgpyshlkmio6oiq"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "LogAnalytics",
+ "value": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "MetricCompartment",
+ "values": []
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": []
+ },
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "ResourceCompartment",
+ "values": [
+ {
+ "label": "emdemo (root)",
+ "value": "ocid1.tenancy.oc1..aaaaaaaa5s2vdjjrydixjulorcwozffbpna37w5a35p3jhgpyshlkmio6oiq"
+ }
+ ]
+ },
+ {
+ "flags": {},
+ "type": "LogFields",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "Region",
+ "values": [
+ {
+ "label": "US West (Phoenix)",
+ "value": "us-phoenix-1"
+ }
+ ]
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "numUnits": 60,
+ "timePeriod": "relative",
+ "units": "MINUTES"
+ },
+ "visualizationOptions": {
+ "customVizOpt": {
+ "GEOMAP_SETTINGS": {
+ "basemap": "bi_world_map_light",
+ "clusterColor": "rgb(192, 192, 192)",
+ "filterOnZoom": false,
+ "isShowLegend": true,
+ "lat": 2273030.9269876885,
+ "lon": 0,
+ "mapZoom": 1,
+ "pointColor": "rgb(0, 0, 255)",
+ "srid": 3857,
+ "toggleClusters": false
+ }
+ }
+ },
+ "visualizationType": "summary_table",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "",
+ "displayName": "Response Time",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaanonr7kl5v3cycwxl45h4n5ybz76meoudfgpc5gbsggtxf3djduqa",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-11T21:06:02.108Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "internalKey": "ocid1.managementsavedsearch.oc1..aaaaaaaanyae67s4f3hpipt3gyzkugtry3hu35nlogflmgtgmuagil3ikyzq",
+ "queryString": "'Log Source' in ('OCI Load Balancer Access Logs', 'OCI Load Balancer Error Logs') | eval tar = unit('Response Time', second) | link 'OCI Resource Name' | stats min(tar) as Minimum, avg(tar) as Average, pct(tar, 75) as P75, max(tar) as Maximum | classify topcount = 300 correlate = -*, Average 'Start Time', 'OCI Resource Name', P75, Maximum as 'Content Size In'",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "%",
+ "value": "%"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "%",
+ "value": "%"
+ }
+ ]
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "timePeriod": "l24hr"
+ },
+ "visualizationOptions": {
+ "customVizOpt": {
+ "LINK_CLASSIFY_SETTINGS": {
+ "Content Size In": {
+ "chartHeight": 200,
+ "chartType": "bubble",
+ "classifyColorPalette": {
+ "1": "default",
+ "7": "ocirsrcname",
+ "8": "default",
+ "9": "default"
+ },
+ "classifyColorPaletteCustom": {
+ "7": {}
+ },
+ "classifyFilters": {
+ "classifyNarrowResults": [
+ "on"
+ ],
+ "selectAllFilters": [
+ "on"
+ ],
+ "selectedClassifyFilters": [
+ 6,
+ 7,
+ 8
+ ],
+ "showClassifyFilters": []
+ },
+ "colorColumn": 8,
+ "descendingXAxis": [
+ null
+ ],
+ "descendingYAxis": [
+ null
+ ],
+ "drilldown": "on",
+ "groupAlias": "Groups",
+ "groupAliasS": "Group",
+ "showAnomaly": [
+ "on"
+ ],
+ "showBaseline": [
+ "off"
+ ],
+ "showDimensions": [
+ "on"
+ ],
+ "sizeColumn": 8,
+ "swapXY": [
+ "off"
+ ],
+ "zeroXAxis": [
+ "on"
+ ],
+ "zeroYAxis": [
+ "on"
+ ]
+ }
+ },
+ "LINK_SEARCH_SETTINGS": {
+ "chartHeightVal": 140,
+ "chartOptions": "lineWithArea",
+ "chartType": "lineWithArea",
+ "chartWidthVal": 20,
+ "columnAliases": {},
+ "dashboardOptions": {
+ "showAnalyzeTab": [],
+ "showChartsTab": [
+ "on"
+ ],
+ "showSummary": [],
+ "showTSCharts": [],
+ "showTable": [],
+ "showTabs": []
+ },
+ "groupAliasP": "Groups",
+ "groupAliasS": "Group",
+ "hiddenCharts": {
+ "TREND_COLUMN": true,
+ "groupColumn": true
+ },
+ "hiddenClassifyCharts": {},
+ "hiddenColumns": {
+ "g_duration": true,
+ "query_end_time": true,
+ "query_start_time": true,
+ "trend_interval": true,
+ "trend_interval_unit": true
+ },
+ "highlightColumnStatus": {},
+ "linkSummaryInput": "",
+ "logAliasP": "Log Records",
+ "mergeHighlightColumns": [
+ "off"
+ ],
+ "ms": [
+ "on"
+ ],
+ "showAllRegions": [
+ "on"
+ ],
+ "showCombinedCharts": [
+ "off"
+ ],
+ "showNonUnitRawData": [],
+ "showStack": [
+ "off"
+ ],
+ "showToolTips": [
+ "on"
+ ],
+ "showUnitRawData": [],
+ "smartGroup": [
+ "off"
+ ],
+ "styleDefaults": {
+ "lineType": "curved",
+ "markerDisplayed": "off"
+ },
+ "timeseries": {}
+ }
+ }
+ },
+ "visualizationType": "link",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [
+ {
+ "name": "k8sNodeNames",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "dimension": "nodeName",
+ "dimensionFilters": {},
+ "metricName": "nodeStatus",
+ "namespace": "mgmtagent_kubernetes_metrics"
+ },
+ "type": "monitoringDimensionDataSource"
+ }
+ ],
+ "description": "Kubernetes objects in a compartment",
+ "displayName": "Kubernetes Node",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaaijqkcnwvt4fpevll6y5aa5xkhz27suolsc35t5m7ri4a2sth6cqq",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-compartment-filter"
+ ],
+ "displayName": "Compartment",
+ "editUi": {
+ "inputType": "compartmentSelect"
+ },
+ "name": "compartmentId",
+ "required": true
+ }
+ ],
+ "providerId": "management-dashboard",
+ "providerName": "Management Dashboard",
+ "providerVersion": "3.0.0",
+ "screenImage": "...",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-11T21:06:02.108Z",
+ "type": "FILTER_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "defaultDataSource": "k8sNodeNames",
+ "filterName": "k8sNodeFilter",
+ "genericFilterInfo": {
+ "firstOption": {
+ "label": "All",
+ "value": ""
+ },
+ "noDataMsg": "All",
+ "placeholder": "All"
+ },
+ "vizFilterType": "genericFilter"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "visualizations/chartWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "OKE Nodes",
+ "displayName": "Nodes",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaanuh7tdxvj52bx5th2iqnndjzfr3i6mejbqiziil24v7xmwuvwxmq",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "2.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-11T21:06:02.108Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "internalKey": "ocid1.managementsavedsearch.oc1..aaaaaaaa3rw4ke22wnlelaqkguvxbj5fvykfp377fj722wo2xaubsg5bodsa",
+ "queryString": "'Log Source' = 'Kubernetes Node Object Logs' | stats latest('Ready Status') as Ready by Node",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "%",
+ "value": "%"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "%",
+ "value": "%"
+ }
+ ]
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "numUnits": 60,
+ "timePeriod": "relative",
+ "units": "MINUTES"
+ },
+ "visualizationOptions": {
+ "changeLabel": "Nodes",
+ "formatNumber": false,
+ "hideLabel": true,
+ "inputTextEnabled": true
+ },
+ "visualizationType": "tile",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "jet-modules/dashboards/widgets/lxSavedSearchWidget.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "",
+ "displayName": "Load Balancer Errors and Services",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": false
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaakzhn2cfwz2cz6btwvfk5b6vt4jcr2djfmrc5sz6pyxbjuuwgb22a",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-11T21:06:02.108Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "internalKey": "ocid1.managementsavedsearch.oc1..aaaaaaaauob7etq3rt3natddhzqd56jvhvod4bpuqautcridsn55br5y2jea",
+ "queryString": "'Log Source' in ('OCI Load Balancer Error Logs', 'Kubernetes Service Object Logs') | link includenulls = true Time, 'Load Balancer IP', cluster() | stats latest(Service) as S, unique(Namespace) as N | createview [ * | where N != null | rename 'Load Balancer IP' as LB, N as N1, S as S1 ] as 'Service Records' | map [ * | where N = null and 'Load Balancer IP' = LB | eval Service = S1 | eval Namespace = N1 ] using 'Service Records' | where Service != null | fields -S, -N, -'Potential Issue' | timestats sum(Count) as 'Load Balancer Issues' by 'Load Balancer IP', Service",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "ops-development",
+ "value": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ }
+ ]
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaauiq74grhbq2n7pcjuvrkad5alaaj63dsrtcudefa7uvdwoo3kqpa"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": [
+ {
+ "label": "*",
+ "value": "*"
+ }
+ ]
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "timePeriod": "l14day"
+ },
+ "visualizationOptions": {
+ "customVizOpt": {
+ "LINK_SEARCH_SETTINGS": {
+ "chartHeightVal": 200,
+ "chartOptions": "bar",
+ "chartType": "bar",
+ "chartWidthVal": 60,
+ "columnAliases": {},
+ "dashboardOptions": {
+ "showAnalyzeTab": [
+ "on"
+ ],
+ "showChartsTab": [],
+ "showSummary": [
+ "on"
+ ],
+ "showTSCharts": [],
+ "showTable": [],
+ "showTabs": [
+ "on"
+ ]
+ },
+ "groupAliasP": "Groups",
+ "groupAliasS": "Group",
+ "hiddenCharts": {
+ "groupColumn": true
+ },
+ "hiddenClassifyCharts": {},
+ "hiddenColumns": {
+ "g_duration": true,
+ "query_end_time": true,
+ "query_start_time": true,
+ "trend_interval": true,
+ "trend_interval_unit": true
+ },
+ "highlightColumnStatus": {},
+ "linkSummaryInput": "",
+ "logAliasP": "Log Records",
+ "mergeHighlightColumns": [
+ "off"
+ ],
+ "ms": [
+ "on"
+ ],
+ "showAllRegions": [],
+ "showCombinedCharts": [
+ "off"
+ ],
+ "showNonUnitRawData": [
+ "off"
+ ],
+ "showStack": [
+ "off"
+ ],
+ "showToolTips": [
+ "on"
+ ],
+ "showUnitRawData": [],
+ "smartGroup": [
+ "off"
+ ],
+ "styleDefaults": {
+ "lineType": "curved",
+ "markerDisplayed": "on"
+ },
+ "timeseries": {
+ "timestats1": {
+ "chartHeightVal": 200,
+ "chartOptions": "lineWithMarker",
+ "chartType": "line",
+ "chartWidthVal": 60,
+ "colorColumn": 0,
+ "hiddenTSCharts": {},
+ "showCombinedCharts": [
+ "on"
+ ],
+ "showLegend": [
+ "off"
+ ],
+ "showStack": [
+ "off"
+ ],
+ "showToolTips": [
+ "off"
+ ],
+ "smartGroup": [
+ "on"
+ ],
+ "timeSeriesColorPalette": {
+ "0": "udfs27",
+ "1": "map4_evalVirtualField2"
+ },
+ "timeSeriesColorPaletteCustom": {
+ "0": {},
+ "1": {}
+ },
+ "tsFilters": {
+ "selectAllFilters": [
+ "off"
+ ],
+ "selectedTSFilters": [
+ "udfs27",
+ "map4_evalVirtualField2"
+ ],
+ "showTSFilters": [
+ "on"
+ ]
+ }
+ }
+ }
+ }
+ }
+ },
+ "visualizationType": "link",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ },
+ {
+ "compartmentId": "${compartment_ocid}",
+ "createdBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "dataConfig": [],
+ "description": "",
+ "displayName": "Backend Processing Times",
+ "drilldownConfig": [],
+ "featuresConfig": {
+ "crossService": {
+ "shared": true
+ }
+ },
+ "freeformTags": {},
+ "id": "ocid1.managementsavedsearch.oc1..aaaaaaaau76opbz75ozaotvdpne4g76z267pm6djgehamu2paiutwff5uwna",
+ "isOobSavedSearch": false,
+ "lifecycleState": "ACTIVE",
+ "metadataVersion": "2.0",
+ "nls": {},
+ "parametersConfig": [
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-4a"
+ ],
+ "displayName": "Log Group Compartment",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-log-group-compartment",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-2a"
+ ],
+ "displayName": "Entity",
+ "editUi": {
+ "inputType": "none"
+ },
+ "name": "log-analytics-entity",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "defaultFilterIds": [
+ "OOBSS-management-dashboard-filter-3a"
+ ],
+ "displayName": "Log Set",
+ "editUi": {
+ "inputType": "none"
+ },
+ "hidden": "$(window.logSetNotEnabled)",
+ "name": "log-analytics-log-set",
+ "required": true,
+ "valueFormat": {
+ "type": "object"
+ }
+ },
+ {
+ "displayName": "$(bundle.globalSavedSearch.TIME)",
+ "hidden": true,
+ "name": "time",
+ "required": true
+ },
+ {
+ "name": "flex"
+ }
+ ],
+ "providerId": "log-analytics",
+ "providerName": "Logging Analytics",
+ "providerVersion": "3.0.0",
+ "screenImage": " ",
+ "timeCreated": "2023-05-11T21:06:02.108Z",
+ "timeUpdated": "2023-05-15T21:06:31.289Z",
+ "type": "WIDGET_SHOW_IN_DASHBOARD",
+ "uiConfig": {
+ "enableWidgetInApp": true,
+ "internalKey": "ocid1.managementsavedsearch.oc1..aaaaaaaau76opbz75ozaotvdpne4g76z267pm6djgehamu2paiutwff5uwna",
+ "queryString": "'Log Source' in ('OCI Load Balancer Access Logs', 'OCI Load Balancer Error Logs') | rename 'Backend Processing Time' as tar | stats min(tar) as Min, avg(tar) as Mean, median(tar) as P50, pct(tar, 75) as P75, max(tar) as Max by 'OCI Resource Name' | sort -Max",
+ "scopeFilters": {
+ "Entity": {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ "LogGroup": {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "LogAnalytics",
+ "value": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ }
+ ]
+ },
+ "LogSet": {
+ "flags": {},
+ "type": "LogSet",
+ "values": []
+ },
+ "Region": {
+ "flags": {},
+ "type": "Region",
+ "values": [
+ {
+ "label": "US West (Phoenix)",
+ "value": "us-phoenix-1"
+ }
+ ]
+ },
+ "filters": [
+ {
+ "flags": {
+ "IncludeSubCompartments": true
+ },
+ "type": "LogGroup",
+ "values": [
+ {
+ "label": "LogAnalytics",
+ "value": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ }
+ ]
+ },
+ {
+ "flags": {
+ "IncludeDependents": true,
+ "ScopeCompartmentId": "ocid1.compartment.oc1..aaaaaaaa4yj2x6hjxntcf5vydrdvsm3trgblkmwgcmvxiar2miklv3ip4t7q"
+ },
+ "type": "Entity",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "LogSet",
+ "values": []
+ },
+ {
+ "flags": {},
+ "type": "Region",
+ "values": [
+ {
+ "label": "US West (Phoenix)",
+ "value": "us-phoenix-1"
+ }
+ ]
+ }
+ ],
+ "isGlobal": false
+ },
+ "showTitle": true,
+ "timeSelection": {
+ "numUnits": 60,
+ "timePeriod": "relative",
+ "units": "MINUTES"
+ },
+ "visualizationOptions": {
+ "customVizOpt": {
+ "GEOMAP_SETTINGS": {
+ "basemap": "bi_world_map_light",
+ "clusterColor": "rgb(192, 192, 192)",
+ "filterOnZoom": false,
+ "isShowLegend": true,
+ "lat": 2273030.9269876885,
+ "lon": 0,
+ "mapZoom": 1,
+ "pointColor": "rgb(0, 0, 255)",
+ "srid": 3857,
+ "toggleClusters": false
+ }
+ }
+ },
+ "visualizationType": "summary_table",
+ "vizType": "lxSavedSearchWidgetType"
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM",
+ "widgetTemplate": "visualizations/chartWidgetTemplate.html",
+ "widgetVM": "jet-modules/dashboards/widgets/lxSavedSearchWidget"
+ }
+ ],
+ "screenImage": "todo: provide value[mandatory]",
+ "tiles": [
+ {
+ "column": 6,
+ "dataConfig": [
+ {
+ "name": "autoUnion",
+ "parameters": {
+ "dependents": [
+ "oci_lbaas/UnHealthyBackendServers",
+ "oci_lbaas/BackendServers"
+ ],
+ "orderByClause": [
+ {
+ "field": "aggregatedDatapoints.timestamp"
+ }
+ ]
+ },
+ "type": "transformUnion"
+ },
+ {
+ "name": "oci_lbaas/UnHealthyBackendServers",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "maxDataPoints": "useInterval",
+ "mql": "UnHealthyBackendServers[1m]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().max()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ },
+ {
+ "name": "oci_lbaas/BackendServers",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "maxDataPoints": "useInterval",
+ "mql": "BackendServers[1m]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().max()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "displayName": "Backend Servers",
+ "drilldownConfig": [],
+ "height": 3,
+ "nls": {},
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 7,
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "state": "DEFAULT",
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "name",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "timeAxisType": "enabled",
+ "type": "line",
+ "xAxis": {
+ "viewportMax": "$(params.time.end)",
+ "viewportMin": "$(params.time.start)"
+ }
+ },
+ "series": "name",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "autoUnion",
+ "internalKey": "OOBSS-management-dashboard-123"
+ },
+ "width": 6
+ },
+ {
+ "column": 6,
+ "dataConfig": [
+ {
+ "name": "autoUnion",
+ "parameters": {
+ "dependents": [
+ "oci_lbaas/HttpResponses5xx",
+ "oci_lbaas/BackendTimeouts",
+ "oci_lbaas/HttpResponses4xx"
+ ],
+ "orderByClause": [
+ {
+ "field": "aggregatedDatapoints.timestamp"
+ }
+ ]
+ },
+ "type": "transformUnion"
+ },
+ {
+ "name": "oci_lbaas/HttpResponses4xx",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "HttpResponses4xx[auto]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().max()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ },
+ {
+ "name": "oci_lbaas/HttpResponses5xx",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "HttpResponses5xx[auto]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().max()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ },
+ {
+ "name": "oci_lbaas/BackendTimeouts",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "BackendTimeouts[auto]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().max()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "displayName": "Errors and Timeouts (Count)",
+ "drilldownConfig": [],
+ "height": 3,
+ "nls": {},
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 4,
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "state": "DEFAULT",
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "name",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "timeAxisType": "enabled",
+ "type": "line",
+ "xAxis": {
+ "viewportMax": "$(params.time.end)",
+ "viewportMin": "$(params.time.start)"
+ }
+ },
+ "series": "name",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "autoUnion",
+ "internalKey": "OOBSS-management-dashboard-123"
+ },
+ "width": 6
+ },
+ {
+ "column": 6,
+ "dataConfig": [
+ {
+ "name": "autoUnion",
+ "parameters": {
+ "dependents": [
+ "oci_lbaas/HttpRequests",
+ "oci_lbaas/HttpResponses"
+ ],
+ "orderByClause": [
+ {
+ "field": "aggregatedDatapoints.timestamp"
+ }
+ ]
+ },
+ "type": "transformUnion"
+ },
+ {
+ "name": "oci_lbaas/HttpRequests",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "HttpRequests[auto]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().sum()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ },
+ {
+ "name": "oci_lbaas/HttpResponses",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "HttpResponses[auto]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().sum()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "displayName": "Requests/Response",
+ "drilldownConfig": [],
+ "height": 2,
+ "nls": {},
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 2,
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "state": "DEFAULT",
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "name",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "styleDefaults": {
+ "lineWidth": 2
+ },
+ "timeAxisType": "enabled",
+ "type": "line",
+ "xAxis": {
+ "viewportMax": "$(params.time.end)",
+ "viewportMin": "$(params.time.start)"
+ }
+ },
+ "series": "name",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "autoUnion",
+ "internalKey": "OOBSS-management-dashboard-123"
+ },
+ "width": 6
+ },
+ {
+ "column": 4,
+ "dataConfig": [],
+ "displayName": "Load Balancers Count",
+ "drilldownConfig": [],
+ "height": 2,
+ "nls": {},
+ "parametersMap": {
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 0,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaa76smhssp5m56wt4eehroipemhm2tzhwtg7znrsjt43vhqrbwsaia",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 2
+ },
+ {
+ "column": 6,
+ "dataConfig": [],
+ "displayName": "LB Problem Labels",
+ "drilldownConfig": [],
+ "height": 2,
+ "nls": {},
+ "parametersMap": {
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 0,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaakhx6idmt4r6tkl4ev5fijc5ojr7mgpgc3cluggh7oarmennj63zq",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 3
+ },
+ {
+ "column": 0,
+ "dataConfig": [],
+ "displayName": "Backend Connect Times",
+ "drilldownConfig": [],
+ "height": 4,
+ "nls": {},
+ "parametersMap": {
+ "flex": {
+ "Load Balancer IP": "$(dashboard.params.log-analytics-log-field-filter3)"
+ },
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 13,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaamzjz6demzwue2rr2whhgiml3wrf3oe5vlln6avfwdtjjjfwarjba",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 6
+ },
+ {
+ "column": 6,
+ "dataConfig": [],
+ "displayName": "Backend Processing Times",
+ "drilldownConfig": [],
+ "height": 4,
+ "nls": {},
+ "parametersMap": {
+ "flex": {
+ "OCI Resource Name": "$(dashboard.params.log-analytics-log-field-filter)"
+ },
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 13,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaau76opbz75ozaotvdpne4g76z267pm6djgehamu2paiutwff5uwna",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 6
+ },
+ {
+ "column": 0,
+ "dataConfig": [],
+ "displayName": "Load Balancers Response Time",
+ "drilldownConfig": [],
+ "height": 3,
+ "nls": {},
+ "parametersMap": {
+ "flex": {
+ "Load Balancer IP": "$(dashboard.params.log-analytics-log-field-filter3)"
+ },
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 10,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaanonr7kl5v3cycwxl45h4n5ybz76meoudfgpc5gbsggtxf3djduqa",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 6
+ },
+ {
+ "column": 0,
+ "dataConfig": [],
+ "displayName": "Services",
+ "drilldownConfig": [],
+ "height": 2,
+ "nls": {},
+ "parametersMap": {
+ "flex": {
+ "Kubernetes Cluster Name": "$(dashboard.params.log-analytics-log-field-filter2)",
+ "Service": "$(dashboard.params.log-analytics-log-field-filter1)"
+ },
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 0,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaa7zwdwtcluaapibq7pkw3d5mbrnvkulkz5ovgb2bs6ntxfqd7fj7a",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 2
+ },
+ {
+ "column": 2,
+ "dataConfig": [],
+ "displayName": "Nodes",
+ "drilldownConfig": [],
+ "height": 2,
+ "nls": {},
+ "parametersMap": {
+ "flex": {
+ "Kubernetes Cluster Name": "$(dashboard.params.log-analytics-log-field-filter2)"
+ },
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 0,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaanuh7tdxvj52bx5th2iqnndjzfr3i6mejbqiziil24v7xmwuvwxmq",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 2
+ },
+ {
+ "column": 0,
+ "dataConfig": [],
+ "displayName": "Load Balancer Errors and Services",
+ "drilldownConfig": [],
+ "height": 5,
+ "nls": {},
+ "parametersMap": {
+ "flex": {
+ "Load Balancer IP": "$(dashboard.params.log-analytics-log-field-filter3)"
+ },
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 2,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaakzhn2cfwz2cz6btwvfk5b6vt4jcr2djfmrc5sz6pyxbjuuwgb22a",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 6
+ },
+ {
+ "column": 9,
+ "dataConfig": [
+ {
+ "name": "oci_lbaas/BackendServers",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "BackendServers[auto]{lbName=$(dashboard.params.log-analytics-log-field-filter)}.grouping().max()",
+ "namespace": "oci_lbaas",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "displayName": "Backend Servers",
+ "drilldownConfig": [],
+ "height": 2,
+ "nls": {},
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 0,
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "state": "DEFAULT",
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "name",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "timeAxisType": "enabled",
+ "type": "line",
+ "xAxis": {
+ "viewportMax": "$(params.time.end)",
+ "viewportMin": "$(params.time.start)"
+ },
+ "yAxis": {
+ "title": "Backend Server Count"
+ }
+ },
+ "series": "name",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "oci_lbaas/BackendServers",
+ "internalKey": "OOBSS-management-dashboard-123",
+ "sharedCrossService": [
+ "ALL"
+ ]
+ },
+ "width": 3
+ },
+ {
+ "column": 0,
+ "dataConfig": [],
+ "displayName": "Load Balancer to Nodes and Pods",
+ "drilldownConfig": [],
+ "height": 3,
+ "nls": {},
+ "parametersMap": {
+ "flex": {
+ "Load Balancer IP": "$(dashboard.params.log-analytics-log-field-filter3)"
+ },
+ "log-analytics-entity": "$(dashboard.params.log-analytics-entity-filter)",
+ "log-analytics-log-group-compartment": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "log-analytics-log-set": "$(dashboard.params.log-analytics-logset-filter1)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 7,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaa3pqjk76dbx4sa2633flzikbg76bjuhaxrcmgy2ul6ayznkr7uxma",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 6
+ },
+ {
+ "column": 6,
+ "dataConfig": [
+ {
+ "name": "mgmtagent_kubernetes_metrics/nodeCpuUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "nodeCpuUsage[auto]{clusterName=$(dashboard.params.log-analytics-log-field-filter2),nodeName=$(dashboard.params.k8sNodeFilter)}.grouping().mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ },
+ {
+ "name": "autoUnion",
+ "parameters": {
+ "dependents": [
+ "mgmtagent_kubernetes_metrics/nodeCpuUsage",
+ "mgmtagent_kubernetes_metrics/nodeMemoryUsage"
+ ],
+ "orderByClause": [
+ {
+ "field": "aggregatedDatapoints.timestamp"
+ }
+ ]
+ },
+ "type": "transformUnion"
+ },
+ {
+ "name": "mgmtagent_kubernetes_metrics/nodeMemoryUsage",
+ "parameters": {
+ "compartmentId": "$(params.compartmentId)",
+ "endTime": "$(params.time.end)",
+ "mql": "nodeMemoryUsage[auto]{nodeName=$(dashboard.params.k8sNodeFilter),clusterName=$(dashboard.params.log-analytics-log-field-filter2)}.grouping().mean()",
+ "namespace": "mgmtagent_kubernetes_metrics",
+ "regionName": "$(params.regionName)",
+ "startTime": "$(params.time.start)"
+ },
+ "type": "monitoringDataSource"
+ }
+ ],
+ "displayName": "Nodes CPU & Memory Usage",
+ "drilldownConfig": [],
+ "height": 3,
+ "nls": {},
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "regionName": "$(context.regionName)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 10,
+ "savedSearchId": "OOBSS-management-dashboard-123",
+ "state": "DEFAULT",
+ "uiConfig": {
+ "chartInfo": {
+ "colorBy": "name",
+ "enableCorrelation": true,
+ "group": "aggregatedDatapoints.timestamp",
+ "jetConfig": {
+ "dataCursor": "on",
+ "legend": {
+ "position": "top",
+ "rendered": true
+ },
+ "timeAxisType": "enabled",
+ "type": "line",
+ "xAxis": {
+ "viewportMax": "$(params.time.end)",
+ "viewportMin": "$(params.time.start)"
+ }
+ },
+ "series": "name",
+ "value": "aggregatedDatapoints.value"
+ },
+ "defaultDataSource": "autoUnion",
+ "internalKey": "OOBSS-management-dashboard-123"
+ },
+ "width": 6
+ },
+ {
+ "column": 0,
+ "dataConfig": [],
+ "displayName": "Pods CPU Usage",
+ "drilldownConfig": [],
+ "height": 4,
+ "nls": {},
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "regionName": "$(context.regionName)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 17,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaao66kcpdxkzv7euzz2nolnheoztvhjj4ireqfkalgzvg27xtb3wqa",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 6
+ },
+ {
+ "column": 6,
+ "dataConfig": [],
+ "displayName": "Node Memory Usage",
+ "drilldownConfig": [],
+ "height": 4,
+ "nls": {},
+ "parametersMap": {
+ "compartmentId": "$(dashboard.params.log-analytics-loggroup-filter)",
+ "regionName": "$(context.regionName)",
+ "time": "$(dashboard.params.time)"
+ },
+ "row": 17,
+ "savedSearchId": "ocid1.managementsavedsearch.oc1..aaaaaaaa2ctxqgooubu2oqvz6p2u7tnrlbr6davpwt5fxdmqg3hafktdtk4a",
+ "state": "DEFAULT",
+ "uiConfig": {},
+ "width": 6
+ }
+ ],
+ "timeCreated": "2023-05-15T20:52:06.226Z",
+ "timeUpdated": "2023-05-15T20:52:07.342Z",
+ "type": "normal",
+ "uiConfig": {
+ "isFilteringEnabled": false,
+ "isRefreshEnabled": true,
+ "isTimeRangeEnabled": true
+ },
+ "updatedBy": "ocid1.saml2idp.oc1..aaaaaaaa7ehdi53lr546fhugwewrbeltbgqvkocc27qslgbahsw5v55bvh4a/KUMAR.VARUN@ORACLE.COM"
+ }
+ ]
+ }
\ No newline at end of file
diff --git a/logan/terraform/oke/modules/dashboards/dashboards_json/workload.json b/terraform/modules/dashboards/dashboards_json/workload.json
similarity index 99%
rename from logan/terraform/oke/modules/dashboards/dashboards_json/workload.json
rename to terraform/modules/dashboards/dashboards_json/workload.json
index 2e3a1852..19f5c791 100644
--- a/logan/terraform/oke/modules/dashboards/dashboards_json/workload.json
+++ b/terraform/modules/dashboards/dashboards_json/workload.json
@@ -3010,4 +3010,4 @@
"definedTags": {}
}
]
-}
\ No newline at end of file
+}
diff --git a/terraform/modules/dashboards/inputs.tf b/terraform/modules/dashboards/inputs.tf
new file mode 100644
index 00000000..2b9b506e
--- /dev/null
+++ b/terraform/modules/dashboards/inputs.tf
@@ -0,0 +1,7 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# Compartment for creating dashboards and its associated saved-searches
+variable "compartment_ocid" {
+ type = string
+}
diff --git a/logan/terraform/oke/modules/logan/provider.tf b/terraform/modules/dashboards/provider.tf
similarity index 57%
rename from logan/terraform/oke/modules/logan/provider.tf
rename to terraform/modules/dashboards/provider.tf
index e310a47a..370a4ebe 100644
--- a/logan/terraform/oke/modules/logan/provider.tf
+++ b/terraform/modules/dashboards/provider.tf
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
terraform {
required_version = ">= 1.0"
required_providers {
@@ -7,4 +10,4 @@ terraform {
# https://registry.terraform.io/providers/hashicorp/oci/4.85.0
}
}
-}
\ No newline at end of file
+}
diff --git a/terraform/modules/helm/helm.tf b/terraform/modules/helm/helm.tf
new file mode 100644
index 00000000..9d63d28e
--- /dev/null
+++ b/terraform/modules/helm/helm.tf
@@ -0,0 +1,97 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+data "oci_containerengine_clusters" "oke_clusters_list" {
+ compartment_id = var.oke_compartment_ocid
+}
+
+locals {
+ oke_clusters_list = data.oci_containerengine_clusters.oke_clusters_list.clusters
+ oke_cluster_name = [for c in local.oke_clusters_list : c.name if c.id == var.oke_cluster_ocid][0]
+
+ helm_inputs = {
+ # global
+ "global.namespace" = var.deploy_mushop_config ? "livelab-test" : var.kubernetes_namespace
+ "global.kubernetesClusterID" = var.oke_cluster_ocid
+ "global.kubernetesClusterName" = local.oke_cluster_name
+
+ # oci-onm-logan
+ "oci-onm-logan.ociLANamespace" = var.oci_la_namespace
+ "oci-onm-logan.ociLALogGroupID" = var.oci_la_logGroup_id
+ "oci-onm-logan.image.url" = var.logan_container_image_url
+ "oci-onm-logan.fluentd.baseDir" = var.fluentd_baseDir_path
+
+ #oci-onm-mgmt-agent
+ "oci-onm-mgmt-agent.mgmtagent.installKeyFileContent" = var.mgmt_agent_install_key_content
+ "oci-onm-mgmt-agent.mgmtagent.image.url" = var.mgmt_agent_container_image_url
+ "oci-onm-mgmt-agent.deployMetricServer" = var.opt_deploy_metric_server
+ }
+
+ mushop_helm_inputs = {
+ # oci-onm-logan
+ "createServiceAccount" = false
+ "serviceAccount" = var.livelab_service_account
+ }
+
+}
+
+resource "helm_release" "oci-kubernetes-monitoring" {
+ name = "oci-kubernetes-monitoring"
+ chart = var.helm_abs_path
+ wait = true
+ dependency_update = true
+ atomic = true
+
+ values = var.deploy_mushop_config ? ["${file("${path.module}/mushop_values.yaml")}"] : null
+
+ dynamic "set" {
+ for_each = local.helm_inputs
+ content {
+ name = set.key
+ value = set.value
+ }
+ }
+
+ dynamic "set" {
+ for_each = var.deploy_mushop_config ? local.mushop_helm_inputs : {}
+ content {
+ name = set.key
+ value = set.value
+ }
+ }
+
+ count = var.generate_helm_template ? 0 : 1
+}
+
+data "helm_template" "oci-kubernetes-monitoring" {
+ name = "oci-kubernetes-monitoring"
+ chart = var.helm_abs_path
+ dependency_update = true
+
+ values = var.deploy_mushop_config ? ["${file("${path.module}/mushop_values.yaml")}"] : null
+
+ dynamic "set" {
+ for_each = local.helm_inputs
+ content {
+ name = set.key
+ value = set.value
+ }
+ }
+
+ dynamic "set" {
+ for_each = var.deploy_mushop_config ? local.mushop_helm_inputs : {}
+ content {
+ name = set.key
+ value = set.value
+ }
+ }
+
+ count = var.generate_helm_template ? 1 : 0
+}
+
+# Helm release artifacts for local testing and validation. Not used by helm resource.
+resource "local_file" "helm_release" {
+ content = tostring(data.helm_template.oci-kubernetes-monitoring[0].manifest)
+ filename = "${path.module}/local/helmrelease.yaml"
+ count = var.generate_helm_template ? 1 : 0
+}
\ No newline at end of file
diff --git a/terraform/modules/helm/inputs.tf b/terraform/modules/helm/inputs.tf
new file mode 100644
index 00000000..828a960f
--- /dev/null
+++ b/terraform/modules/helm/inputs.tf
@@ -0,0 +1,106 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+####
+## Switches
+####
+
+variable "generate_helm_template" {
+ type = bool
+ default = false
+}
+
+####
+## Helm chart
+####
+
+# Absolute path to helm chart directory
+variable "helm_abs_path" {
+ type = string
+}
+
+####
+## OKE Cluster Information
+####
+
+# OKE Cluster Compartment
+variable "oke_compartment_ocid" {
+ type = string
+}
+
+# OKE Cluster OCID
+variable "oke_cluster_ocid" {
+ type = string
+}
+
+# Kubernetes Namespace
+variable "kubernetes_namespace" {
+ type = string
+}
+
+####
+## OCI Logging Analytics Information
+####
+
+# OCI Logging Analytics LogGroup OCID
+variable "oci_la_logGroup_id" {
+ type = string
+ default = ""
+}
+
+# Log Analytics Namespace
+variable "oci_la_namespace" {
+ type = string
+}
+
+####
+## Fluentd Configuration
+####
+
+# OCI LA Fluentd Container Image
+variable "logan_container_image_url" {
+ type = string
+ default = "container-registry.oracle.com/oci_observability_management/oci-la-fluentd-collector:1.0.0"
+}
+
+# Fluentd Base Directory
+variable "fluentd_baseDir_path" {
+ type = string
+ default = "/var/log"
+}
+
+####
+## Management Agent Configuration
+####
+
+variable "mgmt_agent_install_key_content" {
+ type = string
+}
+
+# OCI Management Agent Container Image
+variable "mgmt_agent_container_image_url" {
+ type = string
+ default = "container-registry.oracle.com/oci_observability_management/oci-management-agent:1.0.0"
+}
+
+# Option to control the metric server deployment inside kubernetes cluster
+variable "opt_deploy_metric_server" {
+ type = bool
+ default = true
+}
+
+####
+## livelab
+####
+
+# Option to deploy mushop specific values.yaml (inputs)
+variable "deploy_mushop_config" {
+ type = bool
+ default = false
+}
+
+# Service Account to be used when working on livelab cluster
+variable "livelab_service_account" {
+ type = string
+ default = ""
+}
\ No newline at end of file
diff --git a/terraform/modules/helm/local/.gitignore b/terraform/modules/helm/local/.gitignore
new file mode 100644
index 00000000..bc1f496f
--- /dev/null
+++ b/terraform/modules/helm/local/.gitignore
@@ -0,0 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+*
+*/
+!.gitignore
diff --git a/terraform/modules/helm/mushop_values.yaml b/terraform/modules/helm/mushop_values.yaml
new file mode 100644
index 00000000..ea67b92a
--- /dev/null
+++ b/terraform/modules/helm/mushop_values.yaml
@@ -0,0 +1,59 @@
+oci-onm-common:
+ fluentd:
+ customLogs:
+ mushop-orders:
+ path: /var/log/containers/mushop-orders-*.log
+ ociLALogSourceName: "mushop-orders-app"
+ multilineStartRegExp: /^\d{4}-\d{2}-\d{2}\s*\d{2}:\d{2}:\d{2}.\d{3}/
+ isContainerLog: true
+ mushop-api:
+ path: /var/log/containers/mushop-api-*.log
+ ociLALogSourceName: "mushop api logs"
+ multilineStartRegExp: /^::\w{4}:\d{2}.\d{3}.\d{1}.\d{1}\s*-\s*-\s*\[\d{2}\/\w{3}\/\d{4}:\d{2}:\d{2}:\d{2}\s*\+\d{4}\]/
+ isContainerLog: true
+ mushop-assets:
+ path: /var/log/containers/mushop-assets-*.log
+ ociLALogSourceName: "mushop-assets logs"
+ isContainerLog: true
+ mushop-carts:
+ path: /var/log/containers/mushop-carts-*.log
+ ociLALogSourceName: "mushop-carts logs"
+ multilineStartRegExp: /^\w+\s*\d{2}\,\s*\d{4}\s\d{1,2}:\d{2}:\d{2}/
+ isContainerLog: true
+ mushop-catalogue:
+ path: /var/log/containers/mushop-catalogue-*.log
+ ociLALogSourceName: "mushop-catalogue logs"
+ isContainerLog: true
+ mushop-edge:
+ path: /var/log/containers/mushop-edge-*.log
+ ociLALogSourceName: "mushop-edge logs"
+ isContainerLog: true
+ mushop-nats:
+ path: /var/log/containers/mushop-nats-*.log
+ ociLALogSourceName: "mushop-nats logs"
+ isContainerLog: true
+ mushop-payment:
+ path: /var/log/containers/mushop-payment-*.log
+ ociLALogSourceName: "mushop-payment logs"
+ isContainerLog: true
+ mushop-session:
+ path: /var/log/containers/mushop-session-*.log
+ ociLALogSourceName: "mushop-session logs"
+ multilineStartRegExp: /^\d+:\w\s*\d{2}\s*\w{3}\s*\d{4}\s*\d{2}:\d{2}:\d{2}.\d{3}/
+ isContainerLog: true
+ mushop-storefront:
+ path: /var/log/containers/mushop-storefront-*.log
+ ociLALogSourceName: "mushop-storefront logs"
+ isContainerLog: true
+ mushop-user:
+ path: /var/log/containers/mushop-user-*.log
+ ociLALogSourceName: "mushop-user logs"
+ isContainerLog: true
+ mushop-utils:
+ path: /var/log/containers/mushop-utils-*.log
+ ociLALogSourceName: "mushop-utils-ingress-nginx-controller logs"
+ isContainerLog: true
+ wordpress-access:
+ path: /var/log/containers/wordpress-*.log
+ ociLALogSourceName: "WordPress Access Logs"
+ isContainerLog: true
\ No newline at end of file
diff --git a/terraform/modules/helm/provider.tf b/terraform/modules/helm/provider.tf
new file mode 100644
index 00000000..9c529672
--- /dev/null
+++ b/terraform/modules/helm/provider.tf
@@ -0,0 +1,16 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+terraform {
+ required_version = ">= 1.0"
+ required_providers {
+ helm = {
+ source = "hashicorp/helm"
+ version = "2.7.1"
+ }
+ oci = {
+ source = "oracle/oci"
+ version = ">= 4.96.0"
+ }
+ }
+}
\ No newline at end of file
diff --git a/terraform/modules/iam/iam.tf b/terraform/modules/iam/iam.tf
new file mode 100644
index 00000000..8a8ed4ec
--- /dev/null
+++ b/terraform/modules/iam/iam.tf
@@ -0,0 +1,56 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+locals {
+ # Compartments
+ oci_onm_compartment_name = data.oci_identity_compartment.oci_onm_compartment.name
+ oke_compartment_name = data.oci_identity_compartment.oke_compartment.name
+
+ # Dynamic Group
+ uuid_dynamic_group = md5(var.oke_cluster_ocid)
+ dynamic_group_name = "oci-kubernetes-monitoring-${local.uuid_dynamic_group}"
+ dynamic_group_desc = "Auto generated by Resource Manager Stack - oci-kubernetes-monitoring. Required for monitoring OKE Cluster - ${var.oke_cluster_ocid}"
+ instances_in_compartment_rule = ["ALL {instance.compartment.id = '${var.oke_compartment_ocid}'}"]
+ management_agent_rule = ["ALL {resource.type='managementagent', resource.compartment.id='${var.oci_onm_compartment_ocid}'}"]
+ dynamic_group_matching_rules = concat(local.instances_in_compartment_rule, local.management_agent_rule)
+ complied_dynamic_group_rules = "ANY {${join(",", local.dynamic_group_matching_rules)}}"
+
+ # Policy
+ uuid_policy = md5("${local.dynamic_group_name}${local.oci_onm_compartment_name}")
+ policy_name = "oci-kubernetes-monitoring-${local.uuid_policy}"
+ policy_desc = "Auto generated by Resource Manager Stack - oci-kubernetes-monitoring. Allows Fluentd and MgmtAgent Pods running inside Kubernetes Cluster to send the data to OCI Logging Analytics and OCI Monitoring respectively."
+ policy_scope = var.root_compartment_ocid == var.oci_onm_compartment_ocid ? "tenancy" : "compartment ${local.oci_onm_compartment_name}"
+ mgmt_agent_policy = ["Allow dynamic-group ${local.dynamic_group_name} to use METRICS in ${local.policy_scope} WHERE target.metrics.namespace = 'mgmtagent_kubernetes_metrics'"]
+ fluentd_agent_policy = ["Allow dynamic-group ${local.dynamic_group_name} to {LOG_ANALYTICS_LOG_GROUP_UPLOAD_LOGS} in ${local.policy_scope}"]
+ policy_statements = concat(local.fluentd_agent_policy, local.mgmt_agent_policy)
+}
+
+# Logging Analytics Compartment
+data "oci_identity_compartment" "oci_onm_compartment" {
+ id = var.oci_onm_compartment_ocid
+}
+
+# OKE Compartment
+data "oci_identity_compartment" "oke_compartment" {
+ id = var.oke_compartment_ocid
+}
+
+# Dynamic Group
+resource "oci_identity_dynamic_group" "oke_dynamic_group" {
+ name = local.dynamic_group_name
+ description = local.dynamic_group_desc
+ compartment_id = var.root_compartment_ocid
+ matching_rule = local.complied_dynamic_group_rules
+ #provider = oci.home_region
+}
+
+# Policy
+resource "oci_identity_policy" "oke_monitoring_policy" {
+ name = local.policy_name
+ description = local.policy_desc
+ compartment_id = var.oci_onm_compartment_ocid
+ statements = local.policy_statements
+ #provider = oci.home_region
+
+ depends_on = [oci_identity_dynamic_group.oke_dynamic_group]
+}
diff --git a/terraform/modules/iam/inputs.tf b/terraform/modules/iam/inputs.tf
new file mode 100644
index 00000000..084439ac
--- /dev/null
+++ b/terraform/modules/iam/inputs.tf
@@ -0,0 +1,23 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# tenancy ocid
+variable "root_compartment_ocid" {
+ type = string
+}
+
+# Compartment for OCI Observability and Management service resources
+variable "oci_onm_compartment_ocid" {
+ type = string
+}
+
+# OKE Cluster Compartment
+variable "oke_compartment_ocid" {
+ type = string
+}
+
+# OKE Cluster OCID
+variable "oke_cluster_ocid" {
+ type = string
+}
+
diff --git a/logan/terraform/oke/modules/iam/provider.tf b/terraform/modules/iam/provider.tf
similarity index 57%
rename from logan/terraform/oke/modules/iam/provider.tf
rename to terraform/modules/iam/provider.tf
index e310a47a..370a4ebe 100644
--- a/logan/terraform/oke/modules/iam/provider.tf
+++ b/terraform/modules/iam/provider.tf
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
terraform {
required_version = ">= 1.0"
required_providers {
@@ -7,4 +10,4 @@ terraform {
# https://registry.terraform.io/providers/hashicorp/oci/4.85.0
}
}
-}
\ No newline at end of file
+}
diff --git a/logan/terraform/oke/modules/logan/inputs.tf b/terraform/modules/logan/inputs.tf
similarity index 79%
rename from logan/terraform/oke/modules/logan/inputs.tf
rename to terraform/modules/logan/inputs.tf
index 272d1907..070d1e36 100644
--- a/logan/terraform/oke/modules/logan/inputs.tf
+++ b/terraform/modules/logan/inputs.tf
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
# tenancy OCID
variable "tenancy_ocid" {
type = string
diff --git a/logan/terraform/oke/modules/logan/logAnalytics.tf b/terraform/modules/logan/logAnalytics.tf
similarity index 59%
rename from logan/terraform/oke/modules/logan/logAnalytics.tf
rename to terraform/modules/logan/logAnalytics.tf
index ac720a43..67a96071 100644
--- a/logan/terraform/oke/modules/logan/logAnalytics.tf
+++ b/terraform/modules/logan/logAnalytics.tf
@@ -1,13 +1,12 @@
-data "oci_objectstorage_namespace" "tenant_namespace" {
- compartment_id = var.tenancy_ocid # tenancy ocid
-}
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
-data "oci_log_analytics_namespace" "la_namespace" {
- namespace = data.oci_objectstorage_namespace.tenant_namespace.namespace
+data "oci_log_analytics_namespaces" "logan_namespaces" {
+ compartment_id = var.tenancy_ocid
}
locals {
- oci_la_namespace = data.oci_log_analytics_namespace.la_namespace.namespace
+ oci_la_namespace = data.oci_log_analytics_namespaces.logan_namespaces.namespace_collection[0].items[0].namespace
final_oci_la_logGroup_id = var.create_new_logGroup ? oci_log_analytics_log_analytics_log_group.new_log_group[0].id : var.existing_logGroup_id
}
@@ -25,8 +24,8 @@ resource "oci_log_analytics_log_analytics_log_group" "new_log_group" {
# lifecycle {
# precondition {
- # condition = data.oci_log_analytics_namespace.tenant_namespace.is_onboarded == true
+ # condition = data.oci_log_analytics_namespaces.logan_namespaces.namespace_collection[0].items[0].is_onboarded == true
# error_message = "Tenancy is not on-boarded to OCI Logging Analytics Service in ${var.region} region."
# }
# }
-}
\ No newline at end of file
+}
diff --git a/terraform/modules/logan/outputs.tf b/terraform/modules/logan/outputs.tf
new file mode 100644
index 00000000..d8e43019
--- /dev/null
+++ b/terraform/modules/logan/outputs.tf
@@ -0,0 +1,10 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+output "oci_la_namespace" {
+ value = local.oci_la_namespace
+}
+
+output "oci_la_logGroup_ocid" {
+ value = local.final_oci_la_logGroup_id
+}
diff --git a/logan/terraform/oke/modules/dashboards/provider.tf b/terraform/modules/logan/provider.tf
similarity index 57%
rename from logan/terraform/oke/modules/dashboards/provider.tf
rename to terraform/modules/logan/provider.tf
index e310a47a..370a4ebe 100644
--- a/logan/terraform/oke/modules/dashboards/provider.tf
+++ b/terraform/modules/logan/provider.tf
@@ -1,3 +1,6 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
terraform {
required_version = ">= 1.0"
required_providers {
@@ -7,4 +10,4 @@ terraform {
# https://registry.terraform.io/providers/hashicorp/oci/4.85.0
}
}
-}
\ No newline at end of file
+}
diff --git a/terraform/modules/mgmt_agent/agent.tf b/terraform/modules/mgmt_agent/agent.tf
new file mode 100644
index 00000000..45f50c3e
--- /dev/null
+++ b/terraform/modules/mgmt_agent/agent.tf
@@ -0,0 +1,12 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+locals {
+ installKey = oci_management_agent_management_agent_install_key.Kubernetes_AgentInstallKey.key
+ inputRspFileContent = base64encode(join("\n", ["ManagementAgentInstallKey = ${local.installKey}", "AgentDisplayName = k8_mgmt_agent-${var.uniquifier}"]))
+}
+
+resource "oci_management_agent_management_agent_install_key" "Kubernetes_AgentInstallKey" {
+ compartment_id = var.compartment_ocid
+ display_name = "k8_mgmt_agent_key-${var.uniquifier}"
+}
\ No newline at end of file
diff --git a/terraform/modules/mgmt_agent/inputs.tf b/terraform/modules/mgmt_agent/inputs.tf
new file mode 100644
index 00000000..9b94fdcb
--- /dev/null
+++ b/terraform/modules/mgmt_agent/inputs.tf
@@ -0,0 +1,12 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# A unique key to be associated with a single OKE cluster
+variable "uniquifier" {
+ type = string
+}
+
+# OCID of compartment where management agent installation key is to be created
+variable "compartment_ocid" {
+ type = string
+}
\ No newline at end of file
diff --git a/terraform/modules/mgmt_agent/outputs.tf b/terraform/modules/mgmt_agent/outputs.tf
new file mode 100644
index 00000000..13298f06
--- /dev/null
+++ b/terraform/modules/mgmt_agent/outputs.tf
@@ -0,0 +1,7 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# Management Agent Install Key
+output "mgmt_agent_install_key_content" {
+ value = local.inputRspFileContent
+}
\ No newline at end of file
diff --git a/terraform/modules/mgmt_agent/provider.tf b/terraform/modules/mgmt_agent/provider.tf
new file mode 100644
index 00000000..38621564
--- /dev/null
+++ b/terraform/modules/mgmt_agent/provider.tf
@@ -0,0 +1,12 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+terraform {
+ required_version = ">= 1.0"
+ required_providers {
+ oci = {
+ source = "oracle/oci"
+ version = ">= 4.96.0"
+ }
+ }
+}
diff --git a/terraform/oke/charts b/terraform/oke/charts
new file mode 120000
index 00000000..3331b1a7
--- /dev/null
+++ b/terraform/oke/charts
@@ -0,0 +1 @@
+../../charts
\ No newline at end of file
diff --git a/terraform/oke/datasources.tf b/terraform/oke/datasources.tf
new file mode 100644
index 00000000..98ed84d9
--- /dev/null
+++ b/terraform/oke/datasources.tf
@@ -0,0 +1,17 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+data "oci_identity_user" "livelab_user" {
+ user_id = var.current_user_ocid
+}
+
+data "oci_identity_tenancy" "tenant_details" {
+ tenancy_id = var.tenancy_ocid
+}
+
+data "oci_identity_regions" "region_map" {
+}
+
+data "oci_containerengine_cluster_kube_config" "oke" {
+ cluster_id = var.oke_cluster_ocid
+}
\ No newline at end of file
diff --git a/terraform/oke/debug-inputs.tf b/terraform/oke/debug-inputs.tf
new file mode 100644
index 00000000..13d69628
--- /dev/null
+++ b/terraform/oke/debug-inputs.tf
@@ -0,0 +1,27 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+####
+## Switches - These inputs are meant to be used for development purpose only
+## Leave it to default for production use
+####
+
+# Enable/Disable helm module
+variable "enable_helm_module" {
+ type = bool
+ default = true
+}
+
+# Enable/Disable helm template. When set as true,
+# - helm module will generate template file inside ../modules/helm/local directory
+# - Setting this to true disables/skips the helm release
+variable "generate_helm_template" {
+ type = bool
+ default = false
+}
+
+# Enable/Disable logan dashboards module
+variable "enable_dashboard_module" {
+ type = bool
+ default = true
+}
\ No newline at end of file
diff --git a/logan/terraform/oke/inputs.tf b/terraform/oke/inputs.tf
similarity index 62%
rename from logan/terraform/oke/inputs.tf
rename to terraform/oke/inputs.tf
index 90745790..a2c8b1bc 100644
--- a/logan/terraform/oke/inputs.tf
+++ b/terraform/oke/inputs.tf
@@ -1,4 +1,7 @@
-# When defined in the Terraform configuration, the following variables automatically prepopulate with values on the Console pages used to create and edit the stack.
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# When defined in the Terraform configuration, the following variables automatically prepopulate with values on the Console pages used to create and edit the stack.
# The stack's values are used when you select the Terraform actions Plan, Apply, and Destroy.
# - tenancy_ocid (tenancy OCID)
# - region (region)
@@ -33,48 +36,36 @@ variable "fingerprint" {
}
####
-## Stack Variable
+## Stack Variable - Auto-populated while running RM Stack
####
-// Auto-pupulated while running RM Stack
+# Stack compartment - where marketplace app / Resource Manager stack is executed
variable "compartment_ocid" {
type = string
default = ""
}
+# OCID of user running the marketplace app / Resource Manager stack
+variable "current_user_ocid" {
+ type = string
+}
+
####
-## Boat configuration
+## Boat configuration - Used for internal development purpose only.
####
+# Option to enable BOAT authentication.
variable "boat_auth" {
type = bool
default = false
}
+# OCID of BOAT tenancy.
variable "boat_tenancy_ocid" {
type = string
default = ""
}
-####
-## Switches
-####
-
-variable "enable_helm_release" {
- type = bool
- default = true
-}
-
-variable "enable_helm_debugging" {
- type = bool
- default = false
-}
-
-variable "enable_dashboard_import" {
- type = bool
- default = true
-}
-
####
## Dynamic Group and Policies
####
@@ -102,21 +93,16 @@ variable "oke_cluster_ocid" {
# Kubernetes Namespace
variable "kubernetes_namespace" {
- type = string
-}
-
-# Option to create Kubernetes Namespace
-variable "opt_create_kubernetes_namespace" {
- type = bool
- default = true
+ type = string
+ default = "oci-onm"
}
####
-## OCI Logging Analytics Information
+## OCI Observability and Management Information
####
-# Compartment for creating logging analytics LogGroup and Dashboards
-variable "oci_la_compartment_ocid" {
+# Compartment for creating OCI Observability and Management resources
+variable "oci_onm_compartment_ocid" {
type = string
default = ""
}
@@ -139,18 +125,34 @@ variable "oci_la_logGroup_name" {
default = ""
}
+# Fluentd Base Directory
+variable "fluentd_baseDir_path" {
+ type = string
+ default = "/var/log"
+}
+
####
## Fluentd Configuration
####
# OCI LA Fluentd Container Image
-variable "container_image_url" {
- type = string
+variable "logan_container_image_url" {
+ type = string
+ default = "container-registry.oracle.com/oci_observability_management/oci-la-fluentd-collector:1.0.0"
}
-# Fluentd Base Directory
-variable "fluentd_baseDir_path" {
+####
+## Management Agent Configuration
+####
+
+# OCI Management Agent Container Image
+variable "mgmt_agent_container_image_url" {
type = string
- default = "/var/log"
+ default = "container-registry.oracle.com/oci_observability_management/oci-management-agent:1.0.0"
}
+# Option to deploy metric server
+variable "opt_deploy_metric_server" {
+ type = bool
+ default = true
+}
\ No newline at end of file
diff --git a/terraform/oke/livelab.tf b/terraform/oke/livelab.tf
new file mode 100644
index 00000000..ad96eb30
--- /dev/null
+++ b/terraform/oke/livelab.tf
@@ -0,0 +1,15 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+####
+## livelab
+####
+
+## Note - /util/build_stack.sh script modifies below input from "FALSE" to "TRUE", while generating livelab build, hence
+## - Do not add additional inputs here &
+## - Do not modify this file
+
+variable "livelab_switch" {
+ type = bool
+ default = false
+}
\ No newline at end of file
diff --git a/terraform/oke/main.tf b/terraform/oke/main.tf
new file mode 100644
index 00000000..6e845a99
--- /dev/null
+++ b/terraform/oke/main.tf
@@ -0,0 +1,80 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+locals {
+ ## livelab
+ oci_username = data.oci_identity_user.livelab_user.name
+ livelab_service_account = local.oci_username
+
+ ## Helm release
+ fluentd_baseDir_path = var.livelab_switch ? "/var/log/${local.oci_username}" : var.fluentd_baseDir_path
+}
+
+// Import Kubernetes Dashboards
+module "import_kubernetes_dashbords" {
+ source = "./modules/dashboards"
+ compartment_ocid = var.oci_onm_compartment_ocid
+
+ count = var.enable_dashboard_module ? 1 : 0
+}
+
+// Create Required Policies and Dynamic Group
+// Needs to be called with OCI Home Region Provider
+module "policy_and_dynamic-group" {
+ source = "./modules/iam"
+ root_compartment_ocid = var.tenancy_ocid
+ oci_onm_compartment_ocid = var.oci_onm_compartment_ocid
+ oke_compartment_ocid = var.oke_compartment_ocid
+ oke_cluster_ocid = var.oke_cluster_ocid
+
+ count = var.opt_create_dynamicGroup_and_policies && !var.livelab_switch ? 1 : 0
+
+ providers = {
+ oci = oci.home_region
+ }
+}
+
+module "management_agent" {
+ source = "./modules/mgmt_agent"
+ uniquifier = md5(var.oke_cluster_ocid)
+ compartment_ocid = var.oci_onm_compartment_ocid
+
+ # this module is only required in case of helm deployment
+ count = var.enable_helm_module ? 1 : 0
+}
+
+// Create Logging Analytics Resources
+module "loggingAnalytics" {
+ source = "./modules/logan"
+ tenancy_ocid = var.tenancy_ocid
+ create_new_logGroup = var.opt_create_new_la_logGroup
+ new_logGroup_name = var.oci_la_logGroup_name
+ compartment_ocid = var.oci_onm_compartment_ocid
+ existing_logGroup_id = var.oci_la_logGroup_id
+}
+
+
+// deploy oke-monitoring solution (helm release)
+module "helm_release" {
+ source = "./modules/helm"
+ helm_abs_path = abspath("./charts/oci-onm")
+ generate_helm_template = var.generate_helm_template
+
+ oke_compartment_ocid = var.oke_compartment_ocid
+ oke_cluster_ocid = var.oke_cluster_ocid
+ logan_container_image_url = var.logan_container_image_url
+ kubernetes_namespace = var.kubernetes_namespace
+
+ oci_la_logGroup_id = module.loggingAnalytics.oci_la_logGroup_ocid
+ oci_la_namespace = module.loggingAnalytics.oci_la_namespace
+ fluentd_baseDir_path = local.fluentd_baseDir_path
+
+ mgmt_agent_install_key_content = module.management_agent[0].mgmt_agent_install_key_content
+ mgmt_agent_container_image_url = var.mgmt_agent_container_image_url
+ opt_deploy_metric_server = var.livelab_switch ? true : var.opt_deploy_metric_server
+
+ deploy_mushop_config = var.livelab_switch
+ livelab_service_account = local.livelab_service_account
+
+ count = var.enable_helm_module ? 1 : 0
+}
diff --git a/terraform/oke/modules b/terraform/oke/modules
new file mode 120000
index 00000000..43aab75b
--- /dev/null
+++ b/terraform/oke/modules
@@ -0,0 +1 @@
+../modules/
\ No newline at end of file
diff --git a/terraform/oke/outputs.tf b/terraform/oke/outputs.tf
new file mode 100644
index 00000000..abc612c1
--- /dev/null
+++ b/terraform/oke/outputs.tf
@@ -0,0 +1,2 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
diff --git a/terraform/oke/providers.tf b/terraform/oke/providers.tf
new file mode 100644
index 00000000..89b07367
--- /dev/null
+++ b/terraform/oke/providers.tf
@@ -0,0 +1,60 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+terraform {
+ required_version = ">= 1.0.0, <= 1.5"
+ required_providers {
+ oci = {
+ source = "oracle/oci"
+ version = ">= 4.96.0"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = "2.7.1"
+ }
+ local = {
+ source = "hashicorp/local"
+ version = "2.2.3"
+ }
+ }
+}
+
+locals {
+ cluster_endpoint = yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["clusters"][0]["cluster"]["server"]
+ cluster_ca_certificate = base64decode(yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["clusters"][0]["cluster"]["certificate-authority-data"])
+ cluster_id = yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["users"][0]["user"]["exec"]["args"][4]
+ cluster_region = yamldecode(data.oci_containerengine_cluster_kube_config.oke.content)["users"][0]["user"]["exec"]["args"][6]
+
+ home_region_key = data.oci_identity_tenancy.tenant_details.home_region_key
+ home_region = var.livelab_switch ? "us-phoenix-1" : [for r in data.oci_identity_regions.region_map.regions : r.name if r.key == local.home_region_key][0]
+}
+
+provider "oci" {
+ tenancy_ocid = var.boat_auth ? var.boat_tenancy_ocid : var.tenancy_ocid
+ region = var.region
+ private_key_path = var.private_key_path
+ fingerprint = var.fingerprint
+ user_ocid = var.user_ocid
+}
+
+provider "oci" {
+ alias = "home_region"
+ tenancy_ocid = var.boat_auth ? var.boat_tenancy_ocid : var.tenancy_ocid
+ region = local.home_region
+ private_key_path = var.private_key_path
+ fingerprint = var.fingerprint
+ user_ocid = var.user_ocid
+}
+
+provider "helm" {
+ kubernetes {
+ host = local.cluster_endpoint
+ cluster_ca_certificate = local.cluster_ca_certificate
+ exec {
+ api_version = "client.authentication.k8s.io/v1beta1"
+ args = ["ce", "cluster", "generate-token", "--cluster-id", local.cluster_id, "--region", local.cluster_region]
+ command = "oci"
+ }
+ }
+}
+
diff --git a/terraform/oke/schema.yaml b/terraform/oke/schema.yaml
new file mode 100644
index 00000000..3c6c924b
--- /dev/null
+++ b/terraform/oke/schema.yaml
@@ -0,0 +1,198 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+title: OCI Kubernetes Monitoring Solution
+description: OCI Kubernetes Monitoring Solution is a turn-key Kubernetes monitoring and management package based on the OCI Logging Analytics cloud service, OCI Monitoring, and OCI Management Agent.
+informationalText: OCI Kubernetes Monitoring Solution is a turn-key Kubernetes monitoring and management package based on the OCI Logging Analytics cloud service, OCI Monitoring, and OCI Management Agent.
+schemaVersion: 1.1.0
+version: "20221004"
+
+# URL of Logo Icon used on Application Information tab. Logo must be 130x130 pixels.
+# (Optional)
+#logoUrl: https://cloudmarketplace.oracle.com/marketplace/content?contentId=53066708
+
+source:
+ type: marketplace # enum - marketplace, quickstart or web
+
+locale: "en"
+
+variableGroups:
+ - title: "configuration inputs"
+ variables:
+ - tenancy_ocid
+ - region
+ - user_ocid
+ - private_key_path
+ - fingerprint
+ - generate_helm_template
+ - enable_dashboard_module
+ - enable_helm_module
+ - boat_auth
+ - boat_tenancy_ocid
+ - compartment_ocid
+ - logan_container_image_url
+ - mgmt_agent_container_image_url
+ - kubernetes_namespace
+ - current_user_ocid
+ - livelab_switch
+ visible: false
+
+ - title: "Select an OKE Cluster deployed in this region to start monitoring"
+ description: "Use CLI (Helm) if your cluster does not have public API Endpoint or restricted from accessing container-registry.oracle.com. See: https://github.com/oracle-quickstart/oci-kubernetes-monitoring"
+ variables:
+ - oke_compartment_ocid
+ - oke_cluster_ocid
+ visible:
+ and:
+ - enable_helm_module
+
+ - title: "OCI Observability & Management Services Configuration"
+ description: "See: https://github.com/oracle-quickstart/oci-kubernetes-monitoring for list of resources created"
+ variables:
+ - opt_deploy_metric_server
+ - oci_onm_compartment_ocid
+ - opt_create_new_la_logGroup
+ - oci_la_logGroup_id
+ - oci_la_logGroup_name
+ - fluentd_baseDir_path
+ visible:
+ and:
+ - enable_helm_module
+
+ - title: "OCI IAM Policies and Dynaimic Groups (Optional)"
+ variables:
+ - opt_create_dynamicGroup_and_policies
+ visible:
+ and:
+ - enable_helm_module
+
+variables:
+
+ ####
+ ## Deployment Options
+ ####
+
+ # Option to install helm chart
+ # enable_helm_module:
+ # type: boolean
+ # title: Deploy Kubernetes Monitoring Solution
+ # description: "Ref: https://github.com/oracle-quickstart/oci-kubernetes-monitoring"
+ # default: true
+ # required: true
+
+ ####
+ ## OKE Cluster Information
+ ####
+
+ # OKE Cluster Compartment
+ oke_compartment_ocid:
+ type: oci:identity:compartment:id
+ required: true
+ title: "Select OKE Cluster Compartment"
+ default: compartment_ocid
+
+ # OKE Cluster OCID
+ oke_cluster_ocid:
+ type: oci:container:cluster:id
+ dependsOn:
+ compartmentId: ${oke_compartment_ocid}
+ title: Select OKE Cluster
+ required: true
+
+ # Kubernetes Namespace
+ # kubernetes_namespace:
+ # type: string
+ # minLength: 1
+ # maxLength: 63
+ # title: Kubernetes Namespace
+ # description: Kubernetes Namespace in which the monitoring solution to be deployed
+ # default: kube-system
+ # pattern: '^([a-z0-9]|[a-z][a-z\-0-9]*[a-z0-9])$' #Ref - https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names
+ # required: true
+
+ ####
+ ## OCI Observability & Management Services Configuration
+ ####
+
+ # Option to enable/disable metric server installation during helm deployment
+ opt_deploy_metric_server:
+ type: boolean
+ title: Enable Metric Server Installation
+ description: Uncheck this if Metric Server is already installed in your cluster.
+ default: true
+ visible:
+ and:
+ - enable_helm_module
+ - not:
+ - livelab_switch
+
+ # Compartment for creating OCI Observability and Management resources
+ oci_onm_compartment_ocid:
+ type: oci:identity:compartment:id
+ required: true
+ title: Select compartment for Logging Analytics, Management Agent, and Monitoring service resources
+ description: This compartment will be used for creating Dashboards, Log Groups, Entities, Management Agent Keys, Metrics Namespace etc. See https://github.com/oracle-quickstart/oci-kubernetes-monitoring for full list of resources.
+ default: compartment_ocid
+
+ # Option to create Logging Analytics
+ opt_create_new_la_logGroup: # change this to create new log group
+ type: boolean
+ title: Check if you want to create a new Log Group
+ default: false
+ visible:
+ and:
+ - enable_helm_module
+ - not:
+ - livelab_switch
+
+ # OCI Logging Analytics LogGroup OCID of existing LogGroup
+ oci_la_logGroup_id:
+ type: oci:logan:loggroup:id
+ dependsOn:
+ compartmentId: ${oci_onm_compartment_ocid}
+ title: OCI Logging Analytics Log Group
+ description: Log Groups are logical containers for log data, and provide access control for your data using IAM Policies
+ required: true
+ visible:
+ not:
+ - opt_create_new_la_logGroup
+
+ # New Log Group to collect Kubernetes data
+ oci_la_logGroup_name:
+ type: string
+ maxLength: 255
+ minLength: 1
+ required: true
+ title: "OCI Logging Analytics Log Group Name"
+ description: "Tip: Give a unique name which can be identified with your cluster name to make it easy to find in Dashboards and Logs Explorer"
+ visible:
+ and:
+ - opt_create_new_la_logGroup
+ pattern: '^([a-zA-Z0-9]|[a-zA-Z0-9][\\ a-zA-Z0-9_\-]*[\\a-zA-Z\-0-9_])$'
+
+ # Fluentd Base Directory
+ fluentd_baseDir_path:
+ type: string
+ maxLength: 255
+ minLength: 1
+ title: FluentD Working Directory
+ description: A directory on the node (with read & write permission) to use for storing Fluentd related data
+ default: /var/log
+ required: true
+ pattern: '^/[\w- /]*$'
+ visible:
+ not:
+ - livelab_switch
+
+ ####
+ ## Pre-requisites
+ ####
+
+ # Option to create Dynamic Group and Policies
+ opt_create_dynamicGroup_and_policies:
+ type: boolean
+ title: Check to create Dynamic Group and Policies required for deploying monitoring solution
+ #description: "Ref: https://github.com/oracle-quickstart/oci-kubernetes-monitoring#pre-requisites"
+ description: "Note: The dynamic group definition must be updated, if node pool(s) and OKE Cluster are in different compartments."
+ default: false
+ required: true
\ No newline at end of file
diff --git a/terraform/oke/terraform-sample.tfvars b/terraform/oke/terraform-sample.tfvars
new file mode 100644
index 00000000..ea195b37
--- /dev/null
+++ b/terraform/oke/terraform-sample.tfvars
@@ -0,0 +1,72 @@
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+####
+## Configure BOAT Authentication for OCI; leave unchanged if BOAT authentication is not used
+####
+boat_auth = false
+boat_tenancy_ocid = ""
+
+####
+## OCI Provider inputs
+####
+tenancy_ocid = ""
+region = ""
+
+# Note - Leave following empty when running terraform from OCI cloud-shell
+
+# OCI user OCID
+user_ocid = ""
+# Path to OCI user's API key
+private_key_path = ""
+# Fingerprint of the API key
+fingerprint = ""
+
+####
+## Mandatory Stack inputs
+####
+
+# OKE Cluster Compartment OCID
+oke_compartment_ocid = "" # Mandatory
+
+# OKE Cluster OCID
+oke_cluster_ocid = ""
+
+# Change this, if you want to deploy in a custom namespace
+kubernetes_namespace = "oci-onm"
+
+# Option to control metric server installation as part of helm release
+opt_deploy_metric_server = true
+
+# Compartment for creating dashboards and saved-searches and logGroup
+oci_onm_compartment_ocid = ""
+
+# if true, oci_la_logGroup_name must be set
+opt_create_new_la_logGroup = false
+
+# OCI Logging Analytics LogGroup
+# Add OCID of an existing Log Group if opt_create_new_la_logGroup=false; leave it empty otherwise
+oci_la_logGroup_id = ""
+
+# leave it unchanged if opt_create_new_la_logGroup=false (the name is only used when creating a new Log Group)
+oci_la_logGroup_name = "NewLogGroupName"
+
+####
+## Optional Stack inputs
+####
+
+# Option to create Dynamic Group and Policies
+opt_create_dynamicGroup_and_policies = true
+
+# Fluentd installation path
+fluentd_baseDir_path = "/var/log"
+
+####
+## Optional Switches
+####
+
+enable_dashboard_module = false
+enable_helm_module = false
+generate_helm_template = false
+
+
diff --git a/util/build_stack.sh b/util/build_stack.sh
new file mode 100755
index 00000000..e7fb20a8
--- /dev/null
+++ b/util/build_stack.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+# Copyright (c) 2023, Oracle and/or its affiliates.
+# Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
+
+# Bash script to build OCI Resource Manager Stack or Marketplace app for OKE monitoring
+
+# Fail at first error
+set -e
+
+function error_and_exit {
+ echo -e "ERROR: $1"
+ exit
+}
+
+function abspath {
+ relative_path=$1
+ cd $relative_path
+ pwd
+}
+
+usage="
+$(basename "$0") [-h] [-n name] -- program to build marketplace app from oracle-quickstart/oci-kubernetes-monitoring repo.
+
+where:
+ -h show this help text
+    -n  name of output zip file without extension (Optional)
+ -l flag to generate livelab build; otherwise oke build is generated
+
+The zip artifacts shall be stored at -
+ $RELEASE_PATH"
+
+while getopts "hn:l" option; do
+ case $option in
+ h) # display Help
+ echo "$usage"
+ exit
+ ;;
+ l) #livelab-build
+ LIVE_LAB_BUILD=true
+ ;;
+ n)
+ release_name=$OPTARG
+ ;;
+ :) printf "missing argument for -%s\n" "$OPTARG" >&2
+ echo "$usage" >&2
+ exit 1
+ ;;
+ \?) printf "illegal option: -%s\n" "$OPTARG" >&2
+ echo "$usage" >&2
+ exit 1
+ ;;
+ esac
+done
+
+ROOT_DIR=".."
+ROOT_DIR=$(abspath $ROOT_DIR) # Convert to absolute path
+
+RELEASE_PATH="$ROOT_DIR/releases"
+TEMP_ZIP="${RELEASE_PATH}/temp.zip"
+TEMP_DIR="${RELEASE_PATH}/temp"
+
+HELM_SOURCE="$ROOT_DIR/charts"
+MODULES_SOURCE="$ROOT_DIR/terraform/modules"
+ROOT_MODULE_PATH="$ROOT_DIR/terraform/oke"
+
+if [ -n "$LIVE_LAB_BUILD" ]; then
+ PREFIX="livelab"
+else
+ PREFIX="oke"
+fi
+
+# Create a release DIR if it does not exist already.
+if test ! -d "$RELEASE_PATH"; then
+ mkdir "${RELEASE_PATH}" || error_and_exit "Could not create releases DIR."
+ echo -e "Create release DIR: ${RELEASE_PATH}"
+fi
+
+# Change to git repo
+cd "$ROOT_DIR" || error_and_exit "Could not switch DIR"
+
+# Decide on final zip name
+if test -z "${release_name}"; then
+ BRANCH=$(git symbolic-ref --short HEAD)
+ COMMIT_HASH_SHORT=$(git rev-parse --short HEAD)
+ COMMIT_COUNT=$(git rev-list --count HEAD)
+ release_name="${PREFIX}-${BRANCH}-${COMMIT_HASH_SHORT}-${COMMIT_COUNT}"
+fi
+
+RELEASE_ZIP="${RELEASE_PATH}/${release_name}.zip"
+
+echo -e ""
+echo -e "Build parameters - "
+echo -e ""
+echo -e "ROOT_DIR = $ROOT_DIR"
+echo -e "HELM_SOURCE = $HELM_SOURCE"
+echo -e "MODULES_SOURCE = $MODULES_SOURCE"
+echo -e "TEMP_DIR = $TEMP_DIR"
+echo -e "TEMP_ZIP = $TEMP_ZIP"
+echo -e "RELEASE_ZIP = $RELEASE_ZIP"
+echo -e "ROOT_MODULE_PATH = $ROOT_MODULE_PATH"
+echo -e ""
+
+# Clean up stale dirs and files
+rm "${RELEASE_ZIP}" 2>/dev/null && echo -e "Removed stale release zip"
+rm "$TEMP_ZIP" 2>/dev/null && echo -e "Removed stale temp zip"
+rm -rf "$TEMP_DIR" 2>/dev/null && echo -e "Removed stale temp dir"
+
+# Switch to Root Module for gitzip
+cd $ROOT_MODULE_PATH || echo -e "Failed to Switch to root module"
+
+# Create git archive as temp.zip
+git archive HEAD -o "$TEMP_ZIP" --format=zip >/dev/null || error_and_exit "git archive failed."
+echo -e "Created Git archive - temp.zip"
+
+# Switch back to release dir
+# cd "$RELEASE_PATH" || error_and_exit "Could not switch back to releases dir."
+# echo -e "Switched back to releases DIR."
+
+# unzip the temp.zip file
+unzip -d "$TEMP_DIR" "$TEMP_ZIP" >/dev/null || error_and_exit "Could not unzip temp.zip"
+echo -e "Unzipped temp.zip to temp dir"
+
+# remove the helm-chart symlink
+rm "$TEMP_DIR/charts" || error_and_exit "Could not remove helm-chart symlink"
+echo -e "Removed helm-chart symlink"
+
+# copy the helm-chart
+cp -R "$HELM_SOURCE" "$TEMP_DIR" || error_and_exit "Could not copy helm chart"
+echo -e "Copied helm-chart to temp dir"
+
+# remove the terraform modules symlink
+rm "$TEMP_DIR/modules" || error_and_exit "Could not remove modules symlink"
+echo -e "Removed terraform modules symlink"
+
+# copy the modules
+cp -R "$MODULES_SOURCE" "$TEMP_DIR" || error_and_exit "Could not copy modules"
+echo -e "Copied orignal modules"
+
+# to be fixed from here -
+
+cd "$TEMP_DIR" || error_and_exit "Could not switch to temp dir"
+echo -e "Switched to temp dir"
+
+# update livelab switch input to true
+if [ -n "$LIVE_LAB_BUILD" ]; then
+ sed "s/false/true/g" -i livelab.tf
+ echo -e "Enabled livelab switch in livelab.tf"
+fi
+
+zip -r "${RELEASE_ZIP}" ./* >/dev/null || error_and_exit "Could not zip temp dir"
+
+cd "$RELEASE_PATH" || error_and_exit "Could not switch to Util dir"
+
+# clean up temp zip file
+rm "$TEMP_ZIP" 2>/dev/null && echo -e "stale zip file removed."
+rm -rf "$TEMP_DIR" 2>/dev/null && echo -e "stale zip dir removed."
+
+echo -e "\nNew Release Created - $RELEASE_PATH/$release_name.zip"
+
+
+