diff --git a/docs/actions-and-automations/reflect-action-progress/reflect-action-progress.md b/docs/actions-and-automations/reflect-action-progress/reflect-action-progress.md index ff02838b2..0b63b878c 100644 --- a/docs/actions-and-automations/reflect-action-progress/reflect-action-progress.md +++ b/docs/actions-and-automations/reflect-action-progress/reflect-action-progress.md @@ -11,7 +11,22 @@ This page will teach you how to use Port's API to obtain existing action runs an ## Where to find your action runs -You can find your existing/finished action runs using one of the following methods: +### Non-admin roles + +`Members` and `moderators` can find their existing/finished action runs using one of the following methods: + +- The `My latest runs` list - click on the button in the top-right corner of the page. + + This will display the latest action runs that have been executed by the logged-in user: + + + You can also click on the `My run history` button to see a table with all of the user's previous action runs. + +- After executing an action from the [self-service page](https://app.getport.io/self-serve), the `My latest runs` list will open automatically, showing the newly created action run. + +### Admin role + +In addition to the methods mentioned above, `admins` can find action runs using one of the following methods: - The dedicated `Runs history` table: - **Self-service actions** - go to the [self-service page](https://app.getport.io/self-serve) of your portal, then click on the button in the top-right corner. @@ -22,10 +37,6 @@ You can find your existing/finished action runs using one of the following metho - Go the [entity page](/customize-pages-dashboards-and-plugins/page/entity-page.md) of your desired entity, then select the `Runs` tab. This page will display all action runs that have been executed for the selected Entity. 
-- Once you have at least one `in-progress` action run, a panel will be displayed on the right side of the page, showing the runs that are currently in progress. - -- After executing an action from the [self-service page](https://app.getport.io/self-serve), a toast will appear in the bottom-right of the page, with a link to the newly created action run. - ## Interacting with action runs Once an `actionRun` is created, it will have a unique `runId`. Using this id, you can interact with the action run using Port's API. diff --git a/static/apispec.yaml b/docs/api-reference/apispec.yaml similarity index 99% rename from static/apispec.yaml rename to docs/api-reference/apispec.yaml index 858ce98d8..ee326c3f7 100644 --- a/static/apispec.yaml +++ b/docs/api-reference/apispec.yaml @@ -16364,7 +16364,7 @@ paths: responses: "200": description: Default Response - '/v1/organization/secrets': + /v1/organization/secrets: get: description: This route allows you to retrieve all of the secrets in your organization.

To learn more about secrets management in Port, check out the [documentation](https://docs.getport.io/sso-rbac/port-secrets). summary: Get all organization secrets @@ -16372,9 +16372,9 @@ paths: - Organization security: - bearer: - - 'read:blueprints' + - read:secrets responses: - '200': + "200": description: Default Response post: summary: Create an organization secret @@ -16387,23 +16387,41 @@ paths: schema: type: object properties: - secret_name: + secretName: type: string description: The name of the new secret.
- secret_value: + maxLength: 50 + minLength: 1 + pattern: ^[A-Za-z0-9-_]*$ + secretValue: type: string description: The value of the new secret.
description: type: string description: An optional description of the new secret.
+ maxLength: 200 + additionalProperties: false required: - - secret_name - - secret_value + - secretName + - secretValue required: true security: - - bearer: [ ] + - bearer: + - create:secrets responses: - '200': + "200": + description: Default Response + "/v1/organization/secrets/{secret_name}": + get: + summary: Get an organization secret + description: This route allows you to retrieve a specific secret in your organization.

To learn more about secrets management in Port, check out the [documentation](https://docs.getport.io/sso-rbac/port-secrets). + tags: + - Organization + security: + - bearer: + - read:secrets + responses: + "200": description: Default Response patch: summary: Patch an organization secret @@ -16421,8 +16439,10 @@ paths: description: The new value of the secret.
description: type: string + maxLength: 200 description: The new description of the secret.
additionalProperties: false + required: [] parameters: - schema: type: string @@ -16431,9 +16451,10 @@ paths: required: true description: The name of the secret you want to patch.
security: - - bearer: [] + - bearer: + - update:secrets responses: - '200': + "200": description: Default Response delete: summary: Delete an organization secret @@ -16448,9 +16469,10 @@ paths: required: true description: The name of the secret you want to delete.
security: - - bearer: [ ] + - bearer: + - delete:secrets responses: - '200': + "200": description: Default Response /v1/apps: get: diff --git a/docs/api-reference/create-an-organization-secret.api.mdx b/docs/api-reference/create-an-organization-secret.api.mdx index a8f43a0f2..65d769c6a 100644 --- a/docs/api-reference/create-an-organization-secret.api.mdx +++ b/docs/api-reference/create-an-organization-secret.api.mdx @@ -5,9 +5,9 @@ description: "This route allows you to create an organization secret.

T sidebar_label: "Create an organization secret" hide_title: true hide_table_of_contents: true -api: eJzdVMtu2zAQ/BWCpxZwrKRHIwiQPg5FDwkS9+QaxUreWEwkkuXDiSvo37tLSbbsPNpzEUChydnl7swsG7lCXzhlgzJazuS8VF44EwMKqCrz6MXWRBGMKBwCb2ph3Bq0+g0cITzSQZie5y67SJ+5ERWC06I2juA5pepBXtSgYY016iCUFtfGhYkoSiweBKNCiWKxMkVkQMq+fFeGYP0sy2jbT9cYLMVMlcm8NycuhyLjjZM+//upnMgAay9nC3k1qlIuJ9Lhr4g+fDSrrZw1sjA60DW8BGsrVSRgdu+ZhUZ6KqsGXoWtReLF5PdYBMpvnbHogkKfcOnmnxpqHIF9cEqvCXzMLQpGCnOXmtX4eMCfbCdDwg1U8R8zJuhbKQ9C/pbxkvRNa6jE6OT1/G1HrXK4YtrHhBx1s2wPwcFF7DqOTgVSZdHInKyDjpbLNmnmrdG+o/rD6Sn/Oyz3M95BrIK46ZGpnhpDaegCaY1PkkEo6Ve2OcvG3s1626Q63QadTyVEVxF4MB5YNfId0fkSIPoDzHLf1C37qKt/aG2nAEV+wy3d3rlHXkYq2w2WnUjFDZYIK4rittibN3sXf3mC2lb4zIV7ZQ+99Jri/XbLF96ZVJ8KnFfygIrL668Uwux08LPpKWOZWhpnhvfXfnrrgTi+dTSA/8GT0+kZ8ClktgLSjQhKLml67y3k5oyA4zZ2+nh+nUp2KsGaJgeP313VtrxNYjseDFpuwCnIWReajcngCzbsA5mI6O/4PJlzMQzvRH/2uLE7dwNyfXU7J3DeP4q1WXGMg0fa5O9M/qA/rjzplnyc9htZgV5HopYwXV62KETuduzwh+TwfsHVDkd6O6ry2PpdS/zlgXsx5JyMKWh+Lnbw7uTVgJ6xAc0q8Yv0B1Hqb4Y= +api: 
eJzdVE2P0zAQ/SvWiAMgt2lX4kCEkMrHAYHYaikcqAqaJtPGu4mdtZ3udqP8dzRO2qbdXdgzF9d1nsfvzbyZGlJyiVWlV0ZDDLNMOWFN5UlgnpsbJ7amEt6IxBLyoRbGrlGrO+QbwlFiyQ/fLG30NiwzI3JCq0VhLAlcmsp3ICcK1LimgrQXSoupsV6KJKPkSjDKZyTmqUkqBoToi+eZ96WLoyg1iRuuyZfG+qEykXNmYJeYRHww6OK/GIIEj2sH8RzOeyxhIcHSdUXOvzPpFuIaEqM9ac9bLMtcJQEYXTrOQg0uyahA3vltSRCDWV5S4kFCaU1J1ityARde/ooF9bDOW6XXIO+lloTGgoRZBa2abo7SBxIKvP1Ceu0ziF+NJBRK7/6OJZToPVmO9Gs+GfzEwd1o8Hrwe/HyGTSyY/ID8+qJVDYMfZRLc3LlXxEnWpiwx1z0vjxN69lo1DQSME1VG2LaS/IKc0dt/ZSllGvby/qx8EXDcQ5Qbytqc1NZ5bcQz2tYElqyHKa1dNy5BxZNsIkrjXZtdc9GI/45VvqBVljlXlx0SOAnC/KZSSGG0rjgEmRdEG3GUb9dot1bzMluyLpAqbI5xLDzOpaqZ3Vo5EOAyh1hFgeR39i6Lf+d1H3xsFSfaQsSdHAsTCqfGbvrEgmKBWaEKdkgi9vh4tA4H2+xKHM6Nf7BE0cmfMwq3XHDz61MYKc8RwWeCGIy/QQSODctfDwcMZYTW2BwYkf+/d8m0umrvY7/D2ZcW01Ptz4qc1SaExQ8UnfOm8NmDBL6MvblcTwOM/ZpPIe6XqKj7zZvGj6+rshymywkbNAqXHJd5otG7lzBdr2iLae/zedgxmQY3hb93jRlb+7bY3r+bQYSlt0ULkzKdyzegAxrDMw61Cw4OJzVkKNeV7hmbBszjIuKlfa9fRW83W2Y6e6T3vYYnpq+lcMrt9qDV+q67Zym2ePbT4/e6NK1Q3OJeDj9ATqxlIo= sidebar_class_name: "post api-method" -info_path: api-reference/port-api +info_path: api-reference-temp/port-api custom_edit_url: null --- @@ -20,7 +20,6 @@ import ParamsItem from "@theme/ParamsItem"; import ResponseSamples from "@theme/ResponseSamples"; import SchemaItem from "@theme/SchemaItem"; import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; import Heading from "@theme/Heading"; import OperationTabs from "@theme/OperationTabs"; import TabItem from "@theme/TabItem"; @@ -86,16 +85,16 @@ This route allows you to create an organization secret.

To learn more a > "}} + qualifierMessage={"**Possible values:** `non-empty` and `<= 50 characters`, Value must match regular expression `^[A-Za-z0-9-_]*$`"} + schema={{"type":"string","description":"The name of the new secret.
","maxLength":50,"minLength":1,"pattern":"^[A-Za-z0-9-_]*$"}} >

To learn more a name={"description"} required={false} schemaName={"string"} - qualifierMessage={undefined} - schema={{"type":"string","description":"An optional description of the new secret.
"}} + qualifierMessage={"**Possible values:** `<= 200 characters`"} + schema={{"type":"string","description":"An optional description of the new secret.
","maxLength":200}} >
diff --git a/docs/api-reference/delete-an-organization-secret.api.mdx b/docs/api-reference/delete-an-organization-secret.api.mdx index f48534aa6..ad7df1ffd 100644 --- a/docs/api-reference/delete-an-organization-secret.api.mdx +++ b/docs/api-reference/delete-an-organization-secret.api.mdx @@ -5,9 +5,9 @@ description: "This route allows you to delete an organization secret.

T sidebar_label: "Delete an organization secret" hide_title: true hide_table_of_contents: true -api: eJzdU01r20AQ/SvLnBJQLLtHEQIB+1BaaEjdkzFlLY2txfKush9OXaH/3pmVlMquU+i1F2k/3uzMe/OmgQJdblXtldGQwbJUTlgTPApZVebViZMJwhtRYIV8qIWxO6nVT8kRwmFu0U/uNzZ9iJ+lERVKq8XBWIJv6Kke5MRBarnDA2ovlBZPxvpE5CXme8EoX6JYFSYPDIivr29K72uXpSkdu8kOfU0xE2VS58yd3cg85YO7/v3bCSTg5c5BtoIvoyphnUAtrTwQA8u3DTjKe5CQNeBPNRJx563SO2gTUKxDLX1Jr2mK4cuY4HvcJWDxJSiLBWTeBkz+UBAFA4XZRkpdbJTxVRLzNy070aCl4ggTrPKnWNqG5ENLyzVfWXS10Q4d1/phOuXfecI5bmWovHjukdASCaJaGqoQulyQdIwySI+zdNzBtBcPuAp7HPQJtiLwIL+s1Uh9EukaILgzzIjWVxa7YzCQe9OdIj/h6bfUj4EKt0Pj+m6UKAuKamN3tiaGK18xnl0kHp8+EpSL7ySZTabcydo4T55jeP/6/G8uhotWNpAb7cmM/8VcdHJ7/OHTupIkKwkUm9j01ljBcUbAMY1ois4e1M6S5GRY02ykw2+2als+fglo2bm0PEqr5Ib7QhYqlOM1mXArK3c5J2Nxb577iboV/zo97xAbzKXZWUdZBd7Rck9WOx/ndj2elvni82K5IKAMLMnYpfvo0n7B/K7muLRvl5C/PDRXQ+7JvYJm4OEN3t28G9BPw4Bmxuu2bX8BTdn/sg== +api: eJzdU0tr3DAQ/itiTgko62yPphQCyaG00JBuT4sps/bsWsSWHGm06dbov5exvYmzSQq99uKH9I0032N6qCiU3nRsnIUcVrUJyrvIpLBp3GNQBxcVO1VRQ7JolfM7tOY3SoUKVHrixceNzz4Nj5VTDaG3qnWeFG5c5AkUVIsWd9SSZWWsunWetSprKu+VoLgmta5cGQUwnF6c1cxdyLOscmVY7Ig753lhXBaCu/AbLDNZuJjOP1+ABsZdgHwN32ZdQqGhQ48tMXnZ7SGUNbUIeQ986AhyCOyN3UHSYESHDrkGDRbbYXO44Ofwp8HTQzSeKsjZR9KvFCQlQOW2A6WxdpDxES0/azmKBqnQcn70hg9DaxtCT144jLh8ogeFQD2FztlAQXr/cHkpr5cNXNMWY8PqbkJCShpa4tpVkE9ngh4Z5pDtl9nc0Wy6LetnpBNIj35/VC/6BnI4moOdmXkDSb8FiOEFZkb6u1gx8jlSf3IFO/OFDs9GXEWunT/aOnlVE1bkB5rGbt1QbrgRvGRMXd1+Bg3S/CjQcnEpPncucItW4NPp13/LOJwY3UPpLJPl/2JqRrmZfnHWNWisCDSY2E9BWcN+CRrmNIZQjNHUkM9npNBQu8BS1fcbDPTDNynJ8kMkLzEvNOzRG9yITeseKhPku4J8i004Haq51md30/idq38dtXd4HrNmJWh7bKL8gYZ7OpzMfirmo3R98/VmdQMaMIpC89DeD6GdPoTfm3ecpnm8UJ4yQ2+W9P04Eik94cetdyum6TiihXKRUvoD5pgSPg== sidebar_class_name: "delete api-method" -info_path: api-reference/port-api +info_path: api-reference-temp/port-api custom_edit_url: null --- @@ -20,7 +20,6 @@ import ParamsItem from "@theme/ParamsItem"; import ResponseSamples from "@theme/ResponseSamples"; import SchemaItem from "@theme/SchemaItem"; import 
SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; import Heading from "@theme/Heading"; import OperationTabs from "@theme/OperationTabs"; import TabItem from "@theme/TabItem"; @@ -34,7 +33,7 @@ import TabItem from "@theme/TabItem"; diff --git a/docs/api-reference/get-all-organization-secrets.api.mdx b/docs/api-reference/get-all-organization-secrets.api.mdx index f3eb7edf1..6d8806302 100644 --- a/docs/api-reference/get-all-organization-secrets.api.mdx +++ b/docs/api-reference/get-all-organization-secrets.api.mdx @@ -5,9 +5,9 @@ description: "This route allows you to retrieve all of the secrets in your organ sidebar_label: "Get all organization secrets" hide_title: true hide_table_of_contents: true -api: eJzdU8tu2zAQ/BWCpxZQJLtHIwgQoEVQ9NCgdU+GDpS8tohIpLpcqnUF/Xt39UhkwP2BXmRpOTucnR33+gihRNuS9U7v9L6yQaGPBMrUtf8V1MVHRV4hEFroxrLyJ0UVqAAll4OyTlCoPJ6Ns3+McKX3BWYP42PvVQ0GnWo8cn/B7K+tjXHmDA04EpZnj5SosoLyRQlKLjkcfRkFMNLm7yqiNuyyjMshPQO13JNan4Xg77AwZSaFu5n/faoTTeYc9O6gv67k6TzRjIlo6cJnvS5YIaDAEMxxV9QRWrSOgs4HxiKE1rsATNTrD5uN/Fw79xFOJtakvs1IPQyJboAqf+RTFspKWkMVf2TdNlt7lc1qtWjCDjCMkiLWDF7mNa1djauH5BYghivMasjv7GozyV9G7TVdWmAG7vwCF77dmUa+HyOrxsWpRFuZr2JbuEumsu7kx3ZLteBlberx+TNDRfzkxzbdsEjd+kC8ZIHP7E9AU4hWDqg3B65c7XXpHfHy/9doTgsg+E1ZWxs2mi0b19rPYTnobsvAtf4xJpNdvOCKDRZY3xcmwA+sh0HKPyOgJJtfO4PWFLKpQ77O5NOnPVOZKNess/AyZmF+kSAuR07qneG/xo2QjODxKdG82XLPGVGctIdX+HTyz4Y5cwta7MmHYfgL10Oiig== +api: 
eJzdkk2P1DAMhv9K5BNI2ekMxwohrQRaIQ6sYDiNekhbzzTaNimOUxiq/HfkfiwdafcPcOmH89qxXz8j1Bgqsj1b7yCHY2ODIh8ZlWlb/yuoq4+KvSJksjhMYeXPihtUAStCDso6UZHydDHO/jFSa/e+pOzD9Dh61aIhpzpPqEzpIz+ndsaZC3boWKo8emKtqgarJyUqueRU+yqKYCpbvGmY+5BnWe2rsLsg9554Z30Wgr+j0lSZBO6W+m93oIHNJUB+gq+b9qDQELCKZPkK+WmEEg0hiYzQ1PmSD0UqNBCG3ruAAfIR3u338rq17SOeTWxZfVuUkJKGDrnxNeRwQQYNveEGcsiGQ7Y1KluvkoZoQApTP5FayGEd1vR2Mysk/ZIghhvNZsLvVYPd3P465wh87RFyML39glfQ4Ewn//eRG0+rTRqszNegqZGmqaw7+yndcit62Zm6f/wMGqT52Y/Dbg9JQ+8Dd8aJfKn+gDwTtHFA/XPgxtURKu8YHf+vXM4LYPzNWd8a68Syaa3jAssJhgNo2PY/YbKwqaHxgUU2jqUJ+IPalCT8MyIJ1oWGwZA1pWzqVGyZfPh0BA0myjVbFp4mFpYPAXE9chIfTBtfgmQST09B88WUcZxJS+lZPx+9mrFAt6rFnyKl9BfwTKLx sidebar_class_name: "get api-method" -info_path: api-reference/port-api +info_path: api-reference-temp/port-api custom_edit_url: null --- @@ -20,7 +20,6 @@ import ParamsItem from "@theme/ParamsItem"; import ResponseSamples from "@theme/ResponseSamples"; import SchemaItem from "@theme/SchemaItem"; import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; import Heading from "@theme/Heading"; import OperationTabs from "@theme/OperationTabs"; import TabItem from "@theme/TabItem"; diff --git a/docs/api-reference/get-an-organization-secret.api.mdx b/docs/api-reference/get-an-organization-secret.api.mdx new file mode 100644 index 000000000..2bd290e3c --- /dev/null +++ b/docs/api-reference/get-an-organization-secret.api.mdx @@ -0,0 +1,68 @@ +--- +id: get-an-organization-secret +title: "Get an organization secret" +description: "This route allows you to retrieve a specific secret in your organization.

To learn more about secrets management in Port, check out the [documentation](https://docs.getport.io/sso-rbac/port-secrets)." +sidebar_label: "Get an organization secret" +hide_title: true +hide_table_of_contents: true +api: eJzdksGK2zAQhl9FzKkFbZz0aEphoWUpPXRp01MwZWJPYrG25I7GblOjdy9jO8WB9AV6kY30z2j+X98IFcWSXScueMhhX7toOPRCBpsm/IzmEnojwTAJOxrIoIkdle7kShOpZBLjvIrYBD6jd79RW23eHjl7Ny37YBpC9qYNTAaPoZelMpoWPZ6pJT91eQ4s1pQ1lS9GVVKTOVSh7FUwtS1e1SJdzLOsCmXcnEm6wLJxIYsxPPARy0w3Hpb+rzdgQfAcIT/A59V4UFiIVPbs5AL5YYQjIROrjAmrfKmHIhUWmGIXfKQI+Qhvtlv93Kb2nk7YN2K+LEpIyUJLUocKcjiTgIUOpYYcsmGXrYPKlquycf757rGlBDoeD8Rxmq7nBnK4WsfOrZxDsvcEfbzRrPx+LWtqZzNX1yPIpSPIATv3iS5gQaeAHB57qQNfQ7Pg1G1NWBFPHp0/hancSaN6fUHz+PwRLOjwczq7zRaShS5EadGrfOn+RGLQ33CzkAH2NuERyuCFvPyniM7pC/2SrGvQec1retNx4eYAww4srOefGJkxtZCv6FG46xBFq8bxiJG+cZOSbv/oiRX4wsKA7PCor3Yo1rQ+fdiDBez11jUXLxMXy49CeT3yuj9g098DZhJPq2J6t2QcZ+pS+qufj/5ZsQB4VWtcRUrpD66ZrHs= +sidebar_class_name: "get api-method" +info_path: api-reference-temp/port-api +custom_edit_url: null +--- + +import ApiTabs from "@theme/ApiTabs"; +import DiscriminatorTabs from "@theme/DiscriminatorTabs"; +import MethodEndpoint from "@theme/ApiExplorer/MethodEndpoint"; +import SecuritySchemes from "@theme/ApiExplorer/SecuritySchemes"; +import MimeTabs from "@theme/MimeTabs"; +import ParamsItem from "@theme/ParamsItem"; +import ResponseSamples from "@theme/ResponseSamples"; +import SchemaItem from "@theme/SchemaItem"; +import SchemaTabs from "@theme/SchemaTabs"; +import Heading from "@theme/Heading"; +import OperationTabs from "@theme/OperationTabs"; +import TabItem from "@theme/TabItem"; + + + + + + + + + + +This route allows you to retrieve a specific secret in your organization.

To learn more about secrets management in Port, check out the [documentation](https://docs.getport.io/sso-rbac/port-secrets). + +
+
+ + +
+ + + Default Response + + +
+ +
+
+
+
+
+ \ No newline at end of file diff --git a/docs/api-reference/patch-an-organization-secret.api.mdx b/docs/api-reference/patch-an-organization-secret.api.mdx index 78c34a24d..1b1ffd343 100644 --- a/docs/api-reference/patch-an-organization-secret.api.mdx +++ b/docs/api-reference/patch-an-organization-secret.api.mdx @@ -5,9 +5,9 @@ description: "This route allows you to patch an organization secret.

To sidebar_label: "Patch an organization secret" hide_title: true hide_table_of_contents: true -api: eJzVVMFu2zAM/RVBpxVo43ZHoyjQdQM27LCgy3bpgoG2mVitbWmSnDQL/O8jZTl10rTYjkMBV5EeqcfHR21lgS63ynilG5nKWamcsLr1KKCq9NqJjW6F18KAz0sBjdB2CY36DRwgHOYW/eQys8lV+My0qBBsI2ptKUVGmSLIiRoaWGKNjT8VeYn5g+BTX6K4K3Te8kHIOn9Tem9cmiS07SZL9EZbP1E6cU6f2QzyhDfOYt6TiTyVHpZOpnfyy4idnJ9Ki79adP6dLjYy3cpcN56u4SUYU6k8AJN7x8VvpSNaNfDKbwySHDq7x9xTfmO1QesVuoALN3+HqsUR2HmrmiWBDyVF0eBarBgu9CJUPBZOdgchf5txtHs8LyWGolAMgGo6KmEBlcOuY4ABCzV6tKzfEQkiB0Iqvpt8UBKhhmL4MFz3M/zqxVYWC5l62+JR1gTcpxr8tYbG70w2kKfuEaS1ym8Cs4x8hZaW8y401hnduL4fb8/P+d/+fe9xAW3lxW1EBjmo0lIXfR05FxLqSWWyukjGzk6iuSSTsKtBndZWBB7sCUaN3EkSHQO0bg8zquorS90XMNS2U50iP+PmSejrlnjbwdixFyVCQVFcFzv49snrHx6hNhU+8+pLjho3eaEDD+U5Xk6JuLiefqIQVqGHX0zOGWu08zTUDI80p6+8EoeXjqbx/312+m55fPSJqYC6QrIED2yjs+7k6oKAY/rBU727yA0liciw7TYDh99s1XW8Ta207HtarsAqyLgb5MBCOV4XcYZf0fTNbRzHE/GPo/dCXYM1G/blKhqKlg9k1P2noJsTPLqTSfeIm57a2YzzPGV49hDzjOzmdHo9u/lI6Cy+4LUuOMjCmh8c+qbyB/2xxEGDME5hfysraJYt9Z4wfeLwILbclvGgPYRBiwume7TQwwnsa+Ivz/3RkEuaG0FjfLWD9ycvBkTJBjTLPifSfwDYOKYi +api: 
eJzVVV1v2zoM/SsCnzZAjdM9GsMFug9gwwYs6HrvSxAMjM3GWm1Jk+ikmeH/fkHZSb00GbDHvSSKdETxkOcwHZQUi2A8G2chh7vKRBVcy6Swrt0uqr1rFTvlkYtKoVUubNCanygXVKQiEM9er0P2T/q4c6omDFY1LpDCtWt5BEXVoMUNNWRZq6Ki4kHJKVeklqUrWjlIUVcvKmYf8ywrXRFnG2LvAs+My2J0V2GNRSYbV2PclzPQwLiJkC/hyyQ7WGkI9KOlyG9cuYe8g8JZJsuyRO9rUyRg9j0K+Q5iUVGDsuK9J8jBrb9TwaDBB+cpsKGYcOnl/7BuaQKOHIzdgH5WUlKWdmorcOXuE+Np4aA/ufI8YoOPn8luuIL81Xx+6YXJ7vl3eg1YlkYAWC8mlO6xjjRUywQqIV+u+l7gHgM2xBSkumcKNGbYazCSiUeuQIPFJh2mx7+lX9PgHFo6ywGbkwIl9e3Q8lGCByorLeHbYHifMlsTBgoigdaXyJSP6oBVn2QQvbNx6N6r+Vy+fn3/Hd1jW7O6HZGpWA1x5cqBVyHEEr8csu11NvVBNj6WdRPKPUiKYXuoXRtqyOEgbfRmomzo9TlAG3/BTDh/lUYMdA7Mjz1Bbz7R/qkNNy1XLhxMMXaqIiwpJJai/tsnn7x/xMbX9Eznl/Q9lcC9S3kYlvuwcIHVzeIjaJAqDPDr2Vyw3kVuMIl9THPxmwlz+ujEyX/vyBq6xfTIma/RWClL0kA36mwJ22vQME0/aWoQtoZ8arCVhspFlltdt8ZI/4a672X7R0tBTLLSsMVgcC3NWXZQmijr8mj/iyV+cTt696X6Q59eoHlQqhWZbkd9gYYH2p/MjX7V64NYJekB8XZI7epO4jxFeDbTxTJHEy9u7t5+AA3r8c+gcaVcCriT6YS7IQWX+Cdnpb0OarSbFjeCHYKmOdpKh6aee0ieGxeS6lmSp2Yc+MinjICzV7pucHTfH/HD0cUbY70OaKm5zPP/AQxdwVo= sidebar_class_name: "patch api-method" -info_path: api-reference/port-api +info_path: api-reference-temp/port-api custom_edit_url: null --- @@ -20,7 +20,6 @@ import ParamsItem from "@theme/ParamsItem"; import ResponseSamples from "@theme/ResponseSamples"; import SchemaItem from "@theme/SchemaItem"; import SchemaTabs from "@theme/SchemaTabs"; -import Markdown from "@theme/Markdown"; import Heading from "@theme/Heading"; import OperationTabs from "@theme/OperationTabs"; import TabItem from "@theme/TabItem"; @@ -34,7 +33,7 @@ import TabItem from "@theme/TabItem"; @@ -118,8 +117,8 @@ This route allows you to patch an organization secret.

To learn more ab name={"description"} required={false} schemaName={"string"} - qualifierMessage={undefined} - schema={{"type":"string","description":"The new description of the secret.
"}} + qualifierMessage={"**Possible values:** `<= 200 characters`"} + schema={{"type":"string","maxLength":200,"description":"The new description of the secret.
"}} > diff --git a/docs/api-reference/sidebar.ts b/docs/api-reference/sidebar.ts index 77bfc78a1..cf52fa83b 100644 --- a/docs/api-reference/sidebar.ts +++ b/docs/api-reference/sidebar.ts @@ -552,6 +552,12 @@ const sidebar: SidebarsConfig = { label: "Create an organization secret", className: "api-method post", }, + { + type: "doc", + id: "api-reference/get-an-organization-secret", + label: "Get an organization secret", + className: "api-method get", + }, { type: "doc", id: "api-reference/patch-an-organization-secret", diff --git a/docs/build-your-software-catalog/custom-integration/webhook/examples/bitbucket-server.md b/docs/build-your-software-catalog/custom-integration/webhook/examples/bitbucket-server.md index 77a6bd8c8..03e756bca 100644 --- a/docs/build-your-software-catalog/custom-integration/webhook/examples/bitbucket-server.md +++ b/docs/build-your-software-catalog/custom-integration/webhook/examples/bitbucket-server.md @@ -303,6 +303,8 @@ In addition, provide the following environment variables: - `BITBUCKET_PROJECTS_FILTER` - An optional comma separated list of Bitbucket projects to filter. If not provided, all projects will be fetched. - `WEBHOOK_SECRET` - An optional secret to use when creating a webhook in Port. If not provided, `bitbucket_webhook_secret` will be used. - `PORT_API_URL` - An optional variable that defaults to the EU Port API `https://api.getport.io/v1`. For US organizations use `https://api.us.getport.io/v1` instead. +- `IS_VERSION_8_7_OR_OLDER` - An optional variable that specifies whether the Bitbucket version is older than 8.7. This setting determines if webhooks should be created at the repository level (for older versions `<=8.7`) or at the project level (for newer versions `>=8.8`). +- `PULL_REQUEST_STATE` - An optional variable to specify the state of Bitbucket pull requests to be ingested. Accepted values are `"ALL"`, `"OPEN"`, `"MERGED"`, or `"DECLINED"`. If not specified, the default value is `OPEN`. 
:::tip Webhook Configuration This app will automatically set up a webhook that allows Bitbucket to send events to Port. To understand more about how Bitbucket sends event payloads via webhooks, you can refer to [this documentation](https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html). @@ -320,6 +322,10 @@ Use the following Python script to set up webhook and ingest historical Bitbucke
Bitbucket Python script +:::tip Latest Version +You can pull the latest version of this code by cloning this [repository](https://github.com/port-labs/bitbucket-workspace-data/) +::: +
diff --git a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_pull_request_blueprint.mdx b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_pull_request_blueprint.mdx index 61dafbed8..a09023731 100644 --- a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_pull_request_blueprint.mdx +++ b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_pull_request_blueprint.mdx @@ -72,6 +72,12 @@ "title": "Merge Commit", "type": "string", "icon": "DefaultProperty" + }, + "mergedAt": { + "title": "Merged At", + "type": "string", + "format": "date-time", + "icon": "DefaultProperty" } }, "required": [] diff --git a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_python_script.mdx b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_python_script.mdx index 2cbd2f5b2..596095588 100644 --- a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_python_script.mdx +++ b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_python_script.mdx @@ -1,7 +1,8 @@ ```python showLineNumbers import json +import re import time -from datetime import datetime +from datetime import datetime, timedelta import asyncio from typing import Any, Optional import httpx @@ -9,7 +10,7 @@ from decouple import config from loguru import logger from httpx import BasicAuth -# These are the credentials passed by the variables of your pipeline to your tasks and in to your env +# These are the credentials passed by the variables of your pipeline to your tasks and into your env PORT_CLIENT_ID = 
config("PORT_CLIENT_ID") PORT_CLIENT_SECRET = config("PORT_CLIENT_SECRET") BITBUCKET_USERNAME = config("BITBUCKET_USERNAME") @@ -18,8 +19,12 @@ BITBUCKET_API_URL = config("BITBUCKET_HOST") BITBUCKET_PROJECTS_FILTER = config( "BITBUCKET_PROJECTS_FILTER", cast=lambda v: v.split(",") if v else None, default=[] ) -PORT_API_URL = config("PORT_API_URL", default="https://api.getport.io/v1") +PORT_API_URL = config("PORT_API_URL", default="https://api.getport.io/v1") WEBHOOK_SECRET = config("WEBHOOK_SECRET", default="bitbucket_webhook_secret") +IS_VERSION_8_7_OR_OLDER = config("IS_VERSION_8_7_OR_OLDER", default=False) +VALID_PULL_REQUEST_STATES = {"ALL", "OPEN", "MERGED", "DECLINED"} +PULL_REQUEST_STATE = config("PULL_REQUEST_STATE", default="OPEN").upper() + # According to https://support.atlassian.com/bitbucket-cloud/docs/api-request-limits/ RATE_LIMIT = 1000 # Maximum number of requests allowed per hour @@ -46,35 +51,95 @@ WEBHOOK_EVENTS = [ # Initialize rate limiting variables request_count = 0 rate_limit_start = time.time() - +port_access_token, token_expiry_time = None, datetime.now() +port_headers = {} bitbucket_auth = BasicAuth(username=BITBUCKET_USERNAME, password=BITBUCKET_PASSWORD) +client = httpx.AsyncClient(timeout=httpx.Timeout(60)) -# Obtain the access token synchronously -credentials = {"clientId": PORT_CLIENT_ID, "clientSecret": PORT_CLIENT_SECRET} -token_response = httpx.post(f"{PORT_API_URL}/auth/access_token", json=credentials) -port_headers = {"Authorization": f"Bearer {token_response.json()['accessToken']}"} -# Initialize the global AsyncClient with a timeout -client = httpx.AsyncClient(timeout=httpx.Timeout(60)) +async def get_access_token(): + credentials = {"clientId": PORT_CLIENT_ID, "clientSecret": PORT_CLIENT_SECRET} + token_response = await client.post( + f"{PORT_API_URL}/auth/access_token", json=credentials + ) + response_data = token_response.json() + access_token = response_data["accessToken"] + expires_in = response_data["expiresIn"] + 
token_expiry_time = datetime.now() + timedelta(seconds=expires_in) + return access_token, token_expiry_time -async def get_or_create_port_webhook(): - logger.info("Checking if a Bitbucket webhook is configured on Port...") + +async def refresh_access_token(): + global port_access_token, token_expiry_time, port_headers + logger.info("Refreshing access token...") + port_access_token, token_expiry_time = await get_access_token() + port_headers = {"Authorization": f"Bearer {port_access_token}"} + logger.info(f"New token received. Expiry time: {token_expiry_time}") + + +async def refresh_token_if_expired(): + if datetime.now() >= token_expiry_time: + await refresh_access_token() + + +async def refresh_token_and_retry(method: str, url: str, **kwargs): + await refresh_access_token() + response = await client.request(method, url, headers=port_headers, **kwargs) + return response + + +def sanitize_identifier(identifier: str) -> str: + pattern = r"[^A-Za-z0-9@_.+:\/=-]" + # Replace any character that does not match the pattern with an underscore + return re.sub(pattern, "_", identifier) + + +async def send_port_request(method: str, endpoint: str, payload: Optional[dict] = None): + global port_access_token, token_expiry_time, port_headers + await refresh_token_if_expired() + url = f"{PORT_API_URL}/{endpoint}" try: - response = await client.get( - f"{PORT_API_URL}/webhooks/{WEBHOOK_IDENTIFIER}", - headers=port_headers, - ) + response = await client.request(method, url, headers=port_headers, json=payload) response.raise_for_status() - webhook_url = response.json().get("integration", {}).get("url") - logger.info(f"Webhook configuration exists in Port. URL: {webhook_url}") - return webhook_url + return response except httpx.HTTPStatusError as e: - if e.response.status_code == 404: + if e.response.status_code == 401: + logger.info("Received 401 Unauthorized. 
Refreshing token and retrying...") + try: + response = await refresh_token_and_retry(method, url, json=payload) + response.raise_for_status() + return response + except httpx.HTTPStatusError as e: + logger.error( + f"Error after retrying: {e.response.status_code}, {e.response.text}" + ) + return {"status_code": e.response.status_code, "response": e.response} + else: + logger.error( + f"HTTP error occurred: {e.response.status_code}, {e.response.text}" + ) + return {"status_code": e.response.status_code, "response": e.response} + except httpx.HTTPError as e: + logger.error(f"HTTP error occurred: {e}") + return {"status_code": None, "error": e} + + +async def get_or_create_port_webhook(): + logger.info("Checking if a Bitbucket webhook is configured on Port...") + response = await send_port_request( + method="GET", endpoint=f"webhooks/{WEBHOOK_IDENTIFIER}" + ) + if isinstance(response, dict): + if response.get("status_code") == 404: logger.info("Port webhook not found, creating a new one.") return await create_port_webhook() else: - logger.error(f"Error checking Port webhook: {e.response.status_code}") return None + else: + webhook_url = response.json().get("integration", {}).get("url") + logger.info(f"Webhook configuration exists in Port. 
URL: {webhook_url}") + return webhook_url + async def create_port_webhook(): logger.info("Creating a webhook for Bitbucket on Port...") @@ -96,43 +161,111 @@ async def create_port_webhook(): }, "integrationType": "custom", } + response = await send_port_request( + method="POST", endpoint="webhooks", payload=webhook_data + ) + if isinstance(response, dict): + if response.get("status_code") == 442: + logger.error("Incorrect mapping, kindly fix!") + return None + else: + webhook_url = response.json().get("integration", {}).get("url") + logger.info( + f"Webhook configuration successfully created in Port: {webhook_url}" + ) + return webhook_url + + +def generate_webhook_data(webhook_url: str, events: list[str]) -> dict: + return { + "name": "Port Webhook", + "url": webhook_url, + "events": events, + "active": True, + "sslVerificationRequired": True, + "configuration": {"secret": WEBHOOK_SECRET, "createdBy": "Port"}, + } + + +async def create_project_level_webhook( + project_key: str, webhook_url: str, events: list[str] +): + logger.info(f"Creating project-level webhook for project: {project_key}") + webhook_data = generate_webhook_data(webhook_url, events) try: response = await client.post( - f"{PORT_API_URL}/webhooks", + f"{BITBUCKET_API_URL}/rest/api/1.0/projects/{project_key}/webhooks", json=webhook_data, - headers=port_headers, + auth=bitbucket_auth, ) response.raise_for_status() - webhook_url = response.json().get("integration", {}).get("url") - logger.info(f"Webhook configuration successfully created in Port: {webhook_url}") - return webhook_url + logger.info(f"Successfully created project-level webhook for {project_key}") + return response.json() except httpx.HTTPStatusError as e: - if e.response.status_code == 442: - logger.error("Incorrect mapping, kindly fix!") - return None - logger.error(f"Error creating Port webhook: {e.response.status_code}") + logger.error( + f"HTTP error when creating webhook for project: {project_key} code: {e.response.status_code} 
response: {e.response.text}" + ) + return None + + +async def create_repo_level_webhook( + project_key: str, repo_key: str, webhook_url: str, events: list[str] +): + logger.info(f"Creating repo-level webhook for repo: {repo_key}") + webhook_data = generate_webhook_data(webhook_url, events) + + try: + response = await client.post( + f"{BITBUCKET_API_URL}/rest/api/1.0/projects/{project_key}/repos/{repo_key}/webhooks", + json=webhook_data, + auth=bitbucket_auth, + ) + response.raise_for_status() + logger.info(f"Successfully created repo-level webhook for {repo_key}") + return response.json() + except httpx.HTTPStatusError as e: + logger.error( + f"HTTP error when creating webhook for repo: {repo_key} code: {e.response.status_code} response: {e.response.text}" + ) return None -async def get_or_create_project_webhook(project_key: str, webhook_url: str, events: list[str]): - logger.info(f"Checking webhooks for project: {project_key}") + +async def get_or_create_bitbucket_webhook( + project_key: str, + webhook_url: str, + events: list[str], + repo_key: Optional[str] = None, +): + logger.info(f"Checking webhooks for {repo_key or project_key}") if webhook_url is not None: try: matching_webhooks = [ webhook async for project_webhooks_batch in get_paginated_resource( - path=f"projects/{project_key}/webhooks" + path=( + f"projects/{project_key}/repos/{repo_key}/webhooks" + if repo_key + else f"projects/{project_key}/webhooks" + ) ) for webhook in project_webhooks_batch if webhook["url"] == webhook_url ] if matching_webhooks: - logger.info(f"Webhook already exists for project {project_key}") + logger.info(f"Webhook already exists for {repo_key or project_key}.") return matching_webhooks[0] - logger.info(f"Webhook not found for project {project_key}. Creating a new one.") - return await create_project_webhook( - project_key=project_key, webhook_url=webhook_url, events=events + logger.info( + f"Webhook not found for {repo_key or project_key}. Creating a new one." 
) + if repo_key: + return await create_repo_level_webhook( + project_key, repo_key, webhook_url, events + ) + else: + return await create_project_level_webhook( + project_key, webhook_url, events + ) except httpx.HTTPStatusError as e: logger.error( f"HTTP error when checking webhooks for project: {project_key} code: {e.response.status_code} response: {e.response.text}" @@ -142,47 +275,22 @@ async def get_or_create_project_webhook(project_key: str, webhook_url: str, even logger.error("Port webhook URL is not available. Skipping webhook check...") return None -async def create_project_webhook(project_key: str, webhook_url: str, events: list[str]): - logger.info(f"Creating webhook for project: {project_key}") - webhook_data = { - "name": "Port Webhook", - "url": webhook_url, - "events": events, - "active": True, - "sslVerificationRequired": True, - "configuration": { - "secret": WEBHOOK_SECRET, - "createdBy": "Port", - }, - } - try: - response = await client.post( - f"{BITBUCKET_API_URL}/rest/api/1.0/projects/{project_key}/webhooks", - json=webhook_data, - auth=bitbucket_auth, - ) - response.raise_for_status() - logger.info(f"Successfully created webhook for project {project_key}") - return response.json() - except httpx.HTTPStatusError as e: - logger.error( - f"HTTP error when creating webhook for project: {project_key} code: {e.response.status_code} response: {e.response.text}" - ) - return None async def add_entity_to_port(blueprint_id, entity_object): - response = await client.post( - f"{PORT_API_URL}/blueprints/{blueprint_id}/entities?upsert=true&merge=true", - json=entity_object, - headers=port_headers, + response = await send_port_request( + method="POST", + endpoint=f"blueprints/{blueprint_id}/entities?upsert=true&merge=true", + payload=entity_object, ) - logger.info(response.json()) + if not isinstance(response, dict): + logger.info(response.json()) + async def get_paginated_resource( - path: str, - params: dict[str, Any] = None, - page_size: int = 25, - 
full_response: bool = False, + path: str, + params: dict[str, Any] = None, + page_size: int = 25, + full_response: bool = False, ): global request_count, rate_limit_start @@ -211,7 +319,9 @@ async def get_paginated_resource( response.raise_for_status() page_json = response.json() request_count += 1 - logger.debug(f"Requested data for {path}, with params: {params} and response code: {response.status_code}") + logger.debug( + f"Requested data for {path}, with params: {params} and response code: {response.status_code}" + ) if full_response: yield page_json else: @@ -234,6 +344,7 @@ async def get_paginated_resource( logger.error(f"HTTP occurred while fetching Bitbucket data: {e}") logger.info(f"Successfully fetched paginated data for {path}") + async def get_single_project(project_key: str): response = await client.get( f"{BITBUCKET_API_URL}/rest/api/1.0/projects/{project_key}", auth=bitbucket_auth @@ -241,10 +352,12 @@ async def get_single_project(project_key: str): response.raise_for_status() return response.json() + def convert_to_datetime(timestamp: int): converted_datetime = datetime.utcfromtimestamp(timestamp / 1000.0) return converted_datetime.strftime("%Y-%m-%dT%H:%M:%SZ") + def parse_repository_file_response(file_response: dict[str, Any]) -> str: lines = file_response.get("lines", []) logger.info(f"Received readme file with {len(lines)} entries") @@ -255,13 +368,12 @@ def parse_repository_file_response(file_response: dict[str, Any]) -> str: return readme_content + async def process_user_entities(users_data: list[dict[str, Any]]): blueprint_id = "bitbucketUser" for user in users_data: - logger.warning(f"Processing user: {user}") entity = { - "identifier": user.get("emailAddress"), "title": user.get("displayName"), "properties": { "username": user.get("name"), @@ -269,14 +381,17 @@ async def process_user_entities(users_data: list[dict[str, Any]]): }, "relations": {}, } + identifier = str(user.get("emailAddress")) + if identifier: + entity["identifier"] = 
sanitize_identifier(identifier) await add_entity_to_port(blueprint_id=blueprint_id, entity_object=entity) + async def process_project_entities(projects_data: list[dict[str, Any]]): blueprint_id = "bitbucketProject" for project in projects_data: entity = { - "identifier": project.get("key"), "title": project.get("name"), "properties": { "description": project.get("description"), @@ -286,8 +401,12 @@ async def process_project_entities(projects_data: list[dict[str, Any]]): }, "relations": {}, } + identifier = str(project.get("key")) + if identifier: + entity["identifier"] = sanitize_identifier(identifier) await add_entity_to_port(blueprint_id=blueprint_id, entity_object=entity) + async def process_repository_entities(repository_data: list[dict[str, Any]]): blueprint_id = "bitbucketRepository" @@ -296,7 +415,6 @@ async def process_repository_entities(repository_data: list[dict[str, Any]]): project_key=repo["project"]["key"], repo_slug=repo["slug"] ) entity = { - "identifier": repo.get("slug"), "title": repo.get("name"), "properties": { "description": repo.get("description"), @@ -314,47 +432,67 @@ async def process_repository_entities(repository_data: list[dict[str, Any]]): .get("emailAddress"), ), } + identifier = str(repo.get("slug")) + if identifier: + entity["identifier"] = sanitize_identifier(identifier) await add_entity_to_port(blueprint_id=blueprint_id, entity_object=entity) + async def process_pullrequest_entities(pullrequest_data: list[dict[str, Any]]): blueprint_id = "bitbucketPullrequest" for pr in pullrequest_data: entity = { - "identifier": str(pr.get("id")), "title": pr.get("title"), "properties": { "created_on": convert_to_datetime(pr.get("createdDate")), "updated_on": convert_to_datetime(pr.get("updatedDate")), + "mergedAt": convert_to_datetime(pr.get("closedDate", 0)), "merge_commit": pr.get("fromRef", {}).get("latestCommit"), "description": pr.get("description"), "state": pr.get("state"), - "owner": pr.get("author", {}).get("user", 
{}).get("displayName"), + "owner": pr.get("author", {}).get("user", {}).get("emailAddress"), "link": pr.get("links", {}).get("self", [{}])[0].get("href"), "destination": pr.get("toRef", {}).get("displayId"), "reviewers": [ - user.get("user", {}).get("displayName") for user in pr.get("reviewers", []) + reviewer_email + for reviewer in pr.get("reviewers", []) + if (reviewer_email := reviewer.get("user", {}).get("emailAddress")) ], "source": pr.get("fromRef", {}).get("displayId"), }, "relations": { "repository": pr["toRef"]["repository"]["slug"], - "participants": [pr.get("author", {}).get("user", {}).get("emailAddress")] - + [user.get("user", {}).get("emailAddress") for user in pr.get("participants", [])], + "participants": [ + email + for email in [ + pr.get("author", {}).get("user", {}).get("emailAddress") + ] + + [ + user.get("user", {}).get("emailAddress", "") + for user in pr.get("participants", []) + ] + if email + ], }, } + identifier = str(pr.get("id")) + if identifier: + entity["identifier"] = sanitize_identifier(identifier) await add_entity_to_port(blueprint_id=blueprint_id, entity_object=entity) + async def get_repository_readme(project_key: str, repo_slug: str) -> str: file_path = f"projects/{project_key}/repos/{repo_slug}/browse/README.md" readme_content = "" async for readme_file_batch in get_paginated_resource( - path=file_path, page_size=500, full_response=True + path=file_path, page_size=500, full_response=True ): file_content = parse_repository_file_response(readme_file_batch) readme_content += file_content return readme_content + async def get_latest_commit(project_key: str, repo_slug: str) -> dict[str, Any]: try: commit_path = f"projects/{project_key}/repos/{repo_slug}/commits" @@ -367,7 +505,8 @@ async def get_latest_commit(project_key: str, repo_slug: str) -> dict[str, Any]: logger.error(f"Error fetching latest commit for repo {repo_slug}: {e}") return {} -async def get_repositories(project: dict[str, Any]): + +async def 
get_repositories(project: dict[str, Any], port_webhook_url: str): repositories_path = f"projects/{project['key']}/repos" async for repositories_batch in get_paginated_resource(path=repositories_path): logger.info( @@ -384,21 +523,38 @@ async def get_repositories(project: dict[str, Any]): for repo in repositories_batch ] ) - + if IS_VERSION_8_7_OR_OLDER: + [ + await get_or_create_bitbucket_webhook( + project_key=project["key"], + repo_key=repo["slug"], + webhook_url=port_webhook_url, + events=WEBHOOK_EVENTS, + ) + for repo in repositories_batch + ] await get_repository_pull_requests(repository_batch=repositories_batch) + async def get_repository_pull_requests(repository_batch: list[dict[str, Any]]): - pr_params = {"state": "ALL"} ## Fetch all pull requests + global PULL_REQUEST_STATE for repository in repository_batch: pull_requests_path = f"projects/{repository['project']['key']}/repos/{repository['slug']}/pull-requests" + if PULL_REQUEST_STATE not in VALID_PULL_REQUEST_STATES: + logger.warning( + f"Invalid PULL_REQUEST_STATE '{PULL_REQUEST_STATE}' provided. Defaulting to 'OPEN'." 
+ ) + PULL_REQUEST_STATE = "OPEN" async for pull_requests_batch in get_paginated_resource( - path=pull_requests_path, params=pr_params + path=pull_requests_path, + params={"state": PULL_REQUEST_STATE}, ): logger.info( f"received pull requests batch with size {len(pull_requests_batch)} from repo: {repository['slug']}" ) await process_pullrequest_entities(pullrequest_data=pull_requests_batch) + async def main(): logger.info("Starting Bitbucket data extraction") async for users_batch in get_paginated_resource(path="admin/users"): @@ -407,7 +563,11 @@ async def main(): project_path = "projects" if BITBUCKET_PROJECTS_FILTER: - projects = [await get_single_project(key) for key in BITBUCKET_PROJECTS_FILTER] + + async def filtered_projects_generator(): + yield [await get_single_project(key) for key in BITBUCKET_PROJECTS_FILTER] + + projects = filtered_projects_generator() else: projects = get_paginated_resource(path=project_path) @@ -420,16 +580,17 @@ async def main(): await process_project_entities(projects_data=projects_batch) for project in projects_batch: - await get_repositories(project=project) - await get_or_create_project_webhook( - project_key=project["key"], - webhook_url=port_webhook_url, - events=WEBHOOK_EVENTS, - ) - + await get_repositories(project=project, port_webhook_url=port_webhook_url) + if not IS_VERSION_8_7_OR_OLDER: + await get_or_create_bitbucket_webhook( + project_key=project["key"], + webhook_url=port_webhook_url, + events=WEBHOOK_EVENTS, + ) logger.info("Bitbucket data extraction completed") await client.aclose() + if __name__ == "__main__": asyncio.run(main()) ``` diff --git a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_user_blueprint.mdx b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_user_blueprint.mdx index 3441678cd..01562cbad 100644 --- 
a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_user_blueprint.mdx +++ b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_user_blueprint.mdx @@ -27,4 +27,5 @@ "aggregationProperties": {}, "relations": {} } + ``` diff --git a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_webhook_config.mdx b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_webhook_config.mdx index fbf6368ee..7b2f1fcc2 100644 --- a/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_webhook_config.mdx +++ b/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/bitbucket-server/_example_bitbucket_webhook_config.mdx @@ -47,6 +47,7 @@ "link": ".body.pullRequest.links.self[0].href", "destination": ".body.pullRequest.toRef.displayId", "source": ".body.pullRequest.fromRef.displayId", + "mergedAt": ".body.pullRequest.closedDate | (tonumber / 1000 | strftime(\"%Y-%m-%dT%H:%M:%SZ\"))", "reviewers": "[.body.pullRequest.reviewers[].user.emailAddress]" }, "relations": { diff --git a/docs/build-your-software-catalog/custom-integration/webhook/webhook.md b/docs/build-your-software-catalog/custom-integration/webhook/webhook.md index c4a8c4319..3cf796fb4 100644 --- a/docs/build-your-software-catalog/custom-integration/webhook/webhook.md +++ b/docs/build-your-software-catalog/custom-integration/webhook/webhook.md @@ -636,6 +636,14 @@ The maximum size of the webhook payload is **512KiB**. Reaching this limit will throw a `413 Request Entity Too Large` HTTP error. ::: +### Custom webhook HTTP response + +Custom webhook endpoints provided by Port perform their processing in an asynchronous manner. 
+ +Most 3rd party providers expect a webhook endpoint to provide a correct response after a short period of time. + +In order to comply with the expectation from custom webhook endpoints, when you or your 3rd party provider makes a request to your custom webhook, you will see a **`202 ACCEPTED`** status code returned in the response. This is the expected behavior and it means that the payload sent to your custom webhook has been received and will undergo processing in the background, after which it will appear in your catalog as determined by your mapping configuration. + ## Examples Refer to the [examples](./examples/examples.md) page for practical configurations and their corresponding blueprint definitions. diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/datadog.md b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/datadog.md index 348e34bf8..d7aff6784 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/datadog.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/datadog.md @@ -3,7 +3,6 @@ import TabItem from "@theme/TabItem" import PortTooltip from "/src/components/tooltip/tooltip.jsx" import Prerequisites from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/\_ocean_helm_prerequisites_block.mdx" import AzurePremise from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/\_ocean_azure_premise.mdx" -import HelmParameters from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/\_ocean-advanced-parameters-helm.mdx" import DockerParameters from "./\_datadog_one_time_docker_parameters.mdx" import AdvancedConfig from '/docs/generalTemplates/\_ocean_advanced_configuration_note.md' import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" @@ -11,6 +10,8 @@ import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-t import 
DatadogBlueprint from "../resources/datadog/\_example_datadog_alert_blueprint.mdx"; import DatadogConfiguration from "../resources/datadog/\_example_datadog_webhook_configuration.mdx" import DatadogMicroserviceBlueprint from "../resources/datadog/\_example_datadog_microservice.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Datadog @@ -45,7 +46,7 @@ Choose one of the following installation methods: - + Using this installation option means that the integration will be able to update Port in real time using webhooks. @@ -53,49 +54,20 @@ Using this installation option means that the integration will be able to update -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Example | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | ------- | -| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `integration.secrets.datadogApiKey` | Datadog API key, docs can be found [here](https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token) | | ✅ | -| `integration.secrets.datadogApplicationKey` | Datadog application key, docs can be found [here](https://docs.datadoghq.com/account_management/api-app-keys/#add-application-keys) | | ✅ | -| 
`integration.config.datadogBaseUrl` | The base Datadog host. Defaults to https://api.datadoghq.com. If in EU, use https://api.datadoghq.eu | | ✅ | -| `integration.secrets.datadogWebhookToken` | Datadog webhook token. Learn [more](https://docs.datadoghq.com/integrations/webhooks/#setup) | | ❌ | -| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Datadog | https://my-ocean-integration.com | ✅ | - +For details about the available parameters for the installation, see the table below. -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-datadog-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set scheduledResyncInterval=60 \ - --set integration.identifier="my-datadog-integration" \ - --set integration.type="datadog" \ - --set integration.eventListener.type="POLLING" \ - --set integration.config.datadogBaseUrl="https://api.datadoghq.com" \ - --set integration.secrets.datadogApiKey="" \ - --set integration.secrets.datadogApplicationKey="" -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-datadog-integration` in your git repository with the content: @@ -184,34 +156,56 @@ kubectl apply -f my-ocean-datadog-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Example | Required | +|---------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|----------| +| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `integration.secrets.datadogApiKey` | Datadog API key, docs can be found [here](https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token) | | ✅ | +| `integration.secrets.datadogApplicationKey` | Datadog application key, docs can be found [here](https://docs.datadoghq.com/account_management/api-app-keys/#add-application-keys) | | ✅ | +| `integration.config.datadogBaseUrl` | The base Datadog host. Defaults to https://api.datadoghq.com. If in EU, use https://api.datadoghq.eu | | ✅ | +| `integration.secrets.datadogWebhookToken` | Datadog webhook token. Learn [more](https://docs.datadoghq.com/integrations/webhooks/#setup) | | ❌ | +| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Datadog | https://my-ocean-integration.com | ✅ | +| `integration.eventListener.type` | The event listener type. 
Read more about [event listeners](https://ocean.getport.io/framework/features/event-listener) | | ✅ | +| `integration.type` | The integration to be installed | | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync. When not set the integration will resync for each event listener resync event. Read more about [scheduledResyncInterval](https://ocean.getport.io/develop-an-integration/integration-configuration/#scheduledresyncinterval---run-scheduled-resync) | | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | | ❌ | + + +
+
- - -This workflow will run the Datadog integration once and then exit, this is useful for **scheduled** ingestion of data. +This workflow/pipeline will run the Datadog integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning Realtime updates in Port +:::warning Real-time updates If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): -| Parameter | Description | Example | Required | -|----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------|----------| -| `port_client_id` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) id | | ✅ | -| `port_client_secret` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) secret | | ✅ | -| `port_base_url` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `config -> datadog_base_url` | US: https://api.datadoghq.com EU: https://api.datadoghq.eu | | ✅ | -| `config -> datadog_api_key` | Datadog API key, docs can be found [here](https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token) | | ✅ | -| `config -> datadog_application_key` | Datadog application key, docs can be found 
[here](https://docs.datadoghq.com/account_management/api-app-keys/#add-application-keys) | | ✅ | -| `config -> datadog_webhook_token` | Datadog webhook token. Learn [more](https://docs.datadoghq.com/integrations/webhooks/#setup) | | ❌ | -| `initialize_port_resources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | -| `identifier` | The identifier of the integration that will be installed | | ❌ | -| `version` | The version of the integration that will be installed | latest | ❌ |` +| Parameter | Description | Example | Required | +|-------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|----------| +| `port_client_id` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) id | | ✅ | +| `port_client_secret` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) secret | | ✅ | +| `port_base_url` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `config -> datadog_base_url` | US: https://api.datadoghq.com EU: https://api.datadoghq.eu | | ✅ | +| `config -> datadog_api_key` | Datadog API key, docs can be found [here](https://docs.datadoghq.com/account_management/api-app-keys/#add-an-api-key-or-client-token) | | ✅ | +| `config -> datadog_application_key` | Datadog application key, docs can be found 
[here](https://docs.datadoghq.com/account_management/api-app-keys/#add-application-keys) | | ✅ | +| `config -> datadog_webhook_token` | Datadog webhook token. Learn [more](https://docs.datadoghq.com/integrations/webhooks/#setup) | | ❌ | +| `initialize_port_resources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | +| `identifier` | The identifier of the integration that will be installed | | ❌ | +| `version` | The version of the integration that will be installed | latest | ❌ |`
@@ -252,15 +246,10 @@ jobs:
-This pipeline will run the Datadog integration once and then exit, this is useful for **scheduled** ingestion of data. - :::tip Tip for Jenkins agent Your Jenkins agent should be able to run docker commands. ::: -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -312,10 +301,9 @@ pipeline { ``` - - + @@ -357,13 +345,7 @@ steps: ``` - -This workflow will run the Datadog integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -431,7 +413,533 @@ The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/m To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. -Additional examples of blueprints and the relevant integration configurations can be found on the datadog [examples page](example.md) +Examples of blueprints and the relevant integration configurations can be found on the datadog [examples page](examples.md) + +## Let's Test It + +This section includes a sample response data from Datadog. In addition, it includes the entity created from the resync event based on the Ocean configuration provided in the previous section. 
+ +### Payload + +Here is an example of the payload structure from Datadog: + +
+ Monitor response data + +```json showLineNumbers +{ + "id":15173866, + "org_id":1000147697, + "type":"query alert", + "name":"A change @webhook-PORT", + "message":"A change has happened", + "tags":[ + "app:webserver" + ], + "query":"change(avg(last_5m),last_1h):avg:datadog.agent.running{local} by {version,host} > 40", + "options":{ + "thresholds":{ + "critical":40.0, + "warning":30.0 + }, + "notify_audit":false, + "include_tags":true, + "new_group_delay":60, + "notify_no_data":false, + "timeout_h":0, + "silenced":{ + } + }, + "multi":true, + "created_at":1706707941000, + "created":"2024-01-31T13:32:21.270116+00:00", + "modified":"2024-02-02T16:31:40.516062+00:00", + "deleted":"None"[ + "REDACTED" + ], + "restricted_roles":"None"[ + "REDACTED" + ], + "priority":5, + "overall_state_modified":"2024-03-08T20:52:46+00:00", + "overall_state":"No Data", + "creator":{ + "name":"John Doe", + "email":"john.doe@gmail.com", + "handle":"john.doe@gmail.com", + "id":1001199545 + }, + "matching_downtimes":[ + ] +} +``` + +
+ +
+Service response data + +```json showLineNumbers +{ + "type":"service-definition", + "id":"04fbab48-a233-4592-8c53-d1bfe282e6c3", + "attributes":{ + "meta":{ + "last-modified-time":"2024-05-29T10:31:06.833444245Z", + "github-html-url":"", + "ingestion-source":"api", + "origin":"unknown", + "origin-detail":"", + "warnings":[ + { + "keyword-location":"/properties/integrations/properties/opsgenie/properties/service-url/pattern", + "instance-location":"/integrations/opsgenie/service-url", + "message":"does not match pattern '^(https?://)?[a-zA-Z\\\\d_\\\\-.]+\\\\.opsgenie\\\\.com/service/([a-zA-Z\\\\d_\\\\-]+)/?$'" + }, + { + "keyword-location":"/properties/integrations/properties/pagerduty/properties/service-url/pattern", + "instance-location":"/integrations/pagerduty/service-url", + "message":"does not match pattern '^(https?://)?[a-zA-Z\\\\d_\\\\-.]+\\\\.pagerduty\\\\.com/service-directory/(P[a-zA-Z\\\\d_\\\\-]+)/?$'" + } + ], + "ingested-schema-version":"v2.1" + }, + "schema":{ + "schema-version":"v2.2", + "dd-service":"inventory-management", + "team":"Inventory Management Team", + "application":"Inventory System", + "tier":"Tier 1", + "description":"Service for managing product inventory and stock levels.", + "lifecycle":"production", + "contacts":[ + { + "name":"Inventory Team", + "type":"email", + "contact":"inventory-team@example.com" + }, + { + "name":"Warehouse Support", + "type":"email", + "contact":"warehouse-support@example.com" + } + ], + "links":[ + { + "name":"Repository", + "type":"repo", + "provider":"GitHub", + "url":"https://github.com/example/inventory-service" + }, + { + "name":"Runbook", + "type":"runbook", + "provider":"Confluence", + "url":"https://wiki.example.com/runbooks/inventory-service" + } + ], + "tags":[ + "inventory", + "stock" + ], + "integrations":{ + "pagerduty":{ + "service-url":"https://pagerduty.com/services/inventory" + }, + "opsgenie":{ + "service-url":"https://opsgenie.com/services/inventory", + "region":"US" + } + }, + 
"extensions":{ + "qui_6":{ + + } + } + } + } + } +} +``` + +
+ +
+ SLO response data + +```json showLineNumbers +{ + "id":"b6869ae6189d59baa421feb8b437fe9e", + "name":"Availability SLO for shopping-cart service", + "tags":[ + "service:shopping-cart", + "env:none" + ], + "monitor_tags":[ + + ], + "thresholds":[ + { + "timeframe":"7d", + "target":99.9, + "target_display":"99.9" + } + ], + "type":"monitor", + "type_id":0, + "description":"This SLO tracks the availability of the shopping-cart service. Availability is measured as the number of successful requests divided by the number of total requests for the service", + "timeframe":"7d", + "target_threshold":99.9, + "monitor_ids":[ + 15173866, + 15216083, + 15254771 + ], + "creator":{ + "name":"John Doe", + "handle":"john.doe@gmail.com", + "email":"john.doe@gmail.com" + }, + "created_at":1707215619, + "modified_at":1707215619 +} +``` + +
+ +
+ + SLO history response data + +```json showLineNumbers +{ + "thresholds": { + "7d": { + "timeframe": "7d", + "target": 99, + "target_display": "99." + } + }, + "from_ts": 1719254776, + "to_ts": 1719859576, + "type": "monitor", + "type_id": 0, + "slo": { + "id": "5ec82408e83c54b4b5b2574ee428a26c", + "name": "Host {{host.name}} with IP {{host.ip}} is not having enough memory", + "tags": [ + "p69hx03", + "pages-laptop" + ], + "monitor_tags": [], + "thresholds": [ + { + "timeframe": "7d", + "target": 99, + "target_display": "99." + } + ], + "type": "monitor", + "type_id": 0, + "description": "Testing SLOs from DataDog", + "timeframe": "7d", + "target_threshold": 99, + "monitor_ids": [ + 147793 + ], + "creator": { + "name": "John Doe", + "handle": "janesmith@gmail.com", + "email": "janesmith@gmail.com" + }, + "created_at": 1683878238, + "modified_at": 1684773765 + }, + "overall": { + "name": "Host {{host.name}} with IP {{host.ip}} is not having enough memory", + "preview": false, + "monitor_type": "query alert", + "monitor_modified": 1683815332, + "errors": null, + "span_precision": 2, + "history": [ + [ + 1714596313, + 1 + ] + ], + "uptime": 3, + "sli_value": 10, + "precision": { + "custom": 2, + "7d": 2 + }, + "corrections": [], + "state": "breached" + } +} +``` + +
+ +
+ + Service metric response data + +:::tip Response Enrichment +The Datadog response is enriched with a variety of metadata fields, including: + +- `__service`: The name or identifier of the service generating the data. +- `__query_id`: A unique identifier for the query that generated the data. +- `__query`: The original query used to retrieve the data. +- `__env`: The environment associated with the data (e.g., production, staging). + +This enrichment significantly enhances the usability of the Datadog response by providing valuable context and facilitating easier analysis and troubleshooting. +::: + +```json showLineNumbers +{ + "status": "ok", + "res_type": "time_series", + "resp_version": 1, + "query": "avg:system.mem.used{service:inventory-management,env:staging}", + "from_date": 1723796537000, + "to_date": 1723797137000, + "series": [ + { + "unit": [ + { + "family": "bytes", + "id": 2, + "name": "byte", + "short_name": "B", + "plural": "bytes", + "scale_factor": 1.0 + } + ], + "query_index": 0, + "aggr": "avg", + "metric": "system.mem.used", + "tag_set": [], + "expression": "avg:system.mem.used{env:staging,service:inventory-management}", + "scope": "env:staging,service:inventory-management", + "interval": 2, + "length": 39, + "start": 1723796546000, + "end": 1723797117000, + "pointlist": [ + [1723796546000.0, 528986112.0], + [1723796562000.0, 531886080.0], + [1723796576000.0, 528867328.0], + [1723796592000.0, 522272768.0], + [1723796606000.0, 533704704.0], + [1723796846000.0, 533028864.0], + [1723796862000.0, 527417344.0], + [1723796876000.0, 531513344.0], + [1723796892000.0, 533577728.0], + [1723796906000.0, 533471232.0], + [1723796922000.0, 528125952.0], + [1723796936000.0, 530542592.0], + [1723796952000.0, 530767872.0], + [1723796966000.0, 526966784.0], + [1723796982000.0, 528560128.0], + [1723796996000.0, 530792448.0], + [1723797012000.0, 527384576.0], + [1723797026000.0, 529534976.0], + [1723797042000.0, 521650176.0], + [1723797056000.0, 531001344.0], + 
[1723797072000.0, 525955072.0], + [1723797086000.0, 529469440.0], + [1723797102000.0, 532279296.0], + [1723797116000.0, 526979072.0] + ], + "display_name": "system.mem.used", + "attributes": {} + } + ], + "values": [], + "times": [], + "message": "", + "group_by": [], + // highlight-start + "__service": "inventory-management", + "__query_id": "avg:system.mem.used/service:inventory-management/env:staging", + "__query": "avg:system.mem.used", + "__env": "staging" + // highlight-end +} +``` + +
+ +### Mapping Result + +The combination of the sample payload and the Ocean configuration generates the following Port entity: + +
+ Monitor entity in Port + +```json showLineNumbers +{ + "identifier": "15173866", + "title": "A change @webhook-PORT", + "icon": "Datadog", + "blueprint": "datadogMonitor", + "team": [], + "properties": { + "tags": [ + "app:webserver" + ], + "overallState": "No Data", + "priority": "5", + "createdAt": "2024-01-31T13:32:21.270116+00:00", + "updatedAt": "2024-02-02T16:31:40.516062+00:00", + "createdBy": "john.doe@gmail.com", + "monitorType": "query alert" + }, + "relations": {}, + "createdAt": "2024-05-29T09:43:34.750Z", + "createdBy": "", + "updatedAt": "2024-05-29T09:43:34.750Z", + "updatedBy": "" +} +``` + +
+ +
+Service entity in Port + +```json showLineNumbers +{ + "identifier": "inventory-management", + "title": "inventory-management", + "icon": "Datadog", + "blueprint": "datadogService", + "team": [], + "properties": { + "owners": [ + "inventory-team@example.com", + "warehouse-support@example.com" + ], + "links": [ + "https://github.com/example/inventory-service", + "https://wiki.example.com/runbooks/inventory-service" + ], + "description": "Service for managing product inventory and stock levels.", + "tags": [ + "inventory", + "stock" + ], + "application": "Inventory System" + }, + "relations": {}, + "createdAt": "2024-05-29T10:31:44.283Z", + "createdBy": "", + "updatedAt": "2024-05-29T10:31:44.283Z", + "updatedBy": "" +} +``` + +
+ +
+SLO entity in Port + +```json showLineNumbers +{ + "identifier": "b6869ae6189d59baa421feb8b437fe9e", + "title": "Availability SLO for shopping-cart service", + "icon": "Datadog", + "blueprint": "datadogSlo", + "team": [], + "properties": { + "description": "This SLO tracks the availability of the shopping-cart service. Availability is measured as the number of successful requests divided by the number of total requests for the service", + "updatedAt": "2024-02-06T10:33:39Z", + "createdBy": "ahosea15@gmail.com", + "sloType": "monitor", + "targetThreshold": "99.9", + "tags": [ + "service:shopping-cart", + "env:none" + ], + "createdAt": "2024-02-06T10:33:39Z" + }, + "relations": { + "monitors": [ + "15173866", + "15216083", + "15254771" + ], + "services": [ + "shopping-cart" + ] + }, + "createdAt": "2024-05-29T09:43:51.946Z", + "createdBy": "", + "updatedAt": "2024-05-29T12:02:01.559Z", + "updatedBy": "" +} +``` + +
+ +
+SLO history entity in Port + +```json showLineNumbers +{ + "identifier": "5ec82408e83c54b4b5b2574ee428a26c", + "title": "Host {{host.name}} with IP {{host.ip}} is not having enough memory", + "icon": "Datadog", + "blueprint": "datadogSloHistory", + "team": [], + "properties": { + "sampling_end_date": "2024-07-01T18:46:16Z", + "sliValue": 10, + "sampling_start_date": "2024-06-24T18:46:16Z" + }, + "relations": { + "slo": "5ec82408e83c54b4b5b2574ee428a26c" + }, + "createdAt": "2024-07-01T09:43:51.946Z", + "createdBy": "", + "updatedAt": "2024-07-01T12:02:01.559Z", + "updatedBy": "" +} +``` + +
+ +
+Service metric entity in Port + +```json showLineNumbers +{ + "identifier": "avg:system.disk.used/service:inventory-management/env:prod", + "title": "avg:system.disk.used{service:inventory-management,env:prod}", + "icon": null, + "blueprint": "datadogServiceMetric", + "team": [], + "properties": { + "query": "avg:system.disk.used", + "series": [], + "res_type": "time_series", + "from_date": "2024-08-16T07:32:00Z", + "to_date": "2024-08-16T08:02:00Z", + "env": "prod" + }, + "relations": { + "service": "inventory-management" + }, + "createdAt": "2024-08-15T15:54:36.638Z", + "createdBy": "", + "updatedAt": "2024-08-16T08:02:02.399Z", + "updatedBy": "" +} +``` + +
## Relevant Guides diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/example.md b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/examples.md similarity index 97% rename from docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/example.md rename to docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/examples.md index d7c27e94e..2dbe71d25 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/example.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/datadog/examples.md @@ -410,6 +410,12 @@ Based on the [best practices for tagging infrastructure](https://www.datadoghq.c
Integration configuration +:::tip Configuration Options +The SLO history selector supports two time-related configurations: +- `timeframe`: How many days to look back for each SLO history data point. Must be greater than 0 (default: 7 days) +- `periodOfTimeInMonths`: How far back in time to fetch SLO history. Must be between 1-12 months (default: 6 months) +::: + ```yaml showLineNumbers createMissingRelatedEntities: true deleteDependentEntities: true @@ -417,7 +423,8 @@ resources: - kind: sloHistory selector: query: 'true' - sampleIntervalPeriodInDays: 7 + timeframe: 7 + periodOfTimeInMonths: 6 port: entity: mappings: @@ -433,10 +440,6 @@ resources: slo: .slo.id ``` -:::tip Service Relation -Based on the [best practices for tagging infrastructure](https://www.datadoghq.com/blog/tagging-best-practices/), the default JQ maps SLOs to services using tags that starts with the `service` keyword -::: -
## Service Metric diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/dynatrace/dynatrace.md b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/dynatrace/dynatrace.md index 36087a8d9..e557a4792 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/dynatrace/dynatrace.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/dynatrace/dynatrace.md @@ -6,18 +6,42 @@ import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-t import DynatraceProblemBlueprint from "/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/resources/dynatrace/\_example_dynatrace_problem_blueprint.mdx"; import DynatraceProblemConfiguration from "/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/resources/dynatrace/\_example_dynatrace_problem_webhook_configuration.mdx" import DynatraceMicroserviceBlueprint from "/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/resources/dynatrace/\_example_dynatrace_microservice_blueprint.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" +import Prerequisites from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_helm_prerequisites_block.mdx" # Dynatrace -Port's Dynatrace integration allows you to import `problem`, `slo` -and `entity` resources from your Dynatrace instance into Port, according to your mapping and definition. +Port's Dynatrace integration allows you to model Dynatrace resources in your software catalog and ingest data into them. -## Common use cases +## Overview -- Map your monitored entities, problems and SLOs in Dynatrace. -- Watch for object changes (create/update) in real-time, and automatically apply the changes to your entities in Port. 
+This integration allows you to: -## Installation +- Map and organize your desired Dynatrace resources and their metadata in Port (see supported resources below). +- Watch for Dynatrace object changes (create/update/delete) in real-time, and automatically apply the changes to your software catalog. + +### Supported Resources + +The resources that can be ingested from Dynatrace into Port are listed below. It is possible to reference any field that appears in the API responses linked below for the mapping configuration. + +- [`problem`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/problems-v2/problems/get-problems-list#definition--Problem) +- [`entity`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/entity-v2/get-entities-list#definition--Entity) +- [`slo`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/service-level-objectives/get-all#definition--SLO) +- [`entity types`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/entity-v2/get-entity-type#definition--EntityType) for selectors in the `entity` resource. + + +## Prerequisites + +### Generate a Dynatrace API key + +1. Navigate to `/ui/apps/dynatrace.classic.tokens/ui/access-tokens`. For example, if you access your Dynatrace instance at `https://npm82883.apps.dynatrace.com`, you should navigate to `https://npm82883.apps.dynatrace.com/ui/apps/dynatrace.classic.tokens/ui/access-tokens`. +2. Click **Generate new token** to create a new token. Ensure the permissions: `DataExport`, `Read entities`, `Read problems` and `Read SLO` are assigned to the token. The `DataExport` permission allows Dynatrace to perform healthchecks before ingestion starts. + +### Construct your Dynatrace Host URL +Your Dynatrace host URL should be `https://.live.dynatrace.com`. Note that there is a difference between the instance URL and the API host URL. The former contains `apps` while the latter (as shown prior) uses `live`. 
This means if your environment ID is `npm82883`, your API host URL should be `https://npm82883.live.dynatrace.com`. + + +## Setup Choose one of the following installation methods: @@ -29,52 +53,28 @@ Choose one of the following installation methods:
- + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Required | -| ------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -------- | -| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | ✅ | -| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.secrets.dynatraceApiKey` | API Key for Dynatrace instance | ✅ | -| `integration.config.dynatraceHostUrl` | The API URL of the Dynatrace instance | ✅ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | +

Prerequisites

+ + -
+ +For details about the available parameters for the installation, see the table below. -To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-dynatrace-integration port-labs/port-ocean \ - --set port.clientId="CLIENT_ID" \ - --set port.clientSecret="CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set scheduledResyncInterval=60 \ - --set integration.identifier="my-dynatrace-integration" \ - --set integration.type="dynatrace" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.dynatraceApiKey="" \ - --set integration.config.dynatraceHostUrl="" -``` + + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-dynatrace-integration` in your git repository with the content: @@ -159,28 +159,46 @@ kubectl apply -f my-ocean-dynatrace-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Required | +|---------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | ✅ | +| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `integration.secrets.dynatraceApiKey` | API Key for Dynatrace instance, docs can be found [here](https://docs.dynatrace.com/docs/discover-dynatrace/references/dynatrace-api/basics/dynatrace-api-authentication) | ✅ | +| `integration.config.dynatraceHostUrl` | The API URL of the Dynatrace instance | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | + +
- - - -This workflow will run the Dynatrace integration once and then exit, this is useful for **scheduled** ingestion of data. + -:::warning +This workflow/pipeline will run the Dynatrace integration once and then exit, this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates If you want the integration to update Port in real time using webhooks, use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): -| Parameter | Description | Required | -| ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | -------- | -| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_API_KEY` | The Dynatrace API key | ✅ | -| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_HOST_URL` | The Dynatrace API host URL | ✅ | -| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port App config Mapping | ❌ | -| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | -| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | -| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | -| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| Parameter | Description | Required | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_API_KEY` | The Dynatrace API key , docs can be found 
[here](https://docs.dynatrace.com/docs/discover-dynatrace/references/dynatrace-api/basics/dynatrace-api-authentication) | ✅ | +| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_HOST_URL` | The Dynatrace API host URL | ✅ | +| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port App config Mapping | ❌ | +| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | +| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | +| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | +| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ |
@@ -214,15 +232,10 @@ jobs:
-This pipeline will run the Dynatrace integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -282,19 +295,18 @@ pipeline { ``` - - - + + | Parameter | Description | Required | -| ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | -------- | -| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_API_KEY` | The Dynatrace API key | ✅ | -| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_HOST_URL` | The Dynatrace API host URL | ✅ | -| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port App config Mapping | ❌ | -| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | -| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | -| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | -| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +|--------------------------------------------------|--------------------------------------------------------------------------------------------------------------------|----------| +| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_API_KEY` | The Dynatrace API key | ✅ | +| `OCEAN__INTEGRATION__CONFIG__DYNATRACE_HOST_URL` | The Dynatrace API host URL | ✅ | +| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port 
App config Mapping | ❌ | +| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | +| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | +| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | +| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ |
@@ -336,13 +348,7 @@ steps: ```
- - -This workflow will run the Dynatrace integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: + Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -407,128 +413,12 @@ ingest_data:
-### Generating Dynatrace API key -1. Navigate to `/ui/apps/dynatrace.classic.tokens/ui/access-tokens`. For example, if you access your Dynatrace instance at `https://npm82883.apps.dynatrace.com`, you should navigate to `https://npm82883.apps.dynatrace.com/ui/apps/dynatrace.classic.tokens/ui/access-tokens`. -2. Click **Generate new token** to create a new token. Ensure the permissions: `DataExport`, `Read entities`, `Read problems` and `Read SLO` are assigned to the token. The `DataExport` permission allows Dynatrace to perform healthchecks before ingestion starts. +## Configuration -### Constructing Dynatrace Host URL -Your Dynatrace host URL should be `https://.live.dynatrace.com`. Note that there is a difference between the instance URL and the API host URL. The former contains `apps` while the latter (as shown prior) uses `live`. This means if your environment ID is `npm82883`, your API host URL should be `https://npm82883.live.dynatrace.com`. - - -## Ingesting Dynatrace objects - -The Dynatrace integration uses a YAML configuration to describe the process of loading data into the developer portal. - -Here is an example snippet from the config which demonstrates the process for getting `entity` data from Dynatrace: - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: entity - selector: - query: "true" - port: - entity: - mappings: - identifier: .entityId - title: .displayName - blueprint: '"dynatraceEntity"' - properties: - firstSeen: ".firstSeenTms / 1000 | todate" - lastSeen: ".lastSeenTms / 1000 | todate" - type: .type - tags: .tags[].stringRepresentation -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from Dynatrace's API events. 
- -### Configuration structure - -The integration configuration determines which resources will be queried from Dynatrace, and which entities and properties will be created in Port. - -:::tip Supported resources -The following resources can be used to map data from Dynatrace, it is possible to reference any field that appears in the API responses linked below for the mapping configuration. - -- [`problem`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/problems-v2/problems/get-problems-list#definition--Problem) -- [`entity`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/entity-v2/get-entities-list#definition--Entity) -- [`slo`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/service-level-objectives/get-all#definition--SLO) -- [`entity types`](https://docs.dynatrace.com/docs/dynatrace-api/environment-api/entity-v2/get-entity-type#definition--EntityType) for selectors in the `entity` resource. - -::: - -- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: entity - selector: - ... - ``` - -- The `kind` key is a specifier for an Dynatrace object: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: entity - selector: - ... - ``` - -- The `selector` and the `query` keys allow you to filter which objects of the specified `kind` will be ingested into your software catalog: - - ```yaml showLineNumbers - resources: - - kind: entity - # highlight-start - selector: - query: "true" # JQ boolean expression. If evaluated to false - this object will be skipped. - entityTypes: ["APPLICATION", "SERVICE"] # An optional list of entity types to filter by. If not specified, defaults to ["APPLICATION", "SERVICE"]. - # highlight-end - port: - ``` - -- The `port`, `entity` and the `mappings` keys are used to map the Dynatrace object fields to Port entities. 
To create multiple mappings of the same kind, you can add another item in the `resources` array: - - ```yaml showLineNumbers - resources: - - kind: entity - selector: - query: "true" - entityTypes: ["APPLICATION", "SERVICE"] - port: - # highlight-start - entity: - mappings: # Mappings between one Dynatrace object to a Port entity. Each value is a JQ query. - identifier: .entityId - title: .displayName - blueprint: '"dynatraceEntity"' - properties: - firstSeen: ".firstSeenTms / 1000 | todate" - lastSeen: ".lastSeenTms / 1000 | todate" - type: .type - tags: .tags[].stringRepresentation - managementZones: .managementZones[].name - properties: .properties - fromRelationships: .fromRelationships - toRelationships: .toRelationships - # highlight-end - - kind: entity # In this instance entity is mapped again with a different filter - selector: - query: '.displayName == "MyEntityName"' - entityTypes: ["APPLICATION", "SERVICE"] - port: - entity: - mappings: ... - ``` - - :::tip Blueprint key - Note the value of the `blueprint` key - if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) - ::: +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. ### Ingest additional resource types By default, the `entity` kind ingests only entities of type `APPLICATION` and `SERVICE` due to the large number of available resources. However, you can configure the `entity` kind mapping to ingest entities of other types. 
@@ -926,48 +816,44 @@ You can retrieve a list of available resource types by using the [Dynatrace Enti -## Configuring real-time updates -Currently, the Dynatrace API lacks support for programmatic webhook creation. To set up a webhook configuration in Dynatrace for sending alert notifications to the Ocean integration, follow these steps: +## Capabilities -### Prerequisite +### Configure real-time updates -Prepare a webhook `URL` using this format: `{app_host}/integration/webhook/problem`. The `app_host` parameter should match the ingress or external load balancer where the integration will be deployed. For example, if your ingress or load balancer exposes the Dynatrace Ocean integration at `https://myservice.domain.com`, your webhook `URL` should be `https://myservice.domain.com/integration/webhook/problem`. +Currently, the Dynatrace API lacks support for programmatic webhook creation. +To set up a webhook configuration in Dynatrace for sending alert notifications to the Ocean integration, +follow these steps: -### Create a webhook in Dynatrace +:::info Webhook configuration +Prepare a webhook `URL` using this format: `{app_host}/integration/webhook/problem`. +The `app_host` parameter should match the ingress or external load balancer where the integration will be deployed. +For example, if your ingress or load balancer exposes the Dynatrace Ocean integration at `https://myservice.domain.com`, +your webhook `URL` should be `https://myservice.domain.com/integration/webhook/problem`. +::: 1. Go to Dynatrace. 2. Go to **Settings** > **Integration** > **Problem notifications**. 3. Select **Add notification**. 4. Select **Custom integration** from the available notification types. 5. Configure the notification using the following details. - 1. `Enabled` - ensure the notification is enabled. - 2. `Display name` - use a meaningful name such as Port Ocean Webhook. - 3. `Webhook URL` - enter the value of the `URL` you created above. - 4. 
Enable **Call webhook is new events merge into existing problems**.
-    5. `Custom payload` - paste the following configuration:
-        ```
-        {
-            "State":"{State}",
-            "ProblemID":"{ProblemID}",
-            "ProblemTitle":"{ProblemTitle}"
-        }
-        ```
-        You can customize to your taste, the only important thing is the `ProblemID` key. The webhook integration will not work without it.
-    6. `Alerting profile` - select the corresponding alerting profile.
-    7. Leave the rest of the fields as is.
+   1. `Enabled` - ensure the notification is enabled.
+   2. `Display name` - use a meaningful name such as Port Ocean Webhook.
+   3. `Webhook URL` - enter the value of the `URL` you created above.
+   4. Enable **Call webhook if new events merge into existing problems**.
+   5. `Custom payload` - paste the following configuration:
+      ```
+      {
+          "State":"{State}",
+          "ProblemID":"{ProblemID}",
+          "ProblemTitle":"{ProblemTitle}"
+      }
+      ```
+      You can customize it to your taste; the only important thing is the `ProblemID` key. The webhook integration will not work without it.
+   6. `Alerting profile` - select the corresponding alerting profile.
+   7. Leave the rest of the fields as is.
 6. Click **Save changes**.
 
-### Ingest data into Port
-
-To ingest Dynatrace objects using the [integration configuration](#configuration-structure), you can follow the steps below:
-
-1. Go to the DevPortal Builder page.
-2. Select a blueprint you want to ingest using Dynatrace.
-3. Choose the **Ingest Data** option from the menu.
-4. Select Dynatrace under the Incident Management category.
-5. Modify the [configuration](#configuration-structure) according to your needs.
-6. Click `Resync`.
## Examples diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/newrelic.md b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/newrelic.md index ffa4a6ca2..fd89bef64 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/newrelic.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/newrelic.md @@ -6,26 +6,32 @@ import AzurePremise from "../templates/\_ocean_azure_premise.mdx" import AdvancedConfig from '../../../generalTemplates/_ocean_advanced_configuration_note.md' import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # New Relic -Port's New Relic integration allows you to import `entities`, `issues` and `service-level` from your New Relic cloud account into Port, according to your mapping and definition. +Port's New Relic integration allows you to model New Relic resources in your software catalog and ingest data into them. -An `Entity` can be a host, an application, a service, a database, or any other component that sends data to New Relic. -An `Issue` is a group of incidents that describe the underlying problem of your symptoms. -A `Service Level` can be one of your key measurements or goals used to determine the performance of your monitored system. -## Common use cases +## Overview -- Map your monitored applications and services in New Relic with their current open alerts. -- Watch for new alerts and updates raised on your monitored applications and automatically synchronize them into Port. +This integration allows you to: -## Prerequisites +- Map and organize your desired New Relic resources and their metadata in Port (see supported resources below). 
+- Watch for New Relic object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. + + +### Supported Resources + +The resources that can be ingested from New Relic into Port are listed below. It is possible to reference any field that appears in the API responses linked below in the mapping configuration. + +- [`Entity`](https://docs.newrelic.com/docs/new-relic-solutions/new-relic-one/core-concepts/what-entity-new-relic/) +- [`Issue`](https://docs.newrelic.com/docs/alerts-applied-intelligence/new-relic-alerts/get-started/alerts-ai-overview-page/#issues) - -## Installation +## Setup Choose one of the following installation methods: @@ -37,63 +43,30 @@ Choose one of the following installation methods:
- + -Using this installation option means that the integration will be able to update Port in real time. +Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: +

Prerequisites

-| Parameter | Description | Required | -| --------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -------- | -| `port.clientId` | Your port client id | ✅ | -| `port.clientSecret` | Your port client secret | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.secrets.newRelicAPIKey` | The New Relic API key | ✅ | -| `integration.secrets.newRelicAccountID` | The New Relic account ID | ✅ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | + + + +For details about the available parameters for the installation, see the table below. -
-To install the integration using Helm, run the following command: -:::note -If you are using New Relic's EU region, add the following flag to the command: + -`--set integration.config.newRelicGraphqlURL="https://api.eu.newrelic.com/graphql"` -::: + -```bash showLineNumbers -# The following script will install an Ocean integration at your K8s cluster using helm -# initializePortResources: When set to true the integration will create default blueprints + JQ Mappings -# scheduledResyncInterval: the number of minutes between each resync -# integration.identifier: Change the identifier to describe your integration - -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-newrelic-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set scheduledResyncInterval=120 \ - --set integration.identifier="my-newrelic-integration" \ - --set integration.type="newrelic" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.newRelicAPIKey="" \ - --set integration.secrets.newRelicAccountID="" -``` - -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-newrelic-integration` in your git repository with the content: @@ -197,6 +170,23 @@ kubectl apply -f my-ocean-newrelic-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Required | +|-----------------------------------------|---------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your port client id | ✅ | +| `port.clientSecret` | Your port client secret | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `integration.secrets.newRelicAPIKey` | The New Relic API key | ✅ | +| `integration.secrets.newRelicAccountID` | The New Relic account ID | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | + +
+

Event listener

@@ -205,15 +195,17 @@ The integration uses polling to pull the configuration from Port every minute an
- - - -This workflow will run the New Relic integration once and then exit, this is useful for **scheduled** ingestion of data. + + +This workflow/pipeline will run the New Relic integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning +:::warning Real-time updates If you want the integration to update Port in real time you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -254,14 +246,10 @@ jobs: -This pipeline will run the New Relic integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -316,9 +304,8 @@ pipeline { ``` - - - + + @@ -361,13 +348,7 @@ steps: ``` - - -This workflow will run the New Relic integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: + Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -417,93 +398,19 @@ ingest_data: - - -## Ingesting Newrelic objects +## Configuration -The Newrelic integration uses a YAML configuration to describe the process of loading data into the developer portal. 
+Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party API into Port.
 
-Here is an example snippet from the config which demonstrates the process for getting `Issue` data from Newrelic:
+The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API.
 
-```yaml showLineNumbers
-resources:
-  - kind: newRelicAlert
-    selector:
-      query: "true"
-      newRelicTypes: ["ISSUE"]
-    port:
-      entity:
-        mappings:
-          blueprint: '"newRelicAlert"'
-          identifier: .issueId
-          title: .title[0]
-          properties:
-            priority: .priority
-            state: .state
-            sources: .sources
-            conditionName: .conditionName
-            alertPolicyNames: .policyName
-            activatedAt: .activatedAt
-          relations:
-            newRelicService: .__APPLICATION.entity_guids + .__SERVICE.entity_guids
-```
-
-The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from Newrelic's API events.
-
-### Configuration structure
-
-The integration configuration determines which resources will be queried from Newrelic, and which entities and properties will be created in Port.
-
-:::tip Supported resources
-The following resources can be used to map data from Newrelic, it is possible to reference any field that appears in the API responses linked below for the mapping configuration.
- -- [`Entity`](https://docs.newrelic.com/docs/new-relic-solutions/new-relic-one/core-concepts/what-entity-new-relic/) -- [`Issue`](https://docs.newrelic.com/docs/alerts-applied-intelligence/new-relic-alerts/get-started/alerts-ai-overview-page/#issues) - -::: - -- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: project - selector: - ... - ``` - -- The `kind` key is a specifier for a Newrelic object: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: project - selector: - ... - ``` - -- The `selector` key allows you to filter which objects of the specified `kind` will be ingested into your software catalog: - - ```yaml showLineNumbers - resources: - - kind: newRelicService - selector: - query: "true" - newRelicTypes: ["SERVICE", "APPLICATION"] - calculateOpenIssueCount: true - entityQueryFilter: "type in ('SERVICE','APPLICATION')" - entityExtraPropertiesQuery: | - ... on ApmApplicationEntityOutline { - guid - name - } - ``` +### Additional Configuration - **newRelicTypes** - An array of Newrelic entity types that will be fetched. The default value is ['SERVICE', 'APPLICATION']. This is related to the type field in the Newrelic entity. - **calculateOpenIssueCount:** @@ -518,40 +425,9 @@ The following resources can be used to map data from Newrelic, it is possible to - The `port`, `entity` and the `mappings` keys are used to map the Newrelic object fields to Port entities. 
To create multiple mappings of the same kind, you can add another item in the `resources` array; - ```yaml showLineNumbers - resources: - - kind: newRelicAlert - selector: - query: "true" - newRelicTypes: ["ISSUE"] - port: - # highlight-start - entity: - mappings: - blueprint: '"newRelicAlert"' - identifier: .issueId - title: .title[0] - properties: - priority: .priority - state: .state - sources: .sources - conditionName: .conditionName - alertPolicyNames: .policyName - activatedAt: .activatedAt - relations: - newRelicService: .__APPLICATION.entity_guids + .__SERVICE.entity_guids - # highlight-end - - kind: newRelicAlert # In this instance project is mapped again with a different filter - selector: - query: '.name == "MyIssuetName"' - port: - entity: - mappings: ... - ``` - - :::tip Blueprint key - Note the value of the `blueprint` key - if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) - ::: + + +## Capabilities ### Tags @@ -574,22 +450,13 @@ Some Newrelic `entities` have a property named `tags` which contains potentially ] ``` -Before mapping, this integration performs a tranformation on each `tag`, after which the example above would look like this: +Before mapping, this integration performs a transformation on each `tag`, after which the example above would look like this: ```json showLineNumbers tags = ["coreCount":"10","hostStatus":"running"] ``` -### Ingest data into Port - -To ingest Newrelic objects using the [integration configuration](#configuration-structure), you can follow the steps below: -1. Go to the DevPortal Builder page. -2. Select a blueprint you want to ingest using Newrelic. -3. Choose the **Ingest Data** option from the menu. -4. Select Newrelic under the APM & alerting category. -5. Add the contents of your [integration configuration](#configuration-structure) to the editor. -6. Click `Resync`. 
## Examples diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/sentry.md b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/sentry.md index 3e8b23e9a..5507f94ba 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/sentry.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/apm-alerting/sentry.md @@ -10,26 +10,32 @@ import SentryIssuesBluePrint from "/docs/build-your-software-catalog/custom-inte import SentryIssuesConfiguration from "/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/sentry/\_example_sentry_issue_event_webhook_configuration.mdx" import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Sentry -Port's Sentry integration allows you to import `projects`, `issues`, `project-tag` and `issue-tag` from your Sentry cloud account into Port, according to your mapping and definition. +Port's Sentry integration allows you to model Sentry resources in your software catalog and ingest data into them. -A `Project` is essentially a container for all the data and information related to a specific application or service that you want to monitor. -An `Issue` is a group of incidents that describe the underlying problem of your symptoms. +## Overview -A `Tag` is a key/value pair used to attach additional metadata to objects. This metadata can include information such as the environment, runtime, log level, and more. +This integration allows you to: -## Common use cases +- Map and organize your desired Sentry resources and their metadata in Port (see supported resources below). 
+- Watch for Sentry object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. -- Map your monitored projects and issues into Port. +### Supported Resources -## Prerequisites +The resources that can be ingested from Sentry into Port are listed below. It is possible to reference any field that appears in the API responses linked below in the mapping configuration. + +- [`Project`](https://docs.sentry.io/api/projects/list-your-projects/) +- [`Issue`](https://docs.sentry.io/api/events/list-a-projects-issues/) +- [`Project Tag`](https://docs.sentry.io/api/projects/list-a-tags-values/) +- [`Issue Tag`](https://docs.sentry.io/api/events/list-a-tags-values-related-to-an-issue/) - -## Installation +## Setup Choose one of the following installation methods: @@ -41,53 +47,29 @@ Choose one of the following installation methods: - + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Required | -| --------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -------- | -| `port.clientId` | Your port client id | ✅ | -| `port.clientSecret` | Your port client secret | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.secrets.sentryToken` | The Sentry API [token](https://docs.sentry.io/api/guides/create-auth-token/). 
The token requires `read` permissions for `Projects` and `Issue & Event` | ✅ | -| `integration.config.sentryHost` | The Sentry host. For example https://sentry.io | ✅ | -| `integration.config.sentryOrganization` | The Sentry organization slug. For example `acme` from `https://acme.sentry.io` | ✅ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | -
+

Prerequisites

+ + + +For details about the available parameters for the installation, see the table below. -To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-sentry-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set integration.identifier="my-sentry-integration" \ - --set integration.type="sentry" \ - --set integration.eventListener.type="POLLING" \ - --set integration.config.sentryHost="https://sentry.io" \ - --set integration.secrets.sentryToken="string" \ - --set integration.config.sentryOrganization="string" -``` + + + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-sentry-integration` in your git repository with the content: @@ -173,6 +155,26 @@ kubectl apply -f my-ocean-sentry-integration.yaml +This table summarizes the available parameters for the installation. +Note the parameters specific to this integration, they are last in the table. 
+ +| Parameter | Description | Required | +|-----------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your port client id | ✅ | +| `port.clientSecret` | Your port client secret | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | +| `integration.secrets.sentryToken` | The Sentry API [token](https://docs.sentry.io/api/guides/create-auth-token/). The token requires `read` permissions for `Projects` and `Issue & Event` | ✅ | +| `integration.config.sentryHost` | The Sentry host. For example https://sentry.io | ✅ | +| `integration.config.sentryOrganization` | The Sentry organization slug. For example `acme` from `https://acme.sentry.io` | ✅ | + + +
+

Event listener

@@ -181,15 +183,17 @@ The integration uses polling to pull the configuration from Port every minute an
- - - -This workflow will run the Sentry integration once and then exit, this is useful for **scheduled** ingestion of data. + -:::warning +This workflow/pipeline will run the Sentry integration once and then exit, this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates If you want the integration to update Port in real time you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -226,14 +230,10 @@ jobs: -This pipeline will run the Sentry integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -285,10 +285,9 @@ pipeline { ``` - - + @@ -332,13 +331,8 @@ steps: ``` + - -This workflow will run the Sentry integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -396,104 +390,13 @@ ingest_data: -## Ingesting Sentry objects - -The Sentry integration uses a YAML configuration to describe the process of loading data into the developer portal. 
-Here is an example snippet from the config which demonstrates the process for getting `Issue` data from Sentry: - -```yaml showLineNumbers -resources: - - kind: issue - selector: - query: "true" - port: - entity: - mappings: - identifier: ".id" - title: ".title" - blueprint: '"sentryIssue"' - properties: - link: ".permalink" - status: ".status" - isUnhandled: ".isUnhandled" - relations: - project: ".project.slug" -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from Sentry's API events. - -### Configuration structure - -The integration configuration determines which resources will be queried from Sentry, and which entities and properties will be created in Port. - -:::tip Supported resources -The following resources can be used to map data from Sentry, it is possible to reference any field that appears in the API responses linked below for the mapping configuration. - -- [`Project`](https://docs.sentry.io/api/projects/list-your-projects/) -- [`Issue`](https://docs.sentry.io/api/events/list-a-projects-issues/) -- [`Project Tag`](https://docs.sentry.io/api/projects/list-a-tags-values/) -- [`Issue Tag`](https://docs.sentry.io/api/events/list-a-tags-values-related-to-an-issue/) - - -::: - -- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: project - selector: - ... - ``` - -- The `kind` key is a specifier for a Sentry object: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: project - selector: - ... - ``` - -- The `port`, `entity` and the `mappings` keys are used to map the Sentry object fields to Port entities. 
To create multiple mappings of the same kind, you can add another item in the `resources` array; - - ```yaml showLineNumbers - resources: - - kind: project - selector: - query: "true" - port: - # highlight-start - entity: - mappings: - identifier: .slug - title: .name - blueprint: '"sentryProject"' - properties: - dateCreated: .dateCreated - platform: .platform - status: .status - link: .organization.links.organizationUrl + "/projects/" + .name - # highlight-end - ``` - -:::tip Blueprint key -Note the value of the `blueprint` key - if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) -::: +## Configuration -### Ingest data into Port +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. -To ingest Sentry objects using the [integration configuration](#configuration-structure), you can follow the steps below: +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. -1. Go to the DevPortal Builder page. -2. Select a blueprint you want to ingest using Sentry. -3. Choose the **Ingest Data** option from the menu. -4. Select Sentry under the APM & alerting category. -5. Add the contents of your [integration configuration](#configuration-structure) to the editor. -6. Click `Resync`. 
## Examples diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/argocd/argocd.md b/docs/build-your-software-catalog/sync-data-to-catalog/argocd/argocd.md index b443d1666..b7dac1aa1 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/argocd/argocd.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/argocd/argocd.md @@ -1,5 +1,6 @@ import Tabs from "@theme/Tabs" import TabItem from "@theme/TabItem" +import Prerequisites from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/\_ocean_helm_prerequisites_block.mdx" import ProjecttBlueprint from '/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/argocd/\_example_project_blueprint.mdx' import ApplicationBlueprint from '/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/argocd/\_example_application_blueprint.mdx' import EventBlueprint from '/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/argocd/\_example_event_blueprint.mdx' @@ -8,72 +9,146 @@ import ArgoCDWebhookConfig from '/docs/build-your-software-catalog/custom-integr import ArgoCDEventWebhookConfig from '/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/argocd/\_example_events_webhook_config.mdx' import ArgoCDEventManifest from '/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/argocd/\_example_events_manifest.mdx' import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # ArgoCD -Port's ArgoCD integration allows you to import `cluster`, `project`, `application`, `deployment-history`, `kubernetes-resource` and `managed-resource` from your ArgoCD instance into Port, according to your mapping and definition. 
+Port's ArgoCD integration allows you to model ArgoCD resources in your software catalog and ingest data into them. + + +## Overview + +This integration allows you to: + +- Map and organize your desired ArgoCD resources and their metadata in Port (see supported resources below). +- Watch for ArgoCD object changes (create/update/delete) in real-time, and automatically apply the changes to your software catalog. + +### Supported Resources + +The resources that can be ingested from ArgoCD into Port are listed below. +It is possible to reference any field that appears in the API responses linked below in the mapping configuration. + +- [`cluster`](https://cd.apps.argoproj.io/swagger-ui#operation/ClusterService_List) +- [`project`](https://cd.apps.argoproj.io/swagger-ui#operation/ProjectService_List) +- [`application`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_List) +- [`deployment-history`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_List) +- [`kubernetes-resource`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_List) +- [`managed-resource`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_ManagedResources) + +## Prerequisites + +### Generate an ArgoCD token + +1. Navigate to `/settings/accounts/`. For example, if you access your ArgoCD at `https://localhost:8080`, you should navigate to `https://localhost:8080/settings/accounts/` + +2. The user should have `apiKey` capabilities to allow generating authentication tokens for API access. If you don't have a user created yet, follow the guide on [how to create a new ArgoCD user](https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#create-new-user) + +3. Newly created users may have limited scope to resources by default. For that reason, You will need to configure the RBAC policy for the new user by following [this guide](https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/) + +4. 
Ensure that the policy definition grants enough permission to `read` resources such as `applications`, `clusters`, `projects`, `repositories`, etc. + +5. Under **Tokens** on your ArgoCD UI, click **Generate New** to create a new token for the user or use the CLI: + + ```bash + argocd account generate-token --account + ``` + +6. Create an ArgoCD user with read-only permissions + + 1. Create an `argocd-user.yaml` file with the below manifest to create a new user `port-ocean-user` + +
+ Create user manifest (click to expand) + + ```yaml showLineNumbers + apiVersion: v1 + kind: ConfigMap + metadata: + name: argocd-cm + namespace: argocd + labels: + app.kubernetes.io/name: argocd-cm + app.kubernetes.io/part-of: argocd + data: + # add an additional local user with apiKey and login capabilities + # apiKey - allows generating API keys + # login - allows to login using UI + accounts.port-ocean-user: apiKey, login + accounts.port-ocean-user.enabled: "true" + ``` +
+ + 2. Apply the manifest with `kubectl` to create the user: + + ```bash + kubectl apply -f argocd-user.yaml + ``` + + 3. Grant read only RBAC policy to the new user using the below manifest file (`argocd-rbac-cm.yaml`) -## Common use cases +
+ RBAC policy to grant readonly role to the new user (click to expand) + + ```yaml showLineNumbers + apiVersion: v1 + kind: ConfigMap + metadata: + name: argocd-rbac-cm + namespace: argocd + data: + policy.default: role:readonly + policy.csv: | + p, role:read-only-role, applications, *, */*, allow + p, role:read-only-role, clusters, get, *, allow + p, role:read-only-role, repositories, get, *, allow + p, role:read-only-role, projects, get, *, allow + p, role:read-only-role, logs, get, *, allow + + g, port-ocean-user, role:read-only-role + ``` +
-- Map your monitored Kubernetes resources in ArgoCD. -- Watch for object changes (create/update) in real-time, and automatically apply the changes to your entities in Port. + 4. Apply the `argocd-rbac-cm.yaml` manifest with `kubectl`: -## Installation + ```bash + kubectl apply -f argocd-rbac-cm.yaml + ``` + + 5. Go to your ArgoCD UI to generate a new token for the user or use the CLI + ```bash + argocd account generate-token --account + ``` + + +## Setup Choose one of the following installation methods: - + -Using this installation option means that the integration will be able to update Port in real time using webhooks. +Using this installation method means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Required | -| -------------------------------- | ------------------------------------------------------------------------------------------------------------- | -------- | -| `port.clientId` | Your port client id | ✅ | -| `port.clientSecret` | Your port client secret | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.secrets.token` | The ArgoCD API token | ✅ | -| `integration.config.serverUrl` | The ArgoCD server url | ✅ | -| `integration.config.ignoreServerError` | Whether to ignore server errors when fetching data from ArgoCD. 
The default value is `false` meaning the integration will raise exceptions and fail the resync event | ❌ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | -| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | +

Prerequisites

-
+ + + +For details about the available parameters for the installation, see the table below. -To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-argocd-integration port-labs/port-ocean \ - --set port.clientId="CLIENT_ID" \ - --set port.clientSecret="CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set scheduledResyncInterval=60 \ - --set integration.identifier="my-argocd-integration" \ - --set integration.type="argocd" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.token="" \ - --set integration.config.serverUrl="" -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-argocd-integration` in your git repository with the content: @@ -157,30 +232,51 @@ kubectl apply -f my-ocean-argocd-integration.yaml +This table summarizes the available parameters for the installation. +Note the parameters specific to this integration, they are last in the table. 
+ +| Parameter | Description | Required | +|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your port client id | ✅ | +| `port.clientSecret` | Your port client secret | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `integration.secrets.token` | The ArgoCD API token, docs can be found [here](https://argo-cd.readthedocs.io/en/stable/user-guide/commands/argocd_account_generate-token/) | ✅ | +| `integration.config.serverUrl` | The ArgoCD server url | ✅ | +| `integration.config.ignoreServerError` | Whether to ignore server errors when fetching data from ArgoCD. The default value is `false` meaning the integration will raise exceptions and fail the resync event | ❌ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | + +
- - - -This workflow will run the ArgoCD integration once and then exit, this is useful for **scheduled** ingestion of data. + -:::warning +This workflow/pipeline will run the ArgoCD integration once and then exit, this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): -| Parameter | Description | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | -------- | -| `OCEAN__INTEGRATION__CONFIG__TOKEN` | The ArgoCD API token | ✅ | -| `OCEAN__INTEGRATION__CONFIG__SERVER_URL` | The ArgoCD server URL | ✅ | -| `OCEAN__INTEGRATION__CONFIG__IGNORE_SERVER_ERROR` | Whether to ignore server errors when fetching data from ArgoCD. The default value is `false` meaning the integration will raise exceptions and fail the resync event | ❌ | -| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port App config Mapping | ❌ | -| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | -| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | -| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | -| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `OCEAN__SEND_RAW_DATA_EXAMPLES` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. 
Default is true | ❌ | +| Parameter | Description | Required | +|---------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `OCEAN__INTEGRATION__CONFIG__TOKEN` | The ArgoCD API token | ✅ | +| `OCEAN__INTEGRATION__CONFIG__SERVER_URL` | The ArgoCD server URL | ✅ | +| `OCEAN__INTEGRATION__CONFIG__IGNORE_SERVER_ERROR` | Whether to ignore server errors when fetching data from ArgoCD. The default value is `false` meaning the integration will raise exceptions and fail the resync event | ❌ | +| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port App config Mapping | ❌ | +| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | +| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | +| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | +| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `OCEAN__SEND_RAW_DATA_EXAMPLES` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ |
@@ -214,15 +310,10 @@ jobs:
-This pipeline will run the ArgoCD integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -286,26 +377,21 @@ pipeline { -This pipeline will run the ArgoCD integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): -| Parameter | Description | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | -------- | -| `OCEAN__INTEGRATION__CONFIG__TOKEN` | The ArgoCD API token | ✅ | -| `OCEAN__INTEGRATION__CONFIG__SERVER_URL` | The ArgoCD server URL | ✅ | -| `OCEAN__INTEGRATION__CONFIG__IGNORE_SERVER_ERROR` | Whether to ignore server errors when fetching data from ArgoCD. The default value is `false` meaning the integration will raise exceptions and fail the resync event | ❌ | -| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port App config Mapping | ❌ | -| `OCEAN__SEND_RAW_DATA_EXAMPLES` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. 
Default is true | ❌ | -| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | -| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | -| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | -| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| Parameter | Description | Required | +|---------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `OCEAN__INTEGRATION__CONFIG__TOKEN` | The ArgoCD API token | ✅ | +| `OCEAN__INTEGRATION__CONFIG__SERVER_URL` | The ArgoCD server URL | ✅ | +| `OCEAN__INTEGRATION__CONFIG__IGNORE_SERVER_ERROR` | Whether to ignore server errors when fetching data from ArgoCD. The default value is `false` meaning the integration will raise exceptions and fail the resync event | ❌ | +| `OCEAN__INITIALIZE_PORT_RESOURCES` | Default true, When set to false the integration will not create default blueprints and the port App config Mapping | ❌ | +| `OCEAN__SEND_RAW_DATA_EXAMPLES` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | +| `OCEAN__INTEGRATION__IDENTIFIER` | Change the identifier to describe your integration, if not set will use the default one | ❌ | +| `OCEAN__PORT__CLIENT_ID` | Your port client id | ✅ | +| `OCEAN__PORT__CLIENT_SECRET` | Your port client secret | ✅ | +| `OCEAN__PORT__BASE_URL` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ |
@@ -358,231 +444,39 @@ ingest_data:
-### Generating ArgoCD token +## Configuration -1. Navigate to `/settings/accounts/`. For example, if you access your ArgoCD at `https://localhost:8080`, you should navigate to `https://localhost:8080/settings/accounts/` -2. The user should have `apiKey` capabilities to allow generating authentication tokens for API access. If you don't have a user created yet, follow the guide on [how to create a new ArgoCD user](https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#create-new-user) -3. Newly created users may have limited scope to resources by default. For that reason, You will need to configure RBAC policy for the new user by following [this guide](https://argo-cd.readthedocs.io/en/stable/operator-manual/rbac/) -4. Ensure that the policy definition grants enough permission to `read` resources such as `applications`, `clusters`, `projects`, `repositories` etc. -5. Under **Tokens** on your ArgoCD UI, Click **Generate New** to create a new token for the user or use the CLI: +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. -```bash -argocd account generate-token --account -``` - -:::tip Creating ArgoCD user with readonly permissions - -1. Create an `argocd-user.yaml` file with the below manifest to create a new user `port-ocean-user` - -
- Create user manifest (click to expand) - -```yaml showLineNumbers -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm - namespace: argocd - labels: - app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd -data: - # add an additional local user with apiKey and login capabilities - # apiKey - allows generating API keys - # login - allows to login using UI - accounts.port-ocean-user: apiKey, login - accounts.port-ocean-user.enabled: "true" -``` -
- -2. Apply the manifest with `kubectl` to create the user: -```bash -kubectl apply -f argocd-user.yaml -``` -3. Grant read only RBAC policy to the new user using the below manifest file (`argocd-rbac-cm.yaml`) -
- RBAC policy to grant readonly role to the new user (click to expand) - -```yaml showLineNumbers -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-rbac-cm - namespace: argocd -data: - policy.default: role:readonly - policy.csv: | - p, role:read-only-role, applications, *, */*, allow - p, role:read-only-role, clusters, get, *, allow - p, role:read-only-role, repositories, get, *, allow - p, role:read-only-role, projects, get, *, allow - p, role:read-only-role, logs, get, *, allow - - g, port-ocean-user, role:read-only-role -``` -
- -4. Apply the `argocd-rbac-cm.yaml` manifest with `kubectl`: -```bash -kubectl apply -f argocd-rbac-cm.yaml -``` +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. -5. Go to your ArgoCD UI to generate a new token for the user or use the CLI -```bash -argocd account generate-token --account -``` -::: -## Ingesting ArgoCD objects - -The ArgoCD integration uses a YAML configuration to describe the process of loading data into the developer portal. - -Here is an example snippet from the config which demonstrates the process for getting `application` data from ArgoCD: - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: application - selector: - query: "true" - port: - entity: - mappings: - identifier: .metadata.uid - title: .metadata.name - blueprint: '"argocdApplication"' - properties: - gitRepo: .spec.source.repoURL - gitPath: .spec.source.path - destinationServer: .spec.destination.server - namespace: .metadata.namespace - syncStatus: .status.sync.status - healthStatus: .status.health.status - createdAt: .metadata.creationTimestamp - relations: - project: .spec.project -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from ArgoCD's API events. - -### Configuration structure - -The integration configuration determines which resources will be queried from ArgoCD, and which entities and properties will be created in Port. - -:::tip Supported resources -The following resources can be used to map data from ArgoCD, it is possible to reference any field that appears in the API responses linked below for the mapping configuration. 
- -- [`cluster`](https://cd.apps.argoproj.io/swagger-ui#operation/ClusterService_List) -- [`project`](https://cd.apps.argoproj.io/swagger-ui#operation/ProjectService_List) -- [`application`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_List) -- [`deployment-history`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_List) - You can reference any valid property from the `.status.history` object of the ArgoCD application -- [`kubernetes-resource`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_List) - You can reference any valid property from the `.status.resources` object of the ArgoCD application -- [`managed-resource`](https://cd.apps.argoproj.io/swagger-ui#operation/ApplicationService_ManagedResources) - -::: - -- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: application - selector: - ... - ``` - -- The `kind` key is a specifier for an ArgoCD object: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: application - selector: - ... - ``` - -- The `selector` and the `query` keys allow you to filter which objects of the specified `kind` will be ingested into your software catalog: - - ```yaml showLineNumbers - resources: - - kind: application - # highlight-start - selector: - query: "true" # JQ boolean expression. If evaluated to false - this object will be skipped. - # highlight-end - port: - ``` - -- The `port`, `entity` and the `mappings` keys are used to map the ArgoCD object fields to Port entities. To create multiple mappings of the same kind, you can add another item in the `resources` array; - - ```yaml showLineNumbers - resources: - - kind: application - selector: - query: "true" - port: - # highlight-start - entity: - mappings: # Mappings between one ArgoCD object to a Port entity. Each value is a JQ query. 
- identifier: .metadata.uid - title: .metadata.name - blueprint: '"argocdApplication"' - properties: - gitRepo: .spec.source.repoURL - gitPath: .spec.source.path - destinationServer: .spec.destination.server - namespace: .metadata.namespace - syncStatus: .status.sync.status - healthStatus: .status.health.status - createdAt: .metadata.creationTimestamp - relations: - project: .spec.project - # highlight-end - - kind: application # In this instance application is mapped again with a different filter - selector: - query: '.metadata.name == "MyApplicationName"' - port: - entity: - mappings: ... - ``` - - :::tip Blueprint key - Note the value of the `blueprint` key - if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) - ::: - -## Configuring real-time updates +## Capabilities +### Configure real-time updates Currently, the ArgoCD REST API lacks support for programmatic webhook creation. To set up a webhook configuration in ArgoCD for sending notifications to the Ocean integration, follow these steps: -### Prerequisite - -1. You have access to a Kubernetes cluster where ArgoCD is deployed. -2. You have `kubectl` installed and configured to access your cluster. - -### Steps - 1. Install ArgoCD notifications manifest; -```bash showLineNumbers +```bash kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-notifications/release-1.0/manifests/install.yaml ``` 2. Install ArgoCD triggers and templates manifest; -```bash showLineNumbers +```bash kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-notifications/release-1.0/catalog/install.yaml ``` 3. Use `kubectl` to connect to the Kubernetes cluster where your ArgoCD instance is deployed; -```bash showLineNumbers +```bash kubectl config use-context ``` 4. 
Set the current namespace to your ArgoCD namespace, use the following command; -```bash showLineNumbers +```bash kubectl config set-context --current --namespace= ``` @@ -654,576 +548,16 @@ kubectl apply -n -f This command deploys the webhook notification configuration to your ArgoCD notification configmap (`argocd-notifications-cm`), allowing Ocean to receive real-time events. -### Ingest data into Port - -To ingest ArgoCD objects using the [integration configuration](#configuration-structure), you can follow the steps below: - -1. Go to the DevPortal Builder page. -2. Select a blueprint you want to ingest using ArgoCD. -3. Choose the **Ingest Data** option from the menu. -4. Select ArgoCD under the Kubernetes Stack providers category. -5. Modify the [configuration](#configuration-structure) according to your needs. -6. Click `Resync`. ## Examples -Examples of blueprints and the relevant integration configurations: - -### Cluster - -
-Cluster blueprint - -```json showLineNumbers - { - "identifier": "argocdCluster", - "description": "This blueprint represents an ArgoCD cluster", - "title": "ArgoCD Cluster", - "icon": "Argo", - "schema": { - "properties": { - "applicationsCount": { - "title": "Applications Count", - "type": "number", - "description": "The number of applications managed by Argo CD on the cluster", - "icon": "DefaultProperty" - }, - "serverVersion": { - "title": "Server Version", - "type": "string", - "description": "Contains information about the Kubernetes version of the cluster", - "icon": "DefaultProperty" - }, - "labels": { - "title": "Labels", - "type": "object", - "description": "Contains information about cluster metadata", - "icon": "DefaultProperty" - }, - "updatedAt": { - "icon": "DefaultProperty", - "title": "Updated At", - "type": "string", - "format": "date-time" - }, - "server": { - "title": "Server", - "description": "The API server URL of the Kubernetes cluster", - "type": "string", - "icon": "DefaultProperty" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": {} - } -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: cluster - selector: - query: "true" - port: - entity: - mappings: - identifier: .name - title: .name - blueprint: '"argocdCluster"' - properties: - applicationsCount: .info.applicationsCount - serverVersion: .serverVersion - labels: .labels - updatedAt: .connectionState.attemptedAt - server: .server -``` - -
- -### Namespace - -
-Namespace blueprint - -```json showLineNumbers - { - "identifier": "argocdNamespace", - "description": "This blueprint represents an ArgoCD namespace", - "title": "ArgoCD Namespace", - "icon": "Argo", - "schema": { - "properties": {}, - "required": [] - }, - "aggregationProperties": {}, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": { - "cluster": { - "title": "ArgoCD Cluster", - "target": "argocdCluster", - "required": false, - "many": false - } - } - } -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: cluster - selector: - query: "true" - port: - itemsToParse: .namespaces - entity: - mappings: - identifier: .name + "-" + .item | tostring - title: .name + "-" + .item - blueprint: '"argocdNamespace"' - properties: {} - relations: - cluster: .name -``` - -
- -### Project - -
- Project blueprint - -```json showlineNumbers - { - "identifier": "argocdProject", - "description": "This blueprint represents an ArgoCD Project", - "title": "ArgoCD Project", - "icon": "Argo", - "schema": { - "properties": { - "createdAt": { - "title": "Created At", - "type": "string", - "format": "date-time", - "icon": "DefaultProperty" - }, - "description": { - "title": "Description", - "description": "Project description", - "type": "string", - "icon": "DefaultProperty" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": {} - } -``` - -
- -
-Integration configuration +To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: project - selector: - query: "true" - port: - entity: - mappings: - identifier: .metadata.name - title: .metadata.name - blueprint: '"argocdProject"' - properties: - createdAt: .metadata.creationTimestamp - description: .spec.description -``` +Examples of blueprints and the relevant integration configurations can be found on the argocd [examples page](example.md) -
+## Relevant Guides -### Application - -
- Application blueprint - -```json showlineNumbers - { - "identifier": "argocdApplication", - "description": "This blueprint represents an ArgoCD Application", - "title": "Running Service", - "icon": "Argo", - "schema": { - "properties": { - "gitRepo": { - "type": "string", - "icon": "Git", - "title": "Repository URL", - "description": "The URL of the Git repository containing the application source code" - }, - "gitPath": { - "type": "string", - "title": "Path", - "description": "The path within the Git repository where the application manifests are located" - }, - "destinationServer": { - "type": "string", - "title": "Destination Server", - "description": "The URL of the target cluster's Kubernetes control plane API" - }, - "revision": { - "type": "string", - "title": "Revision", - "description": "Revision contains information about the revision the comparison has been performed to" - }, - "targetRevision": { - "type": "string", - "title": "Target Revision", - "description": "Target Revision defines the revision of the source to sync the application to. 
In case of Git, this can be commit, tag, or branch" - }, - "syncStatus": { - "type": "string", - "title": "Sync Status", - "enum": [ - "Synced", - "OutOfSync", - "Unknown" - ], - "enumColors": { - "Synced": "green", - "OutOfSync": "red", - "Unknown": "lightGray" - }, - "description": "Status is the sync state of the comparison" - }, - "healthStatus": { - "type": "string", - "title": "Health Status", - "enum": [ - "Healthy", - "Missing", - "Suspended", - "Degraded", - "Progressing", - "Unknown" - ], - "enumColors": { - "Healthy": "green", - "Missing": "yellow", - "Suspended": "purple", - "Degraded": "red", - "Progressing": "blue", - "Unknown": "lightGray" - }, - "description": "Status holds the status code of the application or resource" - }, - "createdAt": { - "title": "Created At", - "type": "string", - "format": "date-time", - "description": "The created timestamp of the application" - }, - "labels": { - "type": "object", - "title": "Labels", - "description": "Map of string keys and values that can be used to organize and categorize object" - }, - "annotations": { - "type": "object", - "title": "Annotations", - "description": "Annotations are unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": { - "project": { - "title": "ArgoCD Project", - "target": "argocdProject", - "required": false, - "many": false - }, - "cluster": { - "title": "ArgoCD Cluster", - "target": "argocdCluster", - "required": false, - "many": false - }, - "namespace": { - "title": "ArgoCD Namespace", - "target": "argocdNamespace", - "required": false, - "many": false - } - } - } -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: application - selector: - query: "true" - port: - entity: - mappings: - identifier: .metadata.uid - title: .metadata.name - blueprint: '"argocdApplication"' - properties: - gitRepo: .spec.source.repoURL - gitPath: .spec.source.path - destinationServer: .spec.destination.server - revision: .status.sync.revision - targetRevision: .spec.source.targetRevision - syncStatus: .status.sync.status - healthStatus: .status.health.status - createdAt: .metadata.creationTimestamp - labels: .metadata.labels - annotations: .metadata.annotations - relations: - project: .spec.project - namespace: .metadata.namespace - cluster: .spec.destination.name -``` - -
- -### Deployment history - -
- Deployment history blueprint - -```json showlineNumbers - { - "identifier": "argocdDeploymentHistory", - "description": "This blueprint represents an ArgoCD deployment history", - "title": "ArgoCD Deployment History", - "icon": "Argo", - "schema": { - "properties": { - "deployedAt": { - "title": "Deployed At", - "type": "string", - "format": "date-time" - }, - "deployStartedAt": { - "title": "Deploy Started At", - "type": "string", - "format": "date-time" - }, - "revision": { - "title": "Revision", - "type": "string" - }, - "initiatedBy": { - "title": "Initiated By", - "type": "string" - }, - "repoURL": { - "title": "Repository URL", - "type": "string" - }, - "sourcePath": { - "title": "Source Path", - "type": "string" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": { - "application": { - "title": "Application", - "target": "argocdApplication", - "required": false, - "many": false - } - } - } -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: application - selector: - query: "true" - port: - itemsToParse: .status.history - entity: - mappings: - identifier: .metadata.uid + "-" + (.item.id | tostring) - title: .metadata.name + "-" + (.item.id | tostring) - blueprint: '"argocdDeploymentHistory"' - properties: - deployedAt: .item.deployedAt - deployStartedAt: .item.deployStartedAt - revision: .item.source.repoURL + "/commit/" + .item.revision - initiatedBy: .item.initiatedBy.username - repoURL: .item.source.repoURL - sourcePath: .item.source.path - relations: - application: .metadata.uid -``` - -
- -### Kubernetes Resource - -
- Images blueprint - -```json showlineNumbers - { - "identifier": "image", - "description": "This blueprint represents an image", - "title": "Image", - "icon": "AWS", - "schema": { - "properties": {}, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": {} - } -``` - -
- -
- Kubernetes resource blueprint - -```json showlineNumbers - { - "identifier": "argocdKubernetesResource", - "description": "This blueprint represents an ArgoCD kubernetes resource", - "title": "Kubernetes Resource", - "icon": "Argo", - "schema": { - "properties": { - "kind": { - "title": "Kind", - "type": "string" - }, - "version": { - "title": "Version", - "type": "string" - }, - "namespace": { - "title": "Namespace", - "type": "string" - }, - "labels": { - "type": "object", - "title": "Labels" - }, - "annotations": { - "type": "object", - "title": "Annotations" - } - }, - "required": [] - }, - "mirrorProperties": { - "healthStatus": { - "title": "Health Status", - "path": "application.healthStatus" - }, - "syncStatus": { - "title": "Sync Status", - "path": "application.syncStatus" - } - }, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": { - "application": { - "title": "Application", - "target": "argocdApplication", - "required": false, - "many": false - }, - "image": { - "title": "Image", - "target": "image", - "required": false, - "many": false - } - } - } -``` - -
- - -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: managed-resource - selector: - query: "true" - port: - entity: - mappings: - identifier: .__application.metadata.uid + "-" + .kind + "-" + .name - title: .__application.metadata.name + "-" + .kind + "-" + .name - blueprint: '"argocdKubernetesResource"' - properties: - kind: .kind - namespace: .namespace - version: .resourceVersion - annotations: .liveState | fromjson | .metadata.annotations - labels: .liveState | fromjson | .metadata.labels - relations: - application: .__application.metadata.uid - image: 'if .kind == "Deployment" then .liveState | fromjson | .spec.template.spec.containers[0].image else null end' -``` -
+For relevant guides and examples, see the [guides section](https://docs.getport.io/guides?tags=ArgoCD). ## Alternative installation via webhook @@ -1285,25 +619,25 @@ To set up a webhook configuration in ArgoCD for sending notifications to Port, f 1. Install ArgoCD notifications manifest; -```bash showLineNumbers +```bash kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-notifications/release-1.0/manifests/install.yaml ``` 2. Install ArgoCD triggers and templates manifest; -```bash showLineNumbers +```bash kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-notifications/release-1.0/catalog/install.yaml ``` 3. Use `kubectl` to connect to the Kubernetes cluster where your ArgoCD instance is deployed; -```bash showLineNumbers +```bash kubectl config use-context ``` 4. Set the current namespace to your ArgoCD namespace, use the following command; -```bash showLineNumbers +```bash kubectl config set-context --current --namespace= ``` @@ -1431,25 +765,25 @@ To set up a webhook configuration in ArgoCD for sending notifications to Port, f 1. Install ArgoCD notifications manifest; -```bash showLineNumbers +```bash kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-notifications/release-1.0/manifests/install.yaml ``` 2. Install ArgoCD triggers and templates manifest; -```bash showLineNumbers +```bash kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj-labs/argocd-notifications/release-1.0/catalog/install.yaml ``` 3. Use `kubectl` to connect to the Kubernetes cluster where your ArgoCD instance is deployed; -```bash showLineNumbers +```bash kubectl config use-context ``` 4. Set the current namespace to your ArgoCD namespace, use the following command; -```bash showLineNumbers +```bash kubectl config set-context --current --namespace= ``` @@ -1481,7 +815,3 @@ Done! 
any change that happens to your applications in ArgoCD will trigger a webh -More relevant guides and examples: - -- [Rollback ArgoCD deployment](/guides/all/rollback-argocd-deployment) -- [Self-service action to synchronize ArgoCD application](/guides/all/sync-argocd-app) diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/argocd/example.md b/docs/build-your-software-catalog/sync-data-to-catalog/argocd/example.md new file mode 100644 index 000000000..739cefff5 --- /dev/null +++ b/docs/build-your-software-catalog/sync-data-to-catalog/argocd/example.md @@ -0,0 +1,562 @@ +--- +sidebar_position: 2 +--- + +# Examples +To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. + +## Cluster + +
+Cluster blueprint + +```json showLineNumbers + { + "identifier": "argocdCluster", + "description": "This blueprint represents an ArgoCD cluster", + "title": "ArgoCD Cluster", + "icon": "Argo", + "schema": { + "properties": { + "applicationsCount": { + "title": "Applications Count", + "type": "number", + "description": "The number of applications managed by Argo CD on the cluster", + "icon": "DefaultProperty" + }, + "serverVersion": { + "title": "Server Version", + "type": "string", + "description": "Contains information about the Kubernetes version of the cluster", + "icon": "DefaultProperty" + }, + "labels": { + "title": "Labels", + "type": "object", + "description": "Contains information about cluster metadata", + "icon": "DefaultProperty" + }, + "updatedAt": { + "icon": "DefaultProperty", + "title": "Updated At", + "type": "string", + "format": "date-time" + }, + "server": { + "title": "Server", + "description": "The API server URL of the Kubernetes cluster", + "type": "string", + "icon": "DefaultProperty" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": {} + } +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: cluster + selector: + query: "true" + port: + entity: + mappings: + identifier: .name + title: .name + blueprint: '"argocdCluster"' + properties: + applicationsCount: .info.applicationsCount + serverVersion: .serverVersion + labels: .labels + updatedAt: .connectionState.attemptedAt + server: .server +``` + +
+ +## Namespace + +
+Namespace blueprint + +```json showLineNumbers + { + "identifier": "argocdNamespace", + "description": "This blueprint represents an ArgoCD namespace", + "title": "ArgoCD Namespace", + "icon": "Argo", + "schema": { + "properties": {}, + "required": [] + }, + "aggregationProperties": {}, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": { + "cluster": { + "title": "ArgoCD Cluster", + "target": "argocdCluster", + "required": false, + "many": false + } + } + } +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: cluster + selector: + query: "true" + port: + itemsToParse: .namespaces + entity: + mappings: + identifier: .name + "-" + .item | tostring + title: .name + "-" + .item + blueprint: '"argocdNamespace"' + properties: {} + relations: + cluster: .name +``` + +
+ +## Project + +
+ Project blueprint + +```json showlineNumbers + { + "identifier": "argocdProject", + "description": "This blueprint represents an ArgoCD Project", + "title": "ArgoCD Project", + "icon": "Argo", + "schema": { + "properties": { + "createdAt": { + "title": "Created At", + "type": "string", + "format": "date-time", + "icon": "DefaultProperty" + }, + "description": { + "title": "Description", + "description": "Project description", + "type": "string", + "icon": "DefaultProperty" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": {} + } +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: project + selector: + query: "true" + port: + entity: + mappings: + identifier: .metadata.name + title: .metadata.name + blueprint: '"argocdProject"' + properties: + createdAt: .metadata.creationTimestamp + description: .spec.description +``` + +
+ +## Application + +
+ Application blueprint + +```json showlineNumbers + { + "identifier": "argocdApplication", + "description": "This blueprint represents an ArgoCD Application", + "title": "Running Service", + "icon": "Argo", + "schema": { + "properties": { + "gitRepo": { + "type": "string", + "icon": "Git", + "title": "Repository URL", + "description": "The URL of the Git repository containing the application source code" + }, + "gitPath": { + "type": "string", + "title": "Path", + "description": "The path within the Git repository where the application manifests are located" + }, + "destinationServer": { + "type": "string", + "title": "Destination Server", + "description": "The URL of the target cluster's Kubernetes control plane API" + }, + "revision": { + "type": "string", + "title": "Revision", + "description": "Revision contains information about the revision the comparison has been performed to" + }, + "targetRevision": { + "type": "string", + "title": "Target Revision", + "description": "Target Revision defines the revision of the source to sync the application to. 
In case of Git, this can be commit, tag, or branch" + }, + "syncStatus": { + "type": "string", + "title": "Sync Status", + "enum": [ + "Synced", + "OutOfSync", + "Unknown" + ], + "enumColors": { + "Synced": "green", + "OutOfSync": "red", + "Unknown": "lightGray" + }, + "description": "Status is the sync state of the comparison" + }, + "healthStatus": { + "type": "string", + "title": "Health Status", + "enum": [ + "Healthy", + "Missing", + "Suspended", + "Degraded", + "Progressing", + "Unknown" + ], + "enumColors": { + "Healthy": "green", + "Missing": "yellow", + "Suspended": "purple", + "Degraded": "red", + "Progressing": "blue", + "Unknown": "lightGray" + }, + "description": "Status holds the status code of the application or resource" + }, + "createdAt": { + "title": "Created At", + "type": "string", + "format": "date-time", + "description": "The created timestamp of the application" + }, + "labels": { + "type": "object", + "title": "Labels", + "description": "Map of string keys and values that can be used to organize and categorize object" + }, + "annotations": { + "type": "object", + "title": "Annotations", + "description": "Annotations are unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": { + "project": { + "title": "ArgoCD Project", + "target": "argocdProject", + "required": false, + "many": false + }, + "cluster": { + "title": "ArgoCD Cluster", + "target": "argocdCluster", + "required": false, + "many": false + }, + "namespace": { + "title": "ArgoCD Namespace", + "target": "argocdNamespace", + "required": false, + "many": false + } + } + } +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: application + selector: + query: "true" + port: + entity: + mappings: + identifier: .metadata.uid + title: .metadata.name + blueprint: '"argocdApplication"' + properties: + gitRepo: .spec.source.repoURL + gitPath: .spec.source.path + destinationServer: .spec.destination.server + revision: .status.sync.revision + targetRevision: .spec.source.targetRevision + syncStatus: .status.sync.status + healthStatus: .status.health.status + createdAt: .metadata.creationTimestamp + labels: .metadata.labels + annotations: .metadata.annotations + relations: + project: .spec.project + namespace: .metadata.namespace + cluster: .spec.destination.name +``` + +
+ +## Deployment history + +
+ Deployment history blueprint + +```json showlineNumbers + { + "identifier": "argocdDeploymentHistory", + "description": "This blueprint represents an ArgoCD deployment history", + "title": "ArgoCD Deployment History", + "icon": "Argo", + "schema": { + "properties": { + "deployedAt": { + "title": "Deployed At", + "type": "string", + "format": "date-time" + }, + "deployStartedAt": { + "title": "Deploy Started At", + "type": "string", + "format": "date-time" + }, + "revision": { + "title": "Revision", + "type": "string" + }, + "initiatedBy": { + "title": "Initiated By", + "type": "string" + }, + "repoURL": { + "title": "Repository URL", + "type": "string" + }, + "sourcePath": { + "title": "Source Path", + "type": "string" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "aggregationProperties": {}, + "relations": { + "application": { + "title": "Application", + "target": "argocdApplication", + "required": false, + "many": false + } + } + } +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: application + selector: + query: "true" + port: + itemsToParse: .status.history + entity: + mappings: + identifier: .metadata.uid + "-" + (.item.id | tostring) + title: .metadata.name + "-" + (.item.id | tostring) + blueprint: '"argocdDeploymentHistory"' + properties: + deployedAt: .item.deployedAt + deployStartedAt: .item.deployStartedAt + revision: .item.source.repoURL + "/commit/" + .item.revision + initiatedBy: .item.initiatedBy.username + repoURL: .item.source.repoURL + sourcePath: .item.source.path + relations: + application: .metadata.uid +``` + +
+ +## Kubernetes Resource + +
+ Images blueprint + +```json showlineNumbers + { + "identifier": "image", + "description": "This blueprint represents an image", + "title": "Image", + "icon": "AWS", + "schema": { + "properties": {}, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "aggregationProperties": {}, + "relations": {} + } +``` + +
+ +
+ Kubernetes resource blueprint + +```json showlineNumbers + { + "identifier": "argocdKubernetesResource", + "description": "This blueprint represents an ArgoCD kubernetes resource", + "title": "Kubernetes Resource", + "icon": "Argo", + "schema": { + "properties": { + "kind": { + "title": "Kind", + "type": "string" + }, + "version": { + "title": "Version", + "type": "string" + }, + "namespace": { + "title": "Namespace", + "type": "string" + }, + "labels": { + "type": "object", + "title": "Labels" + }, + "annotations": { + "type": "object", + "title": "Annotations" + } + }, + "required": [] + }, + "mirrorProperties": { + "healthStatus": { + "title": "Health Status", + "path": "application.healthStatus" + }, + "syncStatus": { + "title": "Sync Status", + "path": "application.syncStatus" + } + }, + "calculationProperties": {}, + "aggregationProperties": {}, + "relations": { + "application": { + "title": "Application", + "target": "argocdApplication", + "required": false, + "many": false + }, + "image": { + "title": "Image", + "target": "image", + "required": false, + "many": false + } + } + } +``` + +
+ + +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: managed-resource + selector: + query: "true" + port: + entity: + mappings: + identifier: .__application.metadata.uid + "-" + .kind + "-" + .name + title: .__application.metadata.name + "-" + .kind + "-" + .name + blueprint: '"argocdKubernetesResource"' + properties: + kind: .kind + namespace: .namespace + version: .resourceVersion + annotations: .liveState | fromjson | .metadata.annotations + labels: .liveState | fromjson | .metadata.labels + relations: + application: .__application.metadata.uid + image: 'if .kind == "Deployment" then .liveState | fromjson | .spec.template.spec.containers[0].image else null end' +``` +
\ No newline at end of file diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/cicd/octopus-deploy/octopus-deploy.md b/docs/build-your-software-catalog/sync-data-to-catalog/cicd/octopus-deploy/octopus-deploy.md index 77b9a4bc1..b87052b0c 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/cicd/octopus-deploy/octopus-deploy.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/cicd/octopus-deploy/octopus-deploy.md @@ -10,18 +10,31 @@ import DockerParameters from "./\_octopus-deploy-docker-parameters.mdx" import AdvancedConfig from '../../../../generalTemplates/_ocean_advanced_configuration_note.md' import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "./\_octopus_deploy_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Octopus Deploy Integration -Port's Octopus Deploy integration allows you to import `spaces`, `projects`, `releases`, `deployments`, and `machines` from Octopus Deploy into Port based on your mappings and definitions. +Port's Octopus Deploy integration allows you to model Octopus Deploy resources in your software catalog and ingest data into them. + +## Overview + +This integration allows you to: + +- Map and organize your desired Octopus Deploy resources and their metadata in Port (see supported resources below). +- Watch for Octopus Deploy object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. -## Common Use Cases +### Supported Resources -- Map `spaces`, `projects`, `releases`, `deployments`, and `machines` from Octopus Deploy to Port. -- Monitor real-time changes (create/update/delete) in Octopus Deploy and automatically sync them with your entities in Port. +The resources that can be ingested from Octopus Deploy into Port are listed below. 
It is possible to reference any field that appears in the API responses linked below in the mapping configuration. +- [`Space`](https://octopus.com/docs/octopus-rest-api/examples/spaces) +- [`Project`](https://octopus.com/docs/octopus-rest-api/examples/projects) +- [`Release`](https://octopus.com/docs/octopus-rest-api/examples/releases) +- [`Deployment`](https://octopus.com/docs/octopus-rest-api/examples/deployments) -## Installation + +## Setup Choose one of the following installation methods: @@ -33,55 +46,28 @@ Choose one of the following installation methods:
- + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: +

Prerequisites

-| Parameter | Description | Required | -|-------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------| -------- | -| `port.clientId` | Your port client id ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | -| `port.clientSecret` | Your port client secret ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.secrets.octopusApiKey` | The Octopus API Key | ✅ | -| `integration.config.serverUrl` | The Octopus host | ✅ | -| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Octopus | ❌ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | -| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | + -
+For details about the available parameters for the installation, see the table below. -To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install octopus port-labs/port-ocean \ - --set port.clientId= \ - --set port.clientSecret= \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set integration.identifier="octopus" \ - --set integration.type="octopus" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.octopusApiKey="Enter value here" \ - --set integration.config.serverUrl="https://example.com" -``` + + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-octopus-integration` in your git repository with the content: @@ -169,19 +155,41 @@ kubectl apply -f my-octopus-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Required | +|-------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your port client id ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | +| `port.clientSecret` | Your port client secret ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `integration.secrets.octopusApiKey` | The Octopus API Key, docs can be found [here](https://octopus.com/docs/octopus-rest-api/how-to-create-an-api-key) | ✅ | +| `integration.config.serverUrl` | The Octopus host | ✅ | +| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Octopus | ❌ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | + + +
+
- - - -This workflow will run the Octopus integration once and then exit, this is useful for **scheduled** ingestion of data. + + +This workflow/pipeline will run the Octopus integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning +:::warning Real-time updates If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -216,15 +224,11 @@ jobs: -This pipeline will run the Octopus integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: + Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -275,8 +279,8 @@ pipeline { ``` - - + +
@@ -319,13 +323,7 @@ steps: ```
- -This workflow will run the Octopus integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -384,55 +382,13 @@ ingest_data:
-### Generating Octopus API Key -To generate a Api Key for authenticating the Octopus Deploy API calls: -1. Log into the Octopus Web Portal, click your profile image and select Profile. -2. Click My API Keys. -3. Click New API key, state the purpose of the API key, set the expiry and click Generate new. -4. Copy the new API key to your clipboard and use as `OCTOPUS_API_KEY`. +## Configuration - +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. -## Ingesting Octopus Objects - -The Octopus integration uses a YAML configuration to define how data is loaded into the developer portal. - -Below is an example snippet from the configuration that shows how to retrieve `space` data from Octopus: - - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: space - selector: - query: "true" - port: - entity: - mappings: - identifier: .Id - title: .Name - blueprint: '"octopusSpace"' - properties: - url: env.OCEAN__INTEGRATION__CONFIG__SERVER_URL + "/app#/" + .Id - description: .Description -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from Octopus's API events. - - -### Ingest data into Port - -To ingest Octopus objects using the [integration configuration](/build-your-software-catalog/customize-integrations/configure-mapping), you can follow the steps below: - -1. Go to the DevPortal Builder page. -2. Select a blueprint you want to ingest using Octopus. -3. Choose the **Ingest Data** option from the menu. -4. 
Click the Octopus integration to open the edit page. -5. Modify the [configuration](/build-your-software-catalog/customize-integrations/configure-mapping) according to your needs. -6. Click `Resync`. ## Examples @@ -567,7 +523,6 @@ resources: - ### Release
@@ -644,7 +599,6 @@ resources:
- ### Deployment
@@ -727,7 +681,6 @@ resources:
- ### Machine
diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/cloud-providers/gcp/installation.md b/docs/build-your-software-catalog/sync-data-to-catalog/cloud-providers/gcp/installation.md index 7dd8e499b..602b2055b 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/cloud-providers/gcp/installation.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/cloud-providers/gcp/installation.md @@ -320,6 +320,7 @@ gcp_ocean_integration_image = "/port-ocean-g gcp_organization = "" gcp_ocean_setup_project = "" gcp_included_projects = [""] # The Project list that the integration digests resources from. +gcp_project_filter = "" # The filter string used to retrieve GCP projects, allowing complex filtering by combining multiple conditions with logical operators (AND | OR). integration_identifier = "gcp" scheduled_resync_interval = 1440 event_listener = { @@ -345,8 +346,9 @@ The Port GCP integration's Terraform module offers a set of configurations: | `gcp_ocean_integration_image` | | True | The Artifact Registry / Dockerhub image to deploy. | | `integration_identifier` | | True | The Integration's identifier in Port | | `port_base_url` | 'https://api.getport.io' | False | The Port Base url. | -| `gcp_included_projects` | [] | False | The Projects list you want the integration to collect from. If left empty, It will collect *All* projects in the organization. | -| `gcp_excluded_projects` | [] | False | The Projects list you want the integration NOT to collect from. This will be overriden by any value in gcp_included_projects besides []. | +| `gcp_included_projects` | [] | False | The Projects list you want the integration to collect from. If left empty, It will collect *All* projects in the organization. `This option will be deprecated soon.` | +| `gcp_project_filter` | | False | The filter string used to retrieve GCP projects, allowing complex filtering by combining multiple conditions with logical operators. 
Follows GCP's [filter expressions syntax](https://registry.terraform.io/providers/hashicorp/google/latest/docs/data-sources/projects#filter-1). Example: `parent.id:184606565139 labels.environment:production AND labels.team:devops OR labels.priority:high` |
| | `ocean_service_account_custom_roles` | [] | False | A list of custom roles you want to grant the Integration's Service account. The module will grant these permissions to every available project and to the setup project `gcp_ocean_setup_project`. Example value: ["organizations/1234567890/roles/MyCustomRole", "organizations/1234567890/roles/MyOtherCustomRole"] | +

Optional - Project Filtering

+ +You have the option to specify which projects are included or excluded for real-time events. This can be particularly useful when you have a large number of projects and want to target specific ones based on certain criteria. + +:::warning Deprecation Notice +The variables `gcp_included_projects` and `gcp_excluded_projects` are deprecated and will be removed in future releases. We recommend using the gcp_project_filter variable for project filtering moving forward. +::: + +You can use the following three filtering strategies together: + +- `gcp_excluded_projects` +- `gcp_included_projects` +- `gcp_project_filter` + +However, please note the priority conditions when using them simultaneously. + +

Priority Conditions

+You can use all three filtering strategies together, but it's important to understand how they interact. The following priority conditions apply: + +- **gcp_included_projects (Highest Priority):** + - When specified, only the projects listed in `gcp_included_projects` are included. + - All other filters (`gcp_excluded_projects` and `gcp_project_filter`) are ignored. + - Use this when you have a specific list of projects to include, regardless of other criteria. + +- **gcp_excluded_projects:** + - If `gcp_included_projects` is not specified but `gcp_excluded_projects` is provided, all projects are included except those listed. + - The `gcp_project_filter` is still applied, further refining the included projects. + +- **gcp_project_filter:** + - If neither `gcp_included_projects` nor `gcp_excluded_projects` are specified, and `gcp_project_filter` is provided, only projects matching the filter criteria are included. + - This allows for flexible and complex filtering using GCP's native filtering syntax. + +- **Default Behavior (Lowest Priority):** + - If none of the above variables are specified, all projects in your GCP organization are included by default. +

Optional - Scaling the permissions

If you want the integration to collect resources from multiple projects/folders or to have it collect from the entire organization, you need to have permissions to create/view additional resources. Follow these instructions to make sure you are equipped with enough permissions. diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/sonarqube/examples.md b/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/sonarqube/examples.md new file mode 100644 index 000000000..f32e9d2a0 --- /dev/null +++ b/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/sonarqube/examples.md @@ -0,0 +1,539 @@ +--- +sidebar_position: 2 +--- + +# Examples +To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. + +## Project + +
+Projects blueprint (Click to expand) + +```json showLineNumbers +{ + "identifier": "sonarQubeProject", + "title": "SonarQube Project", + "icon": "sonarqube", + "schema": { + "properties": { + "organization": { + "type": "string", + "title": "Organization", + "icon": "TwoUsers" + }, + "link": { + "type": "string", + "format": "url", + "title": "Link", + "icon": "Link" + }, + "lastAnalysisDate": { + "type": "string", + "format": "date-time", + "icon": "Clock", + "title": "Last Analysis Date" + }, + "numberOfBugs": { + "type": "number", + "title": "Number Of Bugs" + }, + "numberOfCodeSmells": { + "type": "number", + "title": "Number Of CodeSmells" + }, + "numberOfVulnerabilities": { + "type": "number", + "title": "Number Of Vulnerabilities" + }, + "numberOfHotSpots": { + "type": "number", + "title": "Number Of HotSpots" + }, + "numberOfDuplications": { + "type": "number", + "title": "Number Of Duplications" + }, + "coverage": { + "type": "number", + "title": "Coverage" + }, + "mainBranch": { + "type": "string", + "icon": "Git", + "title": "Main Branch" + }, + "tags": { + "type": "array", + "title": "Tags" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": {} +} +``` + +
+ +
+Integration configuration (Click to expand) + +:::tip filter projects +The integration provides an option to filter the data that is retrieved from the SonarQube API using the following attributes: + +1. `query`: Limits the search to component names that contain the supplied string +2. `alertStatus`: To filter a project's quality gate status. Accepts a list of values such as `OK`, `ERROR` and `WARN` +3. `languages`: To filter projects using a list of languages or a single language +4. `tags`: To filter a list of tags or a single tag +5. `qualifier`: To filter on a component qualifier. Accepts values such as `TRK` (for projects only) and `APP` (for applications only) + +These attributes can be enabled using the path: `selector.apiFilters.filter`. By default, the integration fetches only SonarQube projects using the `qualifier` attribute. +::: + +:::tip Define your own metrics +Besides filtering the API data, the integration provides a mechanism to allow users to define their own list of metrics used in SonarQube to evaluate the code. This list can be defined in the `selector.metrics` property. A complete list of valid SonarQube metrics can be in the [SonarQube documentation](https://docs.sonarsource.com/sonarqube/latest/user-guide/code-metrics/metrics-definition/) +::: + +:::note Supported Sonar environment +Please note that the API filters are supported on on-premise Sonar environments (SonarQube) only, and will not work on SonarCloud. 
+::: + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: projects + selector: + query: "true" + apiFilters: + filter: + qualifier: TRK + metrics: + - code_smells + - coverage + - bugs + - vulnerabilities + - duplicated_files + - security_hotspots + - new_violations + - new_coverage + - new_duplicated_lines_density + port: + entity: + mappings: + blueprint: '"sonarQubeProject"' + identifier: .key + title: .name + properties: + organization: .organization + link: .__link + lastAnalysisStatus: .__branch.status.qualityGateStatus + lastAnalysisDate: .__branch.analysisDate + numberOfBugs: .__measures[]? | select(.metric == "bugs") | .value + numberOfCodeSmells: .__measures[]? | select(.metric == "code_smells") | .value + numberOfVulnerabilities: .__measures[]? | select(.metric == "vulnerabilities") | .value + numberOfHotSpots: .__measures[]? | select(.metric == "security_hotspots") | .value + numberOfDuplications: .__measures[]? | select(.metric == "duplicated_files") | .value + coverage: .__measures[]? | select(.metric == "coverage") | .value + mainBranch: .__branch.name + tags: .tags +``` +
+ +## Issue + +
+Issue blueprint (Click to expand) + +```json showLineNumbers +{ + "identifier": "sonarQubeIssue", + "title": "SonarQube Issue", + "icon": "sonarqube", + "schema": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "enum": ["CODE_SMELL", "BUG", "VULNERABILITY"] + }, + "severity": { + "type": "string", + "title": "Severity", + "enum": ["MAJOR", "INFO", "MINOR", "CRITICAL", "BLOCKER"], + "enumColors": { + "MAJOR": "orange", + "INFO": "green", + "CRITICAL": "red", + "BLOCKER": "red", + "MINOR": "yellow" + } + }, + "link": { + "type": "string", + "format": "url", + "icon": "Link", + "title": "Link" + }, + "status": { + "type": "string", + "title": "Status", + "enum": ["OPEN", "CLOSED", "RESOLVED", "REOPENED", "CONFIRMED"] + }, + "assignees": { + "title": "Assignees", + "type": "string", + "icon": "TwoUsers" + }, + "tags": { + "type": "array", + "title": "Tags" + }, + "createdAt": { + "type": "string", + "format": "date-time", + "title": "Created At" + } + } + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": { + "sonarQubeProject": { + "target": "sonarQubeProject", + "required": false, + "title": "SonarQube Project", + "many": false + } + } +} +``` + +
+ +
+Integration configuration (Click to expand) + +:::tip filter issues +The integration provides an option to filter the data that is retrieved from the SonarQube API using the following attributes: + +1. `assigned`: To retrieve assigned or unassigned issues. Accepts values: `yes`, `no`, `true`, `false` +2. `assignees`: A list of assignee logins +3. `cleanCodeAttributeCategories`: List of clean code attribute categories. Accepts values: `ADAPTABLE`, `CONSISTENT`, `INTENTIONAL`, `RESPONSIBLE` +4. `createdBefore`: To retrieve issues created before the given date +5. `createdAfter`: To retrieve issues created after the given date +6. `impactSeverities`: List of impact severities. Accepts values: `HIGH`, `LOW`, `MEDIUM` +7. `impactSoftwareQualities`: List of impact software qualities. Accepts values: `MAINTAINABILITY`, `RELIABILITY`, `SECURITY` +8. `statuses`: List of statuses. Accepts values: `OPEN`, `CONFIRMED`, `FALSE_POSITIVE`, `ACCEPTED`, `FIXED` +9. `languages`: List of languages +10. `resolved`: To retrieve resolved or unresolved issues. Accepts values: `yes`, `no`, `true`, `false` +11. `scopes`: List of scopes. Accepts values: `MAIN`, `TESTS` +12. `tags`: List of tags + +These attributes can be enabled using the path: `selector.apiFilters`. By default, the integration fetches unresolved SonarQube issues. It is also possible to configure the integration to fetch issues from a SonarQube project using the path: `selector.projectApiFilters.filter` while specifying any of [the above project attributes](#project) +::: + +:::note Supported Sonar environment +Please note that the API filters are supported on on-premise Sonar environments (SonarQube) only, and will not work on SonarCloud. 
+::: + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: issues + selector: + query: "true" + apiFilters: + resolved: 'false' + projectApiFilters: + filter: + qualifier: TRK + port: + entity: + mappings: + blueprint: '"sonarQubeIssue"' + identifier: .key + title: .message + properties: + type: .type + severity: .severity + link: .__link + status: .status + assignees: .assignee + tags: .tags + createdAt: .creationDate + relations: + sonarQubeProject: .project +``` + +
+ +## Saas Analysis + +
+Saas analysis blueprint (Click to expand) + +```json showLineNumbers +{ + "identifier": "sonarQubeAnalysis", + "title": "SonarQube Analysis", + "icon": "sonarqube", + "schema": { + "properties": { + "branch": { + "type": "string", + "title": "Branch", + "icon": "GitVersion" + }, + "fixedIssues": { + "type": "number", + "title": "Fixed Issues" + }, + "newIssues": { + "type": "number", + "title": "New Issues" + }, + "coverage": { + "title": "Coverage", + "type": "number" + }, + "duplications": { + "type": "number", + "title": "Duplications" + }, + "createdAt": { + "type": "string", + "format": "date-time", + "title": "Created At" + } + } + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": { + "sonarQubeProject": { + "target": "sonarQubeProject", + "required": false, + "title": "SonarQube Project", + "many": false + } + } +} +``` + +
+ +
+Integration configuration (Click to expand) + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: saas_analysis + selector: + query: "true" + port: + entity: + mappings: + blueprint: '"sonarQubeAnalysis"' + identifier: .analysisId + title: .__commit.message // .analysisId + properties: + branch: .__branchName + fixedIssues: .measures.violations_fixed + newIssues: .measures.violations_added + coverage: .measures.coverage_change + duplications: .measures.duplicated_lines_density_change + createdAt: .__analysisDate + relations: + sonarQubeProject: .__project +``` + +
+ +## On-Premise Analysis + +
+On-premise analysis blueprint (Click to expand) + +```json showLineNumbers +{ + "identifier": "sonarQubeAnalysis", + "title": "SonarQube Analysis", + "icon": "sonarqube", + "schema": { + "properties": { + "branch": { + "type": "string", + "title": "Branch", + "icon": "GitVersion" + }, + "fixedIssues": { + "type": "number", + "title": "Fixed Issues" + }, + "newIssues": { + "type": "number", + "title": "New Issues" + }, + "coverage": { + "title": "Coverage", + "type": "number" + }, + "duplications": { + "type": "number", + "title": "Duplications" + }, + "createdAt": { + "type": "string", + "format": "date-time", + "title": "Created At" + } + } + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": { + "sonarQubeProject": { + "target": "sonarQubeProject", + "required": false, + "title": "SonarQube Project", + "many": false + } + } +} +``` + +
+ +
+Integration configuration (Click to expand) + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: onprem_analysis + selector: + query: 'true' + port: + entity: + mappings: + blueprint: '"sonarQubeAnalysis"' + identifier: .__project + "-" + .key + title: .title + properties: + branch: .branch + newIssues: .__measures[]? | select(.metric == "new_violations") | .period.value + coverage: .__measures[]? | select(.metric == "new_coverage") | .period.value + duplications: .__measures[]? | select(.metric == "new_duplicated_lines_density") | .period.value + createdAt: .analysisDate + relations: + sonarQubeProject: .__project +``` + +
+ +## Portfolio + +
+Portfolio blueprint (Click to expand) + +```json showLineNumbers +{ + "identifier": "sonarQubePortfolio", + "title": "SonarQube Portfolio", + "icon": "sonarqube", + "schema": { + "properties": { + "description": { + "type": "string", + "title": "Description" + }, + "visibility": { + "type": "string", + "title": "Visibility", + "enum": [ + "PUBLIC", + "PRIVATE" + ], + "enumColors": { + "PUBLIC": "green", + "PRIVATE": "lightGray" + } + }, + "selectionMode": { + "type": "string", + "title": "Selection Mode", + "enum": [ + "AUTO", + "MANUAL", + "NONE" + ], + "enumColors": { + "AUTO": "blue", + "MANUAL": "green", + "NONE": "lightGray" + } + }, + "disabled": { + "type": "boolean", + "title": "Disabled" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "aggregationProperties": {}, + "relations": { + "referencedBy": { + "title": "Referenced By", + "target": "sonarQubePortfolio", + "required": false, + "many": true + }, + "subPortfolios": { + "title": "Sub Portfolios", + "target": "sonarQubePortfolio", + "required": false, + "many": true + } + } +} +``` + +
+ +
+Integration configuration (Click to expand) + +```yaml showLineNumbers +deleteDependentEntities: true +createMissingRelatedEntities: true +resources: + - kind: portfolios + selector: + query: 'true' + port: + entity: + mappings: + identifier: .key + title: .name + blueprint: '"sonarQubePortfolio"' + properties: + description: .description + visibility: if .visibility then .visibility | ascii_upcase else null end + selectionMode: if .selectionMode then .selectionMode | ascii_upcase else null end + disabled: .disabled + relations: + subPortfolios: .subViews | map(select((.qualifier | IN("VW", "SVW"))) | .key) + referencedBy: .referencedBy | map(select((.qualifier | IN("VW", "SVW"))) | .key) +``` + +
\ No newline at end of file diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/sonarqube/sonarqube.md b/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/sonarqube/sonarqube.md index 188c0a824..321806e89 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/sonarqube/sonarqube.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/sonarqube/sonarqube.md @@ -11,24 +11,40 @@ import SonarcloudAnalysisBlueprint from "/docs/build-your-software-catalog/custo import SonarcloudAnalysisConfiguration from "/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/sonarqube/\_example_sonarcloud_analysis_configuration.mdx"; import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # SonarQube -Port's SonarQube integration (powered by [Ocean](https://ocean.getport.io)) allows you to import `projects`, `issues` and `analyses` from your SonarQube account into -Port, according to your mapping and definitions. +Port's SonarQube integration allows you to model SonarQube resources in your software catalog and ingest data into them. -## Common use cases -- Map `projects`, `issues`, `analyses` and `portfolios` in your SonarQube organization environment. -- Watch for object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in - Port. -- Create/delete SonarQube objects using self-service actions. +## Overview -## Prerequisites +This integration allows you to: - +- Map and organize your desired SonarQube resources and their metadata in Port (see supported resources below). 
+- Watch for SonarQube object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. + + +### Supported Resources + +The resources that can be ingested from SonarQube into Port are listed below. It is possible to reference any field that appears in the API responses linked below in the mapping configuration. + + +- `Project` - represents a SonarQube project. Retrieves data + from [`components`](https://next.sonarqube.com/sonarqube/web_api/api/components), [`measures`](https://next.sonarqube.com/sonarqube/web_api/api/measures), + and [`branches`](https://next.sonarqube.com/sonarqube/web_api/api/project_branches). +- [`Issue`](https://next.sonarqube.com/sonarqube/web_api/api/issues) - represents a SonarQube issue +- `Saas Analysis` - represents analysis and latest activity in your SonarCloud environment. +- `On-premise Analysis` - since SonarQube doesn't offer a straightforward API + for fetching analysis and latest activity in on-premise installations, + Port's integration provides an alternative solution for on-premise installation. +By utilizing the [pull requests](https://next.sonarqube.com/sonarqube/web_api/api/project_pull_requests) and [measures](https://next.sonarqube.com/sonarqube/web_api/api/measures) APIs, + you can now visualize the results of scan analyses for each pull request. -## Installation + +## Setup Choose one of the following installation methods: @@ -40,54 +56,25 @@ Choose one of the following installation methods: - + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. 
-Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Example | Required | -| ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | ------- | -| `port.clientId` | Your port client id ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | | ✅ | -| `port.clientSecret` | Your port client secret ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `integration.secrets.sonarApiToken` | The [SonarQube API token](https://docs.sonarsource.com/sonarqube/9.8/user-guide/user-account/generating-and-using-tokens/#generating-a-token) | | ✅ | -| `integration.config.sonarOrganizationId` | The SonarQube [organization Key](https://docs.sonarsource.com/sonarcloud/appendices/project-information/#project-and-organization-keys) (Not required when using on-prem sonarqube instance) | myOrganization | ✅ | -| `integration.config.sonarIsOnPremise` | A boolean value indicating whether the SonarQube instance is on-premise. The default value is `false` | false | ✅ | -| `integration.config.appHost` | A URL bounded to the integration container that can be accessed by sonarqube. When used the integration will create webhooks on top of sonarqube to listen to any live changes in the data | https://my-ocean-integration.com | ✅ | -| `integration.config.sonarUrl` | Required if using **On-Prem**, Your SonarQube instance URL | https://my-sonar-instance.com | ❌ | +

Prerequisites

- + -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-sonarqube-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set scheduledResyncInterval=120 \ - --set integration.identifier="my-sonarqube-integration" \ - --set integration.type="sonarqube" \ - --set integration.eventListener.type="POLLING" \ - --set integration.config.sonarIsOnPremise="" \ - --set integration.secrets.sonarApiToken="" \ - --set integration.config.sonarOrganizationId="" -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-sonarqube-integration` in your git repository with the content: @@ -173,21 +160,38 @@ kubectl apply -f my-ocean-sonarqube-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Example | Required | +|------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|----------| +| `port.clientId` | Your port client id ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | | ✅ | +| `port.clientSecret` | Your port client secret ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `integration.secrets.sonarApiToken` | The [SonarQube API token](https://docs.sonarsource.com/sonarqube/9.8/user-guide/user-account/generating-and-using-tokens/#generating-a-token) | | ✅ | +| `integration.config.sonarOrganizationId` | The SonarQube [organization Key](https://docs.sonarsource.com/sonarcloud/appendices/project-information/#project-and-organization-keys) (Not required when using on-prem sonarqube instance) | myOrganization | ✅ | +| `integration.config.sonarIsOnPremise` | A boolean value indicating whether the SonarQube instance is on-premise. The default value is `false` | false | ✅ | +| `integration.config.appHost` | A URL bounded to the integration container that can be accessed by sonarqube. When used the integration will create webhooks on top of sonarqube to listen to any live changes in the data | https://my-ocean-integration.com | ✅ | +| `integration.config.sonarUrl` | Required if using **On-Prem**, Your SonarQube instance URL | https://my-sonar-instance.com | ❌ | + + + +
+
- + - - -This workflow will run the SonarQube integration once and then exit, this is useful for **scheduled** ingestion of data. +This workflow/pipeline will run the SonarQube integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option +:::warning Real-time updates +If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -226,15 +230,10 @@ jobs: -This pipeline will run the SonarQube integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -288,17 +287,12 @@ pipeline { ``` - -This pipeline will run the SonarQube integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Azure Devops agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. 
-::: + Make sure to configure the following variables using [Azure Devops variable groups](https://learn.microsoft.com/en-us/azure/devops/pipelines/library/variable-groups?view=azure-devops&tabs=yaml). Add them into in a variable group named `port-ocean-credentials`: @@ -348,11 +342,7 @@ steps: -This pipeline will run the SonarQube integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -411,616 +401,22 @@ ingest_data: -## Ingesting SonarQube objects - -The SonarQube integration uses a YAML configuration to describe the process of loading data into the developer portal. - -Here is an example snippet from the config which demonstrates the process for getting `project` data from SonarQube: - -```yaml showLineNumbers -resources: - - kind: projects - selector: - query: "true" - port: - entity: - mappings: - blueprint: '"sonarQubeProject"' - identifier: .key - title: .name - properties: - organization: .organization - link: .__link - lastAnalysisStatus: .__branch.status.qualityGateStatus - lastAnalysisDate: .__branch.analysisDate - numberOfBugs: .__measures[]? | select(.metric == "bugs") | .value - numberOfCodeSmells: .__measures[]? | select(.metric == "code_smells") | .value - numberOfVulnerabilities: .__measures[]? | select(.metric == "vulnerabilities") | .value - numberOfHotSpots: .__measures[]? | select(.metric == "security_hotspots") | .value - numberOfDuplications: .__measures[]? | select(.metric == "duplicated_files") | .value - coverage: .__measures[]? 
| select(.metric == "coverage") | .value - mainBranch: .__branch.name - tags: .tags -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, -concatenate, transform and perform other operations on existing fields and values from SonarQube's API events. +## Configuration - +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. -```yaml showLineNumbers -resources: - - kind: projects - selector: - query: "true" - port: - entity: - mappings: - blueprint: '"sonarQubeProject"' - identifier: .key - title: .name - properties: - organization: .organization - link: .__link - lastAnalysisStatus: .__branch.status.qualityGateStatus - lastAnalysisDate: .__branch.analysisDate - numberOfBugs: .__measures[]? | select(.metric == "bugs") | .value - numberOfCodeSmells: .__measures[]? | select(.metric == "code_smells") | .value - numberOfVulnerabilities: .__measures[]? | select(.metric == "vulnerabilities") | .value - numberOfHotSpots: .__measures[]? | select(.metric == "security_hotspots") | .value - numberOfDuplications: .__measures[]? | select(.metric == "duplicated_files") | .value - coverage: .__measures[]? | select(.metric == "coverage") | .value - mainBranch: .__branch.name - tags: .tags - # highlight-end - - kind: projects # In this instance project is mapped again with a different filter - selector: - query: '.name == "MyProjectName"' - port: - entity: - mappings: ... -``` +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. - ## Examples -Examples of blueprints and the relevant integration configurations: - -### Project - -
-Projects blueprint (Click to expand) - -```json showLineNumbers -{ - "identifier": "sonarQubeProject", - "title": "SonarQube Project", - "icon": "sonarqube", - "schema": { - "properties": { - "organization": { - "type": "string", - "title": "Organization", - "icon": "TwoUsers" - }, - "link": { - "type": "string", - "format": "url", - "title": "Link", - "icon": "Link" - }, - "lastAnalysisDate": { - "type": "string", - "format": "date-time", - "icon": "Clock", - "title": "Last Analysis Date" - }, - "numberOfBugs": { - "type": "number", - "title": "Number Of Bugs" - }, - "numberOfCodeSmells": { - "type": "number", - "title": "Number Of CodeSmells" - }, - "numberOfVulnerabilities": { - "type": "number", - "title": "Number Of Vulnerabilities" - }, - "numberOfHotSpots": { - "type": "number", - "title": "Number Of HotSpots" - }, - "numberOfDuplications": { - "type": "number", - "title": "Number Of Duplications" - }, - "coverage": { - "type": "number", - "title": "Coverage" - }, - "mainBranch": { - "type": "string", - "icon": "Git", - "title": "Main Branch" - }, - "tags": { - "type": "array", - "title": "Tags" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": {} -} -``` - -
- -
-Integration configuration (Click to expand) - -:::tip filter projects -The integration provides an option to filter the data that is retrieved from the SonarQube API using the following attributes: - -1. `query`: Limits the search to component names that contain the supplied string -2. `alertStatus`: To filter a project's quality gate status. Accepts a list of values such as `OK`, `ERROR` and `WARN` -3. `languages`: To filter projects using a list of languages or a single language -4. `tags`: To filter a list of tags or a single tag -5. `qualifier`: To filter on a component qualifier. Accepts values such as `TRK` (for projects only) and `APP` (for applications only) - -These attributes can be enabled using the path: `selector.apiFilters.filter`. By default, the integration fetches only SonarQube projects using the `qualifier` attribute. -::: - -:::tip Define your own metrics -Besides filtering the API data, the integration provides a mechanism to allow users to define their own list of metrics used in SonarQube to evaluate the code. This list can be defined in the `selector.metrics` property. A complete list of valid SonarQube metrics can be in the [SonarQube documentation](https://docs.sonarsource.com/sonarqube/latest/user-guide/code-metrics/metrics-definition/) -::: - -:::note Supported Sonar environment -Please note that the API filters are supported on on-premise Sonar environments (SonarQube) only, and will not work on SonarCloud. 
-::: - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: projects - selector: - query: "true" - apiFilters: - filter: - qualifier: TRK - metrics: - - code_smells - - coverage - - bugs - - vulnerabilities - - duplicated_files - - security_hotspots - - new_violations - - new_coverage - - new_duplicated_lines_density - port: - entity: - mappings: - blueprint: '"sonarQubeProject"' - identifier: .key - title: .name - properties: - organization: .organization - link: .__link - lastAnalysisStatus: .__branch.status.qualityGateStatus - lastAnalysisDate: .__branch.analysisDate - numberOfBugs: .__measures[]? | select(.metric == "bugs") | .value - numberOfCodeSmells: .__measures[]? | select(.metric == "code_smells") | .value - numberOfVulnerabilities: .__measures[]? | select(.metric == "vulnerabilities") | .value - numberOfHotSpots: .__measures[]? | select(.metric == "security_hotspots") | .value - numberOfDuplications: .__measures[]? | select(.metric == "duplicated_files") | .value - coverage: .__measures[]? | select(.metric == "coverage") | .value - mainBranch: .__branch.name - tags: .tags -``` -
- -### Issue - -
-Issue blueprint (Click to expand) - -```json showLineNumbers -{ - "identifier": "sonarQubeIssue", - "title": "SonarQube Issue", - "icon": "sonarqube", - "schema": { - "properties": { - "type": { - "type": "string", - "title": "Type", - "enum": ["CODE_SMELL", "BUG", "VULNERABILITY"] - }, - "severity": { - "type": "string", - "title": "Severity", - "enum": ["MAJOR", "INFO", "MINOR", "CRITICAL", "BLOCKER"], - "enumColors": { - "MAJOR": "orange", - "INFO": "green", - "CRITICAL": "red", - "BLOCKER": "red", - "MINOR": "yellow" - } - }, - "link": { - "type": "string", - "format": "url", - "icon": "Link", - "title": "Link" - }, - "status": { - "type": "string", - "title": "Status", - "enum": ["OPEN", "CLOSED", "RESOLVED", "REOPENED", "CONFIRMED"] - }, - "assignees": { - "title": "Assignees", - "type": "string", - "icon": "TwoUsers" - }, - "tags": { - "type": "array", - "title": "Tags" - }, - "createdAt": { - "type": "string", - "format": "date-time", - "title": "Created At" - } - } - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": { - "sonarQubeProject": { - "target": "sonarQubeProject", - "required": false, - "title": "SonarQube Project", - "many": false - } - } -} -``` - -
+To view and test the integration's mapping against examples of the third-party API responses, +use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). +Find the integration in the list of data sources and click on it to open the playground. -
-Integration configuration (Click to expand) - -:::tip filter issues -The integration provides an option to filter the data that is retrieved from the SonarQube API using the following attributes: - -1. `assigned`: To retrieve assigned or unassigned issues. Accepts values: `yes`, `no`, `true`, `false` -2. `assignees`: A list of assignee logins -3. `cleanCodeAttributeCategories`: List of clean code attribute categories. Accepts values: `ADAPTABLE`, `CONSISTENT`, `INTENTIONAL`, `RESPONSIBLE` -4. `createdBefore`: To retrieve issues created before the given date -5. `createdAfter`: To retrieve issues created after the given date -6. `impactSeverities`: List of impact severities. Accepts values: `HIGH`, `LOW`, `MEDIUM` -7. `impactSoftwareQualities`: List of impact software qualities. Accepts values: `MAINTAINABILITY`, `RELIABILITY`, `SECURITY` -8. `statuses`: List of statuses. Accepts values: `OPEN`, `CONFIRMED`, `FALSE_POSITIVE`, `ACCEPTED`, `FIXED` -9. `languages`: List of languages -10. `resolved`: To retrieve resolved or unresolved issues. Accepts values: `yes`, `no`, `true`, `false` -11. `scopes`: List of scopes. Accepts values: `MAIN`, `TESTS` -12. `tags`: List of tags - -These attributes can be enabled using the path: `selector.apiFilters`. By default, the integration fetches unresolved SonarQube issues. It is also possible to configure the integration to fetch issues from a SonarQube project using the path: `selector.projectApiFilters.filter` while specifying any of [the above project attributes](#project) -::: +Examples of blueprints and the relevant integration configurations can be found on the sonarqube [examples page](examples.md) -:::note Supported Sonar environment -Please note that the API filters are supported on on-premise Sonar environments (SonarQube) only, and will not work on SonarCloud. 
-::: - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: issues - selector: - query: "true" - apiFilters: - resolved: 'false' - projectApiFilters: - filter: - qualifier: TRK - port: - entity: - mappings: - blueprint: '"sonarQubeIssue"' - identifier: .key - title: .message - properties: - type: .type - severity: .severity - link: .__link - status: .status - assignees: .assignee - tags: .tags - createdAt: .creationDate - relations: - sonarQubeProject: .project -``` - -
-### Saas Analysis - -
-Saas analysis blueprint (Click to expand) - -```json showLineNumbers -{ - "identifier": "sonarQubeAnalysis", - "title": "SonarQube Analysis", - "icon": "sonarqube", - "schema": { - "properties": { - "branch": { - "type": "string", - "title": "Branch", - "icon": "GitVersion" - }, - "fixedIssues": { - "type": "number", - "title": "Fixed Issues" - }, - "newIssues": { - "type": "number", - "title": "New Issues" - }, - "coverage": { - "title": "Coverage", - "type": "number" - }, - "duplications": { - "type": "number", - "title": "Duplications" - }, - "createdAt": { - "type": "string", - "format": "date-time", - "title": "Created At" - } - } - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": { - "sonarQubeProject": { - "target": "sonarQubeProject", - "required": false, - "title": "SonarQube Project", - "many": false - } - } -} -``` - -
- -
-Integration configuration (Click to expand) - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: saas_analysis - selector: - query: "true" - port: - entity: - mappings: - blueprint: '"sonarQubeAnalysis"' - identifier: .analysisId - title: .__commit.message // .analysisId - properties: - branch: .__branchName - fixedIssues: .measures.violations_fixed - newIssues: .measures.violations_added - coverage: .measures.coverage_change - duplications: .measures.duplicated_lines_density_change - createdAt: .__analysisDate - relations: - sonarQubeProject: .__project -``` - -
- -### On-Premise Analysis - -
-On-premise analysis blueprint (Click to expand) - -```json showLineNumbers -{ - "identifier": "sonarQubeAnalysis", - "title": "SonarQube Analysis", - "icon": "sonarqube", - "schema": { - "properties": { - "branch": { - "type": "string", - "title": "Branch", - "icon": "GitVersion" - }, - "fixedIssues": { - "type": "number", - "title": "Fixed Issues" - }, - "newIssues": { - "type": "number", - "title": "New Issues" - }, - "coverage": { - "title": "Coverage", - "type": "number" - }, - "duplications": { - "type": "number", - "title": "Duplications" - }, - "createdAt": { - "type": "string", - "format": "date-time", - "title": "Created At" - } - } - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": { - "sonarQubeProject": { - "target": "sonarQubeProject", - "required": false, - "title": "SonarQube Project", - "many": false - } - } -} -``` - -
- -
-Integration configuration (Click to expand) - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: onprem_analysis - selector: - query: 'true' - port: - entity: - mappings: - blueprint: '"sonarQubeAnalysis"' - identifier: .__project + "-" + .key - title: .title - properties: - branch: .branch - newIssues: .__measures[]? | select(.metric == "new_violations") | .period.value - coverage: .__measures[]? | select(.metric == "new_coverage") | .period.value - duplications: .__measures[]? | select(.metric == "new_duplicated_lines_density") | .period.value - createdAt: .analysisDate - relations: - sonarQubeProject: .__project -``` - -
- -### Portfolio - -
-Portfolio blueprint (Click to expand) - -```json showLineNumbers -{ - "identifier": "sonarQubePortfolio", - "title": "SonarQube Portfolio", - "icon": "sonarqube", - "schema": { - "properties": { - "description": { - "type": "string", - "title": "Description" - }, - "visibility": { - "type": "string", - "title": "Visibility", - "enum": [ - "PUBLIC", - "PRIVATE" - ], - "enumColors": { - "PUBLIC": "green", - "PRIVATE": "lightGray" - } - }, - "selectionMode": { - "type": "string", - "title": "Selection Mode", - "enum": [ - "AUTO", - "MANUAL", - "NONE" - ], - "enumColors": { - "AUTO": "blue", - "MANUAL": "green", - "NONE": "lightGray" - } - }, - "disabled": { - "type": "boolean", - "title": "Disabled" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": { - "referencedBy": { - "title": "Referenced By", - "target": "sonarQubePortfolio", - "required": false, - "many": true - }, - "subPortfolios": { - "title": "Sub Portfolios", - "target": "sonarQubePortfolio", - "required": false, - "many": true - } - } -} -``` - -
- -
-Integration configuration (Click to expand) - -```yaml showLineNumbers -deleteDependentEntities: true -createMissingRelatedEntities: true -resources: - - kind: portfolios - selector: - query: 'true' - port: - entity: - mappings: - identifier: .key - title: .name - blueprint: '"sonarQubePortfolio"' - properties: - description: .description - visibility: if .visibility then .visibility | ascii_upcase else null end - selectionMode: if .selectionMode then .selectionMode | ascii_upcase else null end - disabled: .disabled - relations: - subPortfolios: .subViews | map(select((.qualifier | IN("VW", "SVW"))) | .key) - referencedBy: .referencedBy | map(select((.qualifier | IN("VW", "SVW"))) | .key) -``` - -
## Let's Test It diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/wiz.md b/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/wiz.md index c8a48db7a..fc620640e 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/wiz.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/code-quality-security/wiz.md @@ -9,15 +9,31 @@ import WizConfiguration from "/docs/build-your-software-catalog/custom-integrati import FindCredentials from "/docs/build-your-software-catalog/custom-integration/api/_template_docs/_find_credentials.mdx"; import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Wiz -Our Wiz integration allows you to import `projects`, `issues`, `controls`, and `serviceTickets` from your Wiz account into Port, according to your mapping and definitions. +Port's Wiz integration allows you to model Wiz resources in your software catalog and ingest data into them. + + +## Overview + +This integration allows you to: + +- Map and organize your desired Wiz resources and their metadata in Port (see supported resources below). +- Watch for Wiz object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. -## Common use cases -- Map `projects`, `issues`, `controls`, and `serviceTickets` in your Wiz organization environment. -- Watch for object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. +### Supported Resources + +The resources that can be ingested from Wiz into Port are listed below. 
It is possible to reference any field that appears in the API responses linked below in the mapping configuration. + +- [`Project`](https://integrate.wiz.io/reference/pull-projects) +- [`Issue`](https://integrate.wiz.io/reference/issues-tutorial) +- [`Control`](https://integrate.wiz.io/docs/welcome#controls) +- [`Service ticket`](https://integrate.wiz.io/reference/issues-query#:~:text=string-,serviceTickets,-array) + ## Prerequisites @@ -85,7 +101,7 @@ You must create a service account in Wiz to generate the Client ID and Client Se

-## Installation +## Setup Choose one of the following installation methods: @@ -97,61 +113,28 @@ Choose one of the following installation methods:
- + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Required | -| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------ | ------- | -| `port.clientId` | Your port client id ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | -| `port.clientSecret` | Your port client secret ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.secrets.wizClientId` | The Wiz Client ID | ✅ | -| `integration.secrets.wizClientSecret`| The Wiz Client Secret | ✅ | -| `integration.config.wizApiUrl` | The Wiz API URL. | ✅ | -| `integration.config.wizTokenUrl` | The Wiz Token Authentication URL | ✅ | -| `integration.config.appHost` | The host of the Port Ocean app. 
Used to set up the integration endpoint as the target for Webhooks created in Wiz | ✅ | -| `integration.secret.wizWebhookVerificationToken` | This is a password you create, that is used to verify webhook events to Port | ❌ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | -| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | +

Prerequisites

+ -
+ +For details about the available parameters for the installation, see the table below. -To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-wiz-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set scheduledResyncInterval=120 \ - --set integration.identifier="my-wiz-integration" \ - --set integration.type="wiz" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.wizClientId="WIZ_CLIENT_ID" \ - --set integration.secrets.wizClientSecret="WIZ_CLIENT_SECRET" \ - --set integration.secrets.wizApiUrl="WIZ_API_URL" \ - --set integration.config.wizTokenUrl="WIZ_TOKEN_URL" -``` + + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-wiz-integration` in your git repository with the content: @@ -238,18 +221,42 @@ kubectl apply -f my-ocean-wiz-integration.yaml +This table summarizes the available parameters for the installation. +Note the parameters specific to this integration; they are last in the table. 
+ +| Parameter | Description | Required | +|--------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your port client id ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | +| `port.clientSecret` | Your port client secret ([Get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for Webhooks created in Wiz | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | +| `integration.secrets.wizClientId` | The Wiz Client ID | ✅ | +| `integration.secrets.wizClientSecret` | The Wiz Client Secret | ✅ | +| `integration.config.wizApiUrl` | The Wiz API URL. | ✅ | +| `integration.config.wizTokenUrl` | The Wiz Token Authentication URL | ✅ | +| `integration.secret.wizWebhookVerificationToken` | This is a password you create, that is used to verify webhook events to Port | ❌ | + +
+
- - - -This workflow will run the Wiz integration once and then exit, this is useful for **scheduled** ingestion of data. + -:::warning -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option +This workflow/pipeline will run the Wiz integration once and then exit; this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates +If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -287,16 +294,11 @@ jobs: ``` - -This pipeline will run the Wiz integration once and then exit, this is useful for **scheduled** ingestion of data. + :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -:::
@@ -353,10 +355,9 @@ pipeline { ```
- - + @@ -402,11 +403,6 @@ steps: ``` -This pipeline will run the Wiz integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -456,7 +452,7 @@ ingest_data: ``` -
+
@@ -466,121 +462,12 @@ ingest_data:
-## Ingesting Wiz objects - -The Wiz integration uses a YAML configuration to describe the process of loading data into the developer portal. - -Here is an example snippet from the config which demonstrates the process for getting `project` data from Wiz: - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: project - selector: - query: 'true' - port: - entity: - mappings: - blueprint: '"wizProject"' - identifier: .id - title: .name - properties: - archived: .archived - businessUnit: .businessUnit - description: .description -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from Wiz's API events. - -### Configuration structure +## Configuration -The integration configuration determines which resources will be queried from Wiz, and which entities and properties will be created in Port. +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party API into Port. -:::tip Supported resources -The following resources can be used to map data from Wiz, it is possible to reference any field that appears in the API responses linked below for the mapping configuration. - -- [`Project`](https://integrate.wiz.io/reference/pull-projects) -- [`Issue`](https://integrate.wiz.io/reference/issues-tutorial) -- [`Control`](https://integrate.wiz.io/docs/welcome#controls) -- [`Service ticket`](https://integrate.wiz.io/reference/issues-query#:~:text=string-,serviceTickets,-array) -::: +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. 
-- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: project - selector: - ... - ``` - -- The `kind` key is a specifier for a Wiz object: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: project - selector: - ... - ``` - -- The `selector` and the `query` keys allow you to filter which objects of the specified `kind` will be ingested into your software catalog: - - ```yaml showLineNumbers - resources: - - kind: project - # highlight-start - selector: - query: "true" # JQ boolean expression. If evaluated to false - this object will be skipped. - # highlight-end - port: - ``` - -- The `port`, `entity` and the `mappings` keys are used to map the Wiz object fields to Port entities. To create multiple mappings of the same kind, you can add another item in the `resources` array; - - ```yaml showLineNumbers - resources: - - kind: project - selector: - query: "true" - port: - # highlight-start - entity: - mappings: # Mappings between one Wiz object to a Port entity. Each value is a JQ query. - identifier: .id - title: .attributes.name - blueprint: '"wizProject"' - identifier: .id - title: .name - properties: - archived: .archived - businessUnit: .businessUnit - description: .description - # highlight-end - - kind: project # In this instance project is mapped again with a different filter - selector: - query: '.name == "MyProjectName"' - port: - entity: - mappings: ... - ``` - - :::tip Blueprint key - Note the value of the `blueprint` key - if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) - ::: - -### Ingest data into Port - -To ingest Wiz objects using the [integration configuration](#configuration-structure), you can follow the steps below: - -1. Go to the DevPortal Builder page. -2. 
Select a blueprint you want to ingest using Wiz. -3. Choose the **Ingest Data** option from the menu. -4. Select Wiz under the Code quality & security providers category. -5. Modify the [configuration](#configuration-structure) according to your needs. -6. Click `Resync`. ## Examples @@ -932,7 +819,6 @@ resources:
- ### Service Ticket
@@ -984,50 +870,6 @@ resources:
-## Alternative installation via webhook -While the Ocean integration described above is the recommended installation method, you may prefer to use a webhook to ingest data from Wiz. If so, use the following instructions: - -
- -Webhook installation (click to expand) - -In this example you are going to create a webhook integration between [Wiz](https://wiz.io/) and Port, which will ingest Wiz issue entities into Port. - -

Port configuration

- -Create the following blueprint definition: - -
-Wiz issue blueprint - - - -
- -Create the following webhook configuration [using Port's UI](/build-your-software-catalog/custom-integration/webhook/?operation=ui#configuring-webhook-endpoints) - -
-Wiz issue webhook configuration - -1. **Basic details** tab - fill the following details: - 1. Title : `Wiz Mapper`; - 2. Identifier : `wiz_mapper`; - 3. Description : `A webhook configuration to map Wiz issues to Port`; - 4. Icon : `Box`; -2. **Integration configuration** tab - fill the following JQ mapping: - - - -
- -

Create a webhook in Wiz

- -1. Send an email to win@wiz.io requesting for access to the developer documentation or reach out to your Wiz account manager. -2. Follow this [guide](https://integrate.wiz.io/reference/webhook-tutorial#create-a-custom-webhook) in the documentation to create a webhook. - -Done! Any issue created in Wiz will trigger a webhook event to the webhook URL provided by Port. Port will parse the events according to the mapping and update the catalog entities accordingly. -
- ## Let's Test It This section includes a sample response data from Wiz. In addition, it includes the entity created from the resync event based on the Ocean configuration provided in the previous section. @@ -1309,3 +1151,51 @@ The combination of the sample payload and the Ocean configuration generates the ``` + +## Alternative installation via webhook +While the Ocean integration described above is the recommended installation method, you may prefer to use a webhook to ingest data from Wiz. If so, use the following instructions: + +**Note** that when using the webhook installation method, data will be ingested into Port only when the webhook is triggered. + + +
+ +Webhook installation (click to expand) + +In this example you are going to create a webhook integration between [Wiz](https://wiz.io/) and Port, which will ingest Wiz issue entities into Port. + +

Port configuration

+ +Create the following blueprint definition: + +
+Wiz issue blueprint + + + +
+ +Create the following webhook configuration [using Port's UI](/build-your-software-catalog/custom-integration/webhook/?operation=ui#configuring-webhook-endpoints) + +
+Wiz issue webhook configuration + +1. **Basic details** tab - fill the following details: + 1. Title : `Wiz Mapper`; + 2. Identifier : `wiz_mapper`; + 3. Description : `A webhook configuration to map Wiz issues to Port`; + 4. Icon : `Box`; +2. **Integration configuration** tab - fill the following JQ mapping: + + + +
+ +

Create a webhook in Wiz

+ +1. Send an email to win@wiz.io requesting access to the developer documentation or reach out to your Wiz account manager. +2. Follow this [guide](https://integrate.wiz.io/reference/webhook-tutorial#create-a-custom-webhook) in the documentation to create a webhook. + +Done! Any issue created in Wiz will trigger a webhook event to the webhook URL provided by Port. Port will parse the events according to the mapping and update the catalog entities accordingly. +
+
+ diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/feature-management/launchdarkly/launchdarkly.md b/docs/build-your-software-catalog/sync-data-to-catalog/feature-management/launchdarkly/launchdarkly.md index c6a78ca52..f378e5481 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/feature-management/launchdarkly/launchdarkly.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/feature-management/launchdarkly/launchdarkly.md @@ -9,6 +9,10 @@ import TabItem from "@theme/TabItem" import DockerParameters from "./\_launchdarkly_one_time_docker_parameters.mdx" import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" +import Prerequisites from "../../templates/\_ocean_helm_prerequisites_block.mdx" + + # LaunchDarkly @@ -45,61 +49,27 @@ Choose one of the following installation methods:
- - -

Prerequisites

- -To install the integration, you need a Kubernetes cluster that the integration's container chart will be deployed to. + -Please make sure that you have [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) and [`helm`](https://helm.sh/) installed on your machine, and that your `kubectl` CLI is connected to the Kubernetes cluster where you plan to install the integration. +Using this installation option means that the integration will be able to update Port in real time using webhooks. +

Prerequisites

-Using this installation option means that the integration will be able to update Port in real time using webhooks. + -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------- | -| `port.clientId` | Your Port client id | ✅ | -| `port.clientSecret` | Your Port client secret | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.config.launchdarklyHost` | Your LaunchDarkly host. For example https://app.launchdarkly.com for the default endpoint | ✅ | -| `integration.config.launchdarklyToken` | The LaunchDarkly API token | ✅ | -| `integration.config.appHost` | Your application's host url | ✅ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | -| `sendRawDataExamples` | Default, true, Enable sending raw data examples from the third part API to port for testing and managing the integration mapping | ❌ | +For details about the available parameters for the installation, see the table below. -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install launchdarkly port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set integration.identifier="my-launchdarkly-integration" \ - --set integration.type="launchdarkly" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.launchdarklyHost="string" \ - --set integration.secrets.launchdarklyToken="string" \ -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-launchdarkly-integration` in your git repository with the content: @@ -182,6 +152,23 @@ kubectl apply -f my-ocean-launchdarkly-integration.yaml +This table summarizes the available parameters for the installation. + +| Parameter | Description | Required | +|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your Port client id | ✅ | +| `port.clientSecret` | Your Port client secret | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `integration.config.launchdarklyHost` | Your LaunchDarkly host. 
For example https://app.launchdarkly.com for the default endpoint | ✅ | +| `integration.config.launchdarklyToken` | The LaunchDarkly API token, docs can be found [here](https://docs.launchdarkly.com/home/account/api-create) | ✅ | +| `integration.config.appHost` | Your application's host url | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | +| `sendRawDataExamples` | Default, true, Enable sending raw data examples from the third part API to port for testing and managing the integration mapping | ❌ | +

Event listener

The integration uses polling to pull the configuration from Port every minute and check it for changes. If there is a change, a resync will occur. @@ -191,14 +178,15 @@ The integration uses polling to pull the configuration from Port every minute an - - -This workflow will run the LaunchDarkly integration once and then exit, this is useful for **scheduled** ingestion of data. +This workflow/pipeline will run the LaunchDarkly integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning -If you want the integration to update Port in real time you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option +:::warning Real-time updates +If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -234,14 +222,10 @@ jobs: -This pipeline will run the LaunchDarkly integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [LaunchDarkly Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -293,11 +277,6 @@ pipeline { -This workflow will run the LaunchDarkly integration once and then exit, this is useful for **scheduled** ingestion of data. 
- -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -346,7 +325,6 @@ ingest_data: ``` - @@ -706,6 +684,587 @@ resources: + +## Let's Test It + +This section includes sample response data from LaunchDarkly. In addition, it includes the entity created from the resync event based on the Ocean configuration provided in the previous section. + +### Payload + +Here is an example of the payload structure from LaunchDarkly: + +
+ Project response data + +```json showLineNumbers +{ + "_links": { + "environments": { + "href": "/api/v2/projects/fourth-project/environments", + "type": "application/json" + }, + "flagDefaults": { + "href": "/api/v2/projects/fourth-project/flag-defaults", + "type": "application/json" + }, + "self": { + "href": "/api/v2/projects/fourth-project", + "type": "application/json" + } + }, + "_id": "666b298cc671e81012b578c6", + "key": "fourth-project", + "includeInSnippetByDefault": false, + "defaultClientSideAvailability": { + "usingMobileKey": false, + "usingEnvironmentId": false + }, + "name": "Fourth Project", + "tags": [] +} +``` +
+ + +
+ Feature Flag response data + +```json showLineNumbers +{ + "_links": { + "parent": { + "href": "/api/v2/flags/fourth-project", + "type": "application/json" + }, + "self": { + "href": "/api/v2/flags/fourth-project/randomflag", + "type": "application/json" + } + }, + "_maintainer": { + "_id": "6669b0f34162860fefd6d724", + "_links": { + "self": { + "href": "/api/v2/members/6669b0f34162860fefd6d724", + "type": "application/json" + } + }, + "email": "example@gmail.com", + "firstName": "John", + "lastName": "Doe", + "role": "owner" + }, + "_version": 1, + "archived": false, + "clientSideAvailability": { + "usingEnvironmentId": false, + "usingMobileKey": false + }, + "creationDate": 1718299647527, + "customProperties": {}, + "defaults": { + "offVariation": 1, + "onVariation": 0 + }, + "deprecated": false, + "description": "", + "environments": { + "fourth-env": { + "_environmentName": "fourth-env", + "_site": { + "href": "/fourth-project/fourth-env/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718299647539, + "on": false, + "salt": "c713989066a446febf07a42d488221e8", + "sel": "6d7c3692dd9d4ffa8eee8e2d96b6fd2c", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + }, + "new-env": { + "_environmentName": "new env", + "_site": { + "href": "/fourth-project/new-env/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718299647539, + 
"on": false, + "salt": "caa436a38411406491f0da9230349bb3", + "sel": "8bcf1667ab2f4f628fc26ad31966f045", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + }, + "new-project": { + "_environmentName": "new_project", + "_site": { + "href": "/fourth-project/new-project/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718299647539, + "on": false, + "salt": "f79c8849d22d497d8a519fbb6263aeda", + "sel": "257f0acaf18f4252b40258f8aa93b966", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + }, + "production": { + "_environmentName": "Production", + "_site": { + "href": "/fourth-project/production/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718299647539, + "on": false, + "salt": "28c5efba5fd445d5896a8b9f7f8fbff6", + "sel": "28a317cdf3aa4d40b8a0b1c6f56be4c9", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + }, + "shadow": { + "_environmentName": "shadow", + "_site": { + "href": "/fourth-project/shadow/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718311480830, + "on": 
false, + "salt": "cb214aeac84f48d08ff136514c589b11", + "sel": "00b5f9ae56a547db9c4e5e619bdb39f3", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + }, + "some-random-env": { + "_environmentName": "some-random-env", + "_site": { + "href": "/fourth-project/some-random-env/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718300514123, + "on": false, + "salt": "0618861de85c48a5a77c360db7a8847b", + "sel": "5ae511fe5630469084453c2c4d45f719", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + }, + "staging": { + "_environmentName": "staging", + "_site": { + "href": "/fourth-project/staging/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718300902420, + "on": false, + "salt": "bc27ddc205984379a4863f5f1323bdb0", + "sel": "2762811a62734de79277544ff4362f8c", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + }, + "test": { + "_environmentName": "Test", + "_site": { + "href": "/fourth-project/test/features/randomflag", + "type": "text/html" + }, + "_summary": { + "prerequisites": 0, + "variations": { + "0": { + "contextTargets": 0, + "isFallthrough": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + }, + "1": { + "contextTargets": 0, + "isOff": true, + "nullRules": 0, + "rules": 0, + "targets": 0 + } + } + }, + "archived": false, + "lastModified": 1718299647539, + "on": false, + 
"salt": "fac0fe470f844433986166f3d570415d", + "sel": "8e8ae9542dc94f35b1ac64c845277d8a", + "trackEvents": false, + "trackEventsFallthrough": false, + "version": 1 + } + }, + "experiments": { + "baselineIdx": 0, + "items": [] + }, + "goalIds": [], + "includeInSnippet": false, + "key": "randomflag", + "kind": "boolean", + "maintainerId": "6669b0f34162860fefd6d724", + "name": "randomflag", + "tags": [], + "temporary": true, + "variationJsonSchema": null, + "variations": [ + { + "_id": "8868f0d9-8b1d-4575-9436-827188276792", + "value": true + }, + { + "_id": "8929317b-d2aa-479c-9249-e6c0ec5dc415", + "value": false + } + ], + "__projectKey": "fourth-project" +} +``` + +
+ +
+ Environment response data + +```json showLineNumbers +{ + "_links": { + "analytics": { + "href": "https://app.launchdarkly.com/snippet/events/v1/666b2a74cbdbfb108f3fc911.js", + "type": "text/html" + }, + "apiKey": { + "href": "/api/v2/projects/fourth-project/environments/fourth-env/apiKey", + "type": "application/json" + }, + "mobileKey": { + "href": "/api/v2/projects/fourth-project/environments/fourth-env/mobileKey", + "type": "application/json" + }, + "self": { + "href": "/api/v2/projects/fourth-project/environments/fourth-env", + "type": "application/json" + }, + "snippet": { + "href": "https://app.launchdarkly.com/snippet/features/666b2a74cbdbfb108f3fc911.js", + "type": "text/html" + } + }, + "_id": "666b2a74cbdbfb108f3fc911", + "_pubnub": { + "channel": "b4f644c56dbbfe88a4028cb2d2142c258926f9b7a9add263d105202f0cd6599c", + "cipherKey": "9571e2de187881614fe9b6b94d13a99fbdb056e508c9226e6c6bb7d0be117725" + }, + "key": "fourth-env", + "name": "fourth-env", + "apiKey": "sdk-1b3cf928-acae-4553-aab3-c956b7f04219", + "mobileKey": "mob-87679d8a-698d-4c5f-9ec1-05e368975afe", + "color": "e2e6ff", + "defaultTtl": 0, + "secureMode": false, + "defaultTrackEvents": false, + "requireComments": false, + "confirmChanges": false, + "tags": [], + "approvalSettings": { + "required": false, + "bypassApprovalsForPendingChanges": false, + "minNumApprovals": 1, + "canReviewOwnRequest": false, + "canApplyDeclinedChanges": true, + "serviceKind": "launchdarkly", + "serviceConfig": {}, + "requiredApprovalTags": [] + }, + "critical": false, + "__projectKey": "fourth-project" +} +``` + +
+ + +
+ Feature Flag In Environment response data + +```json showLineNumbers +{ + "_links": { + "parent": { + "href": "/api/v2/flags/fourth-project/olulufe", + "type": "application/json" + }, + "self": { + "href": "/api/v2/flag-statuses/fourth-project/shadow/olulufe", + "type": "application/json" + } + }, + "name": "new", + "lastRequested": null, + "__environmentKey": "shadow", + "__projectKey": "fourth-project" +} +``` + +
+ +### Mapping Result + +The combination of the sample payload and the Ocean configuration generates the following Port entity: + +
+ Project entity in Port + +```json showLineNumbers +{ + "identifier": "fourth-project", + "title": "Fourth Project", + "blueprint": "launchDarklyProject", + "properties": { + "tags": [] + }, + "relation": { + "service": "fourth-project" + } +} +``` +
+ +
+ Feature Flag entity in Port + +```json showLineNumbers +{ + "identifier": "randomflag-fourth-project", + "title": "randomflag", + "blueprint": "launchDarklyFeatureFlag", + "properties": { + "kind": "boolean", + "description": "", + "creationDate": "2024-06-13T17:27:27Z", + "clientSideAvailability": { + "usingEnvironmentId": false, + "usingMobileKey": false + }, + "temporary": true, + "tags": [], + "maintainer": "example@gmail.com", + "deprecated": false, + "variations": [ + { + "_id": "8868f0d9-8b1d-4575-9436-827188276792", + "value": true + }, + { + "_id": "8929317b-d2aa-479c-9249-e6c0ec5dc415", + "value": false + } + ], + "customProperties": {}, + "archived": false + }, + "relations": { + "project": "fourth-project" + } +} +``` + +
+ +
+ Environment entity in Port + +```json showLineNumbers +{ + "identifier": "fourth-env-fourth-project", + "title": "fourth-env", + "blueprint": "launchDarklyEnvironment", + "properties": { + "defaultTtl": 0, + "secureMode": false, + "defaultTrackEvents": false, + "requireComments": false, + "confirmChanges": false, + "tags": [], + "critical": false + }, + "relations": { + "project": "fourth-project" + } + } +``` + +
+ +
+ Feature Flag In Environment entity in Port + +```json showLineNumbers +{ + "identifier": "olulufe-shadow", + "title": "olulufe-shadow", + "blueprint": "launchDarklyFFInEnvironment", + "properties": { + "status": "new" + }, + "relations": { + "environment": "shadow-fourth-project", + "featureFlag": "olulufe-fourth-project" + } + } +``` +
+ + + ## Relevant Guides For relevant guides and examples, see the [guides section](https://docs.getport.io/guides?tags=Launchdarkly). diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/_azuredevops_exporter_supported_resources.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/_azuredevops_exporter_supported_resources.mdx index 2cdf91b81..c9bcd927f 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/_azuredevops_exporter_supported_resources.mdx +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/_azuredevops_exporter_supported_resources.mdx @@ -1,7 +1,3 @@ -:::tip Available Azure DevOps resources - -The following resources can be used to map data from Azure DevOps, it is possible to reference any field that appears in the API responses linked below for the Azure Devops integration api reference. - - [`repository`](https://learn.microsoft.com/en-us/rest/api/azure/devops/git/repositories/list?view=azure-devops-rest-7.2&tabs=HTTP#gitrepository) - [`repository-policy`](https://learn.microsoft.com/en-us/rest/api/azure/devops/git/policy-configurations/get?view=azure-devops-rest-7.1#policyconfiguration) - [`project`](https://learn.microsoft.com/en-us/rest/api/azure/devops/core/projects/list?view=azure-devops-rest-7.1&tabs=HTTP#teamprojectreference) @@ -12,6 +8,3 @@ The following resources can be used to map data from Azure DevOps, it is possibl - [`work-item`](https://learn.microsoft.com/en-us/rest/api/azure/devops/wit/wiql/query-by-wiql?view=azure-devops-rest-7.1&tabs=HTTP) - [`board`](https://learn.microsoft.com/en-us/rest/api/azure/devops/work/boards/list?view=azure-devops-rest-7.1) - [`release`](https://learn.microsoft.com/en-us/rest/api/azure/devops/release/releases?view=azure-devops-rest-7.1) - - -::: diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/azure-devops.md 
b/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/azure-devops.md index 22a20198d..de02ce83e 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/azure-devops.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/azure-devops.md @@ -2,181 +2,45 @@ import Tabs from "@theme/Tabs" import TabItem from "@theme/TabItem" import AzureDevopsResources from './\_azuredevops_exporter_supported_resources.mdx' + # Azure DevOps -Our integration with Azure DevOps allows you to export objects to Port as entities of existing blueprints. The integration supports real-time event processing so Port always provides an accurate real-time representation of your Azure DevOps resources. +Port's Azure DevOps integration allows you to model Azure DevOps resources in your software catalog and ingest data into them. -## Common use cases +## Overview -Our Azure DevOps integration makes it easy to fill the software catalog with data directly from your organization, for example: +This integration allows you to: -- Map most of the resources in the organization, including **projects**, **repositories**, **pipelines**, **pull requests**, **teams** and **members**. +- Map and organize your desired Azure DevOps resources and their metadata in Port (see supported resources below). - Watch for Azure DevOps object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. - Manage Port entities using GitOps. -## Installation - -To install Port's Azure DevOps integration, follow the [installation](./installation.md) guide. - -## Ingesting Git objects - -This integration allows you to ingest a variety of objects resources provided by the Azure DevOps API. It allows you to perform ETL operations on data from the Azure DevOps API into the desired data model. - -This integration uses a YAML configuration to describe the ETL process to load data into the developer portal.
The approach reflects a golden middle between an overly opinionated Git visualization that might not work for everyone and a too-broad approach that could introduce unneeded complexity into the developer portal. - -Here is an example snippet from the config which demonstrates the ETL process for getting `pull-request` data from Azure DevOps into the software catalog: - -```yaml showLineNumbers -resources: - # Extract - # highlight-start - - kind: pull-request - selector: - query: "true" # JQ boolean query. If evaluated to false - skip syncing the object. - # highlight-end - port: - entity: - mappings: - # Transform & Load - # highlight-start - identifier: >- - .repository.project.name + "/" + .repository.name + (.pullRequestId - | tostring) | gsub(" "; "") - blueprint: '"azureDevopsPullRequest"' - properties: - creator: .createdBy.uniqueName - status: .status - reviewers: '[.reviewers[].uniqueName]' - createdAt: .creationDate - relations: - repository: '.repository.project.name + "/" + .repository.name | gsub(" "; "")' - # highlight-end -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the different Azure DevOps API routes. - -### Integration configuration - -The integration's configuration is how you specify the exact resources you want to query from your organization, and which entities and properties you want to fill with the received data. - -Here is an example for the integration configuration block: - -```yaml showLineNumbers -resources: - - kind: repository - selector: - query: 'true' # JQ boolean query. If evaluated to false - skip syncing the object. - port: - entity: - mappings: - identifier: .project.name + "/" + .name # The Entity identifier will be the repository name. 
- title: .name - blueprint: '"azureDevopsRepository"' - properties: - url: .url - readme: file://README.md -``` - -### Configuration structure - -- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: repository - selector: - ... - ``` - -- The `kind` key is a specifier for an object from the Azure DevOps API: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: repository - selector: - ... - ``` + +### Supported Resources + +The resources that can be ingested from Azure DevOps into Port are listed below. -#### Filtering unwanted objects - -The `selector` and the `query` keys let you filter exactly which objects from the specified `kind` will be ingested to the software catalog: - - ```yaml showLineNumbers - resources: - - kind: repository - # highlight-start - selector: - query: "true" # JQ boolean query. If evaluated to false - skip syncing the object. - # highlight-end - port: - ``` - -For example, to ingest only repositories that have a name starting with `"service"`, use the `query` key like this: - -```yaml showLineNumbers -query: .name | startswith("service") -``` - -
- -:::tip WIQL Support -The Ocean Azure DevOps integration supports querying objects from the `work-item` kind using [WIQL](https://learn.microsoft.com/en-us/azure/devops/boards/queries/wiql-syntax?view=azure-devops), providing fine-grained control over which work items are ingested into Port. - -To leverage WIQL filtering, add a `wiql` key with your WIQL query as the value within the `selector` object. For example: - -```yaml showLineNumbers -resources: - - kind: work-item # WIQL filtering can only be used with the "work-item" kind - selector: - query: "true" - wiql: "[System.WorkItemType] = 'Task' AND [System.State] = 'Active'" # WIQL query, will only ingest work items of type "Task" whose state is "Active" - port: -``` - -::: - -The `port`, `entity` and the `mappings` keys open the section used to map the Azure DevOps API object fields to Port entities. To create multiple mappings of the same kind, you can add another item to the `resources` array; - - ```yaml showLineNumbers - resources: - - kind: repository - selector: - query: "true" - port: - # highlight-start - entity: - mappings: # Mappings between one Azure DevOps API object to a Port entity. Each value is a JQ query. - identifier: '.project.name + "/" + .name | gsub(" "; "")' - title: .name - blueprint: '"azureDevopsRepository"' - properties: - url: .url - readme: file://README.md - # highlight-end - - kind: repository # In this instance project is mapped again with a different filter - selector: - query: '.name == "MyRepositoryName"' - port: - entity: - mappings: ... - ``` - - :::tip - Pay attention to the value of the `blueprint` key, if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) - ::: - -## Permissions - -Port's Azure DevOps integration requires a personal access token, follow the instructions in the [installation](./installation.md#create-a-personal-access-token) guide. 
+ +## Setup + +To install Port's Azure DevOps integration, see the [installation](./installation.md#setup) page. + +## Configuration + +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. + +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. + ## Examples Refer to the [examples](./examples.md) page for practical configurations and their corresponding blueprint definitions. +## Relevant Guides +For relevant guides and examples, see the [guides section](https://docs.getport.io/guides?tags=AzureDevops). + ## GitOps Port's Azure DevOps integration also provides GitOps capabilities, refer to the [GitOps](./gitops/gitops.md) page to learn more. diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/installation.md b/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/installation.md index 3ca09912f..a9da2598c 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/installation.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/azure-devops/installation.md @@ -4,11 +4,14 @@ sidebar_position: 1 import Tabs from "@theme/Tabs" import TabItem from "@theme/TabItem" +import Prerequisites from "../../templates/\_ocean_helm_prerequisites_block.mdx" import HelmParameters from "../../templates/\_ocean-advanced-parameters-helm.mdx" import DockerParameters from "./\_azuredevops_one_time_docker_parameters.mdx" import AdvancedConfig from '../../../../generalTemplates/_ocean_advanced_configuration_note.md' import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from 
"/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Installation @@ -18,24 +21,24 @@ This page details how to install Port's Azure DevOps integration (powered by the - How to [configure](#configure-the-integration) and customize the integration before deploying it. - How to [deploy](#deploy-the-integration) the integration in the configuration that fits your use case. -:::note Prerequisites +## Prerequisites - An Azure DevOps account with admin privileges. - If you choose the real time & always on installation method, you will need a kubernetes cluster on which to install the integration. - Your Port user role is set to `Admin`. -::: -## Create a personal access token +## Setup + +### Create a personal access token The integration requires a personal access token to authenticate with your Azure DevOps account. You can create one by following [these steps](https://learn.microsoft.com/en-us/azure/devops/organizations/accounts/use-personal-access-tokens-to-authenticate?view=azure-devops&tabs=Windows#create-a-pat). The token should either have `admin` permissions, or `read` permissions for each of the supported resources you want to ingest into Port. -## Configure the integration -### `appHost` & listening to hooks +### AppHost & listening to hooks :::tip The `appHost` parameter is used specifically to enable the real-time functionality of the integration. @@ -58,52 +61,28 @@ Choose one of the following installation methods:
- + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: +

Prerequisites

-| Parameter | Description | Example | Required | -| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | ------- | -| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `integration.secrets.personalAccessToken` | The [personal access token](#tokenmapping) used to query authenticate with your Azure Devops account | | ✅ | -| `integration.secrets.organizationUrl` | The URL of your Azure DevOps organization | https://dev.azure.com/organizationName | ✅ | -| `integration.secrets.isProjectsLimited` | If using a project-scoped personal access token, this setting is enabled to create webhooks for individual projects. Enabled by default | | ❌ | -| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Azure DevOps | https://my-ocean-integration.com | ✅ | + + +For details about the available parameters for the installation, see the table below. - -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-azure-devops-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set scheduledResyncInterval=120 \ - --set integration.identifier="my-azure-devops-integration" \ - --set integration.type="azure-devops" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.organizationUrl="https://dev.azure.com/organizationName" \ - --set integration.secrets.personalAccessToken="Enter value here" -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-azure-devops-integration` in your git repository with the content: @@ -186,11 +165,38 @@ kubectl apply -f my-ocean-azure-devops-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Example | Required | +|-------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------|----------| +| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `integration.secrets.personalAccessToken` | The [personal access token](#create-a-personal-access-token) used to authenticate with your Azure DevOps account | | ✅ | +| `integration.secrets.organizationUrl` | The URL of your Azure DevOps organization | https://dev.azure.com/organizationName | ✅ | +| `integration.secrets.isProjectsLimited` | If using a project-scoped personal access token, this setting is enabled to create webhooks for individual projects. Enabled by default | | ❌ | +| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Azure DevOps | https://my-ocean-integration.com | ✅ | +| `integration.eventListener.type` | The event listener type. Read more about [event listeners](https://ocean.getport.io/framework/features/event-listener) | | ✅ | +| `integration.type` | The integration to be installed | | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync. When not set the integration will resync for each event listener resync event. 
Read more about [scheduledResyncInterval](https://ocean.getport.io/develop-an-integration/integration-configuration/#scheduledresyncinterval---run-scheduled-resync) | | ❌ | +| `initializePortResources` | Default true. When set to true the integration will create default blueprints and the port App config Mapping. Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | | ❌ | + + + +
+
- + + +This pipeline will run the Azure DevOps integration once and then exit, this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates +If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. +::: diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/github/_github_exporter_supported_resources.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/git/github/_github_exporter_supported_resources.mdx index 0f34790ac..4e4ef659d 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/github/_github_exporter_supported_resources.mdx +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/github/_github_exporter_supported_resources.mdx @@ -12,7 +12,6 @@ - [`user`](https://docs.github.com/en/rest/users/users#get-a-user) - [`team`](https://docs.github.com/en/rest/teams/teams#get-a-team-by-name) - [`dependabot-alert`](https://docs.github.com/en/rest/dependabot/alerts#list-dependabot-alerts-for-a-repository) -- [`packages`](https://github.com/port-labs/example-github-packages) - [`branches`](https://docs.github.com/en/rest/branches/branches#get-a-branch) - [`code-scanning`](https://docs.github.com/en/rest/code-scanning/code-scanning#list-code-scanning-alerts-for-a-repository) - [`releases`](https://docs.github.com/en/rest/releases/releases#list-releases) diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_dependabot_alert_blueprint.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_dependabot_alert_blueprint.mdx index f688c7035..52e1ac789 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_dependabot_alert_blueprint.mdx +++ 
b/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_dependabot_alert_blueprint.mdx @@ -70,6 +70,18 @@ "type": "string", "format": "url" } + }, + "alertCreatedAt": { + "icon": "DefaultProperty", + "type": "string", + "title": "Alert Created At", + "format": "date-time" + }, + "alertUpdatedAt": { + "icon": "DefaultProperty", + "type": "string", + "title": "Alert Updated At", + "format": "date-time" } }, "required": [] diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_repo_dependabot_port_app_config.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_repo_dependabot_port_app_config.mdx index 05f08be09..bc73e35ee 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_repo_dependabot_port_app_config.mdx +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/github/examples/example-repository-alerts/_github_exporter_example_repo_dependabot_port_app_config.mdx @@ -37,6 +37,8 @@ resources: cveID: .security_advisory.cve_id url: .html_url references: "[.security_advisory.references[].url]" + alertCreatedAt: .created_at + alertUpdatedAt: .updated_at relations: service: .repo.name - kind: code-scanning-alerts diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-member/_gitlab_exporter_example_member_port_app_config.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-member/_gitlab_exporter_example_member_port_app_config.mdx index 7f2505ecc..c46ad5be0 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-member/_gitlab_exporter_example_member_port_app_config.mdx +++ 
b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-member/_gitlab_exporter_example_member_port_app_config.mdx @@ -3,9 +3,6 @@ Ocean integration configuration ```yaml showLineNumbers -deleteDependentEntities: true -createMissingRelatedEntities: true -enableMergeEntity: true resources: - kind: group-with-members # replace with `project-with-members` to retrieve members from projects selector: diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-projects-members/_gitlab_exporter_example_project_member_port_app_config.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-projects-members/_gitlab_exporter_example_project_member_port_app_config.mdx index f109d7520..156279ee5 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-projects-members/_gitlab_exporter_example_project_member_port_app_config.mdx +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/example-projects-members/_gitlab_exporter_example_project_member_port_app_config.mdx @@ -3,9 +3,6 @@ Ocean integration configuration ```yaml showLineNumbers -deleteDependentEntities: true -createMissingRelatedEntities: true -enableMergeEntity: true resources: - kind: project-with-members selector: diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/examples.md b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/examples.md index 94f002c5f..8f41d14c1 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/examples.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/examples.md @@ -220,6 +220,11 @@ In the following example you will ingest your GitLab members to Port, you may us ::: +:::caution GitLab free plan limitation +Primary email addresses are not available for GitLab "Free plan" users. 
+::: + + @@ -278,6 +283,10 @@ In the following example you will ingest your GitLab groups and their members to In the following example you will ingest your GitLab projects and their members to Port, you may use the following Port blueprint definitions and integration configuration: +:::caution Limitation +Real time webhook events are not supported for the `project-with-members` kind. +::: + @@ -285,8 +294,7 @@ In the following example you will ingest your GitLab projects and their members - Refer to the [setup](gitlab.md#setup) section to learn more about the integration configuration setup process. - We leverage [JQ JSON processor](https://stedolan.github.io/jq/manual/) to map and transform GitLab objects to Port entities. -- Click [Here](https://docs.gitlab.com/ee/api/groups.html#list-a-groups-projects) for the GitLab project object structure. -- Click [Here](https://docs.gitlab.com/ee/api/issues.html#list-project-issues) for the GitLab issue object structure. +- Click [Here](https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project) for the GitLab project or group member object structure. ::: @@ -298,4 +306,4 @@ The above examples shows a specific use cases, but Port's GitLab integration sup When adding the ingestion of other resources, remember to add an entry to the `resources` array and change the value provided to the `kind` key accordingly. -After creating the blueprints and saving the integration configuration, you will see new entities in Port matching your projects alongside their issues. \ No newline at end of file +After creating the blueprints and saving the integration configuration, you will see new entities in Port matching your projects alongside their issues. 
diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/gitlab.md b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/gitlab.md index 8e2794110..32bedd165 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/gitlab.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/gitlab.md @@ -24,10 +24,13 @@ It is possible to reference any field that appears in the API responses linked b - [`pipeline`](https://docs.gitlab.com/ee/api/pipelines.html#get-a-single-pipeline) - [`group`](https://docs.gitlab.com/ee/api/groups.html#details-of-a-group) - [`file`](https://docs.gitlab.com/ee/api/repository_files.html#get-file-from-repository) +- [`members`](https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project) + ## Setup -To install Port's GitLab integration, follow the [installation](./installation.md#setup) guide. +To install Port's GitLab integration, see the [installation](./installation.md#setup) page. + :::info Permission Port's GitLab integration requires a group access token with the `api` scope. To create a group access token, follow the instructions in the [installation](./installation.md#creating-a-gitlab-group-access-token) guide @@ -382,7 +385,7 @@ itemsToParse: .file.content | if type== "object" then [.] else . end ``` ::: -## Limitations +#### Limitations - Currently only files up to 1MB in size are supported. - Only JSON and YAML formats are automatically parsed. Other file formats can be ingested as raw files. 
diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/installation.md b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/installation.md index 2e7768c1a..d7dcf4f52 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/installation.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/git/gitlab/installation.md @@ -5,10 +5,13 @@ sidebar_position: 1 import Tabs from "@theme/Tabs" import TabItem from "@theme/TabItem" import HelmParameters from "../../templates/\_ocean-advanced-parameters-helm.mdx" +import Prerequisites from "../../templates/\_ocean_helm_prerequisites_block.mdx" import DockerParameters from "./\_gitlab_one_time_docker_parameters.mdx" import AdvancedConfig from '/docs/generalTemplates/_ocean_advanced_configuration_note.md' import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Installation @@ -63,7 +66,7 @@ In this example: -See the [token mapping](#tokenmapping) section for more information. +See the [Token Mapping](#token-mapping) section for more information. The following steps will guide you how to create a GitLab group access token. @@ -78,9 +81,7 @@ The following steps will guide you how to create a GitLab group access token. 3. Click "Create group access token". 4. Copy the generated token and use it when deploying the integration in the following steps. -### Configure the GitLab integration - -### `tokenMapping` +### Token Mapping The GitLab integration supports fetching data related to specific paths in your GitLab groups. The integration is also able to fetch data from different GitLab parent groups by providing additional group tokens. 
In order to do so, you need to map the desired paths to the relevant access tokens. The `tokenMapping` parameter supports specifying the paths that the integration will search for files and information in, using [globPatterns](https://www.malikbrowne.com/blog/a-beginners-guide-glob-patterns). @@ -182,7 +183,7 @@ In both options you'll need to provide the `useSystemHook` parameter with the va ![GitLab System Hook](/img/integrations/gitlab/GitLabSystemHook.png) -### `tokenGroupHooksOverrideMapping` +### Listen to specified groups the integration can support listening to webhooks on specified groups, by configuring the `tokenGroupHooksOverrideMapping` parameter. this parameter is not required, and when you don't use it, the integration will listen to all of the root groups (if not using `useSystemHooks=true`) @@ -204,7 +205,7 @@ You can configure multiple tokens, and multiple groups per token (the token shou - The group path is the full path in gitlab. If a group path is incorrect, the webhook will not be created. - The events for each group must match the supported event types mentioned below. if you would like to have all the events provided in the webhook, you can use: `{"events" = []}`, but not eliminate this key completely, because it is required. -### Deploy the GitLab integration +## Deploy the GitLab integration Choose one of the following installation methods: @@ -216,67 +217,27 @@ Choose one of the following installation methods: - + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: +

Prerequisites

-| Parameter | Description | Example | Required | -| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | ------- | -| `port.clientId` | Your Port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.clientSecret` | Your Port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `integration.secrets.tokenMapping` | The [token mapping](#tokenmapping) configuration used to query GitLab | | ✅ | -| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in GitLab | https://my-ocean-integration.com | ✅ | -| `integration.config.gitlabHost` | (for self-hosted GitLab) the URL of your GitLab instance | https://my-gitlab.com | ❌ | -| `integration.secrets.tokenGroupHooksOverrideMapping` | The [token group hooks override mapping](#tokengrouphooksoverridemapping) configuration used to create custom webhooks on groups | | ❌ | + - +For details about the available parameters for the installation, see the table below. -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-gitlab-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set scheduledResyncInterval=120 \ - --set integration.identifier="my-gitlab-integration" \ - --set integration.type="gitlab" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.tokenMapping="\{\"TOKEN\": [\"GROUP_NAME/**\"]\}" -``` - - - -It is also possible to get Port's UI to generate your installation command for you, Port will inject values such as your Port client ID and client secret directly into the command, making it easier to get started. - -Follow these steps to setup the integration through Port's UI: -1. Click the ingest button in Port Builder Page for the blueprint you want to ingest using GitLab: + - ![DevPortal Builder ingest button](/img/integrations/gitlab/DevPortalBuilderIngestButton.png) - -2. Select GitLab under the Git providers category: - - ![DevPortal Builder GitLab option](/img/integrations/gitlab/DevPortalBuilderGitLabOption.png) - -3. Copy the helm installation command and set the [required configuration](#configuring-the-gitlab-integration); - -4. Run the helm command with the updated parameters to install the integration in your Kubernetes cluster. + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-gitlab-integration` in your git repository with the content: @@ -357,21 +318,43 @@ kubectl apply -f my-ocean-gitlab-integration.yaml +This table summarizes the available parameters for the installation. 
+Note the parameters specific to this integration, they are last in the table. + +| Parameter | Description | Example | Required | +|------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|----------| +| `port.clientId` | Your Port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.clientSecret` | Your Port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `integration.secrets.tokenMapping` | The [token mapping](#tokenmapping) configuration used to query GitLab | | ✅ | +| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in GitLab | https://my-ocean-integration.com | ✅ | +| `integration.config.gitlabHost` | (for self-hosted GitLab) the URL of your GitLab instance | https://my-gitlab.com | ❌ | +| `integration.secrets.tokenGroupHooksOverrideMapping` | The [token group hooks override mapping](#tokengrouphooksoverridemapping) configuration used to create custom webhooks on groups | | ❌ | +| `integration.eventListener.type` | The event listener type. Read more about [event listeners](https://ocean.getport.io/framework/features/event-listener) | | ✅ | +| `integration.type` | The integration to be installed | | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync. When not set the integration will resync for each event listener resync event. 
Read more about [scheduledResyncInterval](https://ocean.getport.io/develop-an-integration/integration-configuration/#scheduledresyncinterval---run-scheduled-resync) | | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | | ❌ | + + + +
+
- + - - - -This workflow will run the GitLab integration once and then exit, this is useful for **scheduled** ingestion of data. +This pipeline will run the GitLab integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning +:::warning Real-time updates If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [GitLab Variables](https://docs.gitlab.com/ee/ci/variables/): @@ -433,15 +416,11 @@ Also make sure to keep the double-quotes (`"`) when passing the `OCEAN__INTEGRAT - -This pipeline will run the GitLab integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. 
-::: + Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/_category_.json b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/_category_.json new file mode 100644 index 000000000..022ddde78 --- /dev/null +++ b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Opsgenie", + "position": 3 +} diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/_opsgenie_docker_params.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/_opsgenie_docker_params.mdx similarity index 100% rename from docs/build-your-software-catalog/sync-data-to-catalog/incident-management/_opsgenie_docker_params.mdx rename to docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/_opsgenie_docker_params.mdx diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/examples.md b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/examples.md new file mode 100644 index 000000000..d1449cb90 --- /dev/null +++ b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/examples.md @@ -0,0 +1,568 @@ +--- +sidebar_position: 2 +--- + +# Examples +To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. + + +## Team + +
+Team blueprint + +```json showLineNumbers +{ + "identifier": "opsGenieTeam", + "description": "This blueprint represents an OpsGenie team in our software catalog", + "title": "OpsGenie Team", + "icon": "OpsGenie", + "schema": { + "properties": { + "description": { + "type": "string", + "title": "Description", + "icon": "DefaultProperty" + }, + "url": { + "title": "URL", + "type": "string", + "description": "URL to the service", + "format": "url", + "icon": "DefaultProperty" + }, + "oncallUsers": { + "type": "array", + "title": "Current Oncalls", + "items": { + "type": "string", + "format": "user" + } + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "aggregationProperties": {}, + "relations": {} +} +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: team + selector: + query: 'true' + port: + entity: + mappings: + identifier: .id + title: .name + blueprint: '"opsGenieTeam"' + properties: + description: .description + url: .links.web +``` + +
+ + +## Schedule + +
+Schedule blueprint + +```json showLineNumbers +{ + "identifier": "opsGenieSchedule", + "description": "This blueprint represents a OpsGenie schedule in our software catalog", + "title": "OpsGenie Schedule", + "icon": "OpsGenie", + "schema": { + "properties": { + "timezone": { + "title": "Timezone", + "type": "string" + }, + "description": { + "title": "Description", + "type": "string" + }, + "users": { + "title": "Users", + "type": "array", + "items": { + "type": "string", + "format": "user" + } + }, + "startDate": { + "title": "Start Date", + "type": "string", + "format": "date-time" + }, + "endDate": { + "title": "End Date", + "type": "string", + "format": "date-time" + }, + "rotationType": { + "type": "string", + "title": "Rotation Type" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "aggregationProperties": {}, + "relations": { + "ownerTeam": { + "title": "Owner Team", + "target": "opsGenieTeam", + "required": false, + "many": false + } + } +} +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: schedule + selector: + query: 'true' + apiQueryParams: + expand: rotation + port: + itemsToParse: .rotations + entity: + mappings: + identifier: .id + "_" + .item.id + title: .name + "_" + .item.name + blueprint: '"opsGenieSchedule"' + properties: + timezone: .timezone + description: .description + startDate: .item.startDate + endDate: .item.endDate + rotationType: .item.type + users: '[.item.participants[] | select(has("username")) | .username]' + relations: + ownerTeam: .ownerTeam.id +``` + +
+ + +## Service + +
+Service blueprint + +```json showLineNumbers +{ + "identifier": "opsGenieService", + "description": "This blueprint represents an OpsGenie service in our software catalog", + "title": "OpsGenie Service", + "icon": "OpsGenie", + "schema": { + "properties": { + "description": { + "type": "string", + "title": "Description", + "icon": "DefaultProperty" + }, + "url": { + "title": "URL", + "type": "string", + "description": "URL to the service", + "format": "url", + "icon": "DefaultProperty" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Tags", + "icon": "DefaultProperty" + } + }, + "required": [] + }, + "mirrorProperties": { + "oncallUsers": { + "title": "Current Oncalls", + "path": "ownerTeam.oncallUsers" + } + }, + "calculationProperties": { + }, + "aggregationProperties": { + "numberOfOpenIncidents": { + "title": "Number of open incidents", + "type": "number", + "target": "opsGenieIncident", + "query": { + "combinator": "and", + "rules": [ + { + "property": "status", + "operator": "=", + "value": "open" + } + ] + }, + "calculationSpec": { + "calculationBy": "entities", + "func": "count" + } + } + }, + "relations": { + "ownerTeam": { + "title": "Owner Team", + "target": "opsGenieTeam", + "required": false, + "many": false + } + } +} +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: service + selector: + query: 'true' + port: + entity: + mappings: + identifier: .id + title: .name + blueprint: '"opsGenieService"' + properties: + description: .description + url: .links.web + tags: .tags + relations: + ownerTeam: .teamId +``` + +
+ +## Incident + +
+Incident blueprint + +```json showLineNumbers +{ + "identifier": "opsGenieIncident", + "description": "This blueprint represents an OpsGenie incident in our software catalog", + "title": "OpsGenie Incident", + "icon": "OpsGenie", + "schema": { + "properties": { + "description": { + "title": "Description", + "type": "string" + }, + "status": { + "type": "string", + "title": "Status", + "enum": [ + "closed", + "open", + "resolved" + ], + "enumColors": { + "closed": "blue", + "open": "red", + "resolved": "green" + }, + "description": "The status of the incident" + }, + "url": { + "type": "string", + "format": "url", + "title": "URL" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Tags" + }, + "responders": { + "type": "array", + "title": "Responders", + "description": "Responders to the alert" + }, + "priority": { + "type": "string", + "title": "Priority" + }, + "createdAt": { + "title": "Create At", + "type": "string", + "format": "date-time" + }, + "updatedAt": { + "title": "Updated At", + "type": "string", + "format": "date-time" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": { + "services": { + "title": "Impacted Services", + "target": "opsGenieService", + "many": true, + "required": false + } + } +} +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: incident + selector: + query: 'true' + apiQueryParams: + status: open + port: + entity: + mappings: + identifier: .id + title: .message + blueprint: '"opsGenieIncident"' + properties: + status: .status + responders: .responders + priority: .priority + tags: .tags + url: .links.web + createdAt: .createdAt + updatedAt: .updatedAt + description: .description + relations: + services: .impactedServices +``` + +
+ +## Alert + +
+Alert blueprint + +```json showLineNumbers +{ + "identifier": "opsGenieAlert", + "description": "This blueprint represents an OpsGenie alert in our software catalog", + "title": "OpsGenie Alert", + "icon": "OpsGenie", + "schema": { + "properties": { + "description": { + "title": "Description", + "type": "string" + }, + "status": { + "type": "string", + "title": "Status", + "enum": [ + "closed", + "open" + ], + "enumColors": { + "closed": "green", + "open": "red" + }, + "description": "The status of the alert" + }, + "acknowledged": { + "type": "boolean", + "title": "Acknowledged" + }, + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Tags" + }, + "responders": { + "type": "array", + "title": "Responders", + "description": "Responders to the alert" + }, + "integration": { + "type": "string", + "title": "Integration", + "description": "The name of the Integration" + }, + "priority": { + "type": "string", + "title": "Priority" + }, + "sourceName": { + "type": "string", + "title": "Source Name", + "description": "Alert source name" + }, + "createdBy": { + "title": "Created By", + "type": "string", + "format": "user" + }, + "createdAt": { + "title": "Create At", + "type": "string", + "format": "date-time" + }, + "updatedAt": { + "title": "Updated At", + "type": "string", + "format": "date-time" + }, + "count": { + "title": "Count", + "type": "number" + } + }, + "required": [] + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "relations": { + "relatedIncident": { + "title": "Related Incident", + "target": "opsGenieIncident", + "required": false, + "many": false + } + } +} +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: alert + selector: + query: 'true' + apiQueryParams: + status: open + port: + entity: + mappings: + identifier: .id + title: .message + blueprint: '"opsGenieAlert"' + properties: + status: .status + acknowledged: .acknowledged + responders: .responders + priority: .priority + sourceName: .source + tags: .tags + count: .count + createdBy: .owner + createdAt: .createdAt + updatedAt: .updatedAt + description: .description + integration: .integration.name + relations: + relatedIncident: 'if (.alias | contains("_")) then (.alias | split("_")[0]) else null end' +``` + +

:::tip filter alerts and incidents
The integration provides an option to filter the data that is retrieved from the OpsGenie API using the following attributes:

1. `createdAt`: The date and time the alert or incident was created
2. `lastOccurredAt`: The date and time the alert or incident last occurred
3. `snoozedUntil`: The date and time the alert or incident was snoozed until
4. `priority`: The priority of the alert or incident. Accepts values such as `P1`, `P2`, `P3`, `P4` and `P5`
5. `status`: The status of the alert or incident. Accepts values such as `open`, `closed` and `resolved`
6. `isSeen`: Whether the alert or incident has been seen. Accepts a boolean `true` or `false`
7. `acknowledged`: Whether the alert or incident has been acknowledged. Accepts a boolean `true` or `false`
8. `snoozed`: Whether the alert or incident has been snoozed. Accepts a boolean `true` or `false`
9. `owner`: The owner of the alert or incident. Accepts an OpsGenie username
10. `teams`: The teams associated with the alert or incident
11. `acknowledgedBy`: The user who acknowledged the alert or incident
12. `closedBy`: The user who closed the alert or incident
13. `message`: The message of the alert or incident

These attributes can be enabled using the path: `selector.apiQueryParams`. By default, the integration fetches `open` alerts and incidents.
:::

## Current On-call
To bring the current on-call users, update your configuration mapping to populate the `OpsGenieTeam` blueprint with team and on-call data. This will enable you to view on-call information at the service level:

+Integration configuration + +```yaml showLineNumbers +createMissingRelatedEntities: true +deleteDependentEntities: true +resources: + - kind: schedule-oncall + selector: + query: 'true' + port: + entity: + mappings: + identifier: .ownerTeam.id + title: .ownerTeam.name + blueprint: '"opsGenieTeam"' + properties: + oncallUsers: .__currentOncalls.onCallRecipients +``` + +
\ No newline at end of file diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie.md b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/opsgenie.md similarity index 66% rename from docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie.md rename to docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/opsgenie.md index dd606d5b5..6712db05b 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/opsgenie.md @@ -4,32 +4,43 @@ sidebar_position: 2 import Tabs from "@theme/Tabs" import TabItem from "@theme/TabItem" -import Prerequisites from "../templates/\_ocean_helm_prerequisites_block.mdx" -import AzurePremise from "../templates/\_ocean_azure_premise.mdx" +import Prerequisites from "../../templates/\_ocean_helm_prerequisites_block.mdx" +import AzurePremise from "../../templates/\_ocean_azure_premise.mdx" import DockerParameters from "./\_opsgenie_docker_params.mdx" -import AdvancedConfig from '../../../generalTemplates/_ocean_advanced_configuration_note.md' +import AdvancedConfig from '../../../../generalTemplates/_ocean_advanced_configuration_note.md' import OpsGenieAlertBlueprint from "/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/opsgenie/\_example_opsgenie_alert_blueprint.mdx"; import OpsGenieAlertConfiguration from "/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/opsgenie/\_example_opsgenie_alert_configuration.mdx"; import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from 
"/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Opsgenie -Port's Opsgenie integration allows you to import `alert`, `incident`, `service`, `team`, `schedule` and `schedule-oncall` from your Opsgenie account into Port, according to your mapping and definitions. +Port's Opsgenie integration allows you to model Opsgenie resources in your software catalog and ingest data into them. -## Common use cases +## Overview -- Map `alert`, `incident`, `service`, `team`, `schedule`, and `schedule-oncall` in your Opsgenie account. -- Watch for object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. +This integration allows you to: -## Prerequisites -:::info API Token -An OpsGenie API token with the `read` and `configuration access` scopes is required. Port requires the `read` permission to allow the integration to access incidents and alerts. Port also needs the `configuraton access` permission to allow the integration to access service, teams, and schedules. See [here](https://support.atlassian.com/opsgenie/docs/api-key-management/) for more information on OpsGenie API key management. -::: +- Map and organize your desired Opsgenie resources and their metadata in Port (see supported resources below). +- Watch for Opsgenie object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. + + +### Supported Resources + +The resources that can be ingested from Opsgenie into Port are listed below. It is possible to reference any field that appears in the API responses linked below in the mapping configuration. 
+
+- [`Alert`](https://docs.opsgenie.com/docs/alert-api#list-alerts)
+- [`Incident`](https://docs.opsgenie.com/docs/incident-api#list-incidents)
+- [`Service`](https://docs.opsgenie.com/docs/service-api#list-services)
+- [`Team`](https://docs.opsgenie.com/docs/team-api#list-teams)
+- [`Schedule`](https://docs.opsgenie.com/docs/schedule-api#list-schedules)
+- [`Schedule-Oncall`](https://docs.opsgenie.com/docs/who-is-on-call-api#get-on-calls)

-
-## Installation
+## Setup

 Choose one of the following installation methods:

@@ -41,53 +52,31 @@ Choose one of the following installation methods:
- + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Required | -| -------------------------------- | ------------------------------------------------------------------------------------------------------------- | -------- | -| `port.clientId` | Your port client id | ✅ | -| `port.clientSecret` | Your port client secret | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.secrets.apiToken` | The Opsgenie API token | ✅ | -| `integration.config.apiUrl` | The Opsgenie API URL. If not specified, the default will be https://api.opsgenie.com | ✅ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | -| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | +

Prerequisites

+
+:::info API Token
+An OpsGenie API token with the `read` and `configuration access` scopes is required. Port requires the `read` permission to allow the integration to access incidents and alerts. Port also needs the `configuration access` permission to allow the integration to access services, teams, and schedules. See [here](https://support.atlassian.com/opsgenie/docs/api-key-management/) for more information on OpsGenie API key management.
+:::
+
+
+
+For details about the available parameters for the installation, see the table below.

-
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-opsgenie-integration port-labs/port-ocean \ - --set port.clientId="CLIENT_ID" \ - --set port.clientSecret="CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set integration.identifier="my-opsgenie-integration" \ - --set integration.type="opsgenie" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.apiToken="API_TOKEN" \ - --set integration.config.apiUrl="https://api.opsgenie.com" -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-opsgenie-integration` in your git repository with the content: @@ -171,19 +160,40 @@ kubectl apply -f my-ocean-opsgenie-integration.yaml +This table summarizes the available parameters for the installation. The parameters specific to this integration are last in the table. 
+ +| Parameter | Description | Required | +|----------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your port client id | ✅ | +| `port.clientSecret` | Your port client secret | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | ❌ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `integration.secrets.apiToken` | The Opsgenie API token, docs can be found [here](https://support.atlassian.com/opsgenie/docs/api-key-management/) | ✅ | +| `integration.config.apiUrl` | The Opsgenie API URL. If not specified, the default will be https://api.opsgenie.com | ✅ | + + +
+
- - - -This workflow will run the Opsgenie integration once and then exit, this is useful for **scheduled** ingestion of data. + -:::warning +This workflow/pipeline will run the Opsgenie integration once and then exit, this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -219,15 +229,10 @@ jobs: -This pipeline will run the Opsgenie integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -279,9 +284,8 @@ pipeline { ``` - - + @@ -325,12 +329,7 @@ steps: ``` - -This workflow will run the Opsgenie integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: + Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -388,697 +387,57 @@ ingest_data: -## Ingesting Opsgenie objects - -The Opsgenie integration uses a YAML configuration to describe the process of loading data into the developer portal. See [examples](#examples) below. 
- -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from Opsgenie's API events. -### Configuration structure +## Configuration -The integration configuration determines which resources will be queried from Opsgenie, and which entities and properties will be created in Port. +Port integrations use a [YAML mapping block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. -:::tip Supported resources -The following resources can be used to map data from Opsgenie, it is possible to reference any field that appears in the API responses linked below for the mapping configuration. +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. -- [`Alert`](https://docs.opsgenie.com/docs/alert-api#list-alerts) -- [`Incident`](https://docs.opsgenie.com/docs/incident-api#list-incidents) -- [`Service`](https://docs.opsgenie.com/docs/service-api#list-services) -- [`Team`](https://docs.opsgenie.com/docs/team-api#list-teams) -- [`Service`](https://docs.opsgenie.com/docs/service-api#list-services) -- [`Schedule`](https://docs.opsgenie.com/docs/schedule-api#list-schedules) -- [`Schedule-Oncall`](https://docs.opsgenie.com/docs/who-is-on-call-api#get-on-calls) -::: +## Capabilities -- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: service - selector: - ... - ``` - -- The `kind` key is a specifier for a Opsgenie object: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: service - selector: - ... 
- ``` - -- The `selector` and the `query` keys allow you to filter which objects of the specified `kind` will be ingested into your software catalog: - - ```yaml showLineNumbers - resources: - - kind: service - # highlight-start - selector: - query: "true" # JQ boolean expression. If evaluated to false - this object will be skipped. - # highlight-end - port: - ``` - -- The `port`, `entity` and the `mappings` keys are used to map the Opsgenie object fields to Port entities. To create multiple mappings of the same kind, you can add another item in the `resources` array; - - ```yaml showLineNumbers - resources: - - kind: service - selector: - query: "true" - port: - # highlight-start - entity: - mappings: # Mappings between one Opsgenie object to a Port entity. Each value is a JQ query. - identifier: .id - title: .name - blueprint: '"opsGenieService"' - properties: - description: .description - # highlight-end - - kind: service # In this instance service is mapped again with a different filter - selector: - query: '.name == "MyServiceName"' - port: - entity: - mappings: ... - ``` - - :::tip Blueprint key - Note the value of the `blueprint` key - if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) - ::: - -## Configuring real-time updates +### Configure real-time updates Currently, the OpsGenie API lacks support for programmatic webhook creation. To set up a webhook configuration in OpsGenie for sending alert notifications to the Ocean integration, follow these steps: -### Prerequisite +#### Prerequisites -Prepare a webhook `URL` using this format: `{app_host}/integration/webhook`. The `app_host` parameter should match the ingress or external load balancer where the integration will be deployed. 
For example, if your ingress or load balancer exposes the OpsGenie Ocean integration at `https://myservice.domain.com`, your webhook `URL` should be `https://myservice.domain.com/integration/webhook`. +Prepare a webhook `URL` using this format: `{app_host}/integration/webhook`. +The `app_host` parameter should match the ingress or external load balancer where the integration will be deployed. +For example, if your ingress or load balancer exposes the OpsGenie Ocean integration at `https://myservice.domain.com`, your webhook `URL` should be `https://myservice.domain.com/integration/webhook`. -### Create a webhook in OpsGenie +#### Create a webhook in OpsGenie -1. Go to OpsGenie; -2. Select **Settings**; -3. Click on **Integrations** under the **Integrations** section of the sidebar; -4. Click on **Add integration**; -5. In the search box, type _Webhook_ and select the webhook option; +1. Go to OpsGenie. +2. Select **Settings**. +3. Click on **Integrations** under the **Integrations** section of the sidebar. +4. Click on **Add integration**. +5. In the search box, type _Webhook_ and select the webhook option. 6. Input the following details: - 1. `Name` - use a meaningful name such as Port Ocean Webhook; - 2. Be sure to keep the "Enabled" checkbox checked; - 3. Check the "Add Alert Description to Payload" checkbox; - 4. Check the "Add Alert Details to Payload" checkbox; + 1. `Name` - use a meaningful name such as Port Ocean Webhook. + 2. Be sure to keep the "Enabled" checkbox checked. + 3. Check the "Add Alert Description to Payload" checkbox. + 4. Check the "Add Alert Details to Payload" checkbox. 5. Add the following action triggers to the webhook by clicking on **Add new action**: - 1. If _alert is snoozed_ in Opsgenie, _post to url_ in Webhook; - 2. If _alert's description is updated_ in Opsgenie, _post to url_ in Webhook; - 3. If _alert's message is updated_ in Opsgenie, _post to url_ in Webhook; - 4. 
If _alert's priority is updated_ in Opsgenie, _post to url_ in Webhook; - 5. If _a responder is added to the alert_ in Opsgenie, _post to url_ in Webhook; - 6. if _a user executes "Assign Ownership_ in Opsgenie, _post to url_ in Webhook; - 7. if _a tag is added to the alert_ in Opsgenie, _post to url_ in Webhook; - 8. .if _a tag is removed from the alert_ in Opsgenie, _post to url_ in Webhook; + 1. If _alert is snoozed_ in Opsgenie, _post to url_ in Webhook. + 2. If _alert's description is updated_ in Opsgenie, _post to url_ in Webhook. + 3. If _alert's message is updated_ in Opsgenie, _post to url_ in Webhook. + 4. If _alert's priority is updated_ in Opsgenie, _post to url_ in Webhook. + 5. If _a responder is added to the alert_ in Opsgenie, _post to url_ in Webhook. + 6. if _a user executes "Assign Ownership_ in Opsgenie, _post to url_ in Webhook. + 7. if _a tag is added to the alert_ in Opsgenie, _post to url_ in Webhook. + 8. .if _a tag is removed from the alert_ in Opsgenie, _post to url_ in Webhook. 6. `Webhook URL` - enter the value of the `URL` you created above. -7. Click **Save integration** - -### Ingest data into Port +7. Click **Save integration**. -To ingest Opsgenie objects using the [integration configuration](#configuration-structure), you can follow the steps below: -1. Go to the DevPortal Builder page. -2. Select a blueprint you want to ingest using Opsgenie. -3. Choose the **Ingest Data** option from the menu. -4. Select Opsgenie under the Incident management category. -5. Modify the [configuration](#configuration-structure) according to your needs. -6. Click `Resync`. ## Examples -Examples of blueprints and the relevant integration configurations: +To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. -### Team - -
-Team blueprint - -```json showLineNumbers -{ - "identifier": "opsGenieTeam", - "description": "This blueprint represents an OpsGenie team in our software catalog", - "title": "OpsGenie Team", - "icon": "OpsGenie", - "schema": { - "properties": { - "description": { - "type": "string", - "title": "Description", - "icon": "DefaultProperty" - }, - "url": { - "title": "URL", - "type": "string", - "description": "URL to the service", - "format": "url", - "icon": "DefaultProperty" - }, - "oncallUsers": { - "type": "array", - "title": "Current Oncalls", - "items": { - "type": "string", - "format": "user" - } - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": {} -} -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: team - selector: - query: 'true' - port: - entity: - mappings: - identifier: .id - title: .name - blueprint: '"opsGenieTeam"' - properties: - description: .description - url: .links.web -``` - -
- - -### Schedule - -
-Schedule blueprint - -```json showLineNumbers -{ - "identifier": "opsGenieSchedule", - "description": "This blueprint represents a OpsGenie schedule in our software catalog", - "title": "OpsGenie Schedule", - "icon": "OpsGenie", - "schema": { - "properties": { - "timezone": { - "title": "Timezone", - "type": "string" - }, - "description": { - "title": "Description", - "type": "string" - }, - "users": { - "title": "Users", - "type": "array", - "items": { - "type": "string", - "format": "user" - } - }, - "startDate": { - "title": "Start Date", - "type": "string", - "format": "date-time" - }, - "endDate": { - "title": "End Date", - "type": "string", - "format": "date-time" - }, - "rotationType": { - "type": "string", - "title": "Rotation Type" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": { - "ownerTeam": { - "title": "Owner Team", - "target": "opsGenieTeam", - "required": false, - "many": false - } - } -} -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: schedule - selector: - query: 'true' - apiQueryParams: - expand: rotation - port: - itemsToParse: .rotations - entity: - mappings: - identifier: .id + "_" + .item.id - title: .name + "_" + .item.name - blueprint: '"opsGenieSchedule"' - properties: - timezone: .timezone - description: .description - startDate: .item.startDate - endDate: .item.endDate - rotationType: .item.type - users: '[.item.participants[] | select(has("username")) | .username]' - relations: - ownerTeam: .ownerTeam.id -``` - -
- - -### Service - -
-Service blueprint - -```json showLineNumbers -{ - "identifier": "opsGenieService", - "description": "This blueprint represents an OpsGenie service in our software catalog", - "title": "OpsGenie Service", - "icon": "OpsGenie", - "schema": { - "properties": { - "description": { - "type": "string", - "title": "Description", - "icon": "DefaultProperty" - }, - "url": { - "title": "URL", - "type": "string", - "description": "URL to the service", - "format": "url", - "icon": "DefaultProperty" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "title": "Tags", - "icon": "DefaultProperty" - } - }, - "required": [] - }, - "mirrorProperties": { - "oncallUsers": { - "title": "Current Oncalls", - "path": "ownerTeam.oncallUsers" - } - }, - "calculationProperties": { - }, - "aggregationProperties": { - "numberOfOpenIncidents": { - "title": "Number of open incidents", - "type": "number", - "target": "opsGenieIncident", - "query": { - "combinator": "and", - "rules": [ - { - "property": "status", - "operator": "=", - "value": "open" - } - ] - }, - "calculationSpec": { - "calculationBy": "entities", - "func": "count" - } - } - }, - "relations": { - "ownerTeam": { - "title": "Owner Team", - "target": "opsGenieTeam", - "required": false, - "many": false - } - } -} -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: service - selector: - query: 'true' - port: - entity: - mappings: - identifier: .id - title: .name - blueprint: '"opsGenieService"' - properties: - description: .description - url: .links.web - tags: .tags - relations: - ownerTeam: .teamId -``` - -
- -### Incident - -
-Incident blueprint - -```json showLineNumbers -{ - "identifier": "opsGenieIncident", - "description": "This blueprint represents an OpsGenie incident in our software catalog", - "title": "OpsGenie Incident", - "icon": "OpsGenie", - "schema": { - "properties": { - "description": { - "title": "Description", - "type": "string" - }, - "status": { - "type": "string", - "title": "Status", - "enum": [ - "closed", - "open", - "resolved" - ], - "enumColors": { - "closed": "blue", - "open": "red", - "resolved": "green" - }, - "description": "The status of the incident" - }, - "url": { - "type": "string", - "format": "url", - "title": "URL" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "title": "Tags" - }, - "responders": { - "type": "array", - "title": "Responders", - "description": "Responders to the alert" - }, - "priority": { - "type": "string", - "title": "Priority" - }, - "createdAt": { - "title": "Create At", - "type": "string", - "format": "date-time" - }, - "updatedAt": { - "title": "Updated At", - "type": "string", - "format": "date-time" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": { - "services": { - "title": "Impacted Services", - "target": "opsGenieService", - "many": true, - "required": false - } - } -} -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: incident - selector: - query: 'true' - apiQueryParams: - status: open - port: - entity: - mappings: - identifier: .id - title: .message - blueprint: '"opsGenieIncident"' - properties: - status: .status - responders: .responders - priority: .priority - tags: .tags - url: .links.web - createdAt: .createdAt - updatedAt: .updatedAt - description: .description - relations: - services: .impactedServices -``` - -
- -### Alert - -
-Alert blueprint - -```json showLineNumbers -{ - "identifier": "opsGenieAlert", - "description": "This blueprint represents an OpsGenie alert in our software catalog", - "title": "OpsGenie Alert", - "icon": "OpsGenie", - "schema": { - "properties": { - "description": { - "title": "Description", - "type": "string" - }, - "status": { - "type": "string", - "title": "Status", - "enum": [ - "closed", - "open" - ], - "enumColors": { - "closed": "green", - "open": "red" - }, - "description": "The status of the alert" - }, - "acknowledged": { - "type": "boolean", - "title": "Acknowledged" - }, - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "title": "Tags" - }, - "responders": { - "type": "array", - "title": "Responders", - "description": "Responders to the alert" - }, - "integration": { - "type": "string", - "title": "Integration", - "description": "The name of the Integration" - }, - "priority": { - "type": "string", - "title": "Priority" - }, - "sourceName": { - "type": "string", - "title": "Source Name", - "description": "Alert source name" - }, - "createdBy": { - "title": "Created By", - "type": "string", - "format": "user" - }, - "createdAt": { - "title": "Create At", - "type": "string", - "format": "date-time" - }, - "updatedAt": { - "title": "Updated At", - "type": "string", - "format": "date-time" - }, - "count": { - "title": "Count", - "type": "number" - } - }, - "required": [] - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "relations": { - "relatedIncident": { - "title": "Related Incident", - "target": "opsGenieIncident", - "required": false, - "many": false - } - } -} -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: alert - selector: - query: 'true' - apiQueryParams: - status: open - port: - entity: - mappings: - identifier: .id - title: .message - blueprint: '"opsGenieAlert"' - properties: - status: .status - acknowledged: .acknowledged - responders: .responders - priority: .priority - sourceName: .source - tags: .tags - count: .count - createdBy: .owner - createdAt: .createdAt - updatedAt: .updatedAt - description: .description - integration: .integration.name - relations: - relatedIncident: 'if (.alias | contains("_")) then (.alias | split("_")[0]) else null end' -``` - -
- -:::tip filter alerts and incidents -The integration provides an option to filter the data that is retrieved from the OpsGenie API using the following attributes: - -1. `createdAt`: The date and time the alert or incident was created -2. `lastOccurredAt`: The date and time the alert or incident was last occurred -3. `snoozedUntil`: The date and time the alert or incident was snoozed until -4. `priority`: The priority of the alert or incident. Accepts values such as `P1`, `P2`, `P3`, `P4` and `P5` -5. `status`: The status of the alert or incident. Accepts values such as `open`, `closed` and `resolved` -6. `isSeen`: Whether the alert or incident has been seen. Accepts a boolean `true` or `false` -7. `acknowledged`: Whether the alert or incident has been acknowledged. Accepts a boolean `true` or `false` -8. `snoozed`: Whether the alert or incident has been snoozed. Accepts a boolean `true` or `false` -9. `owner`: The owner of the alert or incident. Accepts an OpsGenie username -10. `teams`: The teams associated with the alert or incident -11. `acknowledgedBy`: The user who acknowledged the alert or incident -12. `closedBy`: The user who closed the alert or incident -13. `message`: The message of the alert or incident - -These attributes can be enabled using the path: `selector.apiQueryParams`. By default, the integration fetches `open` alerts and incidents. -::: - -### Current On-call -To bring the current on-call users, update your configuration mapping to populate the `OpsGenieTeam` blueprint with team and on-call data. This will enable you to view on-call information at the service level: - -
-Integration configuration - -```yaml showLineNumbers -createMissingRelatedEntities: true -deleteDependentEntities: true -resources: - - kind: schedule-oncall - selector: - query: 'true' - port: - entity: - mappings: - identifier: .ownerTeam.id - title: .ownerTeam.name - blueprint: '"opsGenieTeam"' - properties: - oncallUsers: .__currentOncalls.onCallRecipients -``` - -
+Examples of blueprints and the relevant integration configurations can be found on the opsgenie [examples page](examples.md) ## Let's Test It @@ -1660,8 +1019,6 @@ The examples below pull data from the OpsGenie REST Api, in a defined scheduled ## Migration Guide to Version 0.2.0 This guide outlines how to update your existing OpsGenie integration configuration to take advantage of the performance improvements and breaking changes introduced in version 0.2.0. -Below is an overview of changes in version 0.2.0 - ### Key Improvements - **New Blueprints and Kinds**: Added new kinds for team, schedule, and schedule-oncall. @@ -1676,7 +1033,8 @@ Below is an overview of changes in version 0.2.0 ### Migration Steps -#### Step 1: Understand Existing Configuration +

Step 1: Understand Existing Configuration

+ In versions prior to 0.2.0, your Port app's configuration may have used a mapping like the one below:
@@ -1749,7 +1107,8 @@ resources: ```
-#### Step 2: Update to New Configuration +

Step 2: Update to New Configuration

+ To adapt to version 0.2.0, you will need to update your configuration as follows:
@@ -1824,7 +1183,7 @@ resources: In the updated configuration, the `opsGenieService` blueprint no longer includes properties like `oncallTeam`, `teamMembers`, and `oncallUsers`. These properties are now part of the new `OpsGenieTeam` blueprint. If you need to track on-call teams and users for each service, follow the steps below. -#### Step 3: Create the OpsGenieTeam Blueprint +

Step 3: Create the OpsGenieTeam Blueprint

To manage team-related data, create a new `OpsGenieTeam` blueprint in Port using the following schema: @@ -1870,7 +1229,7 @@ To manage team-related data, create a new `OpsGenieTeam` blueprint in Port using ```
-#### Step 4: Update the OpsGenieService Blueprint +

Step 4: Update the OpsGenieService Blueprint

Next, update the `opsGenieService` blueprint to reference the `OpsGenieTeam` blueprint by establishing a relation and mirroring relevant properties: @@ -1949,7 +1308,7 @@ Next, update the `opsGenieService` blueprint to reference the `OpsGenieTeam` blu ``` -#### Step 5: Update the Mapping Configuration +

Step 5: Update the Mapping Configuration

Update your configuration mapping to correctly populate the `OpsGenieTeam` blueprint with team and on-call data. This will enable you to view on-call team information at the service level: @@ -2001,7 +1360,7 @@ resources: ``` -#### Final Step: Full Configuration Example +

Final Step: Full Configuration Example

After completing these changes, your configuration should look like this, incorporating blueprints for `team`, `service`, `alert` and `incident`:
diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/example.md b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/examples.md similarity index 100% rename from docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/example.md rename to docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/examples.md diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/pagerduty.md b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/pagerduty.md index 514e87467..dec781e01 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/pagerduty.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/incident-management/pagerduty/pagerduty.md @@ -57,6 +57,9 @@ Choose one of the following installation methods: +Using this installation option means that the integration will be able to update Port in real time using webhooks. + +

Prerequisites

@@ -68,7 +71,7 @@ For details about the available parameters for the installation, see the table b - + @@ -183,13 +186,14 @@ This table summarizes the available parameters for the installation. - - -This workflow will run the PagerDuty integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option +This workflow/pipeline will run the PagerDuty integration once and then exit, this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates +If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -227,16 +231,11 @@ jobs: -This pipeline will run the PagerDuty integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -290,7 +289,7 @@ pipeline { - + @@ -335,11 +334,6 @@ steps: -This workflow will run the PagerDuty integration once and then exit, this is useful for **scheduled** ingestion of data. 
- -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -772,7 +766,7 @@ To enrich your PagerDuty incident entities with analytics data, follow the steps To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. -Additional examples of blueprints and the relevant integration configurations can be found on the pagerduty [examples page](example.md) +Additional examples of blueprints and the relevant integration configurations can be found on the pagerduty [examples page](examples.md) ## Let's Test It diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/project-management/jira/jira.md b/docs/build-your-software-catalog/sync-data-to-catalog/project-management/jira/jira.md index 5197e6323..3df3e5a4b 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/project-management/jira/jira.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/project-management/jira/jira.md @@ -10,6 +10,8 @@ import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_expl import JiraIssueBlueprint from "/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/jira/\_example_jira_issue_blueprint.mdx" import JiraIssueConfiguration from "/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/jira/\_example_jira_issue_configuration.mdx" import JiraIssueConfigurationPython from 
"/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/jira/\_example_jira_issue_configuration_python.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" + # Jira @@ -50,49 +52,23 @@ Choose one of the following installation methods: Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: +

Prerequisites

-| Parameter | Description | Example | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | ------- | -| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `integration.secrets.atlassianUserEmail` | The email of the user used to query Jira | user@example.com | ✅ | -| `integration.secrets.atlassianUserToken` | [Jira API token](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/) generated by the user | | ✅ | -| `integration.config.jiraHost` | The URL of your Jira | https://example.atlassian.net | ✅ | -| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Jira | https://my-ocean-integration.com | ✅ | + - +For details about the available parameters for the installation, see the table below. -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-jira-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set scheduledResyncInterval=120 \ - --set integration.identifier="my-jira-integration" \ - --set integration.type="jira" \ - --set integration.eventListener.type="POLLING" \ - --set integration.config.jiraHost="string" \ - --set integration.secrets.atlassianUserEmail="string" \ - --set integration.secrets.atlassianUserToken="string" -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-jira-integration` in your git repository with the content: @@ -184,35 +160,53 @@ kubectl apply -f my-ocean-jira-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Example | Required | +|------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|----------| +| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `integration.secrets.atlassianUserEmail` | The email of the user used to query Jira | user@example.com | ✅ | +| `integration.secrets.atlassianUserToken` | [Jira API token](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/) generated by the user | | ✅ | +| `integration.config.jiraHost` | The URL of your Jira | https://example.atlassian.net | ✅ | +| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Jira | https://my-ocean-integration.com | ✅ | +| `integration.eventListener.type` | The event listener type. Read more about [event listeners](https://ocean.getport.io/framework/features/event-listener) | | ✅ | +| `integration.type` | The integration to be installed | | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync. When not set the integration will resync for each event listener resync event. 
Read more about [scheduledResyncInterval](https://ocean.getport.io/develop-an-integration/integration-configuration/#scheduledresyncinterval---run-scheduled-resync) | | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | | ❌ | + +
- - -This workflow will run the Jira integration once and then exit, this is useful for **scheduled** ingestion of data. +This workflow/pipeline will run the Jira integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning Realtime updates in Port +:::warning Realtime updates If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): | Parameter | Description | Example | Required | |----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------|----------| | `port_client_id` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) id | | ✅ | | `port_client_secret` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) secret | | ✅ | -| `port_base_url` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `port_base_url` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | | `config -> jira_host` | The URL of your Jira | https://example.atlassian.net | ✅ | | `config -> atlassian_user_email` | The email of the user used to query Jira | user@example.com | ✅ | | `config -> atlassian_user_token` | [Jira API 
token](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/) generated by the user | | ✅ | | `initialize_port_resources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | -| `send_raw_data_examples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | | ❌ | - +| `send_raw_data_examples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | | ❌ | | `identifier` | The identifier of the integration that will be installed | | ❌ | -| `version` | The version of the integration that will be installed | latest | ❌ |` +| `version` | The version of the integration that will be installed | latest | ❌ |
@@ -253,15 +247,11 @@ jobs:
-This pipeline will run the Jira integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Tip for Jenkins agent Your Jenkins agent should be able to run docker commands. ::: -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -316,7 +306,7 @@ pipeline { - + @@ -361,11 +351,8 @@ steps: -This workflow will run the Jira integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: + Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -680,6 +667,292 @@ resources:
+
+## Let's Test It
+
+This section includes sample response data from Jira. In addition, it includes the entity created from the resync event based on the Ocean configuration provided in the previous section.
+
+### Payload
+
+Here is an example of the payload structure from Jira:
+
+
+ Project response data + +```json showLineNumbers +{ + "expand": "description,lead,issueTypes,url,projectKeys,permissions,insight", + "self": "https://myaccount.atlassian.net/rest/api/3/project/10000", + "id": "10000", + "key": "PA", + "name": "Port-AI", + "avatarUrls": { + "48x48": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413", + "24x24": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413?size=small", + "16x16": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413?size=xsmall", + "32x32": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413?size=medium" + }, + "projectTypeKey": "software", + "simplified": true, + "style": "next-gen", + "isPrivate": false, + "properties": {}, + "entityId": "7f4f8d6f-705b-4074-84be-46f0d012cd8e", + "uuid": "7f4f8d6f-705b-4074-84be-46f0d012cd8e" +} +``` + +
+ +
+ Issue response data + +```json showLineNumbers +{ + "expand": "operations,versionedRepresentations,editmeta,changelog,customfield_10010.requestTypePractice,renderedFields", + "id": "10000", + "self": "https://myaccount.atlassian.net/rest/api/3/issue/10000", + "key": "PA-1", + "fields": { + "statuscategorychangedate": "2023-11-06T11:02:59.341+0000", + "issuetype": { + "self": "https://myaccount.atlassian.net/rest/api/3/issuetype/10001", + "id": "10001", + "description": "Tasks track small, distinct pieces of work.", + "iconUrl": "https://myaccount.atlassian.net/rest/api/2/universal_avatar/view/type/issuetype/avatar/10318?size=medium", + "name": "Task", + "subtask": false, + "avatarId": 10318, + "entityId": "a7309bf9-70c5-4237-bdaf-0261037b6ecc", + "hierarchyLevel": 0 + }, + "timespent": "None", + "customfield_10030": "None", + "project": { + "self": "https://myaccount.atlassian.net/rest/api/3/project/10000", + "id": "10000", + "key": "PA", + "name": "Port-AI", + "projectTypeKey": "software", + "simplified": true, + "avatarUrls": { + "48x48": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413", + "24x24": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413?size=small", + "16x16": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413?size=xsmall", + "32x32": "https://myaccount.atlassian.net/rest/api/3/universal_avatar/view/type/project/avatar/10413?size=medium" + } + }, + "customfield_10031": "None", + "customfield_10032": "None", + "fixVersions": [], + "aggregatetimespent": "None", + "resolution": "None", + "customfield_10027": "None", + "customfield_10028": "None", + "customfield_10029": "None", + "resolutiondate": "None", + "workratio": -1, + "watches": { + "self": "https://myaccount.atlassian.net/rest/api/3/issue/PA-1/watchers", + "watchCount": 1, + "isWatching": true + }, + "lastViewed": "None", + "created": "2023-11-06T11:02:59.000+0000", + 
"customfield_10020": "None", + "customfield_10021": "None", + "customfield_10022": "None", + "priority": { + "self": "https://myaccount.atlassian.net/rest/api/3/priority/3", + "iconUrl": "https://myaccount.atlassian.net/images/icons/priorities/medium.svg", + "name": "Medium", + "id": "3" + }, + "customfield_10023": "None", + "customfield_10024": "None", + "customfield_10025": "None", + "labels": ["infra"], + "customfield_10026": "None", + "customfield_10016": "None", + "customfield_10017": "None", + "customfield_10018": { + "hasEpicLinkFieldDependency": false, + "showField": false, + "nonEditableReason": { + "reason": "PLUGIN_LICENSE_ERROR", + "message": "The Parent Link is only available to Jira Premium users." + } + }, + "customfield_10019": "0|hzzzzz:", + "timeestimate": "None", + "aggregatetimeoriginalestimate": "None", + "versions": [], + "issuelinks": [], + "assignee": { + "self": "https://myaccount.atlassian.net/rest/api/3/user?accountId=712020%3A05acda87-42da-44d8-b21e-f71a508e5d11", + "accountId": "712020:05acda87-42da-44d8-b21e-f71a508e5d11", + "emailAddress": "username@example.com.io", + "avatarUrls": { + "48x48": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "24x24": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "16x16": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "32x32": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png" + }, + "displayName": "User Name", + "active": true, + "timeZone": "UTC", + "accountType": "atlassian" + }, + "updated": 
"2023-11-06T11:03:18.244+0000", + "status": { + "self": "https://myaccount.atlassian.net/rest/api/3/status/10000", + "description": "", + "iconUrl": "https://myaccount.atlassian.net/", + "name": "To Do", + "id": "10000", + "statusCategory": { + "self": "https://myaccount.atlassian.net/rest/api/3/statuscategory/2", + "id": 2, + "key": "new", + "colorName": "blue-gray", + "name": "To Do" + } + }, + "components": [], + "timeoriginalestimate": "None", + "description": "None", + "customfield_10010": "None", + "customfield_10014": "None", + "customfield_10015": "None", + "customfield_10005": "None", + "customfield_10006": "None", + "security": "None", + "customfield_10007": "None", + "customfield_10008": "None", + "aggregatetimeestimate": "None", + "customfield_10009": "None", + "summary": "Setup infra", + "creator": { + "self": "https://myaccount.atlassian.net/rest/api/3/user?accountId=712020%3A05acda87-42da-44d8-b21e-f71a508e5d11", + "accountId": "712020:05acda87-42da-44d8-b21e-f71a508e5d11", + "emailAddress": "username@example.com.io", + "avatarUrls": { + "48x48": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "24x24": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "16x16": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "32x32": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png" + }, + "displayName": "User Name", + "active": true, + "timeZone": "UTC", + "accountType": "atlassian" + }, + "subtasks": [], + "reporter": { + "self": 
"https://myaccount.atlassian.net/rest/api/3/user?accountId=712020%3A05acda87-42da-44d8-b21e-f71a508e5d11", + "accountId": "712020:05acda87-42da-44d8-b21e-f71a508e5d11", + "emailAddress": "username@example.com.io", + "avatarUrls": { + "48x48": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "24x24": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "16x16": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png", + "32x32": "https://secure.gravatar.com/avatar/0d5d34ceb820d324d69046a1b2f51dc0?d=https%3A%2F%2Favatar-management--avatars.us-west-2.prod.public.atl-paas.net%2Finitials%2FIC-3.png" + }, + "displayName": "User Name", + "active": true, + "timeZone": "UTC", + "accountType": "atlassian" + }, + "aggregateprogress": { + "progress": 0, + "total": 0 + }, + "customfield_10001": "None", + "customfield_10002": "None", + "customfield_10003": "None", + "customfield_10004": "None", + "environment": "None", + "duedate": "None", + "progress": { + "progress": 0, + "total": 0 + }, + "votes": { + "self": "https://myaccount.atlassian.net/rest/api/3/issue/PA-1/votes", + "votes": 0, + "hasVoted": false + } + } +} +``` + +
+ +### Mapping Result + +The combination of the sample payload and the Ocean configuration generates the following Port entity: + +
+ Project entity in Port + +```json showLineNumbers +{ + "identifier": "PA", + "title": "Port-AI", + "icon": null, + "blueprint": "jiraProject", + "team": [], + "properties": { + "url": "https://myaccount.atlassian.net/projects/PA", + "totalIssues": 100 + }, + "relations": {}, + "createdAt": "2023-11-06T11:22:05.433Z", + "createdBy": "hBx3VFZjqgLPEoQLp7POx5XaoB0cgsxW", + "updatedAt": "2023-11-06T11:22:05.433Z", + "updatedBy": "hBx3VFZjqgLPEoQLp7POx5XaoB0cgsxW" +} +``` + +
+ +
+ Issue entity in Port + +```json showLineNumbers +{ + "identifier": "PA-1", + "title": "Setup infra", + "icon": null, + "blueprint": "jiraIssue", + "team": [], + "properties": { + "url": "https://myaccount.atlassian.net/browse/PA-1", + "status": "To Do", + "issueType": "Task", + "components": [], + "assignee": "username@example.com.io", + "reporter": "username@example.com.io", + "creator": "username@example.com.io", + "priority": "3", + "created": "2023-11-06T11:02:59.000+0000", + "updated": "2023-11-06T11:03:18.244+0000" + }, + "relations": { + "parentIssue": null, + "project": "PA", + "subtasks": [] + }, + "createdAt": "2023-11-06T11:22:07.550Z", + "createdBy": "hBx3VFZjqgLPEoQLp7POx5XaoB0cgsxW", + "updatedAt": "2023-11-06T11:22:07.550Z", + "updatedBy": "hBx3VFZjqgLPEoQLp7POx5XaoB0cgsxW" +} +``` + +
+ + ## Relevant Guides For relevant guides and examples, see the [guides section](https://docs.getport.io/guides?tags=Jira). diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/project-management/linear/linear.md b/docs/build-your-software-catalog/sync-data-to-catalog/project-management/linear/linear.md index ca61bffd0..a883baa63 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/project-management/linear/linear.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/project-management/linear/linear.md @@ -10,6 +10,7 @@ import LinearIssueBlueprint from "/docs/build-your-software-catalog/custom-integ import LinearIssueConfiguration from "/docs/build-your-software-catalog/custom-integration/webhook/examples/resources/linear/\_example_linear_issue_configuration.mdx" import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" # Linear @@ -43,53 +44,27 @@ Choose one of the following installation methods:
- - -

Prerequisites

- - + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: +

Prerequisites

-| Parameter | Description | Example | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------- | ------- | -| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `integration.secrets.linearApiKey` | Linear [API key](https://developers.linear.app/docs/graphql/working-with-the-graphql-api#personal-api-keys) used to query the Linear GraphQL API | | ✅ | -| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Linear | https://my-ocean-integration.com | ✅ | + - +For details about the available parameters for the installation, see the table below. -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install my-linear-integration port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set scheduledResyncInterval=120 \ - --set integration.identifier="my-linear-integration" \ - --set integration.type="linear" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.linearApiKey="string" -``` + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-linear-integration` in your git repository with the content: @@ -177,32 +152,50 @@ kubectl apply -f my-ocean-linear-integration.yaml +This table summarizes the available parameters for the installation. 
+ +| Parameter | Description | Example | Required | +|------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|----------| +| `port.clientId` | Your port [client id](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.clientSecret` | Your port [client secret](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials) | | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `integration.secrets.linearApiKey` | Linear [API key](https://developers.linear.app/docs/graphql/working-with-the-graphql-api#personal-api-keys) used to query the Linear GraphQL API | | ✅ | +| `integration.config.appHost` | The host of the Port Ocean app. Used to set up the integration endpoint as the target for webhooks created in Linear | https://my-ocean-integration.com | ✅ | +| `integration.eventListener.type` | The event listener type. Read more about [event listeners](https://ocean.getport.io/framework/features/event-listener) | | ✅ | +| `integration.type` | The integration to be installed | | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync. When not set the integration will resync for each event listener resync event. Read more about [scheduledResyncInterval](https://ocean.getport.io/develop-an-integration/integration-configuration/#scheduledresyncinterval---run-scheduled-resync) | | ❌ | +| `initializePortResources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. 
Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | | ❌ | + +
+
- - -This workflow will run the Linear integration once and then exit, this is useful for **scheduled** ingestion of data. +This workflow/pipeline will run the Linear integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning Realtime updates in Port +:::warning Real-time updates If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): -| Parameter | Description | Example | Required | -|----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------|----------| -| `port_client_id` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) id | | ✅ | -| `port_client_secret` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) secret | | ✅ | -| `port_base_url` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | -| `config -> linear_api_key` | Linear [API key](https://developers.linear.app/docs/graphql/working-with-the-graphql-api#personal-api-keys) used to query the Linear GraphQL API | | ✅ | -| `initialize_port_resources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. 
Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | -| `identifier` | The identifier of the integration that will be installed | | ❌ | -| `version` | The version of the integration that will be installed | latest | ❌ |` -| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | true | | ❌ | +| Parameter | Description | Example | Required | +|-----------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|----------| +| `port_client_id` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) id | | ✅ | +| `port_client_secret` | Your Port client ([How to get the credentials](https://docs.getport.io/build-your-software-catalog/custom-integration/api/#find-your-port-credentials)) secret | | ✅ | +| `port_base_url` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | | ✅ | +| `config -> linear_api_key` | Linear [API key](https://developers.linear.app/docs/graphql/working-with-the-graphql-api#personal-api-keys) used to query the Linear GraphQL API | | ✅ | +| `initialize_port_resources` | Default true, When set to true the integration will create default blueprints and the port App config Mapping. 
 Read more about [initializePortResources](https://ocean.getport.io/develop-an-integration/integration-configuration/#initializeportresources---initialize-port-resources) | | ❌ | +| `identifier` | The identifier of the integration that will be installed | | ❌ | +| `version` | The version of the integration that will be installed | latest | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping. Default is true | true | ❌ | 
:::tip Ocean Sail Github Action @@ -240,15 +233,11 @@ jobs:
-This pipeline will run the Linear integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Tip for Jenkins agent Your Jenkins agent should be able to run docker commands. ::: -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Jenkins Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -297,9 +286,8 @@ pipeline { ``` - - + @@ -340,13 +328,7 @@ steps: ``` - -This workflow will run the Linear integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -660,6 +642,216 @@ resources: +## Let's Test It + +This section includes sample response data from Linear. In addition, it includes the entity created from the resync event based on the Ocean configuration provided in the previous section. + +### Payload + +Here is an example of the payload structure from Linear: + +
+ Team response data + +```json showLineNumbers +{ + "id": "92d25fa4-fb1c-449f-b314-47f82e8f280d", + "name": "Port", + "key": "POR", + "description": null, + "organization": { + "id": "36968e1b-496c-4610-8c25-641364da172e", + "name": "Getport", + "urlKey": "getport" + } +} +``` + +
+ +
+Label response data + +```json showLineNumbers +{ + "id": "36f84d2c-7b7d-4a71-96f2-6ea4140004d5", + "createdAt": "2024-05-17T15:17:40.858Z", + "updatedAt": "2024-05-17T15:17:40.858Z", + "archivedAt": null, + "name": "New-sample-label", + "description": null, + "color": "#bec2c8", + "isGroup": true, + "parent": null, + "children": { + "edges": [ + { + "node": { + "id": "2e483c90-2aca-4db6-924d-b0571d49f691" + } + } + ] + } +} +``` + +
+ + +
+ Issue response data + +```json showLineNumbers +{ + "id": "9b4745c2-a8e6-4432-9e56-0fa97b79ccbf", + "createdAt": "2024-05-16T21:52:00.299Z", + "updatedAt": "2024-05-17T09:27:40.077Z", + "archivedAt": null, + "number": 2, + "title": "sub issue with new title", + "priority": 3, + "estimate": null, + "sortOrder": -991, + "startedAt": null, + "completedAt": null, + "startedTriageAt": null, + "triagedAt": null, + "canceledAt": null, + "autoClosedAt": null, + "autoArchivedAt": null, + "dueDate": null, + "slaStartedAt": null, + "slaBreachesAt": null, + "trashed": null, + "snoozedUntilAt": null, + "labelIds": [ + "402b218c-938c-4ddf-85db-0019bc632316" + ], + "previousIdentifiers": [], + "subIssueSortOrder": -56.17340471045278, + "priorityLabel": "Medium", + "integrationSourceType": null, + "identifier": "POR-2", + "url": "https://linear.app/getport/issue/POR-2/sub-issue-with-new-title", + "branchName": "mor/por-2-sub-issue-with-new-title", + "customerTicketCount": 0, + "description": "", + "descriptionState": "AQG/pOWPAgAHAQtwcm9zZW1pcnJvcgMJcGFyYWdyYXBoAA==", + "team": { + "id": "92d25fa4-fb1c-449f-b314-47f82e8f280d", + "name": "Port", + "key": "POR" + }, + "state": { + "name": "Todo" + }, + "creator": { + "name": "Mor Paz", + "email": "mor@getport.io" + }, + "assignee": { + "name": "Dudi Elhadad", + "email": "dudi@getport.io" + }, + "parent": { + "id": "5ddd8e85-ad89-4c96-b901-0b901b29100d", + "identifier": "POR-1" + } +} + +``` + +
+ +### Mapping Result + +The combination of the sample payload and the Ocean configuration generates the following Port entity: + +
+ Team entity in Port + +```json showLineNumbers +{ + "identifier": "POR", + "title": "Port", + "icon": null, + "blueprint": "linearTeam", + "team": [], + "properties": { + "url": "https://linear.app/getport/team/POR", + "workspaceName": "Getport" + }, + "relations": {}, + "createdAt": "2024-05-19T16:19:15.232Z", + "createdBy": "KZ5zDPudPshQMShUb4cLopBEE1fNSJGE", + "updatedAt": "2024-05-19T16:19:15.232Z", + "updatedBy": "KZ5zDPudPshQMShUb4cLopBEE1fNSJGE" +} +``` + +
+ +
+Label entity in Port
+
+```json showLineNumbers
+{
+  "identifier": "36f84d2c-7b7d-4a71-96f2-6ea4140004d5",
+  "title": "New-sample-label",
+  "icon": null,
+  "blueprint": "linearLabel",
+  "team": [],
+  "properties": {
+    "isGroup": true
+  },
+  "relations": {
+    "childLabels": [],
+    "parentLabel": null
+  },
+  "createdAt": "2024-05-19T16:19:17.747Z",
+  "createdBy": "KZ5zDPudPshQMShUb4cLopBEE1fNSJGE",
+  "updatedAt": "2024-05-19T16:19:17.747Z",
+  "updatedBy": "KZ5zDPudPshQMShUb4cLopBEE1fNSJGE"
+}
+```
+
+ +
+Issue entity in Port + +```json showLineNumbers +{ + "identifier": "POR-2", + "title": "sub issue with new title", + "icon": null, + "blueprint": "linearIssue", + "team": [], + "properties": { + "status": "Todo", + "url": "https://linear.app/getport/issue/POR-2/sub-issue-with-new-title", + "created": "2024-05-16T21:52:00.299Z", + "priority": "Medium", + "assignee": "dudi@getport.io", + "updated": "2024-05-17T09:27:40.077Z", + "creator": "mor@getport.io" + }, + "relations": { + "team": "POR", + "labels": [ + "402b218c-938c-4ddf-85db-0019bc632316" + ], + "parentIssue": "POR-1" + }, + "createdAt": "2024-05-19T16:19:21.143Z", + "createdBy": "KZ5zDPudPshQMShUb4cLopBEE1fNSJGE", + "updatedAt": "2024-05-19T16:19:21.143Z", + "updatedBy": "KZ5zDPudPshQMShUb4cLopBEE1fNSJGE" +} +``` + +
+ ## Alternative installation via webhook diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_azure_premise.mdx b/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_azure_premise.mdx index 1a55fb191..342588ae5 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_azure_premise.mdx +++ b/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_azure_premise.mdx @@ -1,27 +1,18 @@ -This pipeline will run the {props.name} integration once and then exit, this is useful for **scheduled** ingestion of data. - :::tip Your Azure Devops agent should be able to run docker commands. Learn more about agents [here](https://learn.microsoft.com/en-us/azure/devops/pipelines/agents/agents?view=azure-devops&tabs=yaml%2Cbrowser). ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use -the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: -
-:::tip Configure the variables in a variable group Variable groups store values and secrets you'll use in your pipelines across your project. [Learn more](https://learn.microsoft.com/en-us/azure/devops/pipelines/library/variable-groups?view=azure-devops&tabs=yaml) **Setting Up Your Credentials** -1. Create a Variable Group: Name it **port-ocean-credentials**. Store the required variables from the table. -2. Authorize Your Pipeline: +1. Create a Variable Group: Name it **port-ocean-credentials**. +2. Store the required variables (see the table below). +3. Authorize Your Pipeline: - Go to **"Library" -> "Variable groups."** - Find **port-ocean-credentials** and click on it. - Select "Pipeline Permissions" and add your pipeline to the authorized list. -::: -
diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/_category_.json b/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/_category_.json index 9b33d01ca..aec5cb1df 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/_category_.json +++ b/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/_category_.json @@ -1,4 +1,4 @@ { - "label": "Terraform Cloud", + "label": "Terraform", "position": 12 } diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/examples.md b/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/examples.md new file mode 100644 index 000000000..30e7bc1a1 --- /dev/null +++ b/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/examples.md @@ -0,0 +1,475 @@ +--- +sidebar_position: 2 +--- + +# Examples +To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). Find the integration in the list of data sources and click on it to open the playground. + +## Organization + +
+Organization blueprint + +```json showLineNumbers + { + "identifier": "terraformCloudOrganization", + "description": "This blueprint represents an organization in Terraform Cloud", + "title": "Terraform Cloud Organization", + "icon": "Terraform", + "schema": { + "properties": { + "externalId": { + "type": "string", + "title": "External ID", + "description": "The external ID of the organization" + }, + "ownerEmail": { + "type": "string", + "title": "Owner Email", + "description": "The email associated with the organization" + }, + "collaboratorAuthPolicy": { + "type": "string", + "title": "Collaborator Authentication Policy", + "description": "Policy for collaborator authentication" + }, + "planExpired": { + "type": "string", + "title": "Plan Expired", + "description": "Indicates if plan is expired" + }, + "planExpiresAt": { + "type": "string", + "format": "date-time", + "title": "Plan Expiry Date", + "description": "The data and time which the plan expires" + }, + "permissions": { + "type": "object", + "title": "Permissions", + "description": "Permissions associated with the organization" + }, + "samlEnabled": { + "type": "boolean", + "title": "SAML Enabled", + "description": "Indicates if SAML is enabled for the organization" + }, + "defaultExecutionMode": { + "type": "string", + "title": "Default Execution Mode", + "description": "The default execution mode for the organization" + } + } + }, + "mirrorProperties": {}, + "calculationProperties": {}, + "aggregationProperties": {}, + "relations": {} + } +``` +
+ +
+Integration configuration + +```yaml showLineNumbers +- kind: organization + selector: + query: "true" + port: + entity: + mappings: + identifier: .id + title: .attributes.name + blueprint: '"terraformCloudOrganization"' + properties: + externalId: .attributes."external-id" + ownerEmail: .attributes.email + collaboratorAuthPolicy: .attributes."collaborator-auth-policy" + planExpired: .attributes."plan-expired" + planExpiresAt: .attributes."plan-expires-at" + permissions: .attributes.permissions + samlEnabled: .attributes."saml-enabled" + defaultExecutionMode: .attributes."default-execution-mode" +``` +
+ +## Project +
+Project blueprint

+```json showLineNumbers
+  {
+    "identifier": "terraformCloudProject",
+    "description": "This blueprint represents a project in Terraform Cloud",
+    "title": "Terraform Cloud Project",
+    "icon": "Terraform",
+    "schema": {
+      "properties": {
+        "name": {
+          "type": "string",
+          "title": "Project Name",
+          "description": "The name of the Terraform Cloud project"
+        },
+        "permissions": {
+          "type": "object",
+          "title": "Permissions",
+          "description": "The permissions on the project"
+        }
+      }
+    },
+    "mirrorProperties": {},
+    "calculationProperties": {},
+    "aggregationProperties": {},
+    "relations": {
+      "organization": {
+        "title": "Terraform Cloud Organization",
+        "target": "terraformCloudOrganization",
+        "required": true,
+        "many": false
+      }
+    }
+  }
+```
+
+ +
+Integration configuration + +```yaml showLineNumbers +- kind: project + selector: + query: "true" + port: + entity: + mappings: + identifier: .id + title: .attributes.name + blueprint: '"terraformCloudProject"' + properties: + name: .attributes.name + permissions: .attributes.permissions + relations: + organization: .relationships.organization.data.id +``` +
+ +## Workspace + +
+Workspace blueprint

+```json showLineNumbers
+  {
+    "identifier": "terraformCloudWorkspace",
+    "description": "This blueprint represents a workspace in Terraform Cloud",
+    "title": "Terraform Cloud Workspace",
+    "icon": "Terraform",
+    "schema": {
+      "properties": {
+        "organization": {
+          "type": "string",
+          "title": "Organization",
+          "description": "The organization that the workspace belongs to"
+        },
+        "createdAt": {
+          "type": "string",
+          "format": "date-time",
+          "title": "Creation Time",
+          "description": "The creation timestamp of the workspace"
+        },
+        "updatedAt": {
+          "type": "string",
+          "format": "date-time",
+          "title": "Last Updated",
+          "description": "The last update timestamp of the workspace"
+        },
+        "terraformVersion": {
+          "type": "string",
+          "title": "Terraform Cloud Version",
+          "description": "Version of Terraform cloud used by the workspace"
+        },
+        "locked": {
+          "type": "boolean",
+          "title": "Locked Status",
+          "description": "Indicates whether the workspace is locked"
+        },
+        "executionMode": {
+          "type": "string",
+          "title": "Execution Mode",
+          "description": "The execution mode of the workspace"
+        },
+        "resourceCount": {
+          "type": "number",
+          "title": "Resource Count",
+          "description": "Number of resources managed by the workspace"
+        },
+        "latestChangeAt": {
+          "type": "string",
+          "format": "date-time",
+          "title": "Latest Change",
+          "description": "Timestamp of the latest change in the workspace"
+        },
+        "tags": {
+          "type": "array",
+          "title": "Workspace Tags",
+          "description": "Terraform workspace tags"
+        }
+      }
+    },
+    "mirrorProperties": {},
+    "calculationProperties": {},
+    "aggregationProperties": {},
+    "relations": {
+      "currentStateVersion": {
+        "title": "Current State Version",
+        "target": "terraformCloudStateVersion",
+        "required": false,
+        "many": false
+      },
+      "project": {
+        "title": "Terraform Cloud Project",
+        "target": "terraformCloudProject",
+        "required": false,
+        "many": false
+      }
+    }
+  }
+```
+
+ + +
+Integration configuration + +```yaml showLineNumbers +- kind: workspace + selector: + query: "true" + port: + entity: + mappings: + identifier: .id + title: .attributes.name + blueprint: '"terraformCloudWorkspace"' + properties: + organization: .relationships.organization.data.id + createdAt: .attributes."created-at" + updatedAt: .attributes."updated-at" + terraformVersion: .attributes."terraform-version" + locked: .attributes.locked + executionMode: .attributes."execution-mode" + resourceCount: .attributes."resource-count" + latestChangeAt: .attributes."latest-change-at" + tags: .__tags + relations: + currentStateVersion: .relationships."current-state-version".data.id + project: .relationships.project.data.id +``` + +
+ +## Run + +
+Run blueprint + +```json showLineNumbers +{ + "identifier": "terraformCloudRun", + "description": "This blueprint represents a run in Terraform cloud", + "title": "Terraform Cloud Run", + "icon": "Terraform", + "schema": { + "properties": { + "createdAt": { + "type": "string", + "format": "date-time", + "title": "Creation Time", + "description": "The creation timestamp of the run" + }, + "status": { + "type": "string", + "title": "Run Status", + "description": "The current status of the run" + }, + "hasChanges": { + "type": "boolean", + "title": "Has Changes", + "description": "Indicates whether the run has changes" + }, + "isDestroy": { + "type": "boolean", + "title": "Is Destroy", + "description": "Indicates whether the run is a destroy operation" + }, + "message": { + "type": "string", + "title": "Run Message", + "description": "Message associated with the run" + }, + "terraformVersion": { + "type": "string", + "title": "Terraform Cloud Version", + "description": "Version of Terraform cloud used in the run" + }, + "appliedAt": { + "type": "string", + "format": "date-time", + "title": "Applied Time", + "description": "Timestamp when the run was applied" + }, + "plannedAt": { + "type": "string", + "format": "date-time", + "title": "Planned Time", + "description": "Timestamp when the run was planned" + }, + "source": { + "type": "string", + "title": "Run Source", + "description": "The source of the run initiation" + } + } + }, + "relations": { + "terraformCloudWorkspace": { + "title": "Terraform Cloud Workspace", + "target": "terraformCloudWorkspace", + "required": false, + "many": false + } + } +} +``` + +
+ +
+Integration configuration + +```yaml showLineNumbers +- kind: run + selector: + query: "true" + port: + entity: + mappings: + identifier: .id + title: .attributes.message + blueprint: '"terraformCloudRun"' + properties: + createdAt: .attributes."created-at" + status: .attributes.status + hasChanges: .attributes."has-changes" + isDestroy: .attributes."is-destroy" + message: .attributes.message + terraformVersion: .attributes."terraform-version" + appliedAt: .attributes."status-timestamps"."applied-at" + plannedAt: .attributes."status-timestamps"."planned-at" + source: .attributes.source + relations: + terraformCloudWorkspace: .relationships.workspace.data.id +``` + +
+ +## State Version + +
+State Version blueprint

+```json showLineNumbers
+{
+  "identifier": "terraformCloudStateVersion",
+  "description": "This blueprint represents a Terraform Cloud state version",
+  "title": "Terraform Cloud State Versions",
+  "icon": "Terraform",
+  "schema": {
+    "properties": {
+      "createdAt": {
+        "type": "string",
+        "format": "date-time",
+        "title": "Creation Time",
+        "description": "Timestamp when the state version was created"
+      },
+      "serial": {
+        "type": "number",
+        "title": "Serial Number",
+        "description": "A unique identifier for this version within the workspace"
+      },
+      "status": {
+        "type": "string",
+        "title": "Status",
+        "description": "The current status of the state version (e.g., 'queued', 'finished')"
+      },
+      "size": {
+        "type": "number",
+        "title": "Size",
+        "description": "The size of the resources"
+      },
+      "isResourcesProcessed": {
+        "type": "boolean",
+        "title": "Is Resources Processed",
+        "description": "Whether the resources have been processed"
+      },
+      "hostedStateDownloadUrl": {
+        "type": "string",
+        "title": "Download Url",
+        "format": "url",
+        "description": "Hosted state version download url"
+      },
+      "hostedJsonDownloadUrl": {
+        "type": "string",
+        "title": "JSON Download Url",
+        "format": "url",
+        "description": "Url for downloading state version in json format"
+      },
+      "outputData": {
+        "type": "array",
+        "title": "Output",
+        "description": "output returned from state version"
+      },
+      "vcsCommitUrl": {
+        "type": "string",
+        "title": "VCS Commit URL",
+        "format": "url",
+        "description": "URL of the VCS commit that triggered this state version"
+      }
+    }
+  },
+  "relations": {},
+  "mirrorProperties": {},
+  "calculationProperties": {},
+  "aggregationProperties": {}
+}
+```
+
+ +
+Integration configuration + +```yaml showLineNumbers +- kind: state-version + selector: + query: "true" + port: + entity: + mappings: + identifier: .id + title: .id + blueprint: '"terraformCloudStateVersion"' + properties: + createdAt: .attributes."created-at" + serial: .attributes.serial + status: .attributes.status + size: .attributes.size + isResourcesProcessed: .attributes."resources-processed" + hostedStateDownloadUrl: .attributes."hosted-state-download-url" + hostedJsonDownloadUrl: .attributes."hosted-json-state-download-url" + vcsCommitUrl: .attributes."vcs-commit-url" + outputData: .__output +``` + +
\ No newline at end of file diff --git a/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/terraform-cloud.md b/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/terraform-cloud.md index 27d3158e7..f18cd9a59 100644 --- a/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/terraform-cloud.md +++ b/docs/build-your-software-catalog/sync-data-to-catalog/terraform-cloud/terraform-cloud.md @@ -1,6 +1,6 @@ --- sidebar_position: 1 -title: Terraform Cloud +title: Terraform description: Terraform integration in Port --- @@ -9,41 +9,42 @@ import TabItem from "@theme/TabItem" import DockerParameters from "./\_terraform_one_time_docker_parameters.mdx" import PortApiRegionTip from "/docs/generalTemplates/_port_region_parameter_explanation_template.md" import OceanSaasInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_saas_installation.mdx" +import OceanRealtimeInstallation from "/docs/build-your-software-catalog/sync-data-to-catalog/templates/_ocean_realtime_installation.mdx" +import Prerequisites from "../templates/\_ocean_helm_prerequisites_block.mdx" -# Terraform Cloud and Terraform Enterprise - -The Terraform Cloud Integration for Port enables seamless import and synchronization of `organizations`, `projects`, `workspaces`, `runs`, and `state versions` from your Terraform infrastructure management into Port. This integration allows you to effectively monitor and manage your Terraform Cloud workspaces and runs within the Port platform. -An `Organization` is a shared space for one or more teams to collaborate on workspaces. -A `Project` in Terraform Cloud is a collection of infrastructure configurations, often corresponding to a code repository. It serves as the primary organizational unit, grouping related `workspaces`, `runs`, and `state versions` to manage and structure Terraform code for efficient deployment and collaboration. 
- -A `Workspace` represents a workspace in Terraform cloud. A workspace is a logical environment where Terraform manages infrastructure, such as a set of cloud resources. +# Terraform Cloud and Terraform Enterprise -A `Run` represents an instance of Terraform operations (plan, apply, or destroy) executed within a workspace. Each run holds information about the operation status, duration, and other relevant metadata. +Port's Terraform Cloud integration allows you to model Terraform Cloud resources in your software catalog and ingest data into them. -A `State Version` represents a versioned state file in Terraform. Each state version is immutable and represents the state of your managed infrastructure at a particular point in time. State versions are used to track the changes in your infrastructure and help with auditing, rollbacks, and historical analysis. +## Overview -## Common use cases +This integration allows you to: -- Synchronization of Infrastructure Management: Automatically synchronize workspace, run and state version data from Terraform Cloud into Port for centralized tracking and management. -- Monitoring Run Statuses: Keep track of run outcomes (success, failure, etc.) and durations, providing insights into the health and performance of your infrastructure management processes. -- Identify drifts between your Terraform configuration and what's effectively deployed in your Cloud. +- Map and organize your desired Terraform Cloud resources and their metadata in Port (see supported resources below). +- Watch for Terraform Cloud object changes (create/update/delete) in real-time, and automatically apply the changes to your entities in Port. +- Automatically synchronize workspace, run and state version data from Terraform Cloud into Port for centralized tracking and management. -## Terraform Enterprise (Self Hosted) +:::info Terraform enterprise (self hosted) Port supports both Terraform Cloud and Terraform Enterprise versions (self hosted). 
The following data model and use cases are common for both integrations. If installing Port exporter for Terraform Enterprise, you will be required to specify your Terraform 's host URL by passing the following parameter to the installer: `integration.config.appHost` +::: +### Supported Resources -## Prerequisites +The resources that can be ingested from Terraform Cloud into Port are listed below. It is possible to reference any field that appears in the API responses linked below in the mapping configuration. -To install the integration, you need a Kubernetes cluster that the integration's container chart will be deployed to. +- [`Organization`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/organizations) +- [`Project`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/projects) +- [`Workspace`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/workspaces) +- [`Run`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/run) +- [`State Version`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-versions) -Please make sure that you have [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) and [`helm`](https://helm.sh/) installed on your machine, and that your `kubectl` CLI is connected to the Kubernetes cluster where you plan to install the integration. -## Installation +## Setup Choose one of the following installation methods: @@ -55,53 +56,28 @@ Choose one of the following installation methods:
- + Using this installation option means that the integration will be able to update Port in real time using webhooks. -This table summarizes the available parameters for the installation. -Set them as you wish in the script below, then copy it and run it in your terminal: - -| Parameter | Description | Required | -| ---------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------- | -| `port.clientId` | Your Port client id | ✅ | -| `port.clientSecret` | Your Port client secret | ✅ | -| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | -| `integration.identifier` | Change the identifier to describe your integration | ✅ | -| `integration.type` | The integration type | ✅ | -| `integration.eventListener.type` | The event listener type | ✅ | -| `integration.config.terraformCloudHost` | Your Terraform host. For example https://app.terraform.io token | ✅ | -| `integration.config.terraformCloudToken` | The Terraform cloud API token | ✅ | -| `integration.config.appHost` | Your application's host url. Required when installing Terraform Enterprise (self hosted) | ✅ | -| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | -| `initializePortResources` | When set to true the integration will create default blueprints and the port App config Mapping, defaults is true. | ❌ | -| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managingthe integration mapping, default is true. | ❌ | +

Prerequisites

+ + + +For details about the available parameters for the installation, see the table below. -
-To install the integration using Helm, run the following command: - -```bash showLineNumbers -helm repo add --force-update port-labs https://port-labs.github.io/helm-charts -helm upgrade --install terraform port-labs/port-ocean \ - --set port.clientId="PORT_CLIENT_ID" \ - --set port.clientSecret="PORT_CLIENT_SECRET" \ - --set port.baseUrl="https://api.getport.io" \ - --set initializePortResources=true \ - --set sendRawDataExamples=true \ - --set integration.identifier="my-terraform-cloud-integration" \ - --set integration.type="terraform-cloud" \ - --set integration.eventListener.type="POLLING" \ - --set integration.secrets.terraformCloudHost="string" \ - --set integration.secrets.terraformCloudToken="string" -``` + + + + -To install the integration using ArgoCD, follow these steps: +To install the integration using ArgoCD: 1. Create a `values.yaml` file in `argocd/my-ocean-terraform-cloud-integration` in your git repository with the content: @@ -184,22 +160,42 @@ kubectl apply -f my-ocean-terraform-cloud-integration.yaml +This table summarizes the available parameters for the installation. + +| Parameter | Description | Required | +|------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|----------| +| `port.clientId` | Your Port client id | ✅ | +| `port.clientSecret` | Your Port client secret | ✅ | +| `port.baseUrl` | Your Port API URL - `https://api.getport.io` for EU, `https://api.us.getport.io` for US | ✅ | +| `integration.identifier` | Change the identifier to describe your integration | ✅ | +| `integration.type` | The integration type | ✅ | +| `integration.eventListener.type` | The event listener type | ✅ | +| `integration.config.appHost` | Your application's host url. 
Required when installing Terraform Enterprise (self hosted) | ✅ | +| `scheduledResyncInterval` | The number of minutes between each resync | ❌ | +| `initializePortResources` | When set to true the integration will create default blueprints and the port App config Mapping, defaults is true. | ❌ | +| `sendRawDataExamples` | Enable sending raw data examples from the third party API to port for testing and managing the integration mapping, default is true. | ❌ | +| `integration.config.terraformCloudHost` | Your Terraform host. For example `https://app.terraform.io` | ✅ | +| `integration.config.terraformCloudToken` | The Terraform cloud API token, docs can be found [here](https://developer.hashicorp.com/terraform/cloud-docs/users-teams-organizations/api-tokens) | ✅ | + +
+

Event listener

The integration uses polling to pull the configuration from Port every minute and check it for changes. If there is a change, a resync will occur.
- + - - -This workflow will run the Terraform cloud integration once and then exit, this is useful for **scheduled** ingestion of data. +This workflow/pipeline will run the Terraform Cloud integration once and then exit, this is useful for **scheduled** ingestion of data. -:::warning -If you want the integration to update Port in real time you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option +:::warning Real-time updates +If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. ::: + + + Make sure to configure the following [Github Secrets](https://docs.github.com/en/actions/security-guides/using-secrets-in-github-actions): @@ -235,14 +231,10 @@ jobs: -This pipeline will run the Terraform cloud integration once and then exit, this is useful for **scheduled** ingestion of data. :::tip Your Jenkins agent should be able to run docker commands. ::: -:::warning -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. -::: Make sure to configure the following [Terraform Cloud Credentials](https://www.jenkins.io/doc/book/using/using-credentials/) of `Secret Text` type: @@ -293,13 +285,7 @@ pipeline { ``` - - -This workflow will run the Terraform cloud integration once and then exit, this is useful for **scheduled** ingestion of data. - -:::warning Realtime updates in Port -If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. 
-::: + Make sure to [configure the following GitLab variables](https://docs.gitlab.com/ee/ci/variables/#for-a-project): @@ -356,584 +342,21 @@ ingest_data: -## Ingesting Terraform Cloud objects - -The Terraform integration uses a YAML configuration to describe the process of loading data into the developer portal. - -Here is an example snippet from the config which demonstrates the process for getting `Workspace` from Terraform cloud: - -```yaml showLineNumbers -resources: - - kind: workspace - selector: - query: "true" - port: - entity: - mappings: - identifier: .id - title: .attributes.name - blueprint: '"terrafomCloudWorkspace"' - properties: - organization: .relationships.organization.data.id - createdAt: .attributes."created-at" - updatedAt: .attributes."updated-at" - terraformVersion: .attributes."terraform-version" - locked: .attributes.locked - executionMode: .attributes."execution-mode" - resourceCount: .attributes."resource-count" - latestChangeAt: .attributes."latest-change-at" -``` - -The integration makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from Terraform's API events. - -### Configuration structure - -The integration configuration determines which resources will be queried from Terraform Cloud, and which entities and properties will be created in Port. - -:::tip Supported resources -The following resources can be used to map data from Terraform Cloud, it is possible to reference any field that appears in the API responses linked below for the mapping configuration. 
- -- [`Organization`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/organizations) -- [`Project`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/projects) -- [`Workspace`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/workspaces) -- [`Run`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/run) -- [`State Version`](https://developer.hashicorp.com/terraform/cloud-docs/api-docs/state-versions) - -::: - -- The root key of the integration configuration is the `resources` key: - - ```yaml showLineNumbers - # highlight-next-line - resources: - - kind: workspace - selector: - ... - ``` - -- The `kind` key is a specifier for a Terraform object: - - ```yaml showLineNumbers - resources: - # highlight-next-line - - kind: run - selector: - ... - ``` -- The `port`, `entity` and the `mappings` keys are used to map the Terraform Cloud object fields to Port entities. To create multiple mappings of the same kind, you can add another item in the `resources` array; +## Configuration -```yaml showLineNumbers -resources: - - kind: workspace - selector: - query: "true" - port: - entity: - mappings: - identifier: .id - title: .attributes.name - blueprint: '"terrafomCloudWorkspace"' - properties: - organization: .relationships.organization.data.id - createdAt: .attributes."created-at" - updatedAt: .attributes."updated-at" - terraformVersion: .attributes."terraform-version" - locked: .attributes.locked - executionMode: .attributes."execution-mode" - resourceCount: .attributes."resource-count" - latestChangeAt: .attributes."latest-change-at" - relations: - currentStateVersion: .relationships."current-state-version".data.id -``` - -:::tip Blueprint key -Note the value of the `blueprint` key - if you want to use a hardcoded string, you need to encapsulate it in 2 sets of quotes, for example use a pair of single-quotes (`'`) and then another pair of double-quotes (`"`) -::: +Port integrations use a [YAML mapping 
block](/build-your-software-catalog/customize-integrations/configure-mapping#configuration-structure) to ingest data from the third-party api into Port. -### Ingest data into Port +The mapping makes use of the [JQ JSON processor](https://stedolan.github.io/jq/manual/) to select, modify, concatenate, transform and perform other operations on existing fields and values from the integration API. -To ingest Terraform Cloud objects using the [integration configuration](#configuration-structure), you can follow the steps below: - -1. Go to the DevPortal Builder page. -2. Select a blueprint you want to ingest using Terraform Cloud. -3. Choose the **Ingest Data** option from the menu. -4. Select Terraform Cloud under the IaC category. -5. Add the contents of your [integration configuration](#configuration-structure) to the editor. -6. Click `Resync`. ## Examples -Examples of blueprints and the relevant integration configurations: - -### Organization - -
-Organization blueprint - -```json showLineNumbers - { - "identifier": "terraformCloudOrganization", - "description": "This blueprint represents an organization in Terraform Cloud", - "title": "Terraform Cloud Organization", - "icon": "Terraform", - "schema": { - "properties": { - "externalId": { - "type": "string", - "title": "External ID", - "description": "The external ID of the organization" - }, - "ownerEmail": { - "type": "string", - "title": "Owner Email", - "description": "The email associated with the organization" - }, - "collaboratorAuthPolicy": { - "type": "string", - "title": "Collaborator Authentication Policy", - "description": "Policy for collaborator authentication" - }, - "planExpired": { - "type": "string", - "title": "Plan Expired", - "description": "Indicates if plan is expired" - }, - "planExpiresAt": { - "type": "string", - "format": "date-time", - "title": "Plan Expiry Date", - "description": "The data and time which the plan expires" - }, - "permissions": { - "type": "object", - "title": "Permissions", - "description": "Permissions associated with the organization" - }, - "samlEnabled": { - "type": "boolean", - "title": "SAML Enabled", - "description": "Indicates if SAML is enabled for the organization" - }, - "defaultExecutionMode": { - "type": "string", - "title": "Default Execution Mode", - "description": "The default execution mode for the organization" - } - } - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": {} - } -``` -
- -
-Integration configuration - -```yaml showLineNumbers -- kind: organization - selector: - query: "true" - port: - entity: - mappings: - identifier: .id - title: .attributes.name - blueprint: '"terraformCloudOrganization"' - properties: - externalId: .attributes."external-id" - ownerEmail: .attributes.email - collaboratorAuthPolicy: .attributes."collaborator-auth-policy" - planExpired: .attributes."plan-expired" - planExpiresAt: .attributes."plan-expires-at" - permissions: .attributes.permissions - samlEnabled: .attributes."saml-enabled" - defaultExecutionMode: .attributes."default-execution-mode" -``` -
- -### Project -
-Project blueprint - -```json showLineNumbers - { - "identifier": "terraformCloudProject", - "description": "This blueprint represents a project in Terraform Cloud", - "title": "Terraform Cloud Project", - "icon": "Terraform", - "schema": { - "properties": { - "name": { - "type": "string", - "title": "Project Name", - "description": "The name of the Terraform Cloud project" - }, - "permissions": { - "type": "object", - "title": "Permissions", - "description": "The permisssions on the project" - } - } - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": { - "organization": { - "title": "Terraform Cloud Organization", - "target": "terraformCloudOrganization", - "required": true, - "many": false - } - } - } -``` -
- -
-Integration configuration - -```yaml showLineNumbers -- kind: project - selector: - query: "true" - port: - entity: - mappings: - identifier: .id - title: .attributes.name - blueprint: '"terraformCloudProject"' - properties: - name: .attributes.name - permissions: .attributes.permissions - relations: - organization: .relationships.organization.data.id -``` -
- -### Workspace - -
-Workspace blueprint - -```json showLineNumbers - { - "identifier": "terraformCloudWorkspace", - "description": "This blueprint represents a workspace in Terraform Cloud", - "title": "Terraform Cloud Workspace", - "icon": "Terraform", - "schema": { - "properties": { - "organization": { - "type": "string", - "title": "Organization", - "description": "The organization within which the workspace belongs to" - }, - "createdAt": { - "type": "string", - "format": "date-time", - "title": "Creation Time", - "description": "The creation timestamp of the workspace" - }, - "updatedAt": { - "type": "string", - "format": "date-time", - "title": "Last Updated", - "description": "The last update timestamp of the workspace" - }, - "terraformVersion": { - "type": "string", - "title": "Terraform Cloud Version", - "description": "Version of Terraform cloud used by the workspace" - }, - "locked": { - "type": "boolean", - "title": "Locked Status", - "description": "Indicates whether the workspace is locked" - }, - "executionMode": { - "type": "string", - "title": "Execution Mode", - "description": "The execution mode of the workspace" - }, - "resourceCount": { - "type": "number", - "title": "Resource Count", - "description": "Number of resources managed by the workspace" - }, - "latestChangeAt": { - "type": "string", - "format": "date-time", - "title": "Latest Change", - "description": "Timestamp of the latest change in the workspace" - }, - "tags": { - "type": "array", - "title": "Workspace Tags", - "description": "Terraform workspace tags" - } - } - }, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {}, - "relations": { - "currentStateVersion": { - "title": "Current State Version", - "target": "terraformCloudStateVersion", - "required": false, - "many": false - }, - "project": { - "title": "Terraform Cloud Project", - "target": "terraformCloudProject", - "required": false, - "many": false - } - } - } -``` -
- - -
-Integration configuration - -```yaml showLineNumbers -- kind: workspace - selector: - query: "true" - port: - entity: - mappings: - identifier: .id - title: .attributes.name - blueprint: '"terraformCloudWorkspace"' - properties: - organization: .relationships.organization.data.id - createdAt: .attributes."created-at" - updatedAt: .attributes."updated-at" - terraformVersion: .attributes."terraform-version" - locked: .attributes.locked - executionMode: .attributes."execution-mode" - resourceCount: .attributes."resource-count" - latestChangeAt: .attributes."latest-change-at" - tags: .__tags - relations: - currentStateVersion: .relationships."current-state-version".data.id - project: .relationships.project.data.id -``` - -
- -### Run - -
-Run blueprint - -```json showLineNumbers -{ - "identifier": "terraformCloudRun", - "description": "This blueprint represents a run in Terraform cloud", - "title": "Terraform Cloud Run", - "icon": "Terraform", - "schema": { - "properties": { - "createdAt": { - "type": "string", - "format": "date-time", - "title": "Creation Time", - "description": "The creation timestamp of the run" - }, - "status": { - "type": "string", - "title": "Run Status", - "description": "The current status of the run" - }, - "hasChanges": { - "type": "boolean", - "title": "Has Changes", - "description": "Indicates whether the run has changes" - }, - "isDestroy": { - "type": "boolean", - "title": "Is Destroy", - "description": "Indicates whether the run is a destroy operation" - }, - "message": { - "type": "string", - "title": "Run Message", - "description": "Message associated with the run" - }, - "terraformVersion": { - "type": "string", - "title": "Terraform Cloud Version", - "description": "Version of Terraform cloud used in the run" - }, - "appliedAt": { - "type": "string", - "format": "date-time", - "title": "Applied Time", - "description": "Timestamp when the run was applied" - }, - "plannedAt": { - "type": "string", - "format": "date-time", - "title": "Planned Time", - "description": "Timestamp when the run was planned" - }, - "source": { - "type": "string", - "title": "Run Source", - "description": "The source of the run initiation" - } - } - }, - "relations": { - "terraformCloudWorkspace": { - "title": "Terraform Cloud Workspace", - "target": "terraformCloudWorkspace", - "required": false, - "many": false - } - } -} -``` - -
- -
-Integration configuration - -```yaml showLineNumbers -- kind: run - selector: - query: "true" - port: - entity: - mappings: - identifier: .id - title: .attributes.message - blueprint: '"terraformCloudRun"' - properties: - createdAt: .attributes."created-at" - status: .attributes.status - hasChanges: .attributes."has-changes" - isDestroy: .attributes."is-destroy" - message: .attributes.message - terraformVersion: .attributes."terraform-version" - appliedAt: .attributes."status-timestamps"."applied-at" - plannedAt: .attributes."status-timestamps"."planned-at" - source: .attributes.source - relations: - terraformCloudWorkspace: .relationships.workspace.data.id -``` - -
- -### State Version - -
-State Version blueprint - -```json showLineNumbers -{ - "identifier": "terraformCloudStateVersion", - "description": "This blueprint represents a version of a Terraform state version", - "title": "Terraform Cloud State Versions", - "icon": "Terraform", - "schema": { - "properties": { - "createdAt": { - "type": "string", - "format": "date-time", - "title": "Creation Time", - "description": "Timestamp when the state version was created" - }, - "serial": { - "type": "number", - "title": "Serial Number", - "description": "A unique identifier for this version within the workspace" - }, - "status": { - "type": "string", - "title": "Status", - "description": "The current status of the state version (e.g., 'queued', 'finished')" - }, - "size": { - "type": "number", - "title": "Size", - "description": "The size of the resources" - }, - "isResourcesProcessed": { - "type": "boolean", - "title": "Is Resources Processed", - "description": "Whethere the resources has been processed" - }, - "hostedStateDownloadUrl": { - "type": "string", - "title": "Download Url", - "format": "url", - "description": "Hosted state version download url " - }, - "hostedJsonDownloadUrl": { - "type": "string", - "title": "Download Url", - "format": "url", - "description": "Url for downloading state version in json format" - }, - "outputData": { - "type": "array", - "title": "Output", - "description": "output returned from state version" - }, - "vcsCommitUrl": { - "type": "string", - "title": "VCS Commit URL", - "format": "url", - "description": "URL of the VCS commit that triggered this state version" - } - } - }, - "relations": {}, - "mirrorProperties": {}, - "calculationProperties": {}, - "aggregationProperties": {} -} -``` - -
- -
-Integration configuration +To view and test the integration's mapping against examples of the third-party API responses, use the jq playground in your [data sources page](https://app.getport.io/settings/data-sources). +Find the integration in the list of data sources and click on it to open the playground. -```yaml showLineNumbers -- kind: state-version - selector: - query: "true" - port: - entity: - mappings: - identifier: .id - title: .id - blueprint: '"terraformCloudStateVersion"' - properties: - createdAt: .attributes."created-at" - serial: .attributes.serial - status: .attributes.status - size: .attributes.size - isResourcesProcessed: .attributes."resources-processed" - hostedStateDownloadUrl: .attributes."hosted-state-download-url" - hostedJsonDownloadUrl: .attributes."hosted-json-state-download-url" - vcsCommitUrl: .attributes."vcs-commit-url" - outputData: .__output -``` +Examples of blueprints and the relevant integration configurations can be found on the Terraform Cloud [examples page](examples.md) -
## Let's Test It @@ -2053,3 +1476,8 @@ The combination of the sample payload and the Ocean configuration generates the ``` + + +## Relevant Guides + +For relevant guides and examples, see the [guides section](https://docs.getport.io/guides?tags=Terraform). \ No newline at end of file diff --git a/docs/generalTemplates/_integration_template.md b/docs/generalTemplates/_integration_template.md index 9bb9ce8c9..309dae866 100644 --- a/docs/generalTemplates/_integration_template.md +++ b/docs/generalTemplates/_integration_template.md @@ -50,6 +50,8 @@ Choose one of the following installation methods: +Using this installation option means that the integration will be able to update Port in real time using webhooks. +

Prerequisites

@@ -65,6 +67,12 @@ Note the parameters specific to this integration, they are last in the table. +This workflow/pipeline will run the X integration once and then exit, this is useful for **scheduled** ingestion of data. + +:::warning Real-time updates +If you want the integration to update Port in real time using webhooks you should use the [Real Time & Always On](?installation-methods=real-time-always-on#installation) installation option. +::: +
diff --git a/docs/guides/all/approval-workflow-for-gitlab-deployment.md b/docs/guides/all/approval-workflow-for-gitlab-deployment.md index 28cf329e7..43df44c4c 100644 --- a/docs/guides/all/approval-workflow-for-gitlab-deployment.md +++ b/docs/guides/all/approval-workflow-for-gitlab-deployment.md @@ -304,8 +304,8 @@ This action can be executed by an admin to approve the change request. It trigge "port_context": { "runId": "{{ .run.id }}" }, - "deploy_run_id": "{{ .entity.properties.tags }}", - "system_id": "{{ .entity.properties.system_id }}" + "deploy_run_id": "{{ .entity.properties.externalTags }}", + "system_id": "{{ .entity.identifier }}" } }, "requiredApproval": false @@ -397,7 +397,7 @@ This automation is triggered when a `servicenowChangeRequest` is updated to *"ap }, "approval_status": "{{ .event.diff.after.properties.approval }}", "system_id": "{{ .event.diff.after.identifier }}", - "deploy_run_id": "{{ .event.diff.after.properties.tags }}" + "deploy_run_id": "{{ .event.diff.after.properties.externalTags }}" } }, "publish": true @@ -566,7 +566,7 @@ run-tests: changePriority=$(echo "$changeRequestResponse" | jq -r '.result.priority') changeDescription=$(echo "$changeRequestResponse" | jq -r '.result.short_description') changeApproval=$(echo "$changeRequestResponse" | jq -r '.result.approval') - changeTags=$(echo "$changeRequestResponse" | jq -r '.result.u_external_tag') + changeTags=$(echo "$changeRequestResponse" | jq -r '.result.external_tags') changeService=$(echo "$changeRequestResponse" | jq -r '.result.business_service.value') echo "Change Request Created Successfully: Number: $changeNumber, Sys ID: $changeSysId, State: $changeState" @@ -601,7 +601,7 @@ run-tests: \"priority\": \"$changePriority\", \"description\": \"$changeDescription\", \"approval\": \"$changeApproval\", - \"tags\": \"$changeTags\", + \"externalTags\": \"$changeTags\", \"service\": \"$changeService\" }, \"relations\": {} diff --git a/docs/guides/all/humanitec-integration.md 
b/docs/guides/all/humanitec-integration.md index 927f13d71..145e6f482 100644 --- a/docs/guides/all/humanitec-integration.md +++ b/docs/guides/all/humanitec-integration.md @@ -41,10 +41,10 @@ Create the following blueprint definitions in port: - - + + :::tip Blueprint Properties You may select the blueprints depending on what you want to track in your Humanitec account. ::: @@ -72,7 +72,7 @@ Fork our [humanitec integration repository](https://github.com/port-labs/humanit -2. Create the following Python files in a folder named `client` at the base directory of the `integration` folder: +2. Create the following Python files in a folder named `clients` at the base directory of the `integration` folder: 1. `port_client.py` – Manages authentication and API requests to Port, facilitating the creation and updating of entities within Port's system. 2. `humanitec_client.py` – Handles API interactions with Humanitec, including retrieving data with caching mechanisms to optimize performance. 3. `cache.py` - Provides an in-memory caching mechanism with thread-safe operations for setting, retrieving, and deleting cache entries asynchronously. @@ -146,4 +146,5 @@ jobs: -Done! any change that happens to your application, environment, workloads or resources in Humanitec will be synced to port on the schedule interval defined in the github workflow. \ No newline at end of file +Done! Any change that happens to your application, environment, workloads or resources in Humanitec will be synced to Port on the schedule interval defined in the GitHub workflow. 
+ diff --git a/docs/integrations-index.md b/docs/integrations-index.md index 1e2fb81a7..d4706df36 100644 --- a/docs/integrations-index.md +++ b/docs/integrations-index.md @@ -220,7 +220,7 @@ This page contains a list of Port's available integrations, organized by the pla ## OpsGenie -- [OpsGenie integration and webhook](/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie.md) +- [OpsGenie integration and webhook](/build-your-software-catalog/sync-data-to-catalog/incident-management/opsgenie/opsgenie.md) - [Self-service action to trigger an OpsGenie incident](https://docs.getport.io/guides/all/create-an-opsgenie-incident) ## Incident IO diff --git a/docs/resources.md b/docs/resources.md index 8bef6473c..f9b76b2c2 100644 --- a/docs/resources.md +++ b/docs/resources.md @@ -64,6 +64,26 @@ This video demonstrates how to build a simple yet effective platform to create a +#### End-to-end Secure GitOps: Akeyless, ArgoCD, & Port - @[TeKanAid](https://www.youtube.com/@TeKanAid) + +This video showcases a complete GitOps setup using **Port**, **GitHub Actions**, **ArgoCD**, **Kubernetes**, and **Akeyless** for end-to-end secret management. + +
+ + + +
+ +#### Implementing A GUI Based Internal Developer Platform (IDP) - @[TeKanAid](https://www.youtube.com/@TeKanAid) + +This video demonstrates how to set up a GUI-based IDP to ensure that the internal engineers and developers you're building the platform for have a proper way to interact with it. + +
+ + + +
+ --- ## Links diff --git a/docs/sso-rbac/sso-providers/ldap/_category_.json b/docs/sso-rbac/sso-providers/ldap/_category_.json new file mode 100644 index 000000000..d9bacd0ff --- /dev/null +++ b/docs/sso-rbac/sso-providers/ldap/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "LDAP", + "position": 3 +} + \ No newline at end of file diff --git a/docs/sso-rbac/sso-providers/ldap/ldap.md b/docs/sso-rbac/sso-providers/ldap/ldap.md new file mode 100644 index 000000000..e34c19784 --- /dev/null +++ b/docs/sso-rbac/sso-providers/ldap/ldap.md @@ -0,0 +1,94 @@ +--- +title: "LDAP" +sidebar_position: 3 +description: Integrate any LDAP system with Port SSO +--- + +This documentation describes the process of integrating an LDAP system to Port. + +Port's login provider (Auth0) integrates with Active Directory using [Auth0's AD/LDAP Connector](https://auth0.com/docs/authenticate/identity-providers/enterprise-identity-providers/active-directory-ldap/ad-ldap-connector), which acts as a bridge between the internal AD/LDAP system and Auth0. This connector is needed because AD/LDAP is typically limited to internal networks. + +## Prerequisites +- Contact Port's support regarding the LDAP setup request. The support team will provide a `TICKET_URL` which will be needed as part of the connector installation process. +- A machine with internal network access, with [these prerequisites](https://auth0.com/docs/authenticate/identity-providers/enterprise-identity-providers/active-directory-ldap/ad-ldap-connector/install-configure-ad-ldap-connector#prerequisites). This machine needs HTTPS outbound access to the internet, and RDP access to the internal AD server. + + +## Setting up the AD/LDAP connector + +### 1. Initial installation + +To deploy Auth0's LDAP connector, follow Auth0's [connector installation guide](https://auth0.com/docs/authenticate/identity-providers/enterprise-identity-providers/active-directory-ldap/ad-ldap-connector/install-configure-ad-ldap-connector). 
+ +When required to insert the `TICKET URL`, use the `TICKET_URL` provided by the Port support team. + +### 2. Update user profile mapping + +To allow user login, Port's login flow requires the login event to contain an `email` field, which is used as the user login ID. +AD users typically use the `User Principal Name` (for example `user@corp.domain.com`) as their login ID. + +:::note + +In case the AD connector was installed on a Linux machine, mapping updates are done by editing the `/opt/auth0-adldap/lib/profileMapper.js` file, and restarting the AD connector service. + +::: + +To pass the `User Principal Name` (UPN) value in the `email` field in the login event, [update the connector mapping](https://auth0.com/docs/authenticate/identity-providers/enterprise-identity-providers/active-directory-ldap/ad-ldap-connector/map-ad-ldap-profile-attributes-to-auth0), and add the following line of code at the bottom of the attribute update list: + +```js showLineNumbers + // Default mappings + // ... + // profile['manager'] = raw_data['manager']; + // profile['organizationUnits'] = raw_data['organizationUnits']; + + // Port custom mapping + //highlight-next-line + profile['email'] = raw_data['userPrincipalName']; + +``` + +You can optionally map the user's `given name` and `surname` by adding the following fields to the mapping: + +```js showLineNumbers + // By default, the user's given name is under the `givenName`. + // Replace in case your mapping is different. + profile['given_name'] = raw_data['givenName']; + // By default, the user's surname name is under the `sn`. + // Replace in case your mapping is different. + profile['family_name'] = raw_data['sn']; + +``` + +### 3. Update LDAP user query +By default, the AD connector uses `(sAMAccountName={0})` as the default LDAP query to search for the user which is attempting to login. + +The query needs to be updated to search the AD server using the user's `UPN` instead. 
To do this, add the following to the `config.js` file:
+
+```js showLineNumbers
+
+  "LDAP_USER_BY_NAME": "(&(objectClass=user)(userPrincipalName={0}))";
+
+```
+
+:::note
+
+The `config.js` file is placed by default in the following paths:
+- Windows machines - `C:\Program Files (x86)\Auth0\AD LDAP Connector\config.js`
+- Linux machines - `/opt/auth0-adldap/config.js`
+
+To view the full list of configuration variables for the `config.js` file, click [here](https://auth0.com/docs/authenticate/identity-providers/enterprise-identity-providers/active-directory-ldap/ad-ldap-connector/ad-ldap-connector-config-file-schema).
+
+:::
+
+### 4. **Optional** - Client SSL authentication
+
+In some environments, accessing internal AD servers uses custom client SSL certificate authentication. If your AD environment requires client SSL certificates:
+1. Contact Port's support team to request enabling client SSL certificates for your connection.
+2. Follow the [Auth0 client SSL certificate guide](https://auth0.com/docs/authenticate/identity-providers/enterprise-identity-providers/active-directory-ldap/ad-ldap-connector/configure-ad-ldap-connector-client-certificates#configure-certificates) explaining how to configure certificates for the connector.
+3. Restart the Auth0 Connector service on the machine hosting the AD connector.
+
+
+:::warning
+
+After client SSL is enabled by Port's support team, if the connector is reset without proper SSL configuration, login attempts might fail. 
+ +::: diff --git a/docusaurus.config.js b/docusaurus.config.js index 67b43a8dd..c19d9d360 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -134,6 +134,12 @@ const config = { target: "_blank", className: "header-slack-link", }, + { + to: "https://www.youtube.com/@getport", + position: "right", + target: "_blank", + className: "header-youtube-link", + }, ], }, hotjar: { diff --git a/package-lock.json b/package-lock.json index 6408bbb77..05fa56afd 100644 --- a/package-lock.json +++ b/package-lock.json @@ -15,18 +15,18 @@ "@docusaurus/plugin-ideal-image": "^3.6.3", "@docusaurus/preset-classic": "^3.6.3", "@docusaurus/theme-live-codeblock": "^3.6.3", - "@easyops-cn/docusaurus-search-local": "^0.45.0", + "@easyops-cn/docusaurus-search-local": "^0.46.1", "@emotion/react": "^11.13.5", "@emotion/styled": "^11.13.5", "@mdx-js/react": "^3.1.0", - "@mui/material": "^6.1.8", + "@mui/material": "^6.1.9", "@stackql/docusaurus-plugin-hubspot": "^1.0.1", "clsx": "^2.1.1", "docusaurus-plugin-hotjar": "^0.0.2", "docusaurus-plugin-image-zoom": "^2.0.0", "docusaurus-plugin-openapi-docs": "^4.0.1", "docusaurus-theme-openapi-docs": "^4.0.1", - "prettier": "^3.3.3", + "prettier": "^3.4.1", "prism-react-renderer": "^2.4.0", "react": "^18.3.1", "react-dom": "^18.3.1", @@ -3970,9 +3970,10 @@ } }, "node_modules/@easyops-cn/docusaurus-search-local": { - "version": "0.45.0", - "resolved": "https://registry.npmjs.org/@easyops-cn/docusaurus-search-local/-/docusaurus-search-local-0.45.0.tgz", - "integrity": "sha512-ccJjeYmBHrv2v8Y9eQnH79S0PEKcogACKkEatEKPcad7usQj/14jA9POUUUYW/yougLSXghwe+uIncbuUBuBFg==", + "version": "0.46.1", + "resolved": "https://registry.npmjs.org/@easyops-cn/docusaurus-search-local/-/docusaurus-search-local-0.46.1.tgz", + "integrity": "sha512-kgenn5+pctVlJg8s1FOAm9KuZLRZvkBTMMGJvTTcvNTmnFIHVVYzYfA2Eg+yVefzsC8/cSZGKKJ0kLf8I+mQyw==", + "license": "MIT", "dependencies": { "@docusaurus/plugin-content-docs": "^2 || ^3", "@docusaurus/theme-translations": "^2 || ^3", 
@@ -3983,6 +3984,7 @@ "@node-rs/jieba": "^1.6.0", "cheerio": "^1.0.0", "clsx": "^1.1.1", + "comlink": "^4.4.2", "debug": "^4.2.0", "fs-extra": "^10.0.0", "klaw-sync": "^6.0.0", @@ -4481,24 +4483,26 @@ } }, "node_modules/@mui/core-downloads-tracker": { - "version": "6.1.8", - "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-6.1.8.tgz", - "integrity": "sha512-TGAvzwUg9hybDacwfIGFjI2bXYXrIqky+vMfaeay8rvT56/PNAlvIDUJ54kpT5KRc9AWAihOvtDI7/LJOThOmQ==", + "version": "6.1.9", + "resolved": "https://registry.npmjs.org/@mui/core-downloads-tracker/-/core-downloads-tracker-6.1.9.tgz", + "integrity": "sha512-TWqj7b1w5cmSz4H/uf+y2AHxAH4ldPR7D2bz0XVyn60GCAo/zRbRPx7cF8gTs/i7CiYeHzV6dtat0VpMwOtolw==", + "license": "MIT", "funding": { "type": "opencollective", "url": "https://opencollective.com/mui-org" } }, "node_modules/@mui/material": { - "version": "6.1.8", - "resolved": "https://registry.npmjs.org/@mui/material/-/material-6.1.8.tgz", - "integrity": "sha512-QZdQFnXct+7NXIzHgT3qt+sQiO7HYGZU2vymP9Xl9tUMXEOA/S1mZMMb7+WGZrk5TzNlU/kP/85K0da5V1jXoQ==", + "version": "6.1.9", + "resolved": "https://registry.npmjs.org/@mui/material/-/material-6.1.9.tgz", + "integrity": "sha512-NwqIN0bdsgzSbZd5JFcC+2ez0XW/XNs8uiV2PDHrqQ4qf/FEasFJG1z6g8JbCN0YlTrHZekVb17X0Fv0qcYJfQ==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", - "@mui/core-downloads-tracker": "^6.1.8", - "@mui/system": "^6.1.8", + "@mui/core-downloads-tracker": "^6.1.9", + "@mui/system": "^6.1.9", "@mui/types": "^7.2.19", - "@mui/utils": "^6.1.8", + "@mui/utils": "^6.1.9", "@popperjs/core": "^2.11.8", "@types/react-transition-group": "^4.4.11", "clsx": "^2.1.1", @@ -4517,7 +4521,7 @@ "peerDependencies": { "@emotion/react": "^11.5.0", "@emotion/styled": "^11.3.0", - "@mui/material-pigment-css": "^6.1.8", + "@mui/material-pigment-css": "^6.1.9", "@types/react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^17.0.0 || ^18.0.0 || 
^19.0.0" @@ -4538,12 +4542,13 @@ } }, "node_modules/@mui/private-theming": { - "version": "6.1.8", - "resolved": "https://registry.npmjs.org/@mui/private-theming/-/private-theming-6.1.8.tgz", - "integrity": "sha512-TuKl7msynCNCVvhX3c0ef1sF0Qb3VHcPs8XOGB/8bdOGBr/ynmIG1yTMjZeiFQXk8yN9fzK/FDEKMFxILNn3wg==", + "version": "6.1.9", + "resolved": "https://registry.npmjs.org/@mui/private-theming/-/private-theming-6.1.9.tgz", + "integrity": "sha512-7aum/O1RquBYhfwL/7egDyl9GqJgPM6hoJDFFBbhF6Sgv9yI9v4w3ArKUkuVvR0CtVj4NXRVMKEioh1bjUzvuA==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", - "@mui/utils": "^6.1.8", + "@mui/utils": "^6.1.9", "prop-types": "^15.8.1" }, "engines": { @@ -4564,13 +4569,14 @@ } }, "node_modules/@mui/styled-engine": { - "version": "6.1.8", - "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-6.1.8.tgz", - "integrity": "sha512-ZvEoT0U2nPLSLI+B4by4cVjaZnPT2f20f4JUPkyHdwLv65ZzuoHiTlwyhqX1Ch63p8bcJzKTHQVGisEoMK6PGA==", + "version": "6.1.9", + "resolved": "https://registry.npmjs.org/@mui/styled-engine/-/styled-engine-6.1.9.tgz", + "integrity": "sha512-xynSLlJRxHLzSfQaiDjkaTx8LiFb9ByVa7aOdwFnTxGWFMY1F+mkXwAUY4jDDE+MAxkWxlzzQE0wOohnsxhdQg==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", - "@emotion/cache": "^11.13.1", - "@emotion/serialize": "^1.3.2", + "@emotion/cache": "^11.13.5", + "@emotion/serialize": "^1.3.3", "@emotion/sheet": "^1.4.0", "csstype": "^3.1.3", "prop-types": "^15.8.1" @@ -4597,15 +4603,16 @@ } }, "node_modules/@mui/system": { - "version": "6.1.8", - "resolved": "https://registry.npmjs.org/@mui/system/-/system-6.1.8.tgz", - "integrity": "sha512-i1kLfQoWxzFpXTBQIuPoA3xKnAnP3en4I2T8xIolovSolGQX5k8vGjw1JaydQS40td++cFsgCdEU458HDNTGUA==", + "version": "6.1.9", + "resolved": "https://registry.npmjs.org/@mui/system/-/system-6.1.9.tgz", + "integrity": "sha512-8x+RucnNp21gfFYsklCaZf0COXbv3+v0lrVuXONxvPEkESi2rwLlOi8UPJfcz6LxZOAX3v3oQ7qw18vnpgueRg==", + "license": "MIT", "dependencies": 
{ "@babel/runtime": "^7.26.0", - "@mui/private-theming": "^6.1.8", - "@mui/styled-engine": "^6.1.8", + "@mui/private-theming": "^6.1.9", + "@mui/styled-engine": "^6.1.9", "@mui/types": "^7.2.19", - "@mui/utils": "^6.1.8", + "@mui/utils": "^6.1.9", "clsx": "^2.1.1", "csstype": "^3.1.3", "prop-types": "^15.8.1" @@ -4639,6 +4646,7 @@ "version": "7.2.19", "resolved": "https://registry.npmjs.org/@mui/types/-/types-7.2.19.tgz", "integrity": "sha512-6XpZEM/Q3epK9RN8ENoXuygnqUQxE+siN/6rGRi2iwJPgBUR25mphYQ9ZI87plGh58YoZ5pp40bFvKYOCDJ3tA==", + "license": "MIT", "peerDependencies": { "@types/react": "^17.0.0 || ^18.0.0 || ^19.0.0" }, @@ -4649,9 +4657,10 @@ } }, "node_modules/@mui/utils": { - "version": "6.1.8", - "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-6.1.8.tgz", - "integrity": "sha512-O2DWb1kz8hiANVcR7Z4gOB3SvPPsSQGUmStpyBDzde6dJIfBzgV9PbEQOBZd3EBsd1pB+Uv1z5LAJAbymmawrA==", + "version": "6.1.9", + "resolved": "https://registry.npmjs.org/@mui/utils/-/utils-6.1.9.tgz", + "integrity": "sha512-N7uzBp7p2or+xanXn3aH2OTINC6F/Ru/U8h6amhRZEev8bJhKN86rIDIoxZZ902tj+09LXtH83iLxFMjMHyqNA==", + "license": "MIT", "dependencies": { "@babel/runtime": "^7.26.0", "@mui/types": "^7.2.19", @@ -7581,6 +7590,12 @@ "node": ">=10" } }, + "node_modules/comlink": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/comlink/-/comlink-4.4.2.tgz", + "integrity": "sha512-OxGdvBmJuNKSCMO4NTl1L47VRp6xn2wG4F/2hYzB6tiCb709otOxtEYCSvK80PtjODfXXZu8ds+Nw5kVCjqd2g==", + "license": "Apache-2.0" + }, "node_modules/comma-separated-tokens": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", @@ -17247,9 +17262,10 @@ } }, "node_modules/prettier": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", - "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", + "version": "3.4.1", + "resolved": 
"https://registry.npmjs.org/prettier/-/prettier-3.4.1.tgz", + "integrity": "sha512-G+YdqtITVZmOJje6QkXQWzl3fSfMxFwm1tjTyo9exhkmWSqC4Yhd1+lug++IlR2mvRVAxEDDWYkQdeSztajqgg==", + "license": "MIT", "bin": { "prettier": "bin/prettier.cjs" }, diff --git a/package.json b/package.json index bbaee9130..fd3ba3265 100644 --- a/package.json +++ b/package.json @@ -23,18 +23,18 @@ "@docusaurus/plugin-ideal-image": "^3.6.3", "@docusaurus/preset-classic": "^3.6.3", "@docusaurus/theme-live-codeblock": "^3.6.3", - "@easyops-cn/docusaurus-search-local": "^0.45.0", + "@easyops-cn/docusaurus-search-local": "^0.46.1", "@emotion/react": "^11.13.5", "@emotion/styled": "^11.13.5", "@mdx-js/react": "^3.1.0", - "@mui/material": "^6.1.8", + "@mui/material": "^6.1.9", "docusaurus-plugin-openapi-docs": "^4.0.1", "docusaurus-theme-openapi-docs": "^4.0.1", "@stackql/docusaurus-plugin-hubspot": "^1.0.1", "clsx": "^2.1.1", "docusaurus-plugin-hotjar": "^0.0.2", "docusaurus-plugin-image-zoom": "^2.0.0", - "prettier": "^3.3.3", + "prettier": "^3.4.1", "prism-react-renderer": "^2.4.0", "react": "^18.3.1", "react-dom": "^18.3.1", diff --git a/src/css/custom.css b/src/css/custom.css index 52934345b..614372b51 100644 --- a/src/css/custom.css +++ b/src/css/custom.css @@ -195,6 +195,26 @@ html[data-theme="light"] .header-github-link:before { color: var(--ifm-navbar-link-color); } +.header-youtube-link { + align-items: center; + display: flex; +} + +.header-youtube-link:hover { + opacity: 0.6; +} + +.header-youtube-link:before { + align-self: center; + background: 
url("data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iaXNvLTg4NTktMSI/Pgo8IS0tIFVwbG9hZGVkIHRvOiBTVkcgUmVwbywgd3d3LnN2Z3JlcG8uY29tLCBHZW5lcmF0b3I6IFNWRyBSZXBvIE1peGVyIFRvb2xzIC0tPgo8c3ZnIGhlaWdodD0iODAwcHgiIHdpZHRoPSI4MDBweCIgdmVyc2lvbj0iMS4xIiBpZD0iTGF5ZXJfMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgCgkgdmlld0JveD0iMCAwIDQ2MS4wMDEgNDYxLjAwMSIgeG1sOnNwYWNlPSJwcmVzZXJ2ZSI+CjxnPgoJPHBhdGggc3R5bGU9ImZpbGw6I0Y2MUMwRDsiIGQ9Ik0zNjUuMjU3LDY3LjM5M0g5NS43NDRDNDIuODY2LDY3LjM5MywwLDExMC4yNTksMCwxNjMuMTM3djEzNC43MjgKCQljMCw1Mi44NzgsNDIuODY2LDk1Ljc0NCw5NS43NDQsOTUuNzQ0aDI2OS41MTNjNTIuODc4LDAsOTUuNzQ0LTQyLjg2Niw5NS43NDQtOTUuNzQ0VjE2My4xMzcKCQlDNDYxLjAwMSwxMTAuMjU5LDQxOC4xMzUsNjcuMzkzLDM2NS4yNTcsNjcuMzkzeiBNMzAwLjUwNiwyMzcuMDU2bC0xMjYuMDYsNjAuMTIzYy0zLjM1OSwxLjYwMi03LjIzOS0wLjg0Ny03LjIzOS00LjU2OFYxNjguNjA3CgkJYzAtMy43NzQsMy45ODItNi4yMiw3LjM0OC00LjUxNGwxMjYuMDYsNjMuODgxQzMwNC4zNjMsMjI5Ljg3MywzMDQuMjk4LDIzNS4yNDgsMzAwLjUwNiwyMzcuMDU2eiIvPgo8L2c+Cjwvc3ZnPg==") + 0 0 / contain; + content: ""; + display: inline-flex; + height: 24px; + width: 24px; + color: var(--ifm-navbar-link-color); +} + html[data-theme="dark"] .intercom-launcher-frame { border: 2px white solid !important; } diff --git a/static/img/self-service-actions/reflect-action-progress/myLatestRuns.png b/static/img/self-service-actions/reflect-action-progress/myLatestRuns.png new file mode 100644 index 000000000..4b3672a7c Binary files /dev/null and b/static/img/self-service-actions/reflect-action-progress/myLatestRuns.png differ diff --git a/static/rawApiSpec.yaml b/static/rawApiSpec.yaml index 4e1887c36..d6b573039 100644 --- a/static/rawApiSpec.yaml +++ b/static/rawApiSpec.yaml @@ -29129,16 +29129,17 @@ paths: responses: "200": description: Default Response - "/v1/organization/secrets/{secret_name}": + /v1/organization/secrets: get: tags: - Organization security: - - bearer: [] + - bearer: + - read:secrets responses: "200": description: 
Default Response - patch: + post: tags: - Organization requestBody: @@ -29147,40 +29148,38 @@ paths: schema: type: object properties: + secretName: + type: string + maxLength: 50 + minLength: 1 + pattern: ^[A-Za-z0-9-_]*$ secretValue: type: string description: type: string maxLength: 200 additionalProperties: false - required: [] - parameters: - - schema: - type: string - in: path - name: secret_name - required: true + required: + - secretName + - secretValue + required: true security: - - bearer: [] + - bearer: + - create:secrets responses: "200": description: Default Response - delete: + "/v1/organization/secrets/{secret_name}": + get: tags: - Organization - parameters: - - schema: - type: string - in: path - name: secret_name - required: true security: - - bearer: [] + - bearer: + - read:secrets responses: "200": description: Default Response - /v1/organization/secrets: - post: + patch: tags: - Organization requestBody: @@ -29189,23 +29188,37 @@ paths: schema: type: object properties: - secretName: - type: string - maxLength: 50 - minLength: 1 - pattern: ^[A-Za-z0-9-_]*$ secretValue: type: string description: type: string maxLength: 200 additionalProperties: false - required: - - secretName - - secretValue - required: true + required: [] + parameters: + - schema: + type: string + in: path + name: secret_name + required: true security: - - bearer: [] + - bearer: + - update:secrets + responses: + "200": + description: Default Response + delete: + tags: + - Organization + parameters: + - schema: + type: string + in: path + name: secret_name + required: true + security: + - bearer: + - delete:secrets responses: "200": description: Default Response