Merge pull request #299 from derailed/popeye/v0.21.2
Popeye v0.21.2
derailed authored Mar 26, 2024
2 parents aa92a01 + 14ae249 commit 2c72b1a
Showing 11 changed files with 59 additions and 11 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
NAME := popeye
PACKAGE := github.com/derailed/$(NAME)
-VERSION := v0.21.1
+VERSION := v0.21.2
GIT := $(shell git rev-parse --short HEAD)
DATE := $(shell date +%FT%T%Z)
IMG_NAME := derailed/popeye
2 changes: 1 addition & 1 deletion README.md
@@ -1,6 +1,6 @@
<img src="https://github.com/derailed/popeye/raw/master/assets/popeye_logo.png" align="right" width="250" height="auto">

-# Popeye - A Kubernetes Live Cluster Linter
+# Popeye: Kubernetes Live Cluster Linter

Popeye is a utility that scans live Kubernetes clusters and reports potential issues with deployed resources and configurations.
As Kubernetes landscapes grows, it is becoming a challenge for a human to track the slew of manifests and policies that orchestrate a cluster.
25 changes: 25 additions & 0 deletions change_logs/release_v0.21.2.md
@@ -0,0 +1,25 @@
<img src="https://raw.githubusercontent.com/derailed/popeye/master/assets/popeye_logo.png" align="right" width="200" height="auto"/>

# Release v0.21.2

## Notes

Thank you to all who contributed to flushing out issues and enhancements for Popeye! I'll try to mark some of these issues as fixed. But if you don't mind, grab the latest rev and see if we're happier with some of the fixes! If you've filed an issue, please help me verify and close it. Your support, kindness, and awesome suggestions to make Popeye better are, as ever, very much noticed and appreciated!

This project offers a GitHub Sponsor button (over here 👆). As you well know, this is not pimped out by big corps with deep pockets. If you feel `Popeye` is saving you cycles diagnosing potential cluster issues, please consider sponsoring this project!! It goes a long way toward keeping our servers' lights on and beers in our fridge.

Also, if you dig this tool, please make some noise on social! [@kitesurfer](https://twitter.com/kitesurfer)

---

## Maintenance Release

---

## Resolved Issues

* [#298](https://github.com/derailed/popeye/issues/298) Popeye showing errors for Complete cronjobs

---

<img src="https://raw.githubusercontent.com/derailed/popeye/master/assets/imhotep_logo.png" width="32" height="auto"/>&nbsp; © 2024 Imhotep Software LLC. All materials licensed under [Apache v2.0](http://www.apache.org/licenses/LICENSE-2.0)
7 changes: 5 additions & 2 deletions internal/dao/ev.go
@@ -11,6 +11,7 @@ import (
"github.com/derailed/popeye/internal"
"github.com/derailed/popeye/internal/client"
"github.com/derailed/popeye/types"
"github.com/rs/zerolog/log"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -26,7 +27,8 @@ type EventInfo struct {
}

func (e EventInfo) IsIssue() bool {
-return e.Kind == WarnEvt || !strings.Contains(e.Reason, "Success")
+return e.Kind == WarnEvt ||
+(e.Reason != "Success" && e.Reason != "SawCompletedJob")
}

type EventInfos []EventInfo
@@ -73,7 +75,8 @@ func EventsFor(ctx context.Context, gvr types.GVR, level, kind, fqn string) (Eve
return nil, err
}
if len(oo) == 0 {
-return nil, fmt.Errorf("No events found %s", fqn)
+log.Debug().Msgf("No events found %s: %s", gvr, fqn)
+return nil, nil
}

tt := oo[0].(*metav1.Table)
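The two ev.go hunks above are the heart of the #298 fix: `IsIssue` now treats normal events whose reason is `Success` or `SawCompletedJob` (the event emitted when a CronJob's job completes) as non-issues, and `EventsFor` logs a debug message and returns `nil, nil` instead of erroring when a resource simply has no events. Below is a minimal, standalone sketch of the updated predicate; the `EventInfo` shape and the `WarnEvt` value are assumptions for illustration, not the exact definitions in `internal/dao`.

```go
package main

import "fmt"

// WarnEvt stands in for the warning-event kind used in internal/dao/ev.go;
// its actual value in the package is an assumption made for this sketch.
const WarnEvt = "Warning"

// EventInfo mirrors only the fields the updated predicate inspects.
type EventInfo struct {
	Kind   string
	Reason string
}

// IsIssue reproduces the post-fix logic: warning events always count as
// issues, while normal events with reason "Success" or "SawCompletedJob"
// (seen on completed CronJob runs) are no longer flagged.
func (e EventInfo) IsIssue() bool {
	return e.Kind == WarnEvt ||
		(e.Reason != "Success" && e.Reason != "SawCompletedJob")
}

func main() {
	for _, e := range []EventInfo{
		{Kind: "Normal", Reason: "SawCompletedJob"}, // flagged before v0.21.2, ignored now
		{Kind: "Normal", Reason: "SomeOtherReason"}, // still reported
		{Kind: WarnEvt, Reason: "FailedScheduling"}, // warnings always reported
	} {
		fmt.Printf("%s/%s -> issue=%v\n", e.Kind, e.Reason, e.IsIssue())
	}
}
```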
4 changes: 4 additions & 0 deletions internal/issues/assets/codes.yaml
@@ -126,6 +126,10 @@ codes:
407:
message: "%s references %s %q which does not exist"
severity: 3
+666:
+message: "Lint internal error: %s"
+severity: 3


# Pod controllers
500:
2 changes: 1 addition & 1 deletion internal/issues/codes_test.go
@@ -15,7 +15,7 @@ func TestCodesLoad(t *testing.T) {
cc, err := issues.LoadCodes()

assert.Nil(t, err)
-assert.Equal(t, 114, len(cc.Glossary))
+assert.Equal(t, 115, len(cc.Glossary))
assert.Equal(t, "No liveness probe", cc.Glossary[103].Message)
assert.Equal(t, rules.WarnLevel, cc.Glossary[103].Severity)
}
16 changes: 15 additions & 1 deletion internal/issues/collector.go
@@ -13,6 +13,8 @@ import (
"github.com/rs/zerolog/log"
)

+const errCode = 666

// Collector tracks linter issues and codes.
type Collector struct {
*config.Config
@@ -95,8 +97,20 @@ func (c *Collector) AddCode(ctx context.Context, code rules.ID, args ...interfac
// AddErr adds a collection of errors.
func (c *Collector) AddErr(ctx context.Context, errs ...error) {
run := internal.MustExtractRunInfo(ctx)
+if c.codes == nil {
+for _, e := range errs {
+c.addIssue(run.Spec.FQN, New(run.SectionGVR, Root, rules.ErrorLevel, e.Error()))
+}
+return
+}

+co, ok := c.codes.Glossary[errCode]
+if !ok {
+// BOZO!! refact once codes are in!!
+panic(fmt.Errorf("no codes found with id %d", errCode))
+}
for _, e := range errs {
-c.addIssue(run.Spec.FQN, New(run.SectionGVR, Root, rules.ErrorLevel, e.Error()))
+c.addIssue(run.Spec.FQN, New(run.SectionGVR, Root, rules.ErrorLevel, co.Format(errCode, e.Error())))
}
}

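Together with the new `666` glossary entry above, this change means raw linter errors now surface with a lint code instead of as bare strings, which is why the test expectations further below become `[POP-666] Lint internal error: ...`. Here is a rough sketch of that wrapping under assumed types; the real `Glossary` and `Format` signatures live in Popeye's rules package and may differ:

```go
package main

import "fmt"

// Code is a stand-in for a glossary entry; the real type and its Format
// helper belong to Popeye's rules package and may look different.
type Code struct {
	Message string
}

// Format prefixes the POP-<id> tag and interpolates the message arguments,
// mirroring the "[POP-666] Lint internal error: %s" output asserted in the
// updated dp_test.go and ing_test.go.
func (c Code) Format(id int, args ...interface{}) string {
	return fmt.Sprintf("[POP-%d] ", id) + fmt.Sprintf(c.Message, args...)
}

func main() {
	glossary := map[int]Code{
		666: {Message: "Lint internal error: %s"},
	}
	err := fmt.Errorf("no pod selector given")
	fmt.Println(glossary[666].Format(666, err.Error()))
	// Prints: [POP-666] Lint internal error: no pod selector given
}
```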
6 changes: 4 additions & 2 deletions internal/lint/cronjob.go
@@ -91,7 +91,7 @@ func (s *CronJob) checkUtilization(ctx context.Context, over bool, fqn string) {
s.AddErr(ctx, err)
return
}
-mx := jobResourceUsage(ctx, s.db, s, jj)
+mx := jobResourceUsage(s.db, jj)
if mx.RequestCPU.IsZero() && mx.RequestMEM.IsZero() {
return
}
@@ -105,13 +105,15 @@ func checkEvents(ctx context.Context, ii *issues.Collector, r internal.R, kind,
ee, err := dao.EventsFor(ctx, internal.Glossary[r], kind, object, fqn)
if err != nil {
ii.AddErr(ctx, err)
+return
}

for _, e := range ee.Issues() {
ii.AddErr(ctx, errors.New(e))
}
}

-func jobResourceUsage(ctx context.Context, dba *db.DB, c Collector, jobs []*batchv1.Job) ConsumptionMetrics {
+func jobResourceUsage(dba *db.DB, jobs []*batchv1.Job) ConsumptionMetrics {
var mx ConsumptionMetrics

if len(jobs) == 0 {
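Two smaller notes on the cronjob.go hunks: `checkEvents` now returns immediately after recording a lookup error, and `jobResourceUsage` drops its unused `ctx` and `Collector` parameters. Since `EventsFor` may now return `nil, nil` when there are no events, the `range ee.Issues()` loop stays safe because ranging over a nil slice in Go is a no-op; a tiny sketch with a hypothetical `EventInfos` stand-in:

```go
package main

import "fmt"

// EventInfos is a hypothetical stand-in for the dao type; Issues would
// normally project the underlying events into display strings.
type EventInfos []string

func (ee EventInfos) Issues() []string { return ee }

func main() {
	var ee EventInfos // nil, as EventsFor may now return when no events exist
	for _, e := range ee.Issues() {
		fmt.Println(e) // never runs: ranging over a nil slice is a no-op
	}
	fmt.Println("no panic, issues found:", len(ee.Issues()))
}
```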
2 changes: 1 addition & 1 deletion internal/lint/dp_test.go
@@ -53,6 +53,6 @@ func TestDPLint(t *testing.T) {
assert.Equal(t, 2, len(ii))
assert.Equal(t, `[POP-500] Zero scale detected`, ii[0].Message)
assert.Equal(t, rules.WarnLevel, ii[0].Level)
-assert.Equal(t, `no pod selector given`, ii[1].Message)
+assert.Equal(t, `[POP-666] Lint internal error: no pod selector given`, ii[1].Message)
assert.Equal(t, rules.ErrorLevel, ii[1].Level)
}
2 changes: 1 addition & 1 deletion internal/lint/ing_test.go
@@ -52,7 +52,7 @@ func TestIngLint(t *testing.T) {
assert.Equal(t, 2, len(ii))
assert.Equal(t, `[POP-1400] Ingress LoadBalancer port reported an error: boom`, ii[0].Message)
assert.Equal(t, rules.ErrorLevel, ii[0].Level)
-assert.Equal(t, `Ingress local obj refs not supported`, ii[1].Message)
+assert.Equal(t, `[POP-666] Lint internal error: Ingress local obj refs not supported`, ii[1].Message)
assert.Equal(t, rules.ErrorLevel, ii[1].Level)

ii = ing.Outcome()["default/ing6"]
2 changes: 1 addition & 1 deletion internal/lint/job.go
@@ -82,7 +82,7 @@ func (s *Job) checkUtilization(ctx context.Context, over bool, fqn string) {
s.AddErr(ctx, err)
return
}
-mx := jobResourceUsage(ctx, s.db, s, jj)
+mx := jobResourceUsage(s.db, jj)
if mx.RequestCPU.IsZero() && mx.RequestMEM.IsZero() {
return
}
