From 8c8739a47710da89d9288220074f200cafec3aab Mon Sep 17 00:00:00 2001 From: Aaron Hetherington Date: Fri, 9 Feb 2024 15:39:59 +0000 Subject: [PATCH] Fix linting errors and add comments --- pkg/workceptor/kubernetes.go | 67 +++++----- pkg/workceptor/kubernetes_test.go | 95 ++++++++------- pkg/workceptor/mock_workceptor/kubernetes.go | 122 +++++++++---------- 3 files changed, 145 insertions(+), 139 deletions(-) diff --git a/pkg/workceptor/kubernetes.go b/pkg/workceptor/kubernetes.go index 35b5847ad..7429e4052 100644 --- a/pkg/workceptor/kubernetes.go +++ b/pkg/workceptor/kubernetes.go @@ -66,29 +66,28 @@ type KubeExtraData struct { } type KubeAPIer interface { - NewNotFound(qualifiedResource schema.GroupResource, name string) *apierrors.StatusError - OneTermEqualSelector(k string, v string) fields.Selector - NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) - GetLogs(clientset *kubernetes.Clientset, namespace string, name string, opts *corev1.PodLogOptions) *rest.Request - Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) - Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) - List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) - Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.DeleteOptions) error - SubResource(clientset *kubernetes.Clientset, podName string, podNamespace string) *rest.Request + NewNotFound(schema.GroupResource, string) *apierrors.StatusError + OneTermEqualSelector(string, string) fields.Selector + NewForConfig(*rest.Config) (*kubernetes.Clientset, error) + GetLogs(*kubernetes.Clientset, string, string, *corev1.PodLogOptions) *rest.Request + Get(context.Context, *kubernetes.Clientset, string, string, metav1.GetOptions) (*corev1.Pod, error) + Create(context.Context, *kubernetes.Clientset, string, *corev1.Pod, metav1.CreateOptions) (*corev1.Pod, error) + List(context.Context, *kubernetes.Clientset, string, metav1.ListOptions) (*corev1.PodList, error) + Watch(context.Context, *kubernetes.Clientset, string, metav1.ListOptions) (watch.Interface, error) + Delete(context.Context, *kubernetes.Clientset, string, string, metav1.DeleteOptions) error + SubResource(*kubernetes.Clientset, string, string) *rest.Request InClusterConfig() (*rest.Config, error) NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules - BuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*rest.Config, error) - NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) - NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) - StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error - UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch2.PreconditionFunc, conditions ...watch2.ConditionFunc) (*watch.Event, error) + BuildConfigFromFlags(string, string) (*rest.Config, error) + NewClientConfigFromBytes([]byte) (clientcmd.ClientConfig, error) + NewSPDYExecutor(*rest.Config, string, *url.URL) (remotecommand.Executor, error) + StreamWithContext(context.Context, remotecommand.Executor, 
remotecommand.StreamOptions) error + UntilWithSync(context.Context, cache.ListerWatcher, runtime.Object, watch2.PreconditionFunc, ...watch2.ConditionFunc) (*watch.Event, error) NewFakeNeverRateLimiter() flowcontrol.RateLimiter NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter } -type KubeAPIWrapper struct { -} +type KubeAPIWrapper struct{} func (ku KubeAPIWrapper) NewNotFound(qualifiedResource schema.GroupResource, name string) *apierrors.StatusError { return apierrors.NewNotFound(qualifiedResource, name) @@ -106,23 +105,23 @@ func (ku KubeAPIWrapper) GetLogs(clientset *kubernetes.Clientset, namespace stri return clientset.CoreV1().Pods(namespace).GetLogs(name, opts) } -func (ku KubeAPIWrapper) Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.GetOptions) (*corev1.Pod, error) { +func (ku KubeAPIWrapper) Get(ctx context.Context, clientset *kubernetes.Clientset, namespace string, name string, opts metav1.GetOptions) (*corev1.Pod, error) { return clientset.CoreV1().Pods(namespace).Get(ctx, name, opts) } -func (ku KubeAPIWrapper) Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) { +func (ku KubeAPIWrapper) Create(ctx context.Context, clientset *kubernetes.Clientset, namespace string, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) { return clientset.CoreV1().Pods(namespace).Create(ctx, pod, opts) } -func (ku KubeAPIWrapper) List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (*corev1.PodList, error) { +func (ku KubeAPIWrapper) List(ctx context.Context, clientset *kubernetes.Clientset, namespace string, opts metav1.ListOptions) (*corev1.PodList, error) { return clientset.CoreV1().Pods(namespace).List(ctx, opts) } -func (ku KubeAPIWrapper) Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { +func (ku KubeAPIWrapper) Watch(ctx context.Context, clientset *kubernetes.Clientset, namespace string, opts metav1.ListOptions) (watch.Interface, error) { return clientset.CoreV1().Pods(namespace).Watch(ctx, opts) } -func (ku KubeAPIWrapper) Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts metav1.DeleteOptions) error { +func (ku KubeAPIWrapper) Delete(ctx context.Context, clientset *kubernetes.Clientset, namespace string, name string, opts metav1.DeleteOptions) error { return clientset.CoreV1().Pods(namespace).Delete(ctx, name, opts) } @@ -138,8 +137,8 @@ func (ku KubeAPIWrapper) NewDefaultClientConfigLoadingRules() *clientcmd.ClientC return clientcmd.NewDefaultClientConfigLoadingRules() } -func (ku KubeAPIWrapper) BuildConfigFromFlags(masterUrl string, kubeconfigPath string) (*rest.Config, error) { - return clientcmd.BuildConfigFromFlags(masterUrl, kubeconfigPath) +func (ku KubeAPIWrapper) BuildConfigFromFlags(masterURL string, kubeconfigPath string) (*rest.Config, error) { + return clientcmd.BuildConfigFromFlags(masterURL, kubeconfigPath) } func (ku KubeAPIWrapper) NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) { @@ -150,7 +149,7 @@ func (ku KubeAPIWrapper) NewSPDYExecutor(config *rest.Config, method string, url return remotecommand.NewSPDYExecutor(config, method, url) } -func (ku KubeAPIWrapper) StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error { +func (ku KubeAPIWrapper) 
StreamWithContext(ctx context.Context, exec remotecommand.Executor, options remotecommand.StreamOptions) error { return exec.StreamWithContext(ctx, options) } @@ -166,6 +165,8 @@ func (ku KubeAPIWrapper) NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter { return flowcontrol.NewFakeAlwaysRateLimiter() } +// KubeAPIWrapperInstance is a package level var that wraps all required kubernetes API calls. +// It is instantiated in the NewkubeWorker function and available throughout the package. var KubeAPIWrapperInstance KubeAPIer // ErrPodCompleted is returned when pod has already completed before we could attach. @@ -309,7 +310,7 @@ func (kw *kubeUnit) kubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout // get pod, with retry for retries := 5; retries > 0; retries-- { - kw.pod, err = KubeAPIWrapperInstance.Get(kw.clientset, podNamespace, kw.GetContext(), podName, metav1.GetOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Get(kw.GetContext(), kw.clientset, podNamespace, podName, metav1.GetOptions{}) if err == nil { break } @@ -485,7 +486,7 @@ func (kw *kubeUnit) createPod(env map[string]string) error { } // get pod and store to kw.pod - kw.pod, err = KubeAPIWrapperInstance.Create(kw.clientset, ked.KubeNamespace, kw.GetContext(), pod, metav1.CreateOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Create(kw.GetContext(), kw.clientset, ked.KubeNamespace, pod, metav1.CreateOptions{}) if err != nil { return err } @@ -509,12 +510,12 @@ func (kw *kubeUnit) createPod(env map[string]string) error { ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector - return KubeAPIWrapperInstance.List(kw.clientset, ked.KubeNamespace, kw.GetContext(), options) + return KubeAPIWrapperInstance.List(kw.GetContext(), kw.clientset, ked.KubeNamespace, options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector - return KubeAPIWrapperInstance.Watch(kw.clientset, ked.KubeNamespace, kw.GetContext(), options) + return KubeAPIWrapperInstance.Watch(kw.GetContext(), kw.clientset, ked.KubeNamespace, options) }, } @@ -640,7 +641,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { default: } - kw.pod, err = KubeAPIWrapperInstance.Get(kw.clientset, podNamespace, kw.GetContext(), podName, metav1.GetOptions{}) + kw.pod, err = KubeAPIWrapperInstance.Get(kw.GetContext(), kw.clientset, podNamespace, podName, metav1.GetOptions{}) if err == nil { break } @@ -772,7 +773,7 @@ func (kw *kubeUnit) runWorkUsingLogger() { var err error for retries := 5; retries > 0; retries-- { - err = KubeAPIWrapperInstance.StreamWithContext(exec, kw.GetContext(), remotecommand.StreamOptions{ + err = KubeAPIWrapperInstance.StreamWithContext(kw.GetContext(), exec, remotecommand.StreamOptions{ Stdin: stdin, Tty: false, }) @@ -1357,7 +1358,7 @@ func (kw *kubeUnit) Restart() error { if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } else { - err := KubeAPIWrapperInstance.Delete(kw.clientset, ked.KubeNamespace, context.Background(), ked.PodName, metav1.DeleteOptions{}) + err := KubeAPIWrapperInstance.Delete(kw.GetContext(), kw.clientset, ked.KubeNamespace, ked.PodName, metav1.DeleteOptions{}) if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } @@ -1382,7 +1383,7 @@ func (kw *kubeUnit) Cancel() error { kw.CancelContext() kw.UpdateBasicStatus(WorkStateCanceled, "Canceled", -1) if kw.pod != nil { - err := 
KubeAPIWrapperInstance.Delete(kw.clientset, kw.pod.Namespace, context.Background(), kw.pod.Name, metav1.DeleteOptions{}) + err := KubeAPIWrapperInstance.Delete(kw.GetContext(), kw.clientset, kw.pod.Namespace, kw.pod.Name, metav1.DeleteOptions{}) if err != nil { kw.GetWorkceptor().nc.GetLogger().Error("Error deleting pod %s: %s", kw.pod.Name, err) } diff --git a/pkg/workceptor/kubernetes_test.go b/pkg/workceptor/kubernetes_test.go index f7eb4dea2..ee29659d1 100644 --- a/pkg/workceptor/kubernetes_test.go +++ b/pkg/workceptor/kubernetes_test.go @@ -138,7 +138,7 @@ type hasTerm struct { func (h *hasTerm) DeepCopySelector() fields.Selector { return h } func (h *hasTerm) Empty() bool { return true } -func (h *hasTerm) Matches(ls fields.Fields) bool { return true } +func (h *hasTerm) Matches(_ fields.Fields) bool { return true } func (h *hasTerm) Requirements() fields.Requirements { return []fields.Requirement{{ Field: h.field, @@ -146,18 +146,17 @@ func (h *hasTerm) Requirements() fields.Requirements { Value: h.value, }} } -func (h *hasTerm) RequiresExactMatch(field string) (value string, found bool) { return "", true } -func (h *hasTerm) String() string { return "Test" } -func (h *hasTerm) Transform(fn fields.TransformFunc) (fields.Selector, error) { return h, nil } +func (h *hasTerm) RequiresExactMatch(_ string) (value string, found bool) { return "", true } +func (h *hasTerm) String() string { return "Test" } +func (h *hasTerm) Transform(_ fields.TransformFunc) (fields.Selector, error) { return h, nil } -type ex struct { -} +type ex struct{} -func (e *ex) Stream(options remotecommand.StreamOptions) error { +func (e *ex) Stream(_ remotecommand.StreamOptions) error { return nil } -func (e *ex) StreamWithContext(ctx context.Context, options remotecommand.StreamOptions) error { +func (e *ex) StreamWithContext(_ context.Context, _ remotecommand.StreamOptions) error { return nil } @@ -165,48 +164,54 @@ func TestKubeStart(t *testing.T) { ku, mockbwu, mockNet, w, mockKubeAPI, ctx := createKubernetesTestSetup(t) startTestCases := []struct { - name string + name string + expectedCalls func() }{ - {name: "test1"}, + { + name: "test1", + expectedCalls: func() { + mockbwu.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + config := rest.Config{} + mockKubeAPI.EXPECT().InClusterConfig().Return(&config, nil) + mockbwu.EXPECT().GetWorkceptor().Return(w).AnyTimes() + logger := logger.NewReceptorLogger("") + mockNet.EXPECT().GetLogger().Return(logger).AnyTimes() + clientset := kubernetes.Clientset{} + mockKubeAPI.EXPECT().NewForConfig(gomock.Any()).Return(&clientset, nil) + mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() + lock := &sync.RWMutex{} + mockbwu.EXPECT().GetStatusLock().Return(lock).AnyTimes() + kubeExtraData := workceptor.KubeExtraData{} + status := workceptor.StatusFileData{ExtraData: &kubeExtraData} + mockbwu.EXPECT().GetStatusWithoutExtraData().Return(&status).AnyTimes() + mockbwu.EXPECT().GetStatusCopy().Return(status).AnyTimes() + mockbwu.EXPECT().GetContext().Return(ctx).AnyTimes() + pod := corev1.Pod{TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{Name: "Test Name"}, Spec: corev1.PodSpec{}, Status: corev1.PodStatus{}} + + mockKubeAPI.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() + mockbwu.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() + + field := hasTerm{} + mockKubeAPI.EXPECT().OneTermEqualSelector(gomock.Any(), gomock.Any()).Return(&field).AnyTimes() + ev := 
watch.Event{Object: &pod} + mockKubeAPI.EXPECT().UntilWithSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&ev, nil).AnyTimes() + apierr := apierrors.StatusError{} + mockKubeAPI.EXPECT().NewNotFound(gomock.Any(), gomock.Any()).Return(&apierr).AnyTimes() + mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() + + c := rest.RESTClient{} + req := rest.NewRequest(&c) + mockKubeAPI.EXPECT().SubResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(req).AnyTimes() + exec := ex{} + mockKubeAPI.EXPECT().NewSPDYExecutor(gomock.Any(), gomock.Any(), gomock.Any()).Return(&exec, nil).AnyTimes() + mockbwu.EXPECT().UnitDir().Return("TestDir").AnyTimes() + }, + }, } for _, testCase := range startTestCases { t.Run(testCase.name, func(t *testing.T) { - mockbwu.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - config := rest.Config{} - mockKubeAPI.EXPECT().InClusterConfig().Return(&config, nil) - mockbwu.EXPECT().GetWorkceptor().Return(w).AnyTimes() - logger := logger.NewReceptorLogger("") - mockNet.EXPECT().GetLogger().Return(logger).AnyTimes() - clientset := kubernetes.Clientset{} - mockKubeAPI.EXPECT().NewForConfig(gomock.Any()).Return(&clientset, nil) - mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() - lock := &sync.RWMutex{} - mockbwu.EXPECT().GetStatusLock().Return(lock).AnyTimes() - kubeExtraData := workceptor.KubeExtraData{} - status := workceptor.StatusFileData{ExtraData: &kubeExtraData} - mockbwu.EXPECT().GetStatusWithoutExtraData().Return(&status).AnyTimes() - mockbwu.EXPECT().GetStatusCopy().Return(status).AnyTimes() - mockbwu.EXPECT().GetContext().Return(ctx).AnyTimes() - pod := corev1.Pod{metav1.TypeMeta{}, metav1.ObjectMeta{Name: "Test Name"}, corev1.PodSpec{}, corev1.PodStatus{}} - - mockKubeAPI.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() - mockbwu.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() - - field := hasTerm{} - mockKubeAPI.EXPECT().OneTermEqualSelector(gomock.Any(), gomock.Any()).Return(&field).AnyTimes() - ev := watch.Event{Object: &pod} - mockKubeAPI.EXPECT().UntilWithSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&ev, nil).AnyTimes() - apierr := apierrors.StatusError{} - mockKubeAPI.EXPECT().NewNotFound(gomock.Any(), gomock.Any()).Return(&apierr).AnyTimes() - mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() - - c := rest.RESTClient{} - req := rest.NewRequest(&c) - mockKubeAPI.EXPECT().SubResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(req).AnyTimes() - exec := ex{} - mockKubeAPI.EXPECT().NewSPDYExecutor(gomock.Any(), gomock.Any(), gomock.Any()).Return(&exec, nil).AnyTimes() - mockbwu.EXPECT().UnitDir().Return("TestDir").AnyTimes() + testCase.expectedCalls() err := ku.Start() if err != nil { diff --git a/pkg/workceptor/mock_workceptor/kubernetes.go b/pkg/workceptor/mock_workceptor/kubernetes.go index 82c93d6af..f72746955 100644 --- a/pkg/workceptor/mock_workceptor/kubernetes.go +++ b/pkg/workceptor/mock_workceptor/kubernetes.go @@ -49,148 +49,148 @@ func (m *MockKubeAPIer) EXPECT() *MockKubeAPIerMockRecorder { } // NewNotFound mocks base method -func (m *MockKubeAPIer) NewNotFound(qualifiedResource schema.GroupResource, name string) *errors.StatusError { +func (m *MockKubeAPIer) NewNotFound(arg0 schema.GroupResource, arg1 string) *errors.StatusError { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewNotFound", qualifiedResource, name) + ret := m.ctrl.Call(m, "NewNotFound", arg0, arg1) ret0, _ := 
ret[0].(*errors.StatusError) return ret0 } // NewNotFound indicates an expected call of NewNotFound -func (mr *MockKubeAPIerMockRecorder) NewNotFound(qualifiedResource, name interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewNotFound(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewNotFound", reflect.TypeOf((*MockKubeAPIer)(nil).NewNotFound), qualifiedResource, name) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewNotFound", reflect.TypeOf((*MockKubeAPIer)(nil).NewNotFound), arg0, arg1) } // OneTermEqualSelector mocks base method -func (m *MockKubeAPIer) OneTermEqualSelector(k, v string) fields.Selector { +func (m *MockKubeAPIer) OneTermEqualSelector(arg0, arg1 string) fields.Selector { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OneTermEqualSelector", k, v) + ret := m.ctrl.Call(m, "OneTermEqualSelector", arg0, arg1) ret0, _ := ret[0].(fields.Selector) return ret0 } // OneTermEqualSelector indicates an expected call of OneTermEqualSelector -func (mr *MockKubeAPIerMockRecorder) OneTermEqualSelector(k, v interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) OneTermEqualSelector(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OneTermEqualSelector", reflect.TypeOf((*MockKubeAPIer)(nil).OneTermEqualSelector), k, v) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OneTermEqualSelector", reflect.TypeOf((*MockKubeAPIer)(nil).OneTermEqualSelector), arg0, arg1) } // NewForConfig mocks base method -func (m *MockKubeAPIer) NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) { +func (m *MockKubeAPIer) NewForConfig(arg0 *rest.Config) (*kubernetes.Clientset, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewForConfig", c) + ret := m.ctrl.Call(m, "NewForConfig", arg0) ret0, _ := ret[0].(*kubernetes.Clientset) ret1, _ := ret[1].(error) return ret0, ret1 } // NewForConfig indicates an expected call of NewForConfig -func (mr *MockKubeAPIerMockRecorder) NewForConfig(c interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewForConfig(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewForConfig", reflect.TypeOf((*MockKubeAPIer)(nil).NewForConfig), c) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewForConfig", reflect.TypeOf((*MockKubeAPIer)(nil).NewForConfig), arg0) } // GetLogs mocks base method -func (m *MockKubeAPIer) GetLogs(clientset *kubernetes.Clientset, namespace, name string, opts *v1.PodLogOptions) *rest.Request { +func (m *MockKubeAPIer) GetLogs(arg0 *kubernetes.Clientset, arg1, arg2 string, arg3 *v1.PodLogOptions) *rest.Request { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLogs", clientset, namespace, name, opts) + ret := m.ctrl.Call(m, "GetLogs", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*rest.Request) return ret0 } // GetLogs indicates an expected call of GetLogs -func (mr *MockKubeAPIerMockRecorder) GetLogs(clientset, namespace, name, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) GetLogs(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockKubeAPIer)(nil).GetLogs), clientset, namespace, name, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockKubeAPIer)(nil).GetLogs), arg0, arg1, arg2, arg3) } // Get mocks base method -func 
(m *MockKubeAPIer) Get(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts v10.GetOptions) (*v1.Pod, error) { +func (m *MockKubeAPIer) Get(arg0 context.Context, arg1 *kubernetes.Clientset, arg2, arg3 string, arg4 v10.GetOptions) (*v1.Pod, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", clientset, namespace, ctx, name, opts) + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(*v1.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // Get indicates an expected call of Get -func (mr *MockKubeAPIerMockRecorder) Get(clientset, namespace, ctx, name, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeAPIer)(nil).Get), clientset, namespace, ctx, name, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeAPIer)(nil).Get), arg0, arg1, arg2, arg3, arg4) } // Create mocks base method -func (m *MockKubeAPIer) Create(clientset *kubernetes.Clientset, namespace string, ctx context.Context, pod *v1.Pod, opts v10.CreateOptions) (*v1.Pod, error) { +func (m *MockKubeAPIer) Create(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 *v1.Pod, arg4 v10.CreateOptions) (*v1.Pod, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Create", clientset, namespace, ctx, pod, opts) + ret := m.ctrl.Call(m, "Create", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(*v1.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // Create indicates an expected call of Create -func (mr *MockKubeAPIerMockRecorder) Create(clientset, namespace, ctx, pod, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Create(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeAPIer)(nil).Create), clientset, namespace, ctx, pod, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeAPIer)(nil).Create), arg0, arg1, arg2, arg3, arg4) } // List mocks base method -func (m *MockKubeAPIer) List(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts v10.ListOptions) (*v1.PodList, error) { +func (m *MockKubeAPIer) List(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 v10.ListOptions) (*v1.PodList, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "List", clientset, namespace, ctx, opts) + ret := m.ctrl.Call(m, "List", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*v1.PodList) ret1, _ := ret[1].(error) return ret0, ret1 } // List indicates an expected call of List -func (mr *MockKubeAPIerMockRecorder) List(clientset, namespace, ctx, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) List(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeAPIer)(nil).List), clientset, namespace, ctx, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeAPIer)(nil).List), arg0, arg1, arg2, arg3) } // Watch mocks base method -func (m *MockKubeAPIer) Watch(clientset *kubernetes.Clientset, namespace string, ctx context.Context, opts v10.ListOptions) (watch.Interface, error) { +func (m *MockKubeAPIer) Watch(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 
v10.ListOptions) (watch.Interface, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Watch", clientset, namespace, ctx, opts) + ret := m.ctrl.Call(m, "Watch", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(watch.Interface) ret1, _ := ret[1].(error) return ret0, ret1 } // Watch indicates an expected call of Watch -func (mr *MockKubeAPIerMockRecorder) Watch(clientset, namespace, ctx, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Watch(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockKubeAPIer)(nil).Watch), clientset, namespace, ctx, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockKubeAPIer)(nil).Watch), arg0, arg1, arg2, arg3) } // Delete mocks base method -func (m *MockKubeAPIer) Delete(clientset *kubernetes.Clientset, namespace string, ctx context.Context, name string, opts v10.DeleteOptions) error { +func (m *MockKubeAPIer) Delete(arg0 context.Context, arg1 *kubernetes.Clientset, arg2, arg3 string, arg4 v10.DeleteOptions) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Delete", clientset, namespace, ctx, name, opts) + ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // Delete indicates an expected call of Delete -func (mr *MockKubeAPIerMockRecorder) Delete(clientset, namespace, ctx, name, opts interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) Delete(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeAPIer)(nil).Delete), clientset, namespace, ctx, name, opts) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeAPIer)(nil).Delete), arg0, arg1, arg2, arg3, arg4) } // SubResource mocks base method -func (m *MockKubeAPIer) SubResource(clientset *kubernetes.Clientset, podName, podNamespace string) *rest.Request { +func (m *MockKubeAPIer) SubResource(arg0 *kubernetes.Clientset, arg1, arg2 string) *rest.Request { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubResource", clientset, podName, podNamespace) + ret := m.ctrl.Call(m, "SubResource", arg0, arg1, arg2) ret0, _ := ret[0].(*rest.Request) return ret0 } // SubResource indicates an expected call of SubResource -func (mr *MockKubeAPIerMockRecorder) SubResource(clientset, podName, podNamespace interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) SubResource(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockKubeAPIer)(nil).SubResource), clientset, podName, podNamespace) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockKubeAPIer)(nil).SubResource), arg0, arg1, arg2) } // InClusterConfig mocks base method @@ -223,69 +223,69 @@ func (mr *MockKubeAPIerMockRecorder) NewDefaultClientConfigLoadingRules() *gomoc } // BuildConfigFromFlags mocks base method -func (m *MockKubeAPIer) BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*rest.Config, error) { +func (m *MockKubeAPIer) BuildConfigFromFlags(arg0, arg1 string) (*rest.Config, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BuildConfigFromFlags", masterUrl, kubeconfigPath) + ret := m.ctrl.Call(m, "BuildConfigFromFlags", arg0, arg1) ret0, _ := ret[0].(*rest.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // 
BuildConfigFromFlags indicates an expected call of BuildConfigFromFlags -func (mr *MockKubeAPIerMockRecorder) BuildConfigFromFlags(masterUrl, kubeconfigPath interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) BuildConfigFromFlags(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildConfigFromFlags", reflect.TypeOf((*MockKubeAPIer)(nil).BuildConfigFromFlags), masterUrl, kubeconfigPath) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildConfigFromFlags", reflect.TypeOf((*MockKubeAPIer)(nil).BuildConfigFromFlags), arg0, arg1) } // NewClientConfigFromBytes mocks base method -func (m *MockKubeAPIer) NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) { +func (m *MockKubeAPIer) NewClientConfigFromBytes(arg0 []byte) (clientcmd.ClientConfig, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewClientConfigFromBytes", configBytes) + ret := m.ctrl.Call(m, "NewClientConfigFromBytes", arg0) ret0, _ := ret[0].(clientcmd.ClientConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // NewClientConfigFromBytes indicates an expected call of NewClientConfigFromBytes -func (mr *MockKubeAPIerMockRecorder) NewClientConfigFromBytes(configBytes interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewClientConfigFromBytes(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewClientConfigFromBytes", reflect.TypeOf((*MockKubeAPIer)(nil).NewClientConfigFromBytes), configBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewClientConfigFromBytes", reflect.TypeOf((*MockKubeAPIer)(nil).NewClientConfigFromBytes), arg0) } // NewSPDYExecutor mocks base method -func (m *MockKubeAPIer) NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) { +func (m *MockKubeAPIer) NewSPDYExecutor(arg0 *rest.Config, arg1 string, arg2 *url.URL) (remotecommand.Executor, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewSPDYExecutor", config, method, url) + ret := m.ctrl.Call(m, "NewSPDYExecutor", arg0, arg1, arg2) ret0, _ := ret[0].(remotecommand.Executor) ret1, _ := ret[1].(error) return ret0, ret1 } // NewSPDYExecutor indicates an expected call of NewSPDYExecutor -func (mr *MockKubeAPIerMockRecorder) NewSPDYExecutor(config, method, url interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) NewSPDYExecutor(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSPDYExecutor", reflect.TypeOf((*MockKubeAPIer)(nil).NewSPDYExecutor), config, method, url) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSPDYExecutor", reflect.TypeOf((*MockKubeAPIer)(nil).NewSPDYExecutor), arg0, arg1, arg2) } // StreamWithContext mocks base method -func (m *MockKubeAPIer) StreamWithContext(exec remotecommand.Executor, ctx context.Context, options remotecommand.StreamOptions) error { +func (m *MockKubeAPIer) StreamWithContext(arg0 context.Context, arg1 remotecommand.Executor, arg2 remotecommand.StreamOptions) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StreamWithContext", exec, ctx, options) + ret := m.ctrl.Call(m, "StreamWithContext", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // StreamWithContext indicates an expected call of StreamWithContext -func (mr *MockKubeAPIerMockRecorder) StreamWithContext(exec, ctx, options interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) 
StreamWithContext(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWithContext", reflect.TypeOf((*MockKubeAPIer)(nil).StreamWithContext), exec, ctx, options) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWithContext", reflect.TypeOf((*MockKubeAPIer)(nil).StreamWithContext), arg0, arg1, arg2) } // UntilWithSync mocks base method -func (m *MockKubeAPIer) UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch0.PreconditionFunc, conditions ...watch0.ConditionFunc) (*watch.Event, error) { +func (m *MockKubeAPIer) UntilWithSync(arg0 context.Context, arg1 cache.ListerWatcher, arg2 runtime.Object, arg3 watch0.PreconditionFunc, arg4 ...watch0.ConditionFunc) (*watch.Event, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, lw, objType, precondition} - for _, a := range conditions { + varargs := []interface{}{arg0, arg1, arg2, arg3} + for _, a := range arg4 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "UntilWithSync", varargs...) @@ -295,9 +295,9 @@ func (m *MockKubeAPIer) UntilWithSync(ctx context.Context, lw cache.ListerWatche } // UntilWithSync indicates an expected call of UntilWithSync -func (mr *MockKubeAPIerMockRecorder) UntilWithSync(ctx, lw, objType, precondition interface{}, conditions ...interface{}) *gomock.Call { +func (mr *MockKubeAPIerMockRecorder) UntilWithSync(arg0, arg1, arg2, arg3 interface{}, arg4 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, lw, objType, precondition}, conditions...) + varargs := append([]interface{}{arg0, arg1, arg2, arg3}, arg4...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntilWithSync", reflect.TypeOf((*MockKubeAPIer)(nil).UntilWithSync), varargs...) }
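
Usage note (illustrative sketch, not part of the patch): after this change every pod operation on KubeAPIer takes its context.Context as the first parameter, following the usual Go convention, with the *kubernetes.Clientset second. A minimal caller inside the workceptor package might look like the following; the namespace "receptor" and pod name "example-pod" are placeholder values, and deletePodExample is a hypothetical helper, not a function added by this patch.

package workceptor

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePodExample demonstrates the context-first argument order introduced by
// this patch: Get and Delete both receive ctx, then the clientset, then the
// namespace/name strings, then the options struct.
func deletePodExample(ctx context.Context, api KubeAPIer, clientset *kubernetes.Clientset) error {
	// Look up the pod first; namespace and name here are hypothetical.
	pod, err := api.Get(ctx, clientset, "receptor", "example-pod", metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("get pod: %w", err)
	}

	// Delete it using the same context, mirroring how kubeUnit.Cancel now
	// passes kw.GetContext() rather than context.Background().
	return api.Delete(ctx, clientset, pod.Namespace, pod.Name, metav1.DeleteOptions{})
}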