Original article: https://blog.csdn.net/qq_40404477/article/details/125982161
A common example: a ReplicaSet is the owner of a set of Pods. The ReplicaSet maintains the Pod count, and each Pod links back to the ReplicaSet through its metadata.ownerReferences field. Likewise, a ReplicaSet is a dependent of the Deployment that owns it.
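For reference, here is a sketch of the fields involved (the name and UID are made up for illustration); metav1.NewControllerRef, used later in this post, populates exactly these fields:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/pointer"
)

// ownerRef mirrors what a ReplicaSet-managed Pod carries in
// metadata.ownerReferences; Name and UID are illustrative values.
var ownerRef = metav1.OwnerReference{
	APIVersion:         "apps/v1",
	Kind:               "ReplicaSet",
	Name:               "myapp-5c9b6b8f6d",
	UID:                types.UID("d9607e19-f88f-11e6-a518-42010a800195"),
	Controller:         pointer.Bool(true), // at most one owner may be the managing controller
	BlockOwnerDeletion: pointer.Bool(true), // foreground deletion waits on this dependent
}

func main() { _ = ownerRef }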
Kubernetes resources use cascading deletion by default: deleting a Deployment, for example, also deletes its ReplicaSets and Pods. Cascading deletion comes in two modes, foreground and background; background is the default.
Foreground deletion: the owner object first enters a "deletion in progress" state, in which its metadata.deletionTimestamp field is set and its metadata.finalizers field is set to foregroundDeletion. The owner stays visible through the API until its dependents are deleted, and only then is the owner itself removed.
Background deletion: the API server deletes the owner object immediately, and the garbage collector then deletes the dependents in the background.
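A minimal sketch (not from the original post) of choosing the propagation policy programmatically with the controller-runtime client; `kubectl delete --cascade=foreground|background|orphan` is the CLI equivalent:

package main

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Build a client from the local kubeconfig or in-cluster config.
	c, err := client.New(config.GetConfigOrDie(), client.Options{})
	if err != nil {
		panic(err)
	}
	d := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "myapp", Namespace: "default"}}
	// Foreground: dependents (ReplicaSets, Pods) are deleted before the owner.
	// DeletePropagationBackground and DeletePropagationOrphan are the alternatives.
	if err := c.Delete(context.Background(), d, client.PropagationPolicy(metav1.DeletePropagationForeground)); err != nil {
		panic(err)
	}
}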
Finalizers are Kubernetes's interception mechanism for deletion. A finalizer is only a marker: the deletion machinery does not interpret its value. The value typically carries a note reminding operators which resources still need to be released.
Deleting an object that has finalizers happens in three steps:
1. The API server sets metadata.deletionTimestamp to the current time; the object enters the terminating state.
2. The controllers responsible for the object perform their cleanup and remove their keys from metadata.finalizers.
3. Once the metadata.finalizers field is empty, the object is actually deleted.
The following command forcibly removes a resource's finalizers, after which the object automatically enters the deletion queue:
kubectl -n namespaceName patch configmap configmapName --type json -p '[{"op": "remove", "path": "/metadata/finalizers"}]'
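In controller code, the three steps map onto a well-known pattern. Below is a sketch using controller-runtime's controllerutil helpers; the finalizer key and the cleanupExternalResources helper are illustrative assumptions, not part of the original post:

package controllers

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// myFinalizer is an illustrative finalizer key.
const myFinalizer = "myapp.timd.cn/finalizer"

// cleanupExternalResources is a hypothetical cleanup hook.
func cleanupExternalResources(ctx context.Context, obj client.Object) error { return nil }

// reconcileFinalizer implements the three deletion steps described above.
func reconcileFinalizer(ctx context.Context, c client.Client, obj client.Object) error {
	if obj.GetDeletionTimestamp().IsZero() {
		// Not being deleted: make sure our finalizer is present so that a
		// later delete is intercepted.
		if !controllerutil.ContainsFinalizer(obj, myFinalizer) {
			controllerutil.AddFinalizer(obj, myFinalizer)
			return c.Update(ctx, obj)
		}
		return nil
	}
	// Step 1 has happened: metadata.deletionTimestamp is set.
	if controllerutil.ContainsFinalizer(obj, myFinalizer) {
		// Step 2: release whatever the finalizer was protecting.
		if err := cleanupExternalResources(ctx, obj); err != nil {
			return err
		}
		// Step 3: remove the finalizer; once the list is empty the API
		// server completes the deletion.
		controllerutil.RemoveFinalizer(obj, myFinalizer)
		return c.Update(ctx, obj)
	}
	return nil
}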
The operator built in the rest of this post manages the following topology: an Ingress routing myapp.com to a Service, which fronts a two-replica Deployment. First, the plain YAML version:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
rules:
- host: myapp.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: myapp
port:
number: 80
---
apiVersion: v1
kind: Service
metadata:
name: myapp
spec:
selector:
app: myapp
release: canary
ports:
- name: http-port
targetPort: 80
port: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp
spec:
replicas: 2
selector:
matchLabels:
app: myapp
release: canary
template:
metadata:
labels:
app: myapp
release: canary
spec:
containers:
- name: myapp
image: ikubernetes/myapp:v2
imagePullPolicy: IfNotPresent
ports:
- name: http-port
containerPort: 80
Test command:
curl --resolve "myapp.com:30080:192.168.56.102" "http://myapp.com:30080/hostname.html"
Scaffold the operator project with kubebuilder:
mkdir myapp
cd myapp/
go mod init myapp
kubebuilder init --plugins go/v3 --domain timd.cn --owner "Tim Chow"
kubebuilder create api --group myapp --version v1 --kind Myapp
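These commands scaffold the project; the two files edited below are api/v1/myapp_types.go (the CRD schema) and controllers/myapp_controller.go (the reconciler).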
/*
Copyright 2024 Tim Chow.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// MyappSpec defines the desired state of Myapp
type MyappSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Number of Pod replicas
	// +kubebuilder:validation:Minimum:=1
	// +kubebuilder:validation:Maximum:=1000
	Replicas *int32 `json:"replicas,omitempty"`
	// Container image
	// +kubebuilder:validation:MinLength=3
	// +kubebuilder:validation:MaxLength=1000
	Image string `json:"image,omitempty"`
	// Container port
	// +kubebuilder:validation:Minimum:=1
	// +kubebuilder:validation:Maximum:=65535
	ContainerPort int32 `json:"containerPort,omitempty"`
	// Resource requests and limits
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
	// Environment variables
	Env []corev1.EnvVar `json:"env,omitempty"`
	// Service port
	// +kubebuilder:validation:Minimum:=1
	// +kubebuilder:validation:Maximum:=65535
	ServicePort int32 `json:"servicePort,omitempty"`
	// Hostname used in the Ingress rule
	// +kubebuilder:validation:MinLength=3
	// +kubebuilder:validation:MaxLength=1000
	Host string `json:"host,omitempty"`
}
const (
Running = "Running"
Pending = "Pending"
NotReady = "NotReady"
Failed = "Failed"
)
// MyappStatus defines the observed state of Myapp
type MyappStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Desired number of Pod replicas
	Replicas int32 `json:"replicas,omitempty"`
	// Number of ready Pods
	ReadyReplicas int32 `json:"readyReplicas,omitempty"`
	// Label selector
	LabelSelector map[string]string `json:"labelSelector,omitempty"`
	// Aggregate status, one of the constants defined above
	Status string `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
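// Optional (not in the original post): printcolumn markers like the following,
// if uncommented, would surface the status in `kubectl get myapps`:
//   //+kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas"
//   //+kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas"
//   //+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.status"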
// Myapp is the Schema for the myapps API
type Myapp struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec MyappSpec `json:"spec,omitempty"`
Status MyappStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// MyappList contains a list of Myapp
type MyappList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Myapp `json:"items"`
}
func init() {
SchemeBuilder.Register(&Myapp{}, &MyappList{})
}
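After editing the types, `make manifests` regenerates the CRD. Next, the reconciler in controllers/myapp_controller.go: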
/*
Copyright 2024 Tim Chow.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"time"
myappv1 "myapp/api/v1"
)
// MyappReconciler reconciles a Myapp object
type MyappReconciler struct {
client.Client
Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=myapp.timd.cn,resources=myapps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=myapp.timd.cn,resources=myapps/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=myapp.timd.cn,resources=myapps/finalizers,verbs=update
// The controller also creates and updates Deployments, Services and Ingresses,
// so the manager needs RBAC on those resources as well:
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Myapp object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile
func (r *MyappReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
app := &myappv1.Myapp{}
err := r.Client.Get(ctx, req.NamespacedName, app)
if err != nil {
		// The request object was not found; it has most likely been deleted.
		// Owned objects are garbage collected automatically; use finalizers
		// for any additional cleanup logic. Return without requeueing.
if errors.IsNotFound(err) {
logger.Info(req.NamespacedName.String() + " not found")
return ctrl.Result{}, nil
}
		// Failed to read the object; requeue the request.
logger.Error(err, "error occurs while getting "+req.NamespacedName.String())
return ctrl.Result{}, err
}
if app.DeletionTimestamp != nil {
logger.Info(req.NamespacedName.String() + " deleted at " + app.DeletionTimestamp.Format(time.RFC3339))
return reconcile.Result{}, nil
}
	// Reconcile the Ingress.
	ingress := NewIngress(app)
	foundIngress := &v1.Ingress{}
	if err := r.Client.Get(ctx, req.NamespacedName, foundIngress); err != nil {
		if errors.IsNotFound(err) {
			logger.Info("Ingress of " + req.NamespacedName.String() + " not found, creating ...")
			// The Ingress does not exist yet, so create it.
			if err := r.Client.Create(ctx, ingress); err != nil {
				logger.Error(err, "error occurs while creating Ingress of "+req.NamespacedName.String())
				return ctrl.Result{}, err
			}
			logger.Info("Ingress of " + req.NamespacedName.String() + " created successfully")
		} else {
			logger.Error(err, "error occurs while getting Ingress of "+req.NamespacedName.String())
			return ctrl.Result{}, err
		}
	} else {
		// TODO: skip the update when nothing has changed.
		logger.Info("updating Ingress of " + req.NamespacedName.String())
		// Getting into the freshly built object would overwrite the desired
		// spec with the live state, so fetch into a separate object and copy
		// the desired spec across before updating.
		foundIngress.Spec = ingress.Spec
		if err := r.Client.Update(ctx, foundIngress); err != nil {
			logger.Error(err, "error occurs while updating Ingress of "+req.NamespacedName.String())
			return ctrl.Result{}, err
		}
		logger.Info("Ingress of " + req.NamespacedName.String() + " updated successfully")
	}
	// Reconcile the Service.
	service := NewService(app)
	foundService := &corev1.Service{}
	if err := r.Client.Get(ctx, req.NamespacedName, foundService); err != nil {
		if errors.IsNotFound(err) {
			logger.Info("Service of " + req.NamespacedName.String() + " not found, creating ...")
			// The Service does not exist yet, so create it.
			if err := r.Client.Create(ctx, service); err != nil {
				logger.Error(err, "error occurs while creating Service of "+req.NamespacedName.String())
				return ctrl.Result{}, err
			}
			logger.Info("Service of " + req.NamespacedName.String() + " created successfully")
		} else {
			logger.Error(err, "error occurs while getting Service of "+req.NamespacedName.String())
			return ctrl.Result{}, err
		}
	} else {
		// TODO: skip the update when nothing has changed.
		logger.Info("updating Service of " + req.NamespacedName.String())
		// Only copy mutable fields; spec.clusterIP is immutable and must keep
		// the value the API server allocated.
		foundService.Spec.Ports = service.Spec.Ports
		foundService.Spec.Selector = service.Spec.Selector
		if err := r.Client.Update(ctx, foundService); err != nil {
			logger.Error(err, "error occurs while updating Service of "+req.NamespacedName.String())
			return ctrl.Result{}, err
		}
		logger.Info("Service of " + req.NamespacedName.String() + " updated successfully")
	}
	// Reconcile the Deployment.
	deployment := NewDeployment(app)
	foundDeployment := &appsv1.Deployment{}
	if err := r.Client.Get(ctx, req.NamespacedName, foundDeployment); err != nil {
		if errors.IsNotFound(err) {
			logger.Info("Deployment of " + req.NamespacedName.String() + " not found, creating ...")
			// The Deployment does not exist yet, so create it.
			if err := r.Client.Create(ctx, deployment); err != nil {
				logger.Error(err, "error occurs while creating Deployment of "+req.NamespacedName.String())
				return ctrl.Result{}, err
			}
			logger.Info("Deployment of " + req.NamespacedName.String() + " created successfully")
			foundDeployment = deployment
		} else {
			logger.Error(err, "error occurs while getting Deployment of "+req.NamespacedName.String())
			return ctrl.Result{}, err
		}
	} else {
		// TODO: skip the update when nothing has changed.
		logger.Info("updating Deployment of " + req.NamespacedName.String())
		// Copy the desired spec onto the live object so the update carries a
		// valid resourceVersion.
		foundDeployment.Spec = deployment.Spec
		if err := r.Client.Update(ctx, foundDeployment); err != nil {
			logger.Error(err, "error occurs while updating Deployment of "+req.NamespacedName.String())
			return ctrl.Result{}, err
		}
		logger.Info("Deployment of " + req.NamespacedName.String() + " updated successfully")
	}
	// Update the status subresource.
	var desiredReplicas int32 = 1 // a Deployment defaults replicas to 1 when unset
	if foundDeployment.Spec.Replicas != nil {
		desiredReplicas = *foundDeployment.Spec.Replicas
	}
	newStatus := myappv1.MyappStatus{
		Replicas:      desiredReplicas,
		ReadyReplicas: foundDeployment.Status.ReadyReplicas,
		LabelSelector: getLabels(app),
	}
if newStatus.ReadyReplicas < newStatus.Replicas {
newStatus.Status = myappv1.NotReady
} else {
newStatus.Status = myappv1.Running
}
if !reflect.DeepEqual(newStatus, app.Status) {
logger.Info("updating status of " + req.NamespacedName.String())
app.Status = newStatus
err := r.Client.Status().Update(ctx, app)
if err != nil {
logger.Error(err, "error occurs while updating status of "+req.NamespacedName.String())
return reconcile.Result{}, err
}
}
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *MyappReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&myappv1.Myapp{}).
Owns(&appsv1.Deployment{}).
Owns(&corev1.Service{}).
Owns(&v1.Ingress{}).
Complete(r)
}
func getLabels(app *myappv1.Myapp) map[string]string {
	return map[string]string{
		"app":     app.Name, // the app label should track the CR's name, not its namespace
		"release": "canary",
	}
}
func NewDeployment(app *myappv1.Myapp) *appsv1.Deployment {
labels := getLabels(app)
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
Kind: "Deployment",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: app.Name,
Namespace: app.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				// GroupVersionKind() on an object fetched through the client is
				// typically empty, so build the GVK from the scheme's GroupVersion.
				*metav1.NewControllerRef(app, myappv1.GroupVersion.WithKind("Myapp")),
			},
},
Spec: appsv1.DeploymentSpec{
Replicas: app.Spec.Replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{Labels: labels},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: app.Name,
Image: app.Spec.Image,
Ports: []corev1.ContainerPort{
{
ContainerPort: app.Spec.ContainerPort,
},
},
ImagePullPolicy: corev1.PullIfNotPresent,
Resources: app.Spec.Resources,
Env: app.Spec.Env,
},
},
},
},
},
}
}
func NewService(app *myappv1.Myapp) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Name: app.Name,
Namespace: app.Namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(app, myappv1.GroupVersion.WithKind("Myapp")),
			},
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Port: app.Spec.ServicePort,
					TargetPort: intstr.FromInt(int(app.Spec.ContainerPort)),
},
},
Selector: getLabels(app),
},
}
}
func NewIngress(app *myappv1.Myapp) *v1.Ingress {
pathType := v1.PathTypePrefix
return &v1.Ingress{
TypeMeta: metav1.TypeMeta{
APIVersion: "networking.k8s.io/v1",
Kind: "Ingress",
},
ObjectMeta: metav1.ObjectMeta{
Name: app.Name,
Namespace: app.Namespace,
Annotations: map[string]string{
"kubernetes.io/ingress.class": "nginx",
},
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(app, myappv1.GroupVersion.WithKind("Myapp")),
			},
},
Spec: v1.IngressSpec{
Rules: []v1.IngressRule{
{
Host: app.Spec.Host,
IngressRuleValue: v1.IngressRuleValue{
HTTP: &v1.HTTPIngressRuleValue{
Paths: []v1.HTTPIngressPath{
{
Path: "/",
PathType: &pathType,
Backend: v1.IngressBackend{
Service: &v1.IngressServiceBackend{
Name: app.Name,
Port: v1.ServiceBackendPort{
Number: app.Spec.ServicePort,
},
},
},
},
},
},
},
},
},
},
}
}
Finally, a manifest to exercise the operator:
apiVersion: myapp.timd.cn/v1
kind: Myapp
metadata:
name: test-myapp
spec:
replicas: 2
image: ikubernetes/myapp:v2
containerPort: 80
servicePort: 80
host: myapp.com
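After `make install` and `make run` (or a full `make deploy`), applying this manifest should produce a Deployment, a Service, and an Ingress, all named test-myapp and owned by the Myapp object, so `kubectl delete myapp test-myapp` cascades to all three.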