The kubelet interacts with Docker through DockerManager; from the kubelet's point of view, DockerManager is simply one kind of runtime. This analysis walks through DockerManager.

DockerManager

DockerManager is defined in /pkg/kubelet/dockertools/docker_manager.go:

type DockerManager struct {
    client              DockerInterface
    recorder            record.EventRecorder
    containerRefManager *kubecontainer.RefManager
    os                  kubecontainer.OSInterface
    machineInfo         *cadvisorapi.MachineInfo
    // The image name of the pod infra container.
    podInfraContainerImage string
    // (Optional) Additional environment variables to be set for the pod infra container.
    podInfraContainerEnv []api.EnvVar
    // TODO(yifan): Record the pull failure so we can eliminate the image checking?
    // Lower level docker image puller.
    dockerPuller DockerPuller
    // wrapped image puller.
    imagePuller images.ImageManager
    // Root of the Docker runtime.
    dockerRoot string
    // cgroup driver used by Docker runtime.
    cgroupDriver string
    // Directory of container logs.
    containerLogsDir string
    // Network plugin.
    networkPlugin network.NetworkPlugin
    // Health check results.
    //***livenessManager holds liveness probe results***//
    livenessManager proberesults.Manager
    // RuntimeHelper that wraps kubelet to generate runtime container options.
    runtimeHelper kubecontainer.RuntimeHelper
    // Runner of lifecycle events.
    runner kubecontainer.HandlerRunner
    // Handler used to execute commands in containers.
    //***executes commands inside containers***//
    execHandler ExecHandler
    // Used to set OOM scores of processes.
    oomAdjuster *oom.OOMAdjuster
    // Get information from /proc mount.
    procFs procfs.ProcFSInterface
    // If true, enforce container cpu limits with CFS quota support
    cpuCFSQuota bool
    // Container GC manager
    //***garbage collector for dead containers***//
    containerGC *containerGC
    // Support for gathering custom metrics.
    enableCustomMetrics bool
    // If true, the "hairpin mode" flag is set on container interfaces.
    // A false value means the kubelet just backs off from setting it,
    // it might already be true.
    configureHairpinMode bool
    // Provides image stats
    //***image stats provider***//
    *imageStatsProvider
    // The version cache of docker daemon.
    versionCache *cache.ObjectCache
    // Directory to host local seccomp profiles.
    seccompProfileRoot string
}

A DockerManager is constructed with NewDockerManager():

//***creates a DockerManager***//
func NewDockerManager(
    client DockerInterface,
    recorder record.EventRecorder,
    livenessManager proberesults.Manager,
    containerRefManager *kubecontainer.RefManager,
    podGetter podGetter,
    machineInfo *cadvisorapi.MachineInfo,
    podInfraContainerImage string,
    qps float32,
    burst int,
    containerLogsDir string,
    osInterface kubecontainer.OSInterface,
    networkPlugin network.NetworkPlugin,
    runtimeHelper kubecontainer.RuntimeHelper,
    httpClient types.HttpGetter,
    execHandler ExecHandler,
    oomAdjuster *oom.OOMAdjuster,
    procFs procfs.ProcFSInterface,
    cpuCFSQuota bool,
    imageBackOff *flowcontrol.Backoff,
    serializeImagePulls bool,
    enableCustomMetrics bool,
    hairpinMode bool,
    seccompProfileRoot string,
    options ...kubecontainer.Option) *DockerManager {
    // Wrap the docker client with instrumentedDockerInterface
    //***client becomes an instrumentedDockerInterface***//
    client = NewInstrumentedDockerInterface(client)
    // Work out the location of the Docker runtime, defaulting to /var/lib/docker
    // if there are any problems.
    dockerRoot := "/var/lib/docker"
    // cgroup driver is only detectable in docker 1.12+
    // when the execution driver is not detectable, we provide the cgroupfs form.
    // if your docker engine is configured to use the systemd cgroup driver, and you
    // want to use pod level cgroups, you must be on docker 1.12+ to ensure cgroup-parent
    // is converted appropriately. otherwise, docker will fail to launch the container
    // and complain the cgroup name provided did not conform to systemd conventions.
    var cgroupDriver string
    dockerInfo, err := client.Info()
    if err != nil {
        glog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
        glog.Warningf("Using fallback default of /var/lib/docker for location of Docker runtime")
    } else {
        dockerRoot = dockerInfo.DockerRootDir
        glog.Infof("Setting dockerRoot to %s", dockerRoot)
        cgroupDriver = dockerInfo.CgroupDriver
        glog.Infof("Setting cgroupDriver to %s", cgroupDriver)
    }
    dm := &DockerManager{
        client:                 client,
        recorder:               recorder,
        containerRefManager:    containerRefManager,
        os:                     osInterface,
        machineInfo:            machineInfo,
        podInfraContainerImage: podInfraContainerImage,
        dockerPuller:           newDockerPuller(client),
        dockerRoot:             dockerRoot,
        cgroupDriver:           cgroupDriver,
        containerLogsDir:       containerLogsDir,
        networkPlugin:          networkPlugin,
        livenessManager:        livenessManager,
        runtimeHelper:          runtimeHelper,
        execHandler:            execHandler,
        oomAdjuster:            oomAdjuster,
        procFs:                 procFs,
        cpuCFSQuota:            cpuCFSQuota,
        enableCustomMetrics:    enableCustomMetrics,
        configureHairpinMode:   hairpinMode,
        imageStatsProvider:     newImageStatsProvider(client),
        seccompProfileRoot:     seccompProfileRoot,
    }
    cmdRunner := kubecontainer.DirectStreamingRunner(dm)
    dm.runner = lifecycle.NewHandlerRunner(httpClient, cmdRunner, dm)
    dm.imagePuller = images.NewImageManager(kubecontainer.FilterEventRecorder(recorder), dm, imageBackOff, serializeImagePulls, qps, burst)
    dm.containerGC = NewContainerGC(client, podGetter, containerLogsDir)
    dm.versionCache = cache.NewObjectCache(
        func() (interface{}, error) {
            return dm.getVersionInfo()
        },
        versionCacheTTL,
    )
    // apply optional settings..
    for _, optf := range options {
        optf(dm)
    }
    return dm
}
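The trailing "options ...kubecontainer.Option" parameter is the functional-options idiom: each option is a function that mutates the manager after the required fields have been wired up, which is exactly what the final "for _, optf := range options" loop does. Below is a minimal, self-contained sketch of the idiom; the type and names are invented for illustration and are not the real kubelet API.

package main

import "fmt"

type manager struct {
    enableCustomMetrics bool
}

// option mirrors the role of kubecontainer.Option: a function applied to the
// constructed object to flip optional settings.
type option func(*manager)

func withCustomMetrics(m *manager) { m.enableCustomMetrics = true }

func newManager(opts ...option) *manager {
    m := &manager{}
    // the same kind of loop NewDockerManager runs over its options
    for _, opt := range opts {
        opt(m)
    }
    return m
}

func main() {
    m := newManager(withCustomMetrics)
    fmt.Println(m.enableCustomMetrics) // true
}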

DockerManager::SyncPod()

Let's start with DockerManager's most important method, SyncPod(). Its flow is as follows (a simplified skeleton follows this list):

  1. Call computePodContainerChanges() to compute the differences between the pod spec and the actual Docker containers;
  2. Kill any surplus containers;
  3. Handle the pause (infra) container;
  4. Handle init containers; init containers are a recently introduced concept: containers that run once to completion;
  5. Start the regular containers.
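Distilled from the full implementation below, the control flow looks roughly like this. This is an illustrative, runnable skeleton with invented helper names, not the real kubelet code:

package main

import "fmt"

type changes struct {
    startInfra      bool
    toKeep, toStart int
}

func killPod()                { fmt.Println("kill everything, pause container included") }
func killUnwanted()           { fmt.Println("kill only containers not marked to keep") }
func startPauseContainer()    { fmt.Println("create infra container, set up its network") }
func nextInitContainer() bool { return false } // true while an init container still has to run
func startRegular()           { fmt.Println("start the pod's regular containers") }

func main() {
    c := changes{startInfra: true, toStart: 1}
    if c.startInfra || (c.toKeep == 0 && c.toStart == 0) {
        killPod()
    } else {
        killUnwanted()
    }
    if c.startInfra && c.toStart > 0 {
        startPauseContainer()
    }
    if nextInitContainer() {
        return // init containers run one per sync; the next sync resumes here
    }
    startRegular()
}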
//***sync the pod's containers to the desired state***//
func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
    start := time.Now()
    defer func() {
        metrics.ContainerManagerLatency.WithLabelValues("SyncPod").Observe(metrics.SinceInMicroseconds(start))
    }()
    //***compute the changes needed for the pod***//
    containerChanges, err := dm.computePodContainerChanges(pod, podStatus)
    if err != nil {
        result.Fail(err)
        return
    }
    glog.V(3).Infof("Got container changes for pod %q: %+v", format.Pod(pod), containerChanges)
    if containerChanges.InfraChanged {
        dm.recorder.Eventf(pod, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
    }
    if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) {
        if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 {
            glog.V(4).Infof("Killing Infra Container for %q because all other containers are dead.", format.Pod(pod))
        } else {
            glog.V(4).Infof("Killing Infra Container for %q, will start new one", format.Pod(pod))
        }
        // Killing phase: if we want to start new infra container, or nothing is running kill everything (including infra container)
        // TODO(random-liu): We'll use pod status directly in the future
        killResult := dm.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(dm.Type(), podStatus), nil)
        result.AddPodSyncResult(killResult)
        if killResult.Error() != nil {
            return
        }
    } else {
        //***kill running containers that are not in the keep sets***//
        // Otherwise kill any running containers in this pod which are not specified as ones to keep.
        runningContainerStatues := podStatus.GetRunningContainerStatuses()
        for _, containerStatus := range runningContainerStatues {
            _, keep := containerChanges.ContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)]
            _, keepInit := containerChanges.InitContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)]
            if !keep && !keepInit {
                glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerStatus.Name, containerStatus.ID, format.Pod(pod))
                // attempt to find the appropriate container policy
                var podContainer *api.Container
                var killMessage string
                for i, c := range pod.Spec.Containers {
                    if c.Name == containerStatus.Name {
                        podContainer = &pod.Spec.Containers[i]
                        killMessage = containerChanges.ContainersToStart[i]
                        break
                    }
                }
                killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerStatus.Name)
                result.AddSyncResult(killContainerResult)
                if err := dm.KillContainerInPod(containerStatus.ID, podContainer, pod, killMessage, nil); err != nil {
                    killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
                    glog.Errorf("Error killing container %q(id=%q) for pod %q: %v", containerStatus.Name, containerStatus.ID, format.Pod(pod), err)
                    return
                }
            }
        }
    }
    // Keep terminated init containers fairly aggressively controlled
    dm.pruneInitContainersBeforeStart(pod, podStatus, containerChanges.InitContainersToKeep)
    // We pass the value of the podIP down to runContainerInPod, which in turn
    // passes it to various other functions, in order to facilitate
    // functionality that requires this value (hosts file and downward API)
    // and avoid races determining the pod IP in cases where a container
    // requires restart but the podIP isn't in the status manager yet.
    //
    // We default to the IP in the passed-in pod status, and overwrite it if the
    // infra container needs to be (re)started.
    podIP := ""
    if podStatus != nil {
        podIP = podStatus.IP
    }
    // If we should create infra container then we do it first.
    //***start the pause container***//
    podInfraContainerID := containerChanges.InfraContainerId
    if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) {
        glog.V(4).Infof("Creating pod infra container for %q", format.Pod(pod))
        startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, PodInfraContainerName)
        result.AddSyncResult(startContainerResult)
        var msg string
        podInfraContainerID, err, msg = dm.createPodInfraContainer(pod)
        if err != nil {
            startContainerResult.Fail(err, msg)
            glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q: %s", err, format.Pod(pod), msg)
            return
        }
        //***set up the pause container's network***//
        setupNetworkResult := kubecontainer.NewSyncResult(kubecontainer.SetupNetwork, kubecontainer.GetPodFullName(pod))
        result.AddSyncResult(setupNetworkResult)
        if !kubecontainer.IsHostNetworkPod(pod) {
            glog.V(3).Infof("Calling network plugin %s to setup pod for %s", dm.networkPlugin.Name(), format.Pod(pod))
            //***by default networkPlugin is kubernetes.io/no-op***//
            err = dm.networkPlugin.SetUpPod(pod.Namespace, pod.Name, podInfraContainerID.ContainerID())
            if err != nil {
                // TODO: (random-liu) There shouldn't be "Skipping pod" in sync result message
                message := fmt.Sprintf("Failed to setup network for pod %q using network plugins %q: %v; Skipping pod", format.Pod(pod), dm.networkPlugin.Name(), err)
                setupNetworkResult.Fail(kubecontainer.ErrSetupNetwork, message)
                glog.Error(message)
                // Delete infra container
                killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, PodInfraContainerName)
                result.AddSyncResult(killContainerResult)
                if delErr := dm.KillContainerInPod(kubecontainer.ContainerID{
                    ID:   string(podInfraContainerID),
                    Type: "docker"}, nil, pod, message, nil); delErr != nil {
                    killContainerResult.Fail(kubecontainer.ErrKillContainer, delErr.Error())
                    glog.Warningf("Clear infra container failed for pod %q: %v", format.Pod(pod), delErr)
                }
                return
            }
            // Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready)
            podInfraContainer, err := dm.client.InspectContainer(string(podInfraContainerID))
            if err != nil {
                glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, format.Pod(pod))
                result.Fail(err)
                return
            }
            if dm.configureHairpinMode {
                if err = hairpin.SetUpContainerPid(podInfraContainer.State.Pid, network.DefaultInterfaceName); err != nil {
                    glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err)
                }
            }
            // Overwrite the podIP passed in the pod status, since we just started the infra container.
            podIP, err = dm.determineContainerIP(pod.Namespace, pod.Name, podInfraContainer)
            if err != nil {
                glog.Errorf("Network error: %v; Skipping pod %q", err, format.Pod(pod))
                result.Fail(err)
                return
            }
            glog.Infof("Determined pod ip after infra change: %q: %q", format.Pod(pod), podIP)
        }
    }
    //***handle init containers***//
    next, status, done := findActiveInitContainer(pod, podStatus)
    if status != nil {
        if status.ExitCode != 0 {
            // container initialization has failed, flag the pod as failed
            initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name)
            initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
            result.AddSyncResult(initContainerResult)
            if pod.Spec.RestartPolicy == api.RestartPolicyNever {
                utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status))
                return
            }
            utilruntime.HandleError(fmt.Errorf("Error running pod %q init container %q, restarting: %#v", format.Pod(pod), status.Name, status))
        }
    }
    // Note: when configuring the pod's containers anything that can be configured by pointing
    // to the namespace of the infra container should use namespaceMode. This includes things like the net namespace
    // and IPC namespace. PID mode cannot point to another container right now.
    // See createPodInfraContainer for infra container setup.
    namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID)
    pidMode := getPidMode(pod)
    if next != nil {
        if len(containerChanges.ContainersToStart) == 0 {
            glog.V(4).Infof("No containers to start, stopping at init container %+v in pod %v", next.Name, format.Pod(pod))
            return
        }
        // If we need to start the next container, do so now then exit
        container := next
        startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
        result.AddSyncResult(startContainerResult)
        // containerChanges.StartInfraContainer causes the containers to be restarted for config reasons
        if !containerChanges.StartInfraContainer {
            isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff)
            if isInBackOff {
                startContainerResult.Fail(err, msg)
                glog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
                return
            }
        }
        glog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
            startContainerResult.Fail(err, msg)
            utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
            return
        }
        // Successfully started the container; clear the entry in the failure
        glog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
        return
    }
    if !done {
        // init container still running
        glog.V(4).Infof("An init container is still running in pod %v", format.Pod(pod))
        return
    }
    if containerChanges.InitFailed {
        // init container still running
        glog.V(4).Infof("Not all init containers have succeeded for pod %v", format.Pod(pod))
        return
    }
    //***start regular containers***//
    // Start regular containers
    for idx := range containerChanges.ContainersToStart {
        container := &pod.Spec.Containers[idx]
        startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
        result.AddSyncResult(startContainerResult)
        // containerChanges.StartInfraContainer causes the containers to be restarted for config reasons
        if !containerChanges.StartInfraContainer {
            isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff)
            if isInBackOff {
                startContainerResult.Fail(err, msg)
                glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
                continue
            }
        }
        glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
        if err, msg := dm.tryContainerStart(container, pod, podStatus, pullSecrets, namespaceMode, pidMode, podIP); err != nil {
            startContainerResult.Fail(err, msg)
            utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
            continue
        }
    }
    return
}
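Note that SyncPod() does not return a plain error; it aggregates one SyncResult per step (kill, start container, set up network, and so on) into a kubecontainer.PodSyncResult. A simplified fragment of how a caller might consume it, modeled on the Error() checks visible inside the function itself (this is not the actual kubelet call site):

result := dm.SyncPod(pod, apiPodStatus, podStatus, pullSecrets, backOff)
if err := result.Error(); err != nil {
    // some per-step SyncResult recorded a failure; the sync loop retries
    // the pod on a later iteration, subject to backOff
    glog.Errorf("Error syncing pod %q: %v", format.Pod(pod), err)
}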

DockerManager::computePodContainerChanges()

computePodContainerChanges() works out which containers need to be (re)started and which should be kept. The flow is fairly involved, so reading the code directly gives the clearest picture.

func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) {
    start := time.Now()
    defer func() {
        metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start))
    }()
    glog.V(5).Infof("Syncing Pod %q: %#v", format.Pod(pod), pod)
    //***containersToStart holds the containers to start***//
    //***containersToKeep holds the containers to keep***//
    containersToStart := make(map[int]string)
    containersToKeep := make(map[kubecontainer.DockerID]int)
    var err error
    var podInfraContainerID kubecontainer.DockerID
    var changed bool
    //***get the status of the pause container***//
    podInfraContainerStatus := podStatus.FindContainerStatusByName(PodInfraContainerName)
    if podInfraContainerStatus != nil && podInfraContainerStatus.State == kubecontainer.ContainerStateRunning {
        glog.V(4).Infof("Found pod infra container for %q", format.Pod(pod))
        changed, err = dm.podInfraContainerChanged(pod, podInfraContainerStatus)
        if err != nil {
            return podContainerChangesSpec{}, err
        }
    }
    //***createPodInfraContainer flags whether the pause container must be (re)started***//
    createPodInfraContainer := true
    if podInfraContainerStatus == nil || podInfraContainerStatus.State != kubecontainer.ContainerStateRunning {
        //***the pause container does not exist, so createPodInfraContainer stays true***//
        glog.V(2).Infof("Need to restart pod infra container for %q because it is not found", format.Pod(pod))
    } else if changed {
        //***the pause container has changed, so createPodInfraContainer stays true***//
        glog.V(2).Infof("Need to restart pod infra container for %q because it is changed", format.Pod(pod))
    } else {
        //***the pause container is healthy, so set createPodInfraContainer to false***//
        glog.V(4).Infof("Pod infra container looks good, keep it %q", format.Pod(pod))
        createPodInfraContainer = false
        podInfraContainerID = kubecontainer.DockerID(podInfraContainerStatus.ID.ID)
        containersToKeep[podInfraContainerID] = -1
    }
    // check the status of the init containers
    initFailed := false
    initContainersToKeep := make(map[kubecontainer.DockerID]int)
    // always reset the init containers if the pod is reset
    //***if the pause container needs no restart, record the init containers to keep***//
    if !createPodInfraContainer {
        // keep all successfully completed containers up to and including the first failing container
    Containers:
        for i, container := range pod.Spec.InitContainers {
            containerStatus := podStatus.FindContainerStatusByName(container.Name)
            switch {
            case containerStatus == nil:
                continue
            case containerStatus.State == kubecontainer.ContainerStateRunning:
                initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i
            case containerStatus.State == kubecontainer.ContainerStateExited:
                initContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)] = i
                // TODO: should we abstract the "did the init container fail" check?
                if containerStatus.ExitCode != 0 {
                    initFailed = true
                    break Containers
                }
            }
        }
    }
    //***record the containers that need to be (re)started***//
    for index, container := range pod.Spec.Containers {
        containerStatus := podStatus.FindContainerStatusByName(container.Name)
        if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
            if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
                // If we are here it means that the container is dead and should be restarted, or never existed and should
                // be created. We may be inserting this ID again if the container has changed and it has
                // RestartPolicy::Always, but it's not a big deal.
                message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
                glog.V(3).Info(message)
                containersToStart[index] = message
            }
            continue
        }
        containerID := kubecontainer.DockerID(containerStatus.ID.ID)
        glog.V(3).Infof("pod %q container %q exists as %v", format.Pod(pod), container.Name, containerID)
        if createPodInfraContainer {
            // createPodInfraContainer == true and Container exists
            // If we're creating infra container everything will be killed anyway
            // If RestartPolicy is Always or OnFailure we restart containers that were running before we
            // killed them when restarting Infra Container.
            if pod.Spec.RestartPolicy != api.RestartPolicyNever {
                message := fmt.Sprintf("Infra Container is being recreated. %q will be restarted.", container.Name)
                glog.V(1).Info(message)
                containersToStart[index] = message
            }
            continue
        }
        if initFailed {
            // initialization failed and Container exists
            // If we have an initialization failure everything will be killed anyway
            // If RestartPolicy is Always or OnFailure we restart containers that were running before we
            // killed them when re-running initialization
            if pod.Spec.RestartPolicy != api.RestartPolicyNever {
                message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
                glog.V(1).Info(message)
                containersToStart[index] = message
            }
            continue
        }
        // At this point, the container is running and pod infra container is good.
        // We will look for changes and check healthiness for the container.
        expectedHash := kubecontainer.HashContainer(&container)
        hash := containerStatus.Hash
        containerChanged := hash != 0 && hash != expectedHash
        if containerChanged {
            message := fmt.Sprintf("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, hash, expectedHash)
            glog.Info(message)
            containersToStart[index] = message
            continue
        }
        liveness, found := dm.livenessManager.Get(containerStatus.ID)
        if !found || liveness == proberesults.Success {
            containersToKeep[containerID] = index
            continue
        }
        if pod.Spec.RestartPolicy != api.RestartPolicyNever {
            message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
            glog.Info(message)
            containersToStart[index] = message
        }
    }
    // After the loop one of the following should be true:
    // - createPodInfraContainer is true and containersToKeep is empty.
    // (In fact, when createPodInfraContainer is false, containersToKeep will not be touched).
    // - createPodInfraContainer is false and containersToKeep contains at least ID of Infra Container
    // If Infra container is the last running one, we don't want to keep it, and we don't want to
    // keep any init containers.
    if !createPodInfraContainer && len(containersToStart) == 0 && len(containersToKeep) == 1 {
        containersToKeep = make(map[kubecontainer.DockerID]int)
        initContainersToKeep = make(map[kubecontainer.DockerID]int)
    }
    return podContainerChangesSpec{
        StartInfraContainer:  createPodInfraContainer,
        InfraChanged:         changed,
        InfraContainerId:     podInfraContainerID,
        InitFailed:           initFailed,
        InitContainersToKeep: initContainersToKeep,
        ContainersToStart:    containersToStart,
        ContainersToKeep:     containersToKeep,
    }, nil
}
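The final reset rule deserves a worked example: if the pause container is healthy (createPodInfraContainer is false), no container needs starting, and the only entry left in containersToKeep is the pause container itself, both keep maps are emptied so the now-useless pod gets torn down. A self-contained illustration of that rule, with maps keyed by plain strings instead of kubecontainer.DockerID:

package main

import "fmt"

func main() {
    createPodInfraContainer := false
    containersToStart := map[int]string{}
    containersToKeep := map[string]int{"pause-container-id": -1}
    initContainersToKeep := map[string]int{"init-container-id": 0}

    // mirror of the check at the end of computePodContainerChanges()
    if !createPodInfraContainer && len(containersToStart) == 0 && len(containersToKeep) == 1 {
        containersToKeep = map[string]int{}
        initContainersToKeep = map[string]int{}
    }
    fmt.Println(len(containersToKeep), len(initContainersToKeep)) // 0 0: the pod will be torn down
}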

Other methods:

APIVersion(): APIVersion() returns the version of the Docker API in use.

DeleteContainer(): DeleteContainer() deletes a single container.

ExecInContainer(): ExecInContainer() executes a command inside a container.

GarbageCollect(): GarbageCollect() delegates to containerGC's GarbageCollect() to reclaim "dead" containers.

GetContainerLogs(): GetContainerLogs() fetches the logs of the container identified by containerID within a pod.

GetKubeletDockerContainers(): GetKubeletDockerContainers() lists either only running containers or containers in every state.

GetNetNS(): GetNetNS() returns the network namespace of the container identified by containerID, read from "/proc/%v/ns/net" (see the sketch at the end of this section).

GetPodContainerID(): GetPodContainerID() returns the ID of a pod's pause container.

GetPodStatus(): GetPodStatus() derives the status of the whole pod from the statuses of its containers.

GetPods(): GetPods() returns the list of pods reconstructed from the Docker containers on the node.

KillContainerInPod(): KillContainerInPod() kills a specific container within a pod.

KillPod(): KillPod() tears down the entire pod.

ListImages(): ListImages() lists the images present on the node.

PullImage(): PullImage() pulls an image.

RemoveImage(): RemoveImage() removes an image.

Status(): Status() returns the status of the Docker daemon (dockerd).

createPodInfraContainer(): createPodInfraContainer() creates the pause container; SyncPod() calls it when the infra container must be (re)started.

runContainerInPod(): runContainerInPod() runs a container inside a pod.

tryContainerStart(): tryContainerStart() pulls the image if needed and then starts the container; SyncPod() calls it for both init and regular containers.
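As noted under GetNetNS() above, the network namespace is resolved via the proc filesystem. A minimal sketch of that lookup, assuming the container's init PID has already been obtained from an InspectContainer call (netnsPath is an invented helper name for this example):

package main

import "fmt"

// netnsPath builds the proc path that GetNetNS() reads the namespace from.
func netnsPath(pid int) string {
    return fmt.Sprintf("/proc/%v/ns/net", pid)
}

func main() {
    fmt.Println(netnsPath(4242)) // /proc/4242/ns/net
}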