We finally reach the syncPodFn call. syncPodFn is a field of podWorkers that is passed in when podWorkers is constructed; in practice it is the kubelet.syncPod method.
func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) {
	...
			err = p.syncPodFn(syncPodOptions{
				mirrorPod:      update.MirrorPod,
				pod:            update.Pod,
				podStatus:      status,
				killPodOptions: update.KillPodOptions,
				updateType:     update.UpdateType,
			})
	...
}
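For context, syncPodFn is simply a field on podWorkers that gets filled in when the kubelet constructs them. A minimal sketch of the wiring, abridged from the same version of /pkg/kubelet/pod_workers.go and /pkg/kubelet/kubelet.go:
// pkg/kubelet/pod_workers.go (abridged)
type syncPodFnType func(options syncPodOptions) error

type podWorkers struct {
	...
	// the function to run to sync a single pod; in practice this is Kubelet.syncPod
	syncPodFn syncPodFnType
}

// pkg/kubelet/kubelet.go, NewMainKubelet (abridged)
klet.podWorkers = newPodWorkers(klet.syncPod, kubeDeps.Recorder, klet.workQueue, ...)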
Pod sync(Kubelet.syncPod)
1 If this is a pod creation event, record some pod latency related metrics;
2 Generate a v1.PodStatus object; a pod's phase is one of Pending, Running, Succeeded, Failed or Unknown (the constants are sketched right after this list);
3 Once the PodStatus has been generated, send it to the pod status manager;
4 Run a series of admission handlers to make sure the pod has the correct security permissions;
5 The kubelet creates cgroups for the pod;
6 Create the pod data directories under the kubelet root dir (/var/lib/kubelet by default): the pod dir pods/<podUID>, the volumes dir $poddir/volumes and the plugins dir $poddir/plugins;
7 The volume manager waits for the volumes to be attached and mounted;
8 Fetch the secrets listed in Spec.ImagePullSecrets from the apiserver and inject them into the container;
9 The container runtime creates the containers.
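For reference, the phases listed in step 2 are the v1.PodPhase constants from k8s.io/api/core/v1, copied here for convenience:
// k8s.io/api/core/v1/types.go
const (
	PodPending   PodPhase = "Pending"
	PodRunning   PodPhase = "Running"
	PodSucceeded PodPhase = "Succeeded"
	PodFailed    PodPhase = "Failed"
	PodUnknown   PodPhase = "Unknown"
)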
Since the code is fairly long, only the key method and function calls are shown here; the code lives in /pkg/kubelet/kubelet.go
func (kl *Kubelet) syncPod(o syncPodOptions) error {
	//1. If this is a pod creation event, record some pod latency related metrics
	// Record pod worker start latency if being created
	// TODO: make pod workers record their own latencies
	if updateType == kubetypes.SyncPodCreate {
		if !firstSeenTime.IsZero() {
			// This is the first time we are syncing the pod. Record the latency
			// since kubelet first saw the pod if firstSeenTime is set.
			metrics.PodWorkerStartDuration.Observe(metrics.SinceInSeconds(firstSeenTime))
		} else {
			klog.V(3).Infof("First seen time not recorded for pod %q", pod.UID)
		}
	}
	//2. Generate a v1.PodStatus object for the pod
	apiPodStatus := kl.generateAPIPodStatus(pod, podStatus)
	//4. Run a series of admission handlers to make sure the pod has the correct security permissions
	runnable := kl.canRunPod(pod)
	...
	//3. Once the PodStatus has been generated, send it to the pod status manager
	kl.statusManager.SetPodStatus(pod, apiPodStatus)
	//5. The kubelet creates cgroups for the pod
	if !kl.podIsTerminated(pod) {
		if !(podKilled && pod.Spec.RestartPolicy == v1.RestartPolicyNever) {
			if !pcm.Exists(pod) {
				if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
					klog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err)
				}
				if err := pcm.EnsureExists(pod); err != nil {
					kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err)
					return fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err)
				}
			}
		}
	}
	//6 Make data directories for the pod (makePodDataDirs is sketched right after this function)
	if err := kl.makePodDataDirs(pod); err != nil {
		kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err)
		klog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err)
		return err
	}
	// Volume manager will not mount volumes for terminated pods
	if !kl.podIsTerminated(pod) {
		//7 The volume manager waits for the volumes to attach/mount to complete
		// (it only waits here; the mounting itself is not performed in this code path)
		// Wait for volumes to attach/mount
		if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil {
			kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to attach or mount volumes: %v", err)
			klog.Errorf("Unable to attach or mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
			return err
		}
	}
	//8 Fetch the secrets listed in Spec.ImagePullSecrets from the apiserver and inject them into the container
	// Some pods carry ImagePullSecrets, used to log in to the image registry when pulling images
	// Fetch the pull secrets for the pod
	pullSecrets := kl.getPullSecretsForPod(pod)
	//9 The container runtime creates the containers
	// Call the container runtime's SyncPod callback
	result := kl.containerRuntime.SyncPod(pod, podStatus, pullSecrets, kl.backOff)
}
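Step 6, makePodDataDirs, is small enough to show in full. Roughly (from /pkg/kubelet/kubelet_pods.go; the directories live under the kubelet root dir, /var/lib/kubelet by default):
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
	uid := pod.UID
	// <root-dir>/pods/<podUID>
	if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
		return err
	}
	// <root-dir>/pods/<podUID>/volumes
	if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
		return err
	}
	// <root-dir>/pods/<podUID>/plugins
	if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
		return err
	}
	return nil
}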
Runtime creates the containers (kubeGenericRuntimeManager.SyncPod)
1 Compute the sandbox and container changes (the resulting podActions struct is sketched after this list);
2 If the sandbox has changed, kill the pod;
3 Kill any running containers in the pod that should not be kept;
4 Create a sandbox if one is needed;
5 Start the ephemeral containers (each via a small start helper, sketched after the code below);
6 Start the init containers;
7 Start the regular application containers.
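The result of step 1, computePodActions, is a podActions struct that drives everything else in SyncPod. Roughly (abridged from the same file):
// podActions keeps the information on what to do for a pod (abridged)
type podActions struct {
	KillPod       bool   // stop all running containers and the sandbox for the pod
	CreateSandbox bool   // a new sandbox needs to be created
	SandboxID     string // id of the existing sandbox, used when starting containers
	Attempt       uint32 // attempt number for creating sandboxes for the pod

	NextInitContainerToStart   *v1.Container
	ContainersToStart          []int // indexes into pod.Spec.Containers
	ContainersToKill           map[kubecontainer.ContainerID]containerToKillInfo
	EphemeralContainersToStart []int // indexes into pod.Spec.EphemeralContainers
}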
The code is in /pkg/kubelet/kuberuntime/kuberuntime_manager.go
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
	// Step 1: Compute sandbox and container changes.
	podContainerChanges := m.computePodActions(pod, podStatus)
	// Step 2: Kill the pod if the sandbox has changed.
	if podContainerChanges.KillPod {
		killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
	} else {
		// Step 3: kill any running containers in this pod which are not to keep.
		for containerID, containerInfo := range podContainerChanges.ContainersToKill {
			if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil {
			}
		}
	}
	// Step 4: Create a sandbox for the pod if necessary.
	podSandboxID := podContainerChanges.SandboxID
	if podContainerChanges.CreateSandbox {
		podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt)
	}
	// Step 5: start ephemeral containers
	if utilfeature.DefaultFeatureGate.Enabled(features.EphemeralContainers) {
		for _, idx := range podContainerChanges.EphemeralContainersToStart {
			start("ephemeral container", ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
		}
	}
	// Step 6: start the init container.
	if container := podContainerChanges.NextInitContainerToStart; container != nil {
		// Start the next init container.
		if err := start("init container", containerStartSpec(container)); err != nil {
			return
		}
	}
	// Step 7: start containers in podContainerChanges.ContainersToStart.
	for _, idx := range podContainerChanges.ContainersToStart {
		start("container", containerStartSpec(&pod.Spec.Containers[idx]))
	}
	return
}
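The start(...) calls in steps 5-7 go through a small helper closure defined near the top of SyncPod. A simplified sketch of what it does (check the crash-loop back-off, then call startContainer; the real closure also records sync results and emits events):
start := func(typeName string, spec *startSpec) error {
	// respect the crash-loop back-off before (re)starting the container
	isInBackOff, msg, err := m.doBackOff(pod, spec.container, podStatus, backOff)
	if isInBackOff {
		klog.V(4).Infof("Backing Off restarting %v %+v in pod %v: %s", typeName, spec.container, format.Pod(pod), msg)
		return err
	}
	// create and start the container inside the pod sandbox via the CRI
	if msg, err := m.startContainer(podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil {
		return fmt.Errorf("failed to start %v: %v, message: %q", typeName, err, msg)
	}
	return nil
}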
Create the sandbox