ImagePullBackOff on first Deployment of Pods #10
Comments
@keviiin38 I have encountered this issue and the way I fixed it was to add the image pull secret explicitly to the pod spec:

imagePullSecrets:
  - name: image-pull-secret

Would be nice for imagepullsecret-patcher to be able to handle this, though :)
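If the affected workload is itself managed with Terraform (as in the config further down in this thread), the same fix can be expressed as an image_pull_secrets block in the pod template spec. This is only a minimal sketch, assuming a hypothetical "my-app" Deployment and that the patched secret is named image-pull-secret:

# Hypothetical workload Deployment that references the pull secret directly,
# so its first rollout does not depend on the service account having been patched yet.
resource "kubernetes_deployment" "my_app" {
  metadata {
    name      = "my-app"
    namespace = "default"
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        app = "my-app"
      }
    }
    template {
      metadata {
        labels = {
          app = "my-app"
        }
      }
      spec {
        # Equivalent of imagePullSecrets in a plain Kubernetes manifest.
        image_pull_secrets {
          name = "image-pull-secret"
        }
        container {
          name  = "my-app"
          image = "registry.example.com/my-app:latest" # hypothetical private image
        }
      }
    }
  }
}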
I think it's a classic async distributed system design problem... possibly, the imagepullsecret-patcher could deploy an admission controller to patch newly created service accounts? 🤔
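For illustration only, the registration side of such an admission webhook could be sketched in the same Terraform style as the config below; the webhook service name, namespace, and path here are hypothetical, since imagepullsecret-patcher does not currently ship a webhook server:

# Hypothetical sketch: register a mutating webhook that would patch imagePullSecrets
# into service accounts at creation time, before any pod can reference them.
resource "kubernetes_mutating_webhook_configuration_v1" "patch_service_accounts" {
  metadata {
    name = "imagepullsecret-patcher"
  }
  webhook {
    name                      = "serviceaccounts.imagepullsecret-patcher.example.com"
    admission_review_versions = ["v1"]
    side_effects              = "None"
    failure_policy            = "Ignore"
    client_config {
      service {
        name      = "imagepullsecret-patcher-webhook" # hypothetical webhook service
        namespace = "kube-system"
        path      = "/mutate"
      }
    }
    rule {
      api_groups   = [""]
      api_versions = ["v1"]
      operations   = ["CREATE"]
      resources    = ["serviceaccounts"]
      scope        = "Namespaced"
    }
  }
}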
You could try setting
This is almost 2 years old - so I know it's a long shot, but I'm curious whether anyone has another workaround for this. I'm using Terraform to set this up (adapted in part from another issue in this repo), but I also get an ImagePullBackOff on the first pod. My loop duration is 30s at the moment.

locals {
  imagepullsecrets_patcher_name      = "imagepullsecrets-patcher"
  imagepullsecrets_patcher_namespace = "kube-system"
}

# Service account the patcher itself runs as.
resource "kubernetes_service_account" "image_pull_secrets" {
  metadata {
    name      = local.imagepullsecrets_patcher_name
    namespace = local.imagepullsecrets_patcher_namespace
  }
  image_pull_secret {
    name = local.imagepullsecrets_patcher_name
  }
}

# RBAC: the patcher needs to manage secrets and service accounts cluster-wide.
resource "kubernetes_cluster_role" "image_pull_secrets" {
  metadata {
    name = local.imagepullsecrets_patcher_name
    labels = {
      k8s-app = local.imagepullsecrets_patcher_name
    }
  }
  rule {
    api_groups = [""]
    resources  = ["secrets", "serviceaccounts"]
    verbs      = ["list", "get", "patch", "create", "delete"]
  }
  rule {
    api_groups = [""]
    resources  = ["namespaces"]
    verbs      = ["list", "get"]
  }
}

resource "kubernetes_cluster_role_binding" "image_pull_secrets" {
  metadata {
    name = local.imagepullsecrets_patcher_name
  }
  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = kubernetes_cluster_role.image_pull_secrets.metadata[0].name
  }
  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.image_pull_secrets.metadata[0].name
    namespace = kubernetes_service_account.image_pull_secrets.metadata[0].namespace
  }
}

# Source secret that the patcher copies into every namespace.
resource "kubernetes_secret" "image_pull_secrets" {
  metadata {
    name      = local.imagepullsecrets_patcher_name
    namespace = local.imagepullsecrets_patcher_namespace
  }
  # We write a JSON file similar to what we have in ~/.docker/config.json to allow access to private registries.
  type = "kubernetes.io/dockerconfigjson"
  data = {
    ".dockerconfigjson" = jsonencode({
      "auths" : {
        (var.url) : {
          auth = var.auth_base64
        }
      }
    })
  }
}

resource "kubernetes_deployment" "image_pull_secrets" {
  metadata {
    name      = local.imagepullsecrets_patcher_name
    namespace = local.imagepullsecrets_patcher_namespace
    labels = {
      name = local.imagepullsecrets_patcher_name
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        name = local.imagepullsecrets_patcher_name
      }
    }
    template {
      metadata {
        labels = {
          name = local.imagepullsecrets_patcher_name
        }
      }
      spec {
        automount_service_account_token = true
        service_account_name            = kubernetes_service_account.image_pull_secrets.metadata[0].name
        container {
          name  = "imagepullsecret-patcher"
          image = "quay.io/titansoft/imagepullsecret-patcher:v0.14"
          resources {
            requests = {
              cpu    = "100m"
              memory = "15Mi"
            }
            limits = {
              cpu    = "200m"
              memory = "30Mi"
            }
          }
          env {
            name  = "CONFIG_ALLSERVICEACCOUNT"
            value = "true"
          }
          env {
            name  = "CONFIG_EXCLUDED_NAMESPACES"
            value = join(",", var.excluded_namespaces)
          }
          env {
            name  = "CONFIG_LOOP_DURATION"
            value = var.check_interval
          }
          env {
            name  = "CONFIG_SECRETNAME"
            value = kubernetes_secret.image_pull_secrets.metadata[0].name
          }
          env {
            name  = "CONFIG_DOCKERCONFIGJSONPATH"
            value = "/app/secrets/.dockerconfigjson"
          }
          volume_mount {
            name       = "src-dockerconfigjson"
            mount_path = "/app/secrets"
            read_only  = true
          }
        }
        volume {
          name = "src-dockerconfigjson"
          secret {
            secret_name = kubernetes_secret.image_pull_secrets.metadata[0].name
          }
        }
      }
    }
  }
}
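(Note: the variables referenced above (var.url, var.auth_base64, var.excluded_namespaces, var.check_interval) are not declared in the snippet. Declarations along these lines would make it self-contained; the types and defaults are only assumptions, not taken from the original config.)

# Assumed variable declarations for the snippet above; the names come from the
# references in the config, the types and defaults are illustrative.
variable "url" {
  description = "Private registry URL used as the key under auths in .dockerconfigjson"
  type        = string
}

variable "auth_base64" {
  description = "base64-encoded username:password for the private registry"
  type        = string
  sensitive   = true
}

variable "excluded_namespaces" {
  description = "Namespaces the patcher should skip (CONFIG_EXCLUDED_NAMESPACES)"
  type        = list(string)
  default     = []
}

variable "check_interval" {
  description = "Reconcile interval passed to CONFIG_LOOP_DURATION, e.g. \"30s\""
  type        = string
  default     = "30s"
}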
This project solves "ImagePullBackOff on first Deployment of Pods": https://github.com/cccfs/kube-credential-helper
First of all, thanks for this useful tool!
I use it to patch all service accounts for our private registry!
But I have a little problem: when creating a Deployment that uses a custom service account, I end up stuck with an ImagePullBackOff error, with an Access Denied from my private registry.
I think the time lapse between the moment the Pod is scheduled with the (not yet patched) service account and the moment the service account is patched by your tool is too long, so I end up with a Pod stuck in an ImagePullBackOff error. If you have any idea or solution I'd like to know; likewise if the problem is related to Kubernetes rather than to your tool.
Deleting the stuck Pod solves the issue, but it is not a good solution for our CI workflow.
Thanks 👍