Skip to content

Commit 16a6227

Browse files
committed
switch test-cmd tests to mostly use --kubeconfig
1 parent 2804dd4 commit 16a6227

File tree

7 files changed

+31
-30
lines changed

7 files changed

+31
-30
lines changed

hack/test-cmd.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,7 @@ for test in "${tests[@]}"; do
104104
done
105105

106106
os::log::debug "Metrics information logged to ${LOG_DIR}/metrics.log"
107-
oc get --raw /metrics --config="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
107+
oc get --raw /metrics --kubeconfig="${MASTER_CONFIG_DIR}/admin.kubeconfig"> "${LOG_DIR}/metrics.log"
108108

109109
if [[ -n "${failed:-}" ]]; then
110110
exit 1

test/cmd/config.sh

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,15 +34,15 @@ os::cmd::expect_success_and_not_text 'oc get bc' 'does not exist'
3434
# need some level of default (both upstream and here) to get the pretty auth message because you fail on namespace first.
3535
os::cmd::expect_failure_and_text 'KUBERNETES_MASTER=anything env -u KUBERNETES_SERVICE_HOST oc get buildconfigs --user="test"' 'auth info "test" does not exist'
3636

37-
os::cmd::expect_failure_and_text 'oc get bc --config=missing' 'missing: no such file or directory'
37+
os::cmd::expect_failure_and_text 'oc get bc --kubeconfig=missing' 'missing: no such file or directory'
3838

3939
# define temp location for new config
4040
NEW_CONFIG_LOC="${BASETMPDIR}/new-config.yaml"
4141

4242
# make sure non-existing --cluster and --user can still be set
43-
os::cmd::expect_success_and_text "oc config set-context new-context-name --cluster=missing-cluster --user=missing-user --namespace=default --config='${NEW_CONFIG_LOC}'" 'Context "new-context-name" '
44-
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST -u KUBECONFIG -u KUBERNETES_MASTER oc get buildconfigs --config='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
45-
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST oc get buildconfigs --config='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
43+
os::cmd::expect_success_and_text "oc config set-context new-context-name --cluster=missing-cluster --user=missing-user --namespace=default --kubeconfig='${NEW_CONFIG_LOC}'" 'Context "new-context-name" '
44+
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST -u KUBECONFIG -u KUBERNETES_MASTER oc get buildconfigs --kubeconfig='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
45+
os::cmd::expect_failure_and_text "env -u KUBERNETES_SERVICE_HOST oc get buildconfigs --kubeconfig='${NEW_CONFIG_LOC}'" 'Missing or incomplete configuration info'
4646
)
4747
echo "config error handling: ok"
4848
os::test::junit::declare_suite_end

test/cmd/login.sh

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -44,10 +44,10 @@ if [[ "${API_SCHEME}" == "https" ]]; then
4444
fi
4545

4646
# remove self-provisioner role from user and test login prompt before creating any projects
47-
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
47+
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
4848
os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. Contact your system administrator to request a project"
4949
# make sure standard login prompt is printed once self-provisioner status is restored
50-
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
50+
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
5151
os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u no-project-test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
5252
# make sure `oc login` fails with unauthorized error
5353
os::cmd::expect_failure_and_text 'oc login <<< \n' 'Login failed \(401 Unauthorized\)'
@@ -87,7 +87,7 @@ os::cmd::expect_failure_and_text 'oc get pods' '"system:anonymous" cannot list p
8787

8888
# make sure we report an error if the config file we pass is not writable
8989
# Does not work inside of a container, determine why and reenable
90-
# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--config=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
90+
# os::cmd::expect_failure_and_text "oc login '${KUBERNETES_MASTER}' -u test -p test '--kubeconfig=${templocation}/file' --insecure-skip-tls-verify" 'KUBECONFIG is set to a file that cannot be created or modified'
9191
echo "login warnings: ok"
9292

9393
# login and create serviceaccount and test login and logout with a service account token
@@ -106,12 +106,13 @@ os::cmd::expect_success 'oc get projects'
106106
os::cmd::expect_success 'oc project project-foo'
107107
os::cmd::expect_success_and_text 'oc config view' "current-context.+project-foo/${API_HOST}:${API_PORT}/test-user"
108108
os::cmd::expect_success_and_text 'oc whoami' 'test-user'
109-
os::cmd::expect_success_and_text "oc whoami --config='${login_kubeconfig}'" 'system:admin'
109+
os::cmd::expect_success_and_text "oc whoami --kubeconfig='${login_kubeconfig}'" 'system:admin'
110+
os::cmd::expect_success_and_text "oc whoami --kubeconfig='${login_kubeconfig}'" 'system:admin'
110111
os::cmd::expect_success_and_text 'oc whoami -t' '.'
111112
os::cmd::expect_success_and_text 'oc whoami -c' '.'
112113

113-
# test config files from the --config flag
114-
os::cmd::expect_success "oc get services --config='${login_kubeconfig}'"
114+
# test config files from the --kubeconfig flag
115+
os::cmd::expect_success "oc get services --kubeconfig='${login_kubeconfig}'"
115116
# test config files from env vars
116117
os::cmd::expect_success "KUBECONFIG='${login_kubeconfig}' oc get services"
117118
os::test::junit::declare_suite_end

test/cmd/policy.sh

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -251,28 +251,28 @@ os::cmd::expect_success 'oc adm policy add-cluster-role-to-user alternate-cluste
251251
# switch to test user to be sure that default project admin policy works properly
252252
new_kubeconfig="${workingdir}/tempconfig"
253253
os::cmd::expect_success "oc config view --raw > $new_kubeconfig"
254-
os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --config=${new_kubeconfig}"
254+
os::cmd::expect_success "oc login -u alternate-cluster-admin-user -p anything --kubeconfig=${new_kubeconfig}"
255255

256256
# alternate-cluster-admin should default to having star rights, so he should be able to update his role to that
257257
os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
258258
resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
259259
cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
260260
os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
261-
os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
261+
os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml"
262262

263263
# alternate-cluster-admin can restrict himself to less groups (no star)
264264
os::cmd::try_until_text "oc policy who-can update clusterrroles" "alternate-cluster-admin-user"
265265
resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
266266
cp ${OS_ROOT}/test/testdata/bootstrappolicy/cluster_admin_without_apigroups.yaml ${workingdir}
267267
os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/cluster_admin_without_apigroups.yaml
268-
os::cmd::expect_success "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
268+
os::cmd::expect_success "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/cluster_admin_without_apigroups.yaml"
269269

270270
# alternate-cluster-admin should NOT have the power add back star now (anything other than star is considered less so this mimics testing against no groups)
271271
os::cmd::try_until_failure "oc policy who-can update hpa.autoscaling | grep -q alternate-cluster-admin-user"
272272
resourceversion=$(oc get clusterrole/alternate-cluster-admin -o=jsonpath="{.metadata.resourceVersion}")
273273
cp ${OS_ROOT}/test/testdata/bootstrappolicy/alternate_cluster_admin.yaml ${workingdir}
274274
os::util::sed "s/RESOURCE_VERSION/${resourceversion}/g" ${workingdir}/alternate_cluster_admin.yaml
275-
os::cmd::expect_failure_and_text "oc replace --config=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
275+
os::cmd::expect_failure_and_text "oc replace --kubeconfig=${new_kubeconfig} clusterrole/alternate-cluster-admin -f ${workingdir}/alternate_cluster_admin.yaml" "attempt to grant extra privileges"
276276

277277
# This test validates cluster level policy for serviceaccounts
278278
# ensure service account cannot list pods at the namespace level

test/cmd/status.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ os::cmd::try_until_text "oc get projects -o jsonpath='{.items}'" "^\[\]$"
2727
os::cmd::expect_success 'oc logout'
2828

2929
# remove self-provisioner role from user and test login prompt before creating any projects
30-
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
30+
os::cmd::expect_success "oc adm policy remove-cluster-role-from-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
3131

3232
# login as 'test-user'
3333
os::cmd::expect_success "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything"
@@ -37,7 +37,7 @@ os::cmd::expect_success_and_text 'oc status' "You don't have any projects. Conta
3737
os::cmd::expect_success_and_text 'oc status --all-namespaces' "Showing all projects on server"
3838
# make sure standard login prompt is printed once self-provisioner status is restored
3939
os::cmd::expect_success "oc logout"
40-
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --config='${login_kubeconfig}'"
40+
os::cmd::expect_success "oc adm policy add-cluster-role-to-group self-provisioner system:authenticated:oauth --kubeconfig='${login_kubeconfig}'"
4141
os::cmd::expect_success_and_text "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u test-user -p anything" "You don't have any projects. You can try to create a new project, by running"
4242

4343
# make sure `oc status` re-uses the correct "no projects" message from `oc login`

test/cmd/volumes.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ os::cmd::expect_success 'oc set volume dc/test-deployment-config --add --name=vo
2121
os::cmd::expect_success 'oc set volume dc/test-deployment-config --add --name=vol2 --type=emptydir -m /opt'
2222
os::cmd::expect_failure_and_text "oc set volume dc/test-deployment-config --add --name=vol1 --type=secret --secret-name='\$ecret' -m /data" 'overwrite to replace'
2323
os::cmd::expect_success "oc set volume dc/test-deployment-config --add --name=vol10 --secret-name='my-secret' -m /data-2"
24-
os::cmd::expect_success "oc set volume dc/test-deployment-config --add --name=vol11 --configmap-name='my-configmap' -m /data-21"
24+
os::cmd::expect_success "oc set volume dc/test-deployment-config --add --name=vol11 --configmap-name='my-configmap' -m /data-21"
2525
os::cmd::expect_success_and_text 'oc get dc/test-deployment-config -o jsonpath={.spec.template.spec.containers[0].volumeMounts}' '/data-21'
2626
os::cmd::expect_success_and_text 'oc get dc/test-deployment-config -o jsonpath={.spec.template.spec.volumes[4].configMap}' 'my-configmap'
2727
os::cmd::expect_success 'oc set volume dc/test-deployment-config --add --name=vol1 --type=emptyDir -m /data --overwrite'
@@ -48,7 +48,7 @@ os::cmd::expect_success 'oc set volume dc/test-deployment-config --remove --conf
4848

4949
os::cmd::expect_failure "oc set volume dc/test-deployment-config --add -t 'secret' --secret-name='asdf' --default-mode '888'"
5050

51-
os::cmd::expect_success "oc set volume dc/test-deployment-config --add -t 'configmap' --configmap-name='asdf' --default-mode '123'"
51+
os::cmd::expect_success "oc set volume dc/test-deployment-config --add -t 'configmap' --configmap-name='asdf' --default-mode '123'"
5252
os::cmd::expect_success_and_text 'oc get dc/test-deployment-config -o jsonpath={.spec.template.spec.volumes[0]}' '83'
5353
os::cmd::expect_success 'oc set volume dc/test-deployment-config --remove --confirm'
5454

@@ -87,7 +87,7 @@ spec:
8787

8888
os::cmd::expect_success_and_text 'oc get dc simple-dc' 'simple-dc'
8989
os::cmd::expect_success 'oc create cm cmvol'
90-
os::cmd::expect_success 'oc set volume dc/simple-dc --add --name=cmvolume --type=configmap --configmap-name=cmvol'
90+
os::cmd::expect_success 'oc set volume dc/simple-dc --add --name=cmvolume --type=configmap --configmap-name=cmvol'
9191
os::cmd::expect_success_and_text 'oc set volume dc/simple-dc' 'configMap/cmvol as cmvolume'
9292

9393
# command alias

test/extended/alternate_certs.sh

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -52,24 +52,24 @@ OPENSHIFT_ON_PANIC=crash openshift start master \
5252
OS_PID=$!
5353

5454
# Wait for the server to be up
55-
os::cmd::try_until_success "oc whoami --config=master/admin.kubeconfig"
55+
os::cmd::try_until_success "oc whoami --kubeconfig=master/admin.kubeconfig"
5656

5757
# Verify the server is serving with the custom and internal CAs, and that the generated ca-bundle.crt works for both
5858
os::cmd::expect_success_and_text "curl -vvv https://localhost:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'my-custom-ca'
5959
os::cmd::expect_success_and_text "curl -vvv https://127.0.0.1:${API_PORT} --cacert master/ca-bundle.crt -s 2>&1" 'openshift-signer'
6060

6161
# Verify kubeconfigs have connectivity to hosts serving with custom and generated certs
62-
os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig" 'system:admin'
63-
os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
64-
os::cmd::expect_success_and_text "oc whoami --config=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
62+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig" 'system:admin'
63+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://localhost:${API_PORT}" 'system:admin'
64+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/admin.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:admin'
6565

66-
os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig" 'system:openshift-master'
67-
os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
68-
os::cmd::expect_success_and_text "oc whoami --config=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
66+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig" 'system:openshift-master'
67+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://localhost:${API_PORT}" 'system:openshift-master'
68+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=master/openshift-master.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:openshift-master'
6969

70-
os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig" 'system:node:mynode'
71-
os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
72-
os::cmd::expect_success_and_text "oc whoami --config=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
70+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig" 'system:node:mynode'
71+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://localhost:${API_PORT}" 'system:node:mynode'
72+
os::cmd::expect_success_and_text "oc whoami --kubeconfig=node-mynode/node.kubeconfig --server=https://127.0.0.1:${API_PORT}" 'system:node:mynode'
7373

7474
os::test::junit::declare_suite_end
7575

0 commit comments

Comments
 (0)