This repository was archived by the owner on Nov 9, 2020. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathpks-params.sample.yml
More file actions
236 lines (207 loc) · 12.9 KB
/
pks-params.sample.yml
File metadata and controls
236 lines (207 loc) · 12.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
# Pipeline resource configuration
pivnet_token: Xsd...23KS # Pivnet token for downloading resources from Pivnet. Find this token at https://network.pivotal.io/users/dashboard/edit-profile
opsman_major_minor_version: '^2\.1\.2$' # PCF Ops Manager minor version to track (regex; single-quoted so the backslashes/anchors stay a literal string)
pks_major_minor_version: '^1\..*$' # PKS version to track (regex). Can pick 1.0 (or v1.1 once it becomes GA and available on pivnet)
# vCenter configuration (the vCenter that Ops Manager and BOSH will target)
vcenter_host: 10.1.1.10 # vCenter host or IP
vcenter_usr: administrator@vsphere.local # vCenter username. If the user is tied to a domain, escape the backslash, example `domain\\user`
vcenter_pwd: "k32...la!" # vCenter password
vcenter_data_center: Datacenter # vCenter datacenter name
vcenter_insecure: 1 # vCenter skip TLS cert validation; enter `1` to disable cert verification, `0` to enable verification
vcenter_ca_cert: # vCenter CA cert at the API endpoint; left empty (parses as null) — provide the cert contents when `vcenter_insecure: 0`
# Ops Manager VM configuration (VM placement, credentials, and networking)
om_vm_host: 192.168.10.5 # Optional - vCenter host to deploy Ops Manager in (same value as om_ip below in this sample — confirm that is intended for your environment)
om_data_store: vsan2 # vCenter datastore name to deploy Ops Manager in
opsman_domain_or_ip_address: opsmgr.test-cf-app.com # FQDN to access Ops Manager without protocol (will use https), ex: opsmgr.example.com
opsman_admin_username: admin # Username for Ops Manager admin account
opsman_admin_password: password123 # Password for Ops Manager admin account
om_ssh_pwd: pas..d123 # SSH password for Ops Manager (ssh user is ubuntu)
om_decryption_pwd: pas...rd123 # Decryption password for Ops Manager exported settings
om_ntp_servers: us.pool.ntp.org # Comma-separated list of NTP servers
om_dns_servers: 10.1.1.2 # Comma-separated list of DNS servers
om_gateway: 192.168.10.1 # Gateway for the Ops Manager network
om_netmask: 255.255.255.192 # Netmask for the Ops Manager network
om_ip: 192.168.10.5 # IP to assign to the Ops Manager VM
om_vm_network: infra # vCenter network name to use to deploy Ops Manager in
om_vm_name: opsmgr-nsx # Name to use for the Ops Manager VM
opsman_disk_type: "thin" # Disk type for the Ops Manager VM (thick|thin)
om_vm_power_state: true # Whether to power on the Ops Manager VM after creation
# vCenter Cluster or Resource Pool to use to deploy Ops Manager.
# Possible formats:
#   cluster:       /<Data Center Name>/host/<Cluster Name>
#   resource pool: /<Data Center Name>/host/<Cluster Name>/Resources/<Resource Pool Name>
om_resource_pool: rpcluster-1
ephemeral_storage_names: vsan # Ephemeral storage names in vCenter for use by PCF
persistent_storage_names: vsan # Persistent storage names in vCenter for use by PCF
bosh_vm_folder: "nsx_vms" # vSphere datacenter folder (such as pcf_vms) where VMs will be placed
bosh_template_folder: "nsx_templates" # vSphere datacenter folder (such as pcf_templates) where templates will be placed
bosh_disk_path: "nsx_disk" # vSphere datastore folder (such as pcf_disk) where attached disk images will be created
trusted_certificates: # Trusted certificates to be deployed along with all VMs provisioned by BOSH; left empty (null) here
vm_disk_type: "thick" # Disk type for BOSH-provisioned VMs (thick|thin)
# AZ configuration for Ops Director
az_1_name: PKS # Logical name of the availability zone. No spaces or special characters.
az_1_cluster_name: Cluster1 # Name of the cluster in vCenter for PKS
az_1_rp_name: pks-rp # Resource pool name in vCenter for PKS
ntp_servers: us.pool.ntp.org # Comma-separated list of NTP servers to use for VMs deployed by BOSH
enable_vm_resurrector: true # Whether to enable the BOSH VM resurrector
max_threads: 30 # Max thread count for deploying VMs
# Network configuration for Ops Director
icmp_checks_enabled: false # Enable or disable ICMP checks
infra_network_name: "INFRASTRUCTURE" # Logical name of the infrastructure network
infra_vsphere_network: infra # vCenter infrastructure network name
infra_nw_cidr: 192.168.20.0/24 # Infrastructure network CIDR, ex: 10.0.0.0/22
infra_excluded_range: 192.168.20.0-192.168.20.9,192.168.20.25-192.168.20.64 # Infrastructure network exclusion range
infra_nw_dns: 10.1.1.2 # Infrastructure network DNS
infra_nw_gateway: 192.168.20.1 # Infrastructure network gateway
infra_nw_azs: PKS # Comma-separated list of AZs to be associated with this network
nsx_networking_enabled: false # (true|false) to use the NSX networking feature
# Careful with the indent and the | for multi-line input:
# block-scalar content must be indented deeper than the key or YAML will not parse it.
nsx_ca_certificate: | # CA cert for NSX (sample placeholder contents below)
  -----BEGIN CERTIFICATE-----
  MIIDjDCCAnSasfasdfsfd324242342UAMIG3232GMS4wLAYDVQQD
  asfsafsafasf
  .............
  -----END CERTIFICATE-----
# Additional network for PKS clusters - set the vSphere network to an empty "" string if it is not required
pks_network_name: "PKS-k8s-clusters" # Logical switch for PKS-k8s-clusters
pks_vsphere_network: "PKS-k8s-clusters" # vCenter PKS-k8s-clusters network name for PKS - if empty quotes, then this network would be skipped
pks_nw_cidr: 192.168.40.0/24 # PKS-k8s-clusters network CIDR, ex: 10.0.0.0/22
pks_excluded_range: 192.168.40.1 # PKS-k8s-clusters network exclusion range
pks_nw_dns: 10.1.1.2 # PKS-k8s-clusters network DNS
pks_nw_gateway: 192.168.40.1 # PKS-k8s-clusters network gateway
pks_nw_azs: az1 # Comma-separated list of AZs to be associated with this network
is_service_network: true # Required: select the service network option in Ops Manager (true or false)
## NSX Mgr section
nsx_address: nsx-t-mgr.test-domain.com # Address of the NSX-T manager
nsx_username: admin # Username for NSX-T access
nsx_password: 23..d! # Password for NSX-T access
## PKS Tile section
# Create a DNS entry in the loadbalancer and DNAT/SNAT entries for the following
pks_tile_system_domain: pks.test.corp.com
# Just provide the prefix like uaa or api for domain_prefix.
# The prefix together with the system domain would be used like api.pks.test.corp.com or uaa.pks.test.corp.com
pks_tile_uaa_domain_prefix: api # Would be used for UAA as ${prefix}.${pks_system_domain}
pks_tile_cli_username: pksadmin
pks_tile_cli_password: pksadmin123
pks_tile_cli_useremail: pksadmin@corp.com
pks_tile_cert_pem: # Would be generated if left empty, or provide your own cert as a block scalar (shape shown in the commented example below)
#cert_pem: |
#  -----BEGIN CERTIFICATE-----
#  MIIDjDCCAnSasfasdfsfd324242342UAMIG3232GMS4wLAYDVQQD
#  asfsafsafasf
#  .............
#  -----END CERTIFICATE-----
pks_tile_private_key_pem: # Would be generated if left empty
pks_tile_vcenter_host: EDIT_ME
pks_tile_vcenter_usr: EDIT_ME
pks_tile_vcenter_pwd: EDIT_ME
pks_tile_vcenter_data_center: EDIT_ME
pks_tile_vcenter_cluster: EDIT_ME
pks_tile_vcenter_datastore: EDIT_ME
pks_tile_vm_folder: EDIT_ME
pks_tile_singleton_job_az: az1 # AZ for singleton jobs — verify it matches an AZ defined above
pks_tile_nw_azs: az1
pks_tile_deployment_network_name: INFRASTRUCTURE # Can be any network created already, default: INFRASTRUCTURE
pks_tile_cluster_service_network_name: PKS-k8s-clusters # Should match the PKS-k8s-clusters network created already
# ALERT - The underlying Edge instances should be large, 8 vCPUs.
# Otherwise the pks-nsx-t-precheck errand would fail.
pks_tile_nsx_skip_ssl_verification: true # can be true or false
pks_tile_t0_router_id: EDIT_ME # UUID of the T0 router to be used for PKS
pks_tile_ip_block_id: EDIT_ME # UUID of the container IP block in NSX Mgr to be used for PKS
pks_tile_floating_ip_pool_id: EDIT_ME # UUID of the external floating IP pool in NSX Mgr to be used for PKS
pks_tile_nodes_ip_block_id: EDIT_ME # UUID of the node IP block in NSX Mgr to be used for PKS nodes in v1.1
# Syslog flags
pks_tile_syslog_migration_enabled: disabled # can be set to 'enabled'; if 'disabled' all syslog properties below are ignored
pks_tile_syslog_address: # example: 101.10.10.10
pks_tile_syslog_port: # example: 0
pks_tile_syslog_transport_protocol: # example: tcp
pks_tile_syslog_tls_enabled: # example: true
pks_tile_syslog_peer: # example: *.test.corp.com
pks_tile_syslog_ca_cert: # CA cert contents for syslog TLS; left empty (null)
# Allow public IP
pks_tile_allow_public_ip: true # leave it empty or false to turn off public IPs
# vRealize Log Insight (vRLI) flags
pks_tile_vrli_enabled: false # Change to true and fill the following fields to use vRLI
pks_tile_vrli_host: # vRLI host; left empty (null)
pks_tile_vrli_use_ssl: true
pks_tile_vrli_skip_cert_verify: true
pks_tile_vrli_ca_cert: # cert contents
pks_tile_vrli_rate_limit: 0
# PKS proxy settings
pks_tile_enable_http_proxy: false # Change to true and fill the following fields to use a proxy
pks_tile_http_proxy_url:
pks_tile_http_proxy_user:
pks_tile_http_proxy_password:
pks_tile_https_proxy_url:
pks_tile_https_proxy_user:
pks_tile_https_proxy_password:
pks_tile_no_proxy:
# Use LDAP for PKS UAA
# Default is to use internal UAA
pks_tile_uaa_use_ldap: false # Change to true and fill the following fields to use LDAP
pks_tile_ldap_url:
pks_tile_ldap_user:
pks_tile_ldap_password:
pks_tile_ldap_search_base:
pks_tile_ldap_group_search_base:
pks_tile_ldap_server_ssl_cert:
pks_tile_ldap_server_ssl_cert_alias:
pks_tile_ldap_email_domains:
pks_tile_ldap_first_name_attribute:
pks_tile_ldap_last_name_attribute:
# PKS Wavefront integration
pks_tile_wavefront_api_url: # Wavefront API url; empty means disabled
pks_tile_wavefront_token: # Wavefront access token
pks_tile_wavefront_alert_targets: # email IDs to send alerts to
pks_tile_telemetry: disabled # can be enabled or disabled
# Plan definitions offered by the PKS tile. Each entry is one `plan_detail`
# mapping; sequence items and their keys must be indented under the list key.
pks_tile_plan_details:
  - plan_detail:
      name: "small" # the name that appears for end users to choose
      plan_selector: plan1_selector # Don't change the value of selector - needs to be planN_selector
      is_active: true
      description: "Small plan"
      # AZ can be only a single value for PKS v1.0
      # There can be a comma-separated list for PKS v1.1 which supports multiple AZs
      az_placement: az1 # Specify the AZ in which the cluster will be created, single for PKS v1.0
      authorization_mode: rbac # rbac - Default Cluster Authorization Mode
      master_vm_type: medium
      worker_vm_type: medium
      persistent_disk_type: "10240" # Used in PKS 1.0
      master_persistent_disk_type: "10240" # Used in PKS 1.1+
      worker_persistent_disk_type: "10240" # Used in PKS 1.1+
      worker_instances: 3 # The number of K8s worker instances
      errand_vm_type: micro
      addons_spec: "" # Kubernetes yml that contains specifications of addons to run on every cluster. This is an experimental feature. Please consider carefully before applying this to your plan
      allow_privileged_containers: false # Privileged containers run with host-like permissions. Allowing your users to deploy privileged containers in clusters using this plan can create security vulnerabilities and may impact other tiles. Use with caution.
  - plan_detail:
      name: "medium" # the name that appears for end users to choose
      is_active: false
      plan_selector: plan2_selector # Don't change the value of selector - needs to be planN_selector
      description: "Medium plan"
      # AZ can be only a single value for PKS v1.0
      # There can be a comma-separated list for PKS v1.1 which supports multiple AZs
      az_placement: az1 # Specify the AZ in which the cluster will be created, single for PKS v1.0
      authorization_mode: rbac # rbac or abac - Default Cluster Authorization Mode
      master_vm_type: medium
      worker_vm_type: medium
      persistent_disk_type: "10240" # Used in PKS 1.0
      master_persistent_disk_type: "10240" # Used in PKS 1.1+
      worker_persistent_disk_type: "10240" # Used in PKS 1.1+
      worker_instances: 5 # The number of K8s worker instances
      errand_vm_type: micro
      addons_spec: "" # Kubernetes yml that contains specifications of addons to run on every cluster. This is an experimental feature. Please consider carefully before applying this to your plan
      allow_privileged_containers: false # Privileged containers run with host-like permissions. Allowing your users to deploy privileged containers in clusters using this plan can create security vulnerabilities and may impact other tiles. Use with caution.
  - plan_detail:
      name: "large" # the name that appears for end users to choose
      is_active: false
      plan_selector: plan3_selector # Don't change the value of selector - needs to be planN_selector
      # other fields are not considered when the plan is not active
# Use this for downloading PKS from an S3 bucket (uncomment and fill in to enable)
# s3_bucket: pks-tile-s3 # Required for S3. ID of the AWS S3 bucket to download Pivotal releases from
# s3_creds_access_key_id: AAa23....asdfZA # Required for S3. Access key of the AWS S3 bucket
# s3_creds_secret_access_key: s6A234...asfsadUL # Required for S3. Secret access key of the AWS S3 bucket
# s3_region: us-west-2 # The region the bucket is in. Leave it blank if not applicable (e.g. for Minio)
# s3_pks_tile_path: pivotal-container-service-(.*).pivotal # File path regex matching the tile in the S3 bucket
# Use this for downloading the PKS tile from a web server (until it becomes available on Pivnet)
# pks_tile_webserver: http://101.101.101.101:8080 # EDIT ME
# pksv11_tile: pivotal-container-service-1.1.0.pivotal # EDIT ME