From fc25b8e38e079612abbf21fdbc8aa611723bb2f6 Mon Sep 17 00:00:00 2001 From: Tim Bannister Date: Wed, 3 Jun 2020 22:43:52 +0100 Subject: [PATCH] =?UTF-8?q?Remove=20=E2=80=9CConfigure=20Your=20Cloud=20Pr?= =?UTF-8?q?ovider's=20Firewalls=E2=80=9D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removing this page as it doesn't meet the Kubernetes website content guidelines https://kubernetes.io/docs/contribute/style/content-guide/ --- .../configure-cloud-provider-firewall.md | 110 ------------------ .../web-ui-dashboard.md | 6 +- 2 files changed, 5 insertions(+), 111 deletions(-) delete mode 100644 content/en/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md diff --git a/content/en/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md b/content/en/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md deleted file mode 100644 index 385f226a98a..00000000000 --- a/content/en/docs/tasks/access-application-cluster/configure-cloud-provider-firewall.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -reviewers: -- bprashanth -- davidopp -title: Configure Your Cloud Provider's Firewalls -content_type: task -weight: 90 ---- - - - -Many cloud providers (e.g. Google Compute Engine) define firewalls that help prevent inadvertent -exposure to the internet. When exposing a service to the external world, you may need to open up -one or more ports in these firewalls to serve traffic. This document describes this process, as -well as any provider specific details that may be necessary. - - - - -## {{% heading "prerequisites" %}} - - -{{< include "task-tutorial-prereqs.md" >}} {{< version-check >}} - - - - - -## Restrict Access For LoadBalancer Service - - When using a Service with `spec.type: LoadBalancer`, you can specify the IP ranges that are allowed to access the load balancer - by using `spec.loadBalancerSourceRanges`. 
This field takes a list of IP CIDR ranges, which Kubernetes will use to configure firewall exceptions. - This feature is currently supported on Google Compute Engine, Google Kubernetes Engine, AWS Elastic Kubernetes Service, Azure Kubernetes Service, and IBM Cloud Kubernetes Service. This field will be ignored if the cloud provider does not support the feature. - - Assuming 10.0.0.0/8 is the internal subnet. In the following example, a load balancer will be created that is only accessible to cluster internal IPs. - This will not allow clients from outside of your Kubernetes cluster to access the load balancer. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp -spec: - ports: - - port: 8765 - targetPort: 9376 - selector: - app: example - type: LoadBalancer - loadBalancerSourceRanges: - - 10.0.0.0/8 -``` - - In the following example, a load balancer will be created that is only accessible to clients with IP addresses from 130.211.204.1 and 130.211.204.2. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: myapp -spec: - ports: - - port: 8765 - targetPort: 9376 - selector: - app: example - type: LoadBalancer - loadBalancerSourceRanges: - - 130.211.204.1/32 - - 130.211.204.2/32 -``` - -## Google Compute Engine - -When using a Service with `spec.type: LoadBalancer`, the firewall will be -opened automatically. When using `spec.type: NodePort`, however, the firewall -is *not* opened by default. - -Google Compute Engine firewalls are documented [elsewhere](https://cloud.google.com/compute/docs/networking#firewalls_1). - -You can add a firewall with the `gcloud` command line tool: - -```shell -gcloud compute firewall-rules create my-rule --allow=tcp: -``` - -{{< note >}} -GCE firewalls are defined per-vm, rather than per-ip address. This means that -when you open a firewall for a service's ports, anything that serves on that -port on that VM's host IP address may potentially serve traffic. 
Note that this -is not a problem for other Kubernetes services, as they listen on IP addresses -that are different than the host node's external IP address. - -Consider: - - * You create a Service with an external load balancer (IP Address 1.2.3.4) - and port 80 - * You open the firewall for port 80 for all nodes in your cluster, so that - the external Service actually can deliver packets to your Service - * You start an nginx server, running on port 80 on the host virtual machine - (IP Address 2.3.4.5). This nginx is also exposed to the internet on - the VM's external IP address. - -Consequently, please be careful when opening firewalls in Google Compute Engine -or Google Kubernetes Engine. You may accidentally be exposing other services to -the wilds of the internet. - -{{< /note >}} - - diff --git a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md index 4da7cdf3d61..7a37fdc20bf 100644 --- a/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md +++ b/content/en/docs/tasks/access-application-cluster/web-ui-dashboard.md @@ -83,7 +83,11 @@ The deploy wizard expects that you provide the following information: A [Deployment](/docs/concepts/workloads/controllers/deployment/) will be created to maintain the desired number of Pods across your cluster. -- **Service** (optional): For some parts of your application (e.g. frontends) you may want to expose a [Service](/docs/concepts/services-networking/service/) onto an external, maybe public IP address outside of your cluster (external Service). For external Services, you may need to open up one or more ports to do so. Find more details [here](/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/). +- **Service** (optional): For some parts of your application (e.g. 
frontends) you may want to expose a [Service](/docs/concepts/services-networking/service/) onto an external, maybe public IP address outside of your cluster (external Service). + + {{< note >}} + For external Services, you may need to open up one or more ports in your cloud provider's firewall to serve traffic. + {{< /note >}} Other Services that are only visible from inside the cluster are called internal Services.