diff --git a/Makefile b/Makefile index 8e27d0895634a..e8a2ee6fdecd0 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ DOCKER = docker -HUGO_VERSION = 0.53 +HUGO_VERSION = 0.57.2 DOCKER_IMAGE = kubernetes-hugo DOCKER_RUN = $(DOCKER) run --rm --interactive --tty --volume $(CURDIR):/src NODE_BIN = node_modules/.bin diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 9405515e5e789..6fc7a2f2a3b99 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -123,6 +123,7 @@ aliases: - makocchi-git - MasayaAoyama - nasa9084 + - oke-py sig-docs-ko-owners: # Admins for Korean content - ClaudiaJKang - gochist diff --git a/assets/sass/_base.sass b/assets/sass/_base.sass index 1ccd4feae39f1..2a0d6a269679b 100644 --- a/assets/sass/_base.sass +++ b/assets/sass/_base.sass @@ -70,7 +70,7 @@ section background-color: white section, header, footer - main + .main-section position: relative margin: auto @@ -249,7 +249,7 @@ header color: $blue font-weight: normal - main + .main-section white-space: nowrap overflow: hidden clear: both @@ -439,7 +439,7 @@ footer background-image: url(/images/texture.png) background-color: $dark-grey - main + .main-section padding: 20px 0 nav @@ -1121,7 +1121,7 @@ $feature-box-div-margin-bottom: 40px margin-bottom: 0 padding-bottom: 1px - main + .main-section padding: 0 10px margin-bottom: 30px @@ -1147,7 +1147,7 @@ $feature-box-div-margin-bottom: 40px a color: $blue - main + .main-section margin-bottom: $ocean-nodes-padding-Y min-height: 160px @@ -1160,7 +1160,7 @@ $feature-box-div-margin-bottom: 40px width: 100% max-width: 160px - main:first-child + .main-section:first-child .image-wrapper max-width: 100% @@ -1356,7 +1356,7 @@ $feature-box-div-margin-bottom: 40px #mainContent padding: 20px 0 - main + .main-section max-width: none a @@ -1775,7 +1775,7 @@ $feature-box-div-margin-bottom: 40px #home #talkToUs - main + .main-section padding: 30px 0 h5 diff --git a/assets/sass/_desktop.sass b/assets/sass/_desktop.sass index b1c292557b4e7..c8f7f44f84581 100644 --- a/assets/sass/_desktop.sass +++ b/assets/sass/_desktop.sass @@ -95,7 +95,7 @@ $video-section-height: 550px right: -25px section, header, footer - main + .main-section max-width: $main-max-width header, #vendorStrip, #encyclopedia, #hero h1, #hero h5, #docs #hero h1, #docs #hero h5, @@ -108,11 +108,11 @@ $video-section-height: 550px #home section, header, footer - main + .main-section max-width: 1000px #oceanNodes - main + .main-section position: relative max-width: 830px @@ -172,7 +172,7 @@ $video-section-height: 550px background-image: url(../images/texture.png) background-color: $dark-grey - main + .main-section padding: 20px 0 nav diff --git a/assets/sass/_size.sass b/assets/sass/_size.sass index 76a2771fa719e..470a2f57650a0 100644 --- a/assets/sass/_size.sass +++ b/assets/sass/_size.sass @@ -26,7 +26,7 @@ section, header, #vendorStrip padding-left: $full-width-paddingX padding-right: $full-width-paddingX - main + .main-section width: $main-width max-width: $main-max-width @@ -59,7 +59,7 @@ header .nav-box + .nav-box margin-left: $nav-box-sibling-margin-left - main + main + .main-secton + .main-section margin-top: $main-nav-main-sibling-margin-top .left .button diff --git a/assets/sass/_tablet.sass b/assets/sass/_tablet.sass index 4434ed5b82bf5..96b24322a2e3d 100644 --- a/assets/sass/_tablet.sass +++ b/assets/sass/_tablet.sass @@ -116,7 +116,7 @@ $feature-box-div-width: 45% text-align: left margin-bottom: 18px - main + main, .main-section position: relative clear: both display: table diff --git 
a/content/en/blog/_posts/2016-03-00-Building-Highly-Available-Applications-Using-Kubernetes-New-Multi-Zone-Clusters-aka-Ubernetes-Lite.md b/content/en/blog/_posts/2016-03-00-Building-Highly-Available-Applications-Using-Kubernetes-New-Multi-Zone-Clusters-aka-Ubernetes-Lite.md index 83b11cab28937..5f5b2146923b8 100644 --- a/content/en/blog/_posts/2016-03-00-Building-Highly-Available-Applications-Using-Kubernetes-New-Multi-Zone-Clusters-aka-Ubernetes-Lite.md +++ b/content/en/blog/_posts/2016-03-00-Building-Highly-Available-Applications-Using-Kubernetes-New-Multi-Zone-Clusters-aka-Ubernetes-Lite.md @@ -2,7 +2,7 @@ title: " Building highly available applications using Kubernetes new multi-zone clusters (a.k.a. 'Ubernetes Lite') " date: 2016-03-29 slug: building-highly-available-applications-using-kubernetes-new-multi-zone-clusters-a.k.a-ubernetes-lite -url: /blog/2016/03/Building-Highly-Available-Applications-Using-Kubernetes-New-Multi-Zone-Clusters-A.K.A-Ubernetes-Lite +url: /blog/2016/03/Building-Highly-Available-Applications-Using-Kubernetes-New-Multi-Zone-Clusters-aka-Ubernetes-Lite --- _Editor's note: this is the third post in a [series of in-depth posts](https://kubernetes.io/blog/2016/03/five-days-of-kubernetes-12) on what's new in Kubernetes 1.2_ diff --git a/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-And-Simplifying-Advanced-Networking-With-Ingress.md b/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-And-Simplifying-Advanced-Networking-With-Ingress.md index 3373680d42628..31bd68f863446 100644 --- a/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-And-Simplifying-Advanced-Networking-With-Ingress.md +++ b/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-And-Simplifying-Advanced-Networking-With-Ingress.md @@ -2,7 +2,7 @@ title: " Kubernetes 1.2 and simplifying advanced networking with Ingress " date: 2016-03-31 slug: kubernetes-1.2-and-simplifying-advanced-networking-with-ingress -url: /blog/2016/03/Kubernetes-1.2-And-Simplifying-Advanced-Networking-With-Ingress +url: /blog/2016/03/Kubernetes-1-2-And-Simplifying-Advanced-Networking-With-Ingress --- _Editor's note: This is the sixth post in a [series of in-depth posts](https://kubernetes.io/blog/2016/03/five-days-of-kubernetes-12) on what's new in Kubernetes 1.2._ _Ingress is currently in beta and under active development._ diff --git a/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-Even-More-Performance-Upgrades-Plus-Easier-Application-Deployment-And-Management-.md b/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-Even-More-Performance-Upgrades-Plus-Easier-Application-Deployment-And-Management-.md index aae3e88517b0e..c63763d4e5462 100644 --- a/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-Even-More-Performance-Upgrades-Plus-Easier-Application-Deployment-And-Management-.md +++ b/content/en/blog/_posts/2016-03-00-Kubernetes-1-2-Even-More-Performance-Upgrades-Plus-Easier-Application-Deployment-And-Management-.md @@ -2,7 +2,7 @@ title: " Kubernetes 1.2: Even more performance upgrades, plus easier application deployment and management " date: 2016-03-17 slug: kubernetes-1.2-even-more-performance-upgrades-plus-easier-application-deployment-and-management -url: /blog/2016/03/Kubernetes-1.2-Even-More-Performance-Upgrades-Plus-Easier-Application-Deployment-And-Management +url: /blog/2016/03/Kubernetes-1-2-Even-More-Performance-Upgrades-Plus-Easier-Application-Deployment-And-Management --- Today we released Kubernetes 1.2. This release represents significant improvements for large organizations building distributed systems. 
Now with over 680 unique contributors to the project, this release represents our largest yet. diff --git a/content/en/blog/_posts/2016-07-00-Five-Days-Of-Kubernetes-1-3.md b/content/en/blog/_posts/2016-07-00-Five-Days-Of-Kubernetes-1-3.md index c020ecb0f47cd..ef941eeaad283 100644 --- a/content/en/blog/_posts/2016-07-00-Five-Days-Of-Kubernetes-1-3.md +++ b/content/en/blog/_posts/2016-07-00-Five-Days-Of-Kubernetes-1-3.md @@ -2,7 +2,7 @@ title: " Five Days of Kubernetes 1.3 " date: 2016-07-11 slug: five-days-of-kubernetes-1.3 -url: /blog/2016/07/Five-Days-Of-Kubernetes-1.3 +url: /blog/2016/07/Five-Days-Of-Kubernetes-1-3 --- Last week we [released Kubernetes 1.3](https://kubernetes.io/blog/2016/07/kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads), two years from the day when the first Kubernetes commit was pushed to GitHub. Now 30,000+ commits later from over 800 contributors, this 1.3 releases is jam packed with updates driven by feedback from users. diff --git a/content/en/blog/_posts/2016-07-00-Kubernetes-1-3-Bridging-Cloud-Native-And-Enterprise-Workloads.md b/content/en/blog/_posts/2016-07-00-Kubernetes-1-3-Bridging-Cloud-Native-And-Enterprise-Workloads.md index c13eac3f19d0e..13e682c0a81aa 100644 --- a/content/en/blog/_posts/2016-07-00-Kubernetes-1-3-Bridging-Cloud-Native-And-Enterprise-Workloads.md +++ b/content/en/blog/_posts/2016-07-00-Kubernetes-1-3-Bridging-Cloud-Native-And-Enterprise-Workloads.md @@ -2,7 +2,7 @@ title: " Kubernetes 1.3: Bridging Cloud Native and Enterprise Workloads " date: 2016-07-06 slug: kubernetes-1.3-bridging-cloud-native-and-enterprise-workloads -url: /blog/2016/07/Kubernetes-1.3-Bridging-Cloud-Native-And-Enterprise-Workloads +url: /blog/2016/07/Kubernetes-1-3-Bridging-Cloud-Native-And-Enterprise-Workloads --- Nearly two years ago, when we officially kicked off the Kubernetes project, we wanted to simplify distributed systems management and provide the core technology required to everyone. The community’s response to this effort has blown us away. Today, thousands of customers, partners and developers are running clusters in production using Kubernetes and have joined the cloud native revolution.  diff --git a/content/en/blog/_posts/2016-09-00-Kubernetes-1-4-Making-It-Easy-To-Run-On-Kuberentes-Anywhere.md b/content/en/blog/_posts/2016-09-00-Kubernetes-1-4-Making-It-Easy-To-Run-On-Kuberentes-Anywhere.md index 07efc8de42e25..e1e8652abec4e 100644 --- a/content/en/blog/_posts/2016-09-00-Kubernetes-1-4-Making-It-Easy-To-Run-On-Kuberentes-Anywhere.md +++ b/content/en/blog/_posts/2016-09-00-Kubernetes-1-4-Making-It-Easy-To-Run-On-Kuberentes-Anywhere.md @@ -2,7 +2,7 @@ title: " Kubernetes 1.4: Making it easy to run on Kubernetes anywhere " date: 2016-09-26 slug: kubernetes-1.4-making-it-easy-to-run-on-kuberentes-anywhere -url: /blog/2016/09/Kubernetes-1.4-Making-It-Easy-To-Run-On-Kuberentes-Anywhere +url: /blog/2016/09/Kubernetes-1-4-Making-It-Easy-To-Run-On-Kuberentes-Anywhere --- Today we’re happy to announce the release of Kubernetes 1.4. 
diff --git a/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md b/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md index eb84069798588..ef39fbcdc3122 100644 --- a/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md +++ b/content/en/blog/_posts/2016-10-00-Production-Kubernetes-Dashboard-UI-1-4-improvements_3.md @@ -2,7 +2,7 @@ title: " How we improved Kubernetes Dashboard UI in 1.4 for your production needs​ " date: 2016-10-03 slug: production-kubernetes-dashboard-ui-1.4-improvements_3 -url: /blog/2016/10/Production-Kubernetes-Dashboard-UI-1.4-improvements_3 +url: /blog/2016/10/Production-Kubernetes-Dashboard-UI-1-4-improvements_3 --- With the release of [Kubernetes 1.4](https://kubernetes.io/blog/2016/09/kubernetes-1.4-making-it-easy-to-run-on-kuberentes-anywhere) last week, Dashboard – the official web UI for Kubernetes – has a number of exciting updates and improvements of its own. The past three months have been busy ones for the Dashboard team, and we’re excited to share the resulting features of that effort here. If you’re not familiar with Dashboard, the [GitHub repo](https://github.com/kubernetes/dashboard#kubernetes-dashboard) is a great place to get started. diff --git a/content/en/blog/_posts/2016-12-00-Cluster-Federation-In-Kubernetes-1-5.md b/content/en/blog/_posts/2016-12-00-Cluster-Federation-In-Kubernetes-1-5.md index 05330bd067b18..be07a5fee7fda 100644 --- a/content/en/blog/_posts/2016-12-00-Cluster-Federation-In-Kubernetes-1-5.md +++ b/content/en/blog/_posts/2016-12-00-Cluster-Federation-In-Kubernetes-1-5.md @@ -2,7 +2,7 @@ title: " Cluster Federation in Kubernetes 1.5 " date: 2016-12-22 slug: cluster-federation-in-kubernetes-1.5 -url: /blog/2016/12/Cluster-Federation-In-Kubernetes-1.5 +url: /blog/2016/12/Cluster-Federation-In-Kubernetes-1-5 --- _Editor’s note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2016/12/five-days-of-kubernetes-1.5) on what's new in Kubernetes 1.5_ diff --git a/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md b/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md index 2788eb3ab8851..c2fa8c96ed9fe 100644 --- a/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md +++ b/content/en/blog/_posts/2016-12-00-Five-Days-Of-Kubernetes-1-5.md @@ -2,7 +2,7 @@ title: " Five Days of Kubernetes 1.5 " date: 2016-12-19 slug: five-days-of-kubernetes-1.5 -url: /blog/2016/12/Five-Days-Of-Kubernetes-1.5 +url: /blog/2016/12/Five-Days-Of-Kubernetes-1-5 --- With the help of our growing community of 1,000 contributors, we pushed some 5,000 commits to extend support for production workloads and deliver [Kubernetes 1.5](https://kubernetes.io/blog/2016/12/kubernetes-1.5-supporting-production-workloads). While many improvements and new features have been added, we selected few to highlight in a series of in-depths posts listed below.  
diff --git a/content/en/blog/_posts/2016-12-00-Kubernetes-1-5-Supporting-Production-Workloads.md b/content/en/blog/_posts/2016-12-00-Kubernetes-1-5-Supporting-Production-Workloads.md index 5f44ce1767fa2..1ac009b778439 100644 --- a/content/en/blog/_posts/2016-12-00-Kubernetes-1-5-Supporting-Production-Workloads.md +++ b/content/en/blog/_posts/2016-12-00-Kubernetes-1-5-Supporting-Production-Workloads.md @@ -2,7 +2,7 @@ title: " Kubernetes 1.5: Supporting Production Workloads " date: 2016-12-13 slug: kubernetes-1.5-supporting-production-workloads -url: /blog/2016/12/Kubernetes-1.5-Supporting-Production-Workloads +url: /blog/2016/12/Kubernetes-1-5-Supporting-Production-Workloads --- Today we’re announcing the release of Kubernetes 1.5. This release follows close on the heels of KubeCon/CloundNativeCon, where users gathered to share how they’re running their applications on Kubernetes. Many of you expressed interest in running stateful applications in containers with the eventual goal of running all applications on Kubernetes. If you have been waiting to try running a distributed database on Kubernetes, or for ways to guarantee application disruption SLOs for stateful and stateless apps, this release has solutions for you.  diff --git a/content/en/blog/_posts/2017-03-00-Five-Days-Of-Kubernetes-1-6.md b/content/en/blog/_posts/2017-03-00-Five-Days-Of-Kubernetes-1-6.md index 52915e69ed9a9..d6d1121ad3033 100644 --- a/content/en/blog/_posts/2017-03-00-Five-Days-Of-Kubernetes-1-6.md +++ b/content/en/blog/_posts/2017-03-00-Five-Days-Of-Kubernetes-1-6.md @@ -2,7 +2,7 @@ title: " Five Days of Kubernetes 1.6 " date: 2017-03-29 slug: five-days-of-kubernetes-1.6 -url: /blog/2017/03/Five-Days-Of-Kubernetes-1.6 +url: /blog/2017/03/Five-Days-Of-Kubernetes-1-6 --- With the help of our growing community of 1,110 plus contributors, we pushed around 5,000 commits to deliver [Kubernetes 1.6](https://kubernetes.io/blog/2017/03/kubernetes-1.6-multi-user-multi-workloads-at-scale), bringing focus on multi-user, multi-workloads at scale. While many improvements have been contributed, we selected few features to highlight in a series of in-depths posts listed below.  diff --git a/content/en/blog/_posts/2017-03-00-Kubernetes-1-6-Multi-User-Multi-Workloads-At-Scale.md b/content/en/blog/_posts/2017-03-00-Kubernetes-1-6-Multi-User-Multi-Workloads-At-Scale.md index d6a16b391da1b..ab6393a4fdeaf 100644 --- a/content/en/blog/_posts/2017-03-00-Kubernetes-1-6-Multi-User-Multi-Workloads-At-Scale.md +++ b/content/en/blog/_posts/2017-03-00-Kubernetes-1-6-Multi-User-Multi-Workloads-At-Scale.md @@ -2,7 +2,7 @@ title: " Kubernetes 1.6: Multi-user, Multi-workloads at Scale " date: 2017-03-28 slug: kubernetes-1.6-multi-user-multi-workloads-at-scale -url: /blog/2017/03/Kubernetes-1.6-Multi-User-Multi-Workloads-At-Scale +url: /blog/2017/03/Kubernetes-1-6-Multi-User-Multi-Workloads-At-Scale --- Today we’re announcing the release of Kubernetes 1.6. 
diff --git a/content/en/blog/_posts/2017-03-00-Scalability-Updates-In-Kubernetes-1-6.md b/content/en/blog/_posts/2017-03-00-Scalability-Updates-In-Kubernetes-1-6.md index 3ed8bf2d4466e..c92f46d48b593 100644 --- a/content/en/blog/_posts/2017-03-00-Scalability-Updates-In-Kubernetes-1-6.md +++ b/content/en/blog/_posts/2017-03-00-Scalability-Updates-In-Kubernetes-1-6.md @@ -2,7 +2,7 @@ title: " Scalability updates in Kubernetes 1.6: 5,000 node and 150,000 pod clusters " date: 2017-03-30 slug: scalability-updates-in-kubernetes-1.6 -url: /blog/2017/03/Scalability-Updates-In-Kubernetes-1.6 +url: /blog/2017/03/Scalability-Updates-In-Kubernetes-1-6 --- _Editor’s note: this post is part of a [series of in-depth articles](https://kubernetes.io/blog/2017/03/five-days-of-kubernetes-1.6) on what's new in Kubernetes 1.6_ diff --git a/content/en/blog/_posts/2017-06-00-Kubernetes-1-7-Security-Hardening-Stateful-Application-Extensibility-Updates.md b/content/en/blog/_posts/2017-06-00-Kubernetes-1-7-Security-Hardening-Stateful-Application-Extensibility-Updates.md index 123841116c17f..bdda0dd548928 100644 --- a/content/en/blog/_posts/2017-06-00-Kubernetes-1-7-Security-Hardening-Stateful-Application-Extensibility-Updates.md +++ b/content/en/blog/_posts/2017-06-00-Kubernetes-1-7-Security-Hardening-Stateful-Application-Extensibility-Updates.md @@ -2,7 +2,7 @@ title: " Kubernetes 1.7: Security Hardening, Stateful Application Updates and Extensibility " date: 2017-06-30 slug: kubernetes-1.7-security-hardening-stateful-application-extensibility-updates -url: /blog/2017/06/Kubernetes-1.7-Security-Hardening-Stateful-Application-Extensibility-Updates +url: /blog/2017/06/Kubernetes-1-7-Security-Hardening-Stateful-Application-Extensibility-Updates --- Today we’re announcing Kubernetes 1.7, a milestone release that adds security, storage and extensibility features motivated by widespread production use of Kubernetes in the most demanding enterprise environments.  diff --git a/content/en/blog/_posts/2019-10-29-2019-sig-docs-survey.md b/content/en/blog/_posts/2019-10-29-2019-sig-docs-survey.md new file mode 100644 index 0000000000000..6079040a51864 --- /dev/null +++ b/content/en/blog/_posts/2019-10-29-2019-sig-docs-survey.md @@ -0,0 +1,131 @@ +--- +layout: blog +title: "Kubernetes Documentation Survey" +date: 2019-10-29 +slug: kubernetes-documentation-end-user-survey +--- + +**Author:** [Aimee Ukasick](https://www.linkedin.com/in/aimee-ukasick/) and SIG Docs + +In September, SIG Docs conducted its first survey about the [Kubernetes +documentation](https://kubernetes.io/docs/). We'd like to thank the CNCF's Kim +McMahon for helping us create the survey and access the results. + +# Key takeaways + +Respondents would like more example code, more detailed content, and more +diagrams in the Concepts, Tasks, and Reference sections. + +74% of respondents would like the Tutorials section to contain advanced content. + +69.70% said the Kubernetes documentation is the first place they look for +information about Kubernetes. + +# Survey methodology and respondents + +We conducted the survey in English. The survey was only available for 4 days due +to time constraints. We announced the survey on Kubernetes mailing lists, in +Kubernetes Slack channels, on Twitter, and in Kube Weekly. There were 23 +questions, and respondents took an average of 4 minutes to complete the survey. 
+ +## Quick facts about respondents: + +- 48.48% are experienced Kubernetes users, 26.26% expert, and 25.25% beginner +- 57.58% use Kubernetes in both administrator and developer roles +- 64.65% have been using the Kubernetes documentation for more than 12 months +- 95.96% read the documentation in English + +# Question and response highlights + +## Why people access the Kubernetes documentation + +The majority of respondents stated that they access the documentation for the Concepts. + +{{< figure + src="/images/blog/2019-sig-docs-survey/Q9-k8s-docs-use.png" + alt="Why respondents access the Kubernetes documentation" +>}} + +This deviates only slightly from what we see in Google Analytics: of the top 10 +most viewed pages this year, #1 is the kubectl cheatsheet in the Reference section, +followed overwhelmingly by pages in the Concepts section. + +## Satisfaction with the documentation + +We asked respondents to record their level of satisfaction with the detail in +the Concepts, Tasks, Reference, and Tutorials sections: + +- Concepts: 47.96% Moderately Satisfied +- Tasks: 50.54% Moderately Satisfied +- Reference: 40.86% Very Satisfied +- Tutorial: 47.25% Moderately Satisfied + +## How SIG Docs can improve each documentation section + +We asked how we could improve each section, providing respondents with +selectable answers as well as a text field. The clear majority would like more +example code, more detailed content, more diagrams, and advanced tutorials: + +```text +- Personally, would like to see more analogies to help further understanding. +- Would be great if corresponding sections of code were explained too +- Expand on the concepts to bring them together - they're a bucket of separate eels moving in different directions right now +- More diagrams, and more example code + ``` + +Respondents used the "Other" text box to record areas causing frustration: + +```text +- Keep concepts up to date and accurate +- Keep task topics up to date and accurate. Human testing. +- Overhaul the examples. Many times the output of commands shown is not actual. +- I've never understood how to navigate or interpret the reference section +- Keep the tutorials up to date, or remove them +``` + +## How SIG Docs can improve the documentation overall + +We asked respondents how we can improve the Kubernetes documentation +overall. Some took the opportunity to tell us we are doing a good job: + +```text +- For me, it is the best documented open source project. +- Keep going! +- I find the documentation to be excellent. +- You [are] doing a great job. For real. +``` + +Other respondents provided feedback on the content: + +```text +- ...But since we're talking about docs, more is always better. More +advanced configuration examples would be, to me, the way to go. Like a Use Case page for each +configuration topic with beginner to advanced example scenarios. Something like that would be +awesome.... +- More in-depth examples and use cases would be great. I often feel that the Kubernetes +documentation scratches the surface of a topic, which might be great for new users, but it leaves +more experienced users without much "official" guidance on how to implement certain things. +- More production like examples in the resource sections (notably secrets) or links to production like +examples +- It would be great to see a very clear "Quick Start" A->Z up and running like many other tech +projects. There are a handful of almost-quick-starts, but no single guidance. The result is +information overkill. 
+``` + +A few respondents provided technical suggestions: + +```text +- Make table columns sortable and filterable using a ReactJS or Angular component. +- For most, I think creating documentation with Hugo - a system for static site generation - is not +appropriate. There are better systems for documenting large software project. Specifically, I would +like to see k8s switch to Sphinx for documentation. It has an excellent built-in search, it is easy to +learn if you know markdown, it is widely adopted by other projects (e.g. every software project in +readthedocs.io, linux kernel, docs.python.org etc). +``` + +Overall, respondents provided constructive criticism focusing on the need for +advanced use cases as well as more in-depth examples, guides, and walkthroughs. + +# Where to see more + +Survey results summary, charts, and raw data are available in `kubernetes/community` sig-docs [survey](https://github.com/kubernetes/community/tree/master/sig-docs/survey) directory. diff --git a/content/en/docs/concepts/architecture/nodes.md b/content/en/docs/concepts/architecture/nodes.md index b9aacc4a80156..e148e555318f5 100644 --- a/content/en/docs/concepts/architecture/nodes.md +++ b/content/en/docs/concepts/architecture/nodes.md @@ -12,7 +12,7 @@ weight: 10 A node is a worker machine in Kubernetes, previously known as a `minion`. A node may be a VM or physical machine, depending on the cluster. Each node contains the services necessary to run [pods](/docs/concepts/workloads/pods/pod/) and is managed by the master -components. The services on a node include the [container runtime](/docs/concepts/overview/components/#node-components), kubelet and kube-proxy. See +components. The services on a node include the [container runtime](/docs/concepts/overview/components/#container-runtime), kubelet and kube-proxy. See [The Kubernetes Node](https://git.k8s.io/community/contributors/design-proposals/architecture/architecture.md#the-kubernetes-node) section in the architecture design doc for more details. @@ -284,7 +284,7 @@ capacity when adding a node. The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It checks that the sum of the requests of containers on the node is no greater than the node capacity. It -includes all containers started by the kubelet, but not containers started directly by the [container runtime](/docs/concepts/overview/components/#node-components) nor any process running outside of the containers. +includes all containers started by the kubelet, but not containers started directly by the [container runtime](/docs/concepts/overview/components/#container-runtime) nor any process running outside of the containers. If you want to explicitly reserve resources for non-Pod processes, follow this tutorial to [reserve resources for system daemons](/docs/tasks/administer-cluster/reserve-compute-resources/#system-reserved). diff --git a/content/en/docs/concepts/cluster-administration/cloud-providers.md b/content/en/docs/concepts/cluster-administration/cloud-providers.md index bd41643c31ba7..bdbbeecdfa2ba 100644 --- a/content/en/docs/concepts/cluster-administration/cloud-providers.md +++ b/content/en/docs/concepts/cluster-administration/cloud-providers.md @@ -370,13 +370,18 @@ Note that the Kubernetes Node name must match the VM FQDN (reported by OVirt und The Photon cloud provider uses the hostname of the node (as determined by the kubelet or overridden with `--hostname-override`) as the name of the Kubernetes Node object. 
Note that the Kubernetes Node name must match the Photon VM name (or if `overrideIP` is set to true in the `--cloud-config`, the Kubernetes Node name must match the Photon VM IP address). -## VSphere - -### Node Name - -The VSphere cloud provider uses the detected hostname of the node (as determined by the kubelet) as the name of the Kubernetes Node object. - -The `--hostname-override` parameter is ignored by the VSphere cloud provider. +## vSphere + +{{< tabs name="vSphere cloud provider" >}} +{{% tab name="vSphere >= 6.7U3" %}} +For all vSphere deployments on vSphere >= 6.7U3, the [external vSphere cloud provider](https://github.com/kubernetes/cloud-provider-vsphere), along with the [vSphere CSI driver](https://github.com/kubernetes-sigs/vsphere-csi-driver) is recommended. See [Deploying a Kubernetes Cluster on vSphere with CSI and CPI](https://cloud-provider-vsphere.sigs.k8s.io/tutorials/kubernetes-on-vsphere-with-kubeadm.html) for a quick start guide. +{{% /tab %}} +{{% tab name="vSphere < 6.7U3" %}} +If you are running vSphere < 6.7U3, the in-tree vSphere cloud provider is recommended. See [Running a Kubernetes Cluster on vSphere with kubeadm](https://cloud-provider-vsphere.sigs.k8s.io/tutorials/k8s-vcp-on-vsphere-with-kubeadm.html) for a quick start guide. +{{% /tab %}} +{{< /tabs >}} + +For in-depth documentation on the vSphere cloud provider, visit the [vSphere cloud provider docs site](https://cloud-provider-vsphere.sigs.k8s.io). ## IBM Cloud Kubernetes Service diff --git a/content/en/docs/concepts/configuration/scheduling-framework.md b/content/en/docs/concepts/configuration/scheduling-framework.md index 7e1fd970e3ad0..58fb36b192307 100644 --- a/content/en/docs/concepts/configuration/scheduling-framework.md +++ b/content/en/docs/concepts/configuration/scheduling-framework.md @@ -10,7 +10,7 @@ weight: 70 {{< feature-state for_k8s_version="1.15" state="alpha" >}} -The scheduling framework is a new plugable architecture for Kubernetes Scheduler +The scheduling framework is a new pluggable architecture for Kubernetes Scheduler that makes scheduler customizations easy. It adds a new set of "plugin" APIs to the existing scheduler. Plugins are compiled into the scheduler. 
The APIs allow most scheduling features to be implemented as plugins, while keeping the diff --git a/content/en/docs/concepts/containers/container-lifecycle-hooks.md b/content/en/docs/concepts/containers/container-lifecycle-hooks.md index 08d855732fabb..3d4f81152d20e 100644 --- a/content/en/docs/concepts/containers/container-lifecycle-hooks.md +++ b/content/en/docs/concepts/containers/container-lifecycle-hooks.md @@ -99,7 +99,7 @@ Here is some example output of events from running this command: ``` Events: - FirstSeen LastSeen Count From SubobjectPath Type Reason Message + FirstSeen LastSeen Count From SubObjectPath Type Reason Message --------- -------- ----- ---- ------------- -------- ------ ------- 1m 1m 1 {default-scheduler } Normal Scheduled Successfully assigned test-1730497541-cq1d2 to gke-test-cluster-default-pool-a07e5d30-siqd 1m 1m 1 {kubelet gke-test-cluster-default-pool-a07e5d30-siqd} spec.containers{main} Normal Pulling pulling image "test:1.0" diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md index 8e4b9434cf9b7..b731b8e02f025 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins.md @@ -13,7 +13,7 @@ Kubernetes provides a [device plugin framework](https://github.com/kubernetes/co that you can use to advertise system hardware resources to the {{< glossary_tooltip term_id="kubelet" >}}. -Instead of customising the code for Kubernetes itself, vendors can implement a +Instead of customizing the code for Kubernetes itself, vendors can implement a device plugin that you deploy either manually or as a {{< glossary_tooltip term_id="daemonset" >}}. The targeted devices include GPUs, high-performance NICs, FPGAs, InfiniBand adapters, and other similar computing resources that may require vendor specific initialization diff --git a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md index 64954f2dfe2e0..cb9b6d83a9d70 100644 --- a/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md +++ b/content/en/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins.md @@ -25,7 +25,7 @@ Network plugins in Kubernetes come in a few flavors: ## Installation -The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it found, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for Docker, as rkt manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins: +The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it finds, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for Docker, as rkt manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins: * `cni-bin-dir`: Kubelet probes this directory for plugins on startup * `network-plugin`: The network plugin to use from `cni-bin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply "cni". 
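To make the `network-plugin` and `cni-bin-dir` parameters concrete, here is a minimal sketch of wiring a CNI plugin into the kubelet on a single node. The `bridge` and `host-local` plugin types, the `10-mynet.conf` file name, and the `10.22.0.0/16` subnet are illustrative assumptions rather than values taken from this page.

```shell
# Illustrative only: write a CNI configuration where the kubelet looks for one by default.
# The "bridge" and "host-local" plugin binaries are assumed to already exist in /opt/cni/bin.
cat <<EOF > /etc/cni/net.d/10-mynet.conf
{
  "cniVersion": "0.3.1",
  "name": "mynet",
  "type": "bridge",
  "bridge": "cni0",
  "isGateway": true,
  "ipMasq": true,
  "ipam": {
    "type": "host-local",
    "subnet": "10.22.0.0/16"
  }
}
EOF

# Start the kubelet with the CNI plugin selected; the two directory flags show
# their default values explicitly.
kubelet --network-plugin=cni \
  --cni-conf-dir=/etc/cni/net.d \
  --cni-bin-dir=/opt/cni/bin
```

Naming the file `10-mynet.conf` also illustrates the lexicographic ordering described just below: when several configuration files are present, the kubelet picks the one that sorts first by name.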
@@ -40,7 +40,7 @@ By default if no kubelet network plugin is specified, the `noop` plugin is used, The CNI plugin is selected by passing Kubelet the `--network-plugin=cni` command-line option. Kubelet reads a file from `--cni-conf-dir` (default `/etc/cni/net.d`) and uses the CNI configuration from that file to set up each pod's network. The CNI configuration file must match the [CNI specification](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration), and any required CNI plugins referenced by the configuration must be present in `--cni-bin-dir` (default `/opt/cni/bin`). -If there are multiple CNI configuration files in the directory, the first one in lexicographic order of file name is used. +If there are multiple CNI configuration files in the directory, the kubelet uses the configuration file that comes first by name in lexicographic order. In addition to the CNI plugin specified by the configuration file, Kubernetes requires the standard CNI [`lo`](https://github.com/containernetworking/plugins/blob/master/plugins/main/loopback/loopback.go) plugin, at minimum version 0.2.0 diff --git a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md b/content/en/docs/concepts/extend-kubernetes/extend-cluster.md index d5ab77a28b55f..b75d37e335821 100644 --- a/content/en/docs/concepts/extend-kubernetes/extend-cluster.md +++ b/content/en/docs/concepts/extend-kubernetes/extend-cluster.md @@ -147,7 +147,7 @@ Kubernetes provides several built-in authentication methods, and an [Authenticat ### Authorization - [Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It just works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, and [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision. +[Authorization](/docs/reference/access-authn-authz/webhook/) determines whether specific users can read, write, and do other operations on API resources. It just works at the level of whole resources -- it doesn't discriminate based on arbitrary object fields. If the built-in authorization options don't meet your needs, and [Authorization webhook](/docs/reference/access-authn-authz/webhook/) allows calling out to user-provided code to make an authorization decision. ### Dynamic Admission Control diff --git a/content/en/docs/concepts/policy/limit-range.md b/content/en/docs/concepts/policy/limit-range.md index 5fe14e18dc946..b49c012ca42aa 100644 --- a/content/en/docs/concepts/policy/limit-range.md +++ b/content/en/docs/concepts/policy/limit-range.md @@ -24,7 +24,7 @@ A limit range, defined by a `LimitRange` object, provides constraints that can: - Enforce a ratio between request and limit for a resource in a namespace. - Set default request/limit for compute resources in a namespace and automatically inject them to Containers at runtime. -## Enabling Limit Range +## Enabling Limit Range Limit Range support is enabled by default for many Kubernetes distributions. 
It is enabled when the apiserver `--enable-admission-plugins=` flag has `LimitRanger` admission controller as @@ -40,8 +40,8 @@ A limit range is enforced in a particular namespace when there is a - The `LimitRanger` admission controller enforces defaults limits for all Pods and Container that do not set compute resource requirements and tracks usage to ensure it does not exceed resource minimum , maximum and ratio defined in any `LimitRange` present in the namespace. - If creating or updating a resource (Pod, Container, PersistentVolumeClaim) violates a limit range constraint, the request to the API server will fail with HTTP status code `403 FORBIDDEN` and a message explaining the constraint that would have been violated. - If limit range is activated in a namespace for compute resources like `cpu` and `memory`, users must specify - requests or limits for those values; otherwise, the system may reject pod creation. -- LimitRange validations occurs only at Pod Admission stage, not on Running pods. + requests or limits for those values; otherwise, the system may reject pod creation. +- LimitRange validations occurs only at Pod Admission stage, not on Running pods. Examples of policies that could be created using limit range are: @@ -54,19 +54,19 @@ there may be contention for resources; The Containers or Pods will not be creat Neither contention nor changes to limitrange will affect already created resources. -## Limiting Container compute resources +## Limiting Container compute resources The following section discusses the creation of a LimitRange acting at Container Level. -A Pod with 04 containers is first created; each container within the Pod has a specific `spec.resource` configuration +A Pod with 04 containers is first created; each container within the Pod has a specific `spec.resource` configuration each container within the pod is handled differently by the LimitRanger admission controller. - Create a namespace `limitrange-demo` using the following kubectl command +Create a namespace `limitrange-demo` using the following kubectl command: ```shell kubectl create namespace limitrange-demo ``` -To avoid passing the target limitrange-demo in your kubectl commands, change your context with the following command +To avoid passing the target limitrange-demo in your kubectl commands, change your context with the following command: ```shell kubectl config set-context --current --namespace=limitrange-demo @@ -77,16 +77,15 @@ Here is the configuration file for a LimitRange object: This object defines minimum and maximum Memory/CPU limits, default cpu/Memory requests and default limits for CPU/Memory resources to be apply to containers. -Create the `limit-mem-cpu-per-container` LimitRange in the `limitrange-demo` namespace with the following kubectl command. 
+Create the `limit-mem-cpu-per-container` LimitRange in the `limitrange-demo` namespace with the following kubectl command: + ```shell kubectl create -f https://k8s.io/examples/admin/resource/limit-mem-cpu-container.yaml -n limitrange-demo ``` - ```shell - kubectl describe limitrange/limit-mem-cpu-per-container -n limitrange-demo - ``` - +kubectl describe limitrange/limit-mem-cpu-per-container -n limitrange-demo +``` ```shell Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio @@ -95,21 +94,20 @@ Container cpu 100m 800m 110m 700m - Container memory 99Mi 1Gi 111Mi 900Mi - ``` - - Here is the configuration file for a Pod with 04 containers to demonstrate LimitRange features : {{< codenew file="admin/resource/limit-range-pod-1.yaml" >}} -Create the `busybox1` Pod : +Create the `busybox1` Pod: ```shell kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-1.yaml -n limitrange-demo ``` -### Container spec with valid CPU/Memory requests and limits -View the `busybox-cnt01` resource configuration +### Container spec with valid CPU/Memory requests and limits + +View the `busybox-cnt01` resource configuration: -```shell +```shell kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[0].resources" ``` @@ -127,7 +125,7 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[0].re ``` - The `busybox-cnt01` Container inside `busybox` Pod defined `requests.cpu=100m` and `requests.memory=100Mi`. -- `100m <= 500m <= 800m` , The container cpu limit (500m) falls inside the authorized CPU limit range. +- `100m <= 500m <= 800m` , The container cpu limit (500m) falls inside the authorized CPU limit range. - `99Mi <= 200Mi <= 1Gi` , The container memory limit (200Mi) falls inside the authorized Memory limit range. - No request/limits ratio validation for CPU/Memory , thus the container is valid and created. @@ -136,7 +134,7 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[0].re View the `busybox-cnt02` resource configuration -```shell +```shell kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[1].resources" ``` @@ -154,7 +152,7 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[1].re ``` - The `busybox-cnt02` Container inside `busybox1` Pod defined `requests.cpu=100m` and `requests.memory=100Mi` but not limits for cpu and memory. - The container do not have a limits section, the default limits defined in the limit-mem-cpu-per-container LimitRange object are injected to this container `limits.cpu=700mi` and `limits.memory=900Mi`. -- `100m <= 700m <= 800m` , The container cpu limit (700m) falls inside the authorized CPU limit range. +- `100m <= 700m <= 800m` , The container cpu limit (700m) falls inside the authorized CPU limit range. - `99Mi <= 900Mi <= 1Gi` , The container memory limit (900Mi) falls inside the authorized Memory limit range. - No request/limits ratio set , thus the container is valid and created. 
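Before moving on to the remaining containers, here is a sketch of a `LimitRange` manifest that reproduces the `limit-mem-cpu-per-container` values shown in the `kubectl describe` output above; the published `admin/resource/limit-mem-cpu-container.yaml` may be laid out differently, so treat this as an illustration only.

```shell
# Sketch of a Container-level LimitRange matching the values described above:
# CPU between 100m and 800m, memory between 99Mi and 1Gi, with default
# requests (110m/111Mi) and default limits (700m/900Mi) injected when absent.
kubectl apply -n limitrange-demo -f - <<EOF
apiVersion: v1
kind: LimitRange
metadata:
  name: limit-mem-cpu-per-container
spec:
  limits:
  - type: Container
    min:
      cpu: 100m
      memory: 99Mi
    max:
      cpu: 800m
      memory: 1Gi
    defaultRequest:
      cpu: 110m
      memory: 111Mi
    default:
      cpu: 700m
      memory: 900Mi
EOF
```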
@@ -162,10 +160,10 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[1].re ### Container spec with a valid CPU/Memory limits but no requests View the `busybox-cnt03` resource configuration -```shell +```shell kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[2].resources" ``` -```json +```json { "limits": { "cpu": "500m", @@ -180,18 +178,19 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[2].re - The `busybox-cnt03` Container inside `busybox1` Pod defined `limits.cpu=500m` and `limits.memory=200Mi` but no `requests` for cpu and memory. - The container do not define a request section, the defaultRequest defined in the limit-mem-cpu-per-container LimitRange is not used to fill its limits section but the limits defined by the container are set as requests `limits.cpu=500m` and `limits.memory=200Mi`. -- `100m <= 500m <= 800m` , The container cpu limit (500m) falls inside the authorized CPU limit range. -- `99Mi <= 200Mi <= 1Gi` , The container memory limit (200Mi) falls inside the authorized Memory limit range. +- `100m <= 500m <= 800m` , The container cpu limit (500m) falls inside the authorized CPU limit range. +- `99Mi <= 200Mi <= 1Gi` , The container memory limit (200Mi) falls inside the authorized Memory limit range. - No request/limits ratio set , thus the container is valid and created. +### Container spec with no CPU/Memory requests/limits +View the `busybox-cnt04` resource configuration: -### Container spec with no CPU/Memory requests/limits -View the `busybox-cnt04` resource configuration -```shell +```shell kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[3].resources" ``` -```json + +```json { "limits": { "cpu": "700m", @@ -205,29 +204,34 @@ kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[3].re ``` - The `busybox-cnt04` Container inside `busybox1` define neither `limits` nor `requests`. -- The container do not define a limit section, the default limit defined in the limit-mem-cpu-per-container LimitRange is used to fill its request +- The container do not define a limit section, the default limit defined in the limit-mem-cpu-per-container LimitRange is used to fill its request `limits.cpu=700m and` `limits.memory=900Mi` . - The container do not define a request section, the defaultRequest defined in the limit-mem-cpu-per-container LimitRange is used to fill its request section requests.cpu=110m and requests.memory=111Mi -- `100m <= 700m <= 800m` , The container cpu limit (700m) falls inside the authorized CPU limit range. +- `100m <= 700m <= 800m` , The container cpu limit (700m) falls inside the authorized CPU limit range. - `99Mi <= 900Mi <= 1Gi` , The container memory limit (900Mi) falls inside the authorized Memory limitrange . - No request/limits ratio set , thus the container is valid and created. All containers defined in the `busybox` Pod passed LimitRange validations, this the Pod is valid and create in the namespace. -## Limiting Pod compute resources +## Limiting Pod compute resources + The following section discusses how to constrain resources at Pod level. 
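A Pod-level constraint follows the same pattern but uses `type: Pod`, so the limit applies to the sum of all containers in a Pod. The manifest below is a sketch matching the 2 CPU / 2Gi ceiling described in this section, not necessarily the exact contents of `admin/resource/limit-mem-cpu-pod.yaml`.

```shell
# Sketch of a Pod-level LimitRange: the combined limits of all containers in a
# Pod may not exceed 2 CPU and 2Gi of memory.
kubectl apply -n limitrange-demo -f - <<EOF
apiVersion: v1
kind: LimitRange
metadata:
  name: limit-mem-cpu-per-pod
spec:
  limits:
  - type: Pod
    max:
      cpu: "2"
      memory: 2Gi
EOF
```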
{{< codenew file="admin/resource/limit-mem-cpu-pod.yaml" >}} -Without having to delete `busybox1` Pod, create the `limit-mem-cpu-pod` LimitRange in the `limitrange-demo` namespace +Without having to delete `busybox1` Pod, create the `limit-mem-cpu-pod` LimitRange in the `limitrange-demo` namespace: + ```shell kubectl apply -f https://k8s.io/examples/admin/resource/limit-mem-cpu-pod.yaml -n limitrange-demo ``` -The limitrange is created and limits CPU to 2 Core and Memory to 2Gi per Pod. -```shell +The limitrange is created and limits CPU to 2 Core and Memory to 2Gi per Pod: + +```shell limitrange/limit-mem-cpu-per-pod created ``` -Describe the `limit-mem-cpu-per-pod` limit object using the following kubectl command + +Describe the `limit-mem-cpu-per-pod` limit object using the following kubectl command: + ```shell kubectl describe limitrange/limit-mem-cpu-per-pod ``` @@ -239,51 +243,56 @@ Type Resource Min Max Default Request Default Limit Max Limit/Reques ---- -------- --- --- --------------- ------------- ----------------------- Pod cpu - 2 - - - Pod memory - 2Gi - - - -``` -Now create the `busybox2` Pod. +``` + +Now create the `busybox2` Pod: {{< codenew file="admin/resource/limit-range-pod-2.yaml" >}} ```shell kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-2.yaml -n limitrange-demo ``` -The `busybox2` Pod definition is identical to `busybox1` but an error is reported since Pod's resources are now limited + +The `busybox2` Pod definition is identical to `busybox1` but an error is reported since Pod's resources are now limited: + ```shell Error from server (Forbidden): error when creating "limit-range-pod-2.yaml": pods "busybox2" is forbidden: [maximum cpu usage per Pod is 2, but limit is 2400m., maximum memory usage per Pod is 2Gi, but limit is 2306867200.] ``` ```shell -kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[].resources.limits.memory" +kubectl get po/busybox1 -n limitrange-demo -o json | jq ".spec.containers[].resources.limits.memory" "200Mi" "900Mi" "200Mi" "900Mi" ``` -`busybox2` Pod will not be admitted on the cluster since the total memory limit of its container is greater than the limit defined in the LimitRange. -`busybox1` will not be evicted since it was created and admitted on the cluster before the LimitRange creation. +`busybox2` Pod will not be admitted on the cluster since the total memory limit of its container is greater than the limit defined in the LimitRange. +`busybox1` will not be evicted since it was created and admitted on the cluster before the LimitRange creation. ## Limiting Storage resources -You can enforce minimum and maximum size of [storage resources](/docs/concepts/storage/persistent-volumes/) that can be requested by each PersistentVolumeClaim in a namespace using a LimitRange. +You can enforce minimum and maximum size of [storage resources](/docs/concepts/storage/persistent-volumes/) that can be requested by each PersistentVolumeClaim in a namespace using a LimitRange: {{< codenew file="admin/resource/storagelimits.yaml" >}} -Apply the YAML using `kubectl create`. 
+Apply the YAML using `kubectl create`: ```shell -kubectl create -f https://k8s.io/examples/admin/resource/storagelimits.yaml -n limitrange-demo +kubectl create -f https://k8s.io/examples/admin/resource/storagelimits.yaml -n limitrange-demo ``` ```shell limitrange/storagelimits created ``` -Describe the created object, + +Describe the created object: ```shell -kubectl describe limits/storagelimits +kubectl describe limits/storagelimits ``` -the output should look like + +The output should look like: ```shell Name: storagelimits @@ -297,31 +306,31 @@ PersistentVolumeClaim storage 1Gi 2Gi - - - ```shell kubectl create -f https://k8s.io/examples/admin/resource//pvc-limit-lower.yaml -n limitrange-demo -``` +``` -While creating a PVC with `requests.storage` lower than the Min value in the LimitRange, an Error thrown by the server +While creating a PVC with `requests.storage` lower than the Min value in the LimitRange, an Error thrown by the server: ```shell Error from server (Forbidden): error when creating "pvc-limit-lower.yaml": persistentvolumeclaims "pvc-limit-lower" is forbidden: minimum storage usage per PersistentVolumeClaim is 1Gi, but request is 500Mi. ``` -Same behaviour is noted if the `requests.storage` is greater than the Max value in the LimitRange +Same behaviour is noted if the `requests.storage` is greater than the Max value in the LimitRange: {{< codenew file="admin/resource/pvc-limit-greater.yaml" >}} ```shell kubectl create -f https://k8s.io/examples/admin/resource/pvc-limit-greater.yaml -n limitrange-demo -``` +``` ```shell Error from server (Forbidden): error when creating "pvc-limit-greater.yaml": persistentvolumeclaims "pvc-limit-greater" is forbidden: maximum storage usage per PersistentVolumeClaim is 2Gi, but request is 5Gi. ``` -## Limits/Requests Ratio +## Limits/Requests Ratio If `LimitRangeItem.maxLimitRequestRatio` if specified in th `LimitRangeSpec`, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value - the following `LimitRange` enforces memory limit to be at most twice the amount of the memory request for any pod in the namespace. +The following `LimitRange` enforces memory limit to be at most twice the amount of the memory request for any pod in the namespace. {{< codenew file="admin/resource/limit-memory-ratio-pod.yaml" >}} @@ -335,7 +344,7 @@ Describe the LimitRange with the following kubectl comm $ kubectl describe limitrange/limit-memory-ratio-pod ``` -```shell +```shell Name: limit-memory-ratio-pod Namespace: limitrange-demo Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio @@ -343,30 +352,28 @@ Type Resource Min Max Default Request Default Limit Max Limit/Reques Pod memory - - - - 2 ``` +Let's create a pod with `requests.memory=100Mi` and `limits.memory=300Mi`: -Let's create a pod with `requests.memory=100Mi` and `limits.memory=300Mi` {{< codenew file="admin/resource/limit-range-pod-3.yaml" >}} - ```shell kubectl apply -f https://k8s.io/examples/admin/resource/limit-range-pod-3.yaml ``` The pod creation failed as the ratio here (`3`) is greater than the enforced limit (`2`) in `limit-memory-ratio-pod` LimitRange - ```shell Error from server (Forbidden): error when creating "limit-range-pod-3.yaml": pods "busybox3" is forbidden: memory max limit to request ratio per Pod is 2, but provided ratio is 3.000000. 
``` +### Clean up + +Delete the `limitrange-demo` namespace to free all resources: -### Clean up -Delete the `limitrange-demo` namespace to free all resources ```shell kubectl delete ns limitrange-demo ``` - ## Examples - See [a tutorial on how to limit compute resources per namespace](/docs/tasks/administer-cluster/manage-resources/cpu-constraint-namespace/) . diff --git a/content/en/docs/concepts/services-networking/connect-applications-service.md b/content/en/docs/concepts/services-networking/connect-applications-service.md index 96d7cc195b1f0..4a1c2b1e8c9b8 100644 --- a/content/en/docs/concepts/services-networking/connect-applications-service.md +++ b/content/en/docs/concepts/services-networking/connect-applications-service.md @@ -136,8 +136,8 @@ and DNS. The former works out of the box while the latter requires the [CoreDNS cluster addon](http://releases.k8s.io/{{< param "githubbranch" >}}/cluster/addons/dns/coredns). {{< note >}} If the service environment variables are not desired (because possible clashing with expected program ones, -too many variables to process, only using DNS, etc) you can disable this mode by setting the `enableServiceLinks` -flag to `false` on the [pod spec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core). +too many variables to process, only using DNS, etc) you can disable this mode by setting the `enableServiceLinks` +flag to `false` on the [pod spec](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#pod-v1-core). {{< /note >}} @@ -254,9 +254,9 @@ nginxsecret Opaque 2 1m Following are the manual steps to follow in case you run into problems running make (on windows for example): ```shell -#create a public private key pair +# Create a public private key pair openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /d/tmp/nginx.key -out /d/tmp/nginx.crt -subj "/CN=my-nginx/O=my-nginx" -#convert the keys to base64 encoding +# Convert the keys to base64 encoding cat /d/tmp/nginx.crt | base64 cat /d/tmp/nginx.key | base64 ``` diff --git a/content/en/docs/concepts/workloads/controllers/deployment.md b/content/en/docs/concepts/workloads/controllers/deployment.md index a428b84819e48..975131b996c10 100644 --- a/content/en/docs/concepts/workloads/controllers/deployment.md +++ b/content/en/docs/concepts/workloads/controllers/deployment.md @@ -249,7 +249,7 @@ up to 3 replicas, as well as scaling down the old ReplicaSet to 0 replicas. ```shell kubectl describe deployments ``` - The output is similar to this: + The output is similar to this: ``` Name: nginx-deployment Namespace: default @@ -407,12 +407,12 @@ rolled back. Kubernetes by default sets the value to 25%. {{< /note >}} -* Get the description of the Deployment: +* Get the description of the Deployment: ```shell kubectl describe deployment ``` - The output is similar to this: + The output is similar to this: ``` Name: nginx-deployment Namespace: default @@ -441,7 +441,7 @@ rolled back. 
OldReplicaSets: nginx-deployment-1564180365 (3/3 replicas created) NewReplicaSet: nginx-deployment-3066724191 (1/1 replicas created) Events: - FirstSeen LastSeen Count From SubobjectPath Type Reason Message + FirstSeen LastSeen Count From SubObjectPath Type Reason Message --------- -------- ----- ---- ------------- -------- ------ ------- 1m 1m 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set nginx-deployment-2035384211 to 3 22s 22s 1 {deployment-controller } Normal ScalingReplicaSet Scaled up replica set nginx-deployment-1564180365 to 1 @@ -459,11 +459,11 @@ rolled back. Follow the steps given below to check the rollout history: -1. First, check the revisions of this Deployment: +1. First, check the revisions of this Deployment: ```shell kubectl rollout history deployment.v1.apps/nginx-deployment ``` - The output is similar to this: + The output is similar to this: ``` deployments "nginx-deployment" REVISION CHANGE-CAUSE @@ -483,7 +483,7 @@ Follow the steps given below to check the rollout history: kubectl rollout history deployment.v1.apps/nginx-deployment --revision=2 ``` - The output is similar to this: + The output is similar to this: ``` deployments "nginx-deployment" revision 2 Labels: app=nginx @@ -508,7 +508,7 @@ Follow the steps given below to rollback the Deployment from the current version kubectl rollout undo deployment.v1.apps/nginx-deployment ``` - The output is similar to this: + The output is similar to this: ``` deployment.apps/nginx-deployment ``` @@ -518,7 +518,7 @@ Follow the steps given below to rollback the Deployment from the current version kubectl rollout undo deployment.v1.apps/nginx-deployment --to-revision=2 ``` - The output is similar to this: + The output is similar to this: ``` deployment.apps/nginx-deployment ``` @@ -533,7 +533,7 @@ Follow the steps given below to rollback the Deployment from the current version kubectl get deployment nginx-deployment ``` - The output is similar to this: + The output is similar to this: ``` NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 3 3 3 3 30m @@ -542,7 +542,7 @@ Follow the steps given below to rollback the Deployment from the current version ```shell kubectl describe deployment nginx-deployment ``` - The output is similar to this: + The output is similar to this: ``` Name: nginx-deployment Namespace: default @@ -662,13 +662,13 @@ ReplicaSet with the most replicas. ReplicaSets with zero replicas are not scaled In our example above, 3 replicas are added to the old ReplicaSet and 2 replicas are added to the new ReplicaSet. The rollout process should eventually move all replicas to the new ReplicaSet, assuming -the new replicas become healthy. To confirm this, run: +the new replicas become healthy. To confirm this, run: ```shell kubectl get deploy ``` -The output is similar to this: +The output is similar to this: ``` NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE nginx-deployment 15 18 7 8 7m diff --git a/content/en/docs/contribute/intermediate.md b/content/en/docs/contribute/intermediate.md index e114250e57f95..2da6ccf5232d5 100644 --- a/content/en/docs/contribute/intermediate.md +++ b/content/en/docs/contribute/intermediate.md @@ -324,38 +324,43 @@ graphical Git client instead. You only need to clone the repository once per physical system where you work on the Kubernetes documentation. -1. In a terminal window, use `git clone` to clone the repository. You do not - need any credentials to clone the repository. +1. Create a fork of the `kubernetes/website` repository on GitHub. 
In your + web browser, go to + [https://github.com/kubernetes/website](https://github.com/kubernetes/website) + and click the **Fork** button. After a few seconds, you are redirected to + the URL for your fork, which is `https://github.com//website`. + +2. In a terminal window, use `git clone` to clone the your fork. ```bash - git clone https://github.com/kubernetes/website + git clone git@github.com//website ``` The new directory `website` is created in your current directory, with - the contents of the GitHub repository. + the contents of your GitHub repository. Your fork is your `origin`. -2. Change to the new `website` directory. Rename the default `origin` remote - to `upstream`. +3. Change to the new `website` directory. Set the `kubernetes/website` repository as the `upstream` remote. ```bash cd website - git remote rename origin upstream + git remote add upstream https://github.com/kubernetes/website.git ``` -3. If you have not done so, create a fork of the repository on GitHub. In your - web browser, go to - [https://github.com/kubernetes/website](https://github.com/kubernetes/website) - and click the **Fork** button. After a few seconds, you are redirected to - the URL for your fork, which is typically something like - `https://github.com//website` unless you already had a repository - called `website`. Copy this URL. +4. Confirm your `origin` and `upstream` repositories. -4. Add your fork as a second remote, called `origin`: + ```bash + git remote -v + ``` - ```bash - git remote add origin - ``` + Output is similar to: + + ```bash + origin git@github.com:/website.git (fetch) + origin git@github.com:/website.git (push) + upstream https://github.com/kubernetes/website (fetch) + upstream https://github.com/kubernetes/website (push) + ``` ### Work on the local repository @@ -379,25 +384,32 @@ After you decide which branch to start your work (or _base it on_, in Git terminology), use the following workflow to be sure your work is based on the most up-to-date version of that branch. -1. Fetch both the `upstream` and `origin` remotes. This updates your local - notion of what those branches contain, but does not change your local - branches at all. +1. There are three different copies of the repository when you work locally: + `local`, `upstream`, and `origin`. Fetch both the `origin` and `upstream` remotes. This + updates your cache of the remotes without actually changing any of the copies. ```bash - git fetch upstream git fetch origin + git fetch upstream ``` -2. Create a new tracking branch based on the branch you decided is the most - appropriate. This example assumes you are using `master`. + This workflow deviates from the one defined in the Community's [GitHub + Workflow](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md). + In this workflow, you do not need to merge your local copy of `master` with `upstream/master` before + pushing the updates to your fork. That step is not required in + `kubernetes/website` because you are basing your branch on the upstream repository. + +2. Create a local working branch based on the most appropriate upstream branch: + `upstream/dev-1.xx` for feature developers or `upstream/master` for all other + contributors. This example assumes you are basing your work on + `upstream/master`. Because you didn't update your local `master` to match + `upstream/master` in the previous step, you need to explicitly create your + branch off of `upstream/master`. 
```bash git checkout -b upstream/master ``` - This new branch is based on `upstream/master`, not your local `master`. - It tracks `upstream/master`. - 3. With your new branch checked out, make your changes using a text editor. At any time, use the `git status` command to see what you've changed. @@ -412,7 +424,7 @@ most up-to-date version of that branch. git add example-file.md ``` - When all your intended changes are included, create a commit, using the + When all your intended changes are included, create a commit using the `git commit` command: ```bash @@ -423,7 +435,7 @@ most up-to-date version of that branch. Do not reference a GitHub issue or pull request by ID or URL in the commit message. If you do, it will cause that issue or pull request to get a notification every time the commit shows up in a new Git branch. You can - link issues and pull requests together later, in the GitHub UI. + link issues and pull requests together later in the GitHub UI. {{< /note >}} 5. Optionally, you can test your change by staging the site locally using the @@ -443,9 +455,9 @@ most up-to-date version of that branch. the behavior in that case depends upon the version of Git you are using. The results are more repeatable if you include the branch name. -7. At this point, if you go to https://github.com/kubernetes/website in your - web browser, GitHub detects that you pushed a new branch to your fork and - offers to create a pull request. Fill in the pull request template. +7. Go to https://github.com/kubernetes/website in your web browser. GitHub + detects that you pushed a new branch to your fork and offers to create a pull + request. Fill in the pull request template. - The title should be no more than 50 characters and summarize the intent of the change. @@ -465,10 +477,25 @@ most up-to-date version of that branch. **Details** link goes to a staged version of the Kubernetes website with your changes applied. This is how reviewers will check your changes. -9. If you notice that more changes need to be made, or if reviewers give you - feedback, address the feedback locally, then repeat step 4 - 6 again, - creating a new commit. The new commit is added to your pull request and the - tests run again, including re-staging the Netlify staged site. +9. When you need to make more changes, address the feedback locally and amend + your original commit. + + ```bash + git commit -a --amend + ``` + + - `-a`: commit all changes + - `--amend`: amend the previous commit, rather than creating a new one + + An editor will open so you can update your commit message if necessary. + + If you use `git commit -m` as in Step 4, you will create a new commit rather + than amending changes to your original commit. Creating a new commit means + you must squash your commits before your pull request can be merged. + + Follow the instructions in Step 6 to push your commit. The commit is added + to your pull request and the tests run again, including re-staging the + Netlify staged site. 10. If a reviewer adds changes to your pull request, you need to fetch those changes from your fork before you can add more changes. Use the following @@ -479,11 +506,11 @@ most up-to-date version of that branch. git rebase origin/ ``` - After rebasing, you need to add the `-f` flag to force-push new changes to - the branch to your fork. + After rebasing, you need to add the `--force-with-lease` flag to + force push the branch's new changes to your fork. ```bash - git push -f origin + git push --force-with-lease origin ``` 11. 
If someone else's change is merged into the branch your work is based on, @@ -513,6 +540,52 @@ most up-to-date version of that branch. the branch to your fork, and the pull request should no longer show any conflicts. +12. If your PR still has multiple commits after amending previous commits, you + must squash multiple commits into a single commit before your PR can be merged. + You can check the number of commits on your PR's `Commits` tab or by running + `git log` locally. Squashing commits is a form of rebasing. + + ```bash + git rebase -i HEAD~ + ``` + + The `-i` switch tells git you want to rebase interactively. This enables + you to tell git which commits to squash into the first one. For + example, you have 3 commits on your branch: + + ``` + 12345 commit 4 (2 minutes ago) + 6789d commit 3 (30 minutes ago) + 456df commit 2 (1 day ago) + ``` + + You must squash your last three commits into the first one. + + ``` + git rebase -i HEAD~3 + ``` + + That command opens an editor with the following: + + ``` + pick 456df commit 2 + pick 6789d commit 3 + pick 12345 commit 4 + ``` + + Change `pick` to `squash` on the commits you want to squash, and make sure + the one `pick` commit is at the top of the editor. + + ``` + pick 456df commit 2 + squash 6789d commit 3 + squash 12345 commit 4 + ``` + + Save and close your editor. Then push your squashed + commit with `git push --force-with-lease origin `. + + If you're having trouble resolving conflicts or you get stuck with anything else related to your pull request, ask for help on the `#sig-docs` Slack channel or the diff --git a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md index 019ae48a3d99c..5b774cf222a0b 100644 --- a/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md +++ b/content/en/docs/reference/access-authn-authz/extensible-admission-controllers.md @@ -184,21 +184,21 @@ the webhooks. There are three steps to complete the configuration. (yes, the same schema that's used by kubectl), so the field name is `kubeConfigFile`. Here is an example admission control configuration file: -```yaml -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: ValidatingAdmissionWebhook - configuration: - apiVersion: apiserver.config.k8s.io/v1alpha1 - kind: WebhookAdmission - kubeConfigFile: "" -- name: MutatingAdmissionWebhook - configuration: - apiVersion: apiserver.config.k8s.io/v1alpha1 - kind: WebhookAdmission - kubeConfigFile: "" -``` + ```yaml + apiVersion: apiserver.k8s.io/v1alpha1 + kind: AdmissionConfiguration + plugins: + - name: ValidatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1alpha1 + kind: WebhookAdmission + kubeConfigFile: "" + - name: MutatingAdmissionWebhook + configuration: + apiVersion: apiserver.config.k8s.io/v1alpha1 + kind: WebhookAdmission + kubeConfigFile: "" + ``` The schema of `admissionConfiguration` is defined [here](https://github.com/kubernetes/kubernetes/blob/v1.13.0/staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go#L27). @@ -206,51 +206,51 @@ See the [webhook configuration](#webhook-configuration) section for details abou * In the kubeConfig file, provide the credentials: -```yaml -apiVersion: v1 -kind: Config -users: -# name should be set to the DNS name of the service or the host (including port) of the URL the webhook is configured to speak to. 
-# If a non-443 port is used for services, it must be included in the name when configuring 1.16+ API servers. -# -# For a webhook configured to speak to a service on the default port (443), specify the DNS name of the service: -# - name: webhook1.ns1.svc -# user: ... -# -# For a webhook configured to speak to a service on non-default port (e.g. 8443), specify the DNS name and port of the service in 1.16+: -# - name: webhook1.ns1.svc:8443 -# user: ... -# and optionally create a second stanza using only the DNS name of the service for compatibility with 1.15 API servers: -# - name: webhook1.ns1.svc -# user: ... -# -# For webhooks configured to speak to a URL, match the host (and port) specified in the webhook's URL. Examples: -# A webhook with `url: https://www.example.com`: -# - name: www.example.com -# user: ... -# -# A webhook with `url: https://www.example.com:443`: -# - name: www.example.com:443 -# user: ... -# -# A webhook with `url: https://www.example.com:8443`: -# - name: www.example.com:8443 -# user: ... -# -- name: 'webhook1.ns1.svc' - user: - client-certificate-data: "" - client-key-data: "" -# The `name` supports using * to wildcard-match prefixing segments. -- name: '*.webhook-company.org' - user: - password: "" - username: "" -# '*' is the default match. -- name: '*' - user: - token: "" -``` + ```yaml + apiVersion: v1 + kind: Config + users: + # name should be set to the DNS name of the service or the host (including port) of the URL the webhook is configured to speak to. + # If a non-443 port is used for services, it must be included in the name when configuring 1.16+ API servers. + # + # For a webhook configured to speak to a service on the default port (443), specify the DNS name of the service: + # - name: webhook1.ns1.svc + # user: ... + # + # For a webhook configured to speak to a service on non-default port (e.g. 8443), specify the DNS name and port of the service in 1.16+: + # - name: webhook1.ns1.svc:8443 + # user: ... + # and optionally create a second stanza using only the DNS name of the service for compatibility with 1.15 API servers: + # - name: webhook1.ns1.svc + # user: ... + # + # For webhooks configured to speak to a URL, match the host (and port) specified in the webhook's URL. Examples: + # A webhook with `url: https://www.example.com`: + # - name: www.example.com + # user: ... + # + # A webhook with `url: https://www.example.com:443`: + # - name: www.example.com:443 + # user: ... + # + # A webhook with `url: https://www.example.com:8443`: + # - name: www.example.com:8443 + # user: ... + # + - name: 'webhook1.ns1.svc' + user: + client-certificate-data: "" + client-key-data: "" + # The `name` supports using * to wildcard-match prefixing segments. + - name: '*.webhook-company.org' + user: + password: "" + username: "" + # '*' is the default match. + - name: '*' + user: + token: "" + ``` Of course you need to set up the webhook server to handle these authentications. 
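To complete the wiring, the API server must also be started pointing at the admission configuration file above. A minimal sketch follows; the flag names are real kube-apiserver flags, while the file path is only an assumed location:

```shell
# Sketch: enable the webhook admission plugins and reference the
# AdmissionConfiguration shown above (the path is an assumption).
kube-apiserver \
  --enable-admission-plugins=MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
  --admission-control-config-file=/etc/kubernetes/admission-control-config.yaml
```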
@@ -766,7 +766,7 @@ Use the object selector only if the webhook is opt-in, because end users may ski This example shows a mutating webhook that would match a `CREATE` of any resource with the label `foo: bar`: -{{< tabs name="ValidatingWebhookConfiguration_example_1" >}} +{{< tabs name="objectSelector_example" >}} {{% tab name="admissionregistration.k8s.io/v1" %}} ```yaml apiVersion: admissionregistration.k8s.io/v1 diff --git a/content/en/docs/reference/command-line-tools-reference/kubelet.md b/content/en/docs/reference/command-line-tools-reference/kubelet.md index b9191494d968b..859e5a0beb00b 100644 --- a/content/en/docs/reference/command-line-tools-reference/kubelet.md +++ b/content/en/docs/reference/command-line-tools-reference/kubelet.md @@ -825,8 +825,14 @@ kubelet [flags] --maximum-dead-containers int32 - Maximum number of old instances of containers to retain globally. Each container takes up some disk space. To disable, set to a negative number. (default -1) (DEPRECATED: Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.) - --maximum-dead-containers-per-container int32 Maximum number of old instances to retain per container. Each container takes up some disk space. (default 1) (DEPRECATED: Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.) + Maximum number of old instances of containers to retain globally. Each container takes up some disk space. To disable, set to a negative number. (default -1) (DEPRECATED: Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.) + + + + --maximum-dead-containers-per-container int32 + + + Maximum number of old instances to retain per container. Each container takes up some disk space. (default 1) (DEPRECATED: Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.) diff --git a/content/en/docs/setup/production-environment/container-runtimes.md b/content/en/docs/setup/production-environment/container-runtimes.md index e156df730cd8d..a721d9ee1dfcd 100644 --- a/content/en/docs/setup/production-environment/container-runtimes.md +++ b/content/en/docs/setup/production-environment/container-runtimes.md @@ -288,7 +288,7 @@ systemctl restart containerd To use the `systemd` cgroup driver, set `plugins.cri.systemd_cgroup = true` in `/etc/containerd/config.toml`. 
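For example, a minimal sketch of making that change in place, assuming the generated default config already contains a `systemd_cgroup` entry (if your config differs, edit the file by hand instead):

```shell
# Sketch: flip the systemd cgroup driver setting for containerd's CRI plugin,
# assuming the key already exists in /etc/containerd/config.toml.
sudo sed -i 's/systemd_cgroup = false/systemd_cgroup = true/' /etc/containerd/config.toml
sudo systemctl restart containerd
```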
When using kubeadm, manually configure the -[cgroup driver for kubelet](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-master-node) +[cgroup driver for kubelet](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#configure-cgroup-driver-used-by-kubelet-on-control-plane-node) ## Other CRI runtimes: frakti diff --git a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md index e965e43614ca0..3b71be6a4e511 100644 --- a/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md +++ b/content/en/docs/setup/production-environment/tools/kubeadm/install-kubeadm.md @@ -66,10 +66,10 @@ switching to legacy mode, and is therefore incompatible with current kubeadm pac {{< tabs name="iptables_legacy" >}} {{% tab name="Debian or Ubuntu" %}} ```bash -update-alternatives --set iptables /usr/sbin/iptables-legacy -update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy -update-alternatives --set arptables /usr/sbin/arptables-legacy -update-alternatives --set ebtables /usr/sbin/ebtables-legacy +sudo update-alternatives --set iptables /usr/sbin/iptables-legacy +sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy +sudo update-alternatives --set arptables /usr/sbin/arptables-legacy +sudo update-alternatives --set ebtables /usr/sbin/ebtables-legacy ``` {{% /tab %}} {{% tab name="Fedora" %}} @@ -177,14 +177,14 @@ For more information on version skews, see: {{< tabs name="k8s_install" >}} {{% tab name="Ubuntu, Debian or HypriotOS" %}} ```bash -apt-get update && apt-get install -y apt-transport-https curl -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -cat </etc/apt/sources.list.d/kubernetes.list +sudo apt-get update && sudo apt-get install -y apt-transport-https curl +curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - +cat <`. On Fedora, edit `/etc/kubernetes/kubelet` to include this line: ``` - KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --manifest-url=` + KUBELET_ARGS="--cluster-dns=10.254.0.10 --cluster-domain=kube.local --manifest-url=" ``` 3. Restart the kubelet. On Fedora, you would run: diff --git a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md index 68ebef2bc2b87..4e2f109bcb708 100644 --- a/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md +++ b/content/en/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough.md @@ -304,7 +304,7 @@ spec: resource: name: cpu target: - type: AverageUtilization + type: Utilization averageUtilization: 50 - type: Pods pods: @@ -322,7 +322,7 @@ spec: kind: Ingress name: main-route target: - kind: Value + type: Value value: 10k status: observedGeneration: 1 diff --git a/content/en/docs/tutorials/hello-minikube.md b/content/en/docs/tutorials/hello-minikube.md index 5dfaac4949c04..e4a5e70451f49 100644 --- a/content/en/docs/tutorials/hello-minikube.md +++ b/content/en/docs/tutorials/hello-minikube.md @@ -16,7 +16,7 @@ card: {{% capture overview %}} This tutorial shows you how to run a simple Hello World Node.js app -on Kubernetes using [Minikube](/docs/getting-started-guides/minikube) and Katacoda. +on Kubernetes using [Minikube](/docs/setup/learning-environment/minikube) and Katacoda. 
Katacoda provides a free, in-browser Kubernetes environment. {{< note >}} diff --git a/content/en/docs/tutorials/kubernetes-basics/_index.html b/content/en/docs/tutorials/kubernetes-basics/_index.html index 4adbf98f192c6..39204e19dee96 100644 --- a/content/en/docs/tutorials/kubernetes-basics/_index.html +++ b/content/en/docs/tutorials/kubernetes-basics/_index.html @@ -14,6 +14,8 @@ + +
diff --git a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html index a203274a6c5a8..2f1b59d83f7c8 100644 --- a/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -9,7 +9,7 @@ - +
diff --git a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 9c7a36916b0b9..8f38960de7d54 100644 --- a/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -9,7 +9,7 @@ - +
diff --git a/content/en/docs/tutorials/kubernetes-basics/public/css/styles.css b/content/en/docs/tutorials/kubernetes-basics/public/css/styles.css index 3f728278a8dd1..3ee982aa1179f 100644 --- a/content/en/docs/tutorials/kubernetes-basics/public/css/styles.css +++ b/content/en/docs/tutorials/kubernetes-basics/public/css/styles.css @@ -5,6 +5,8 @@ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) */ /*! normalize.css v3.0.3 | MIT License | github.com/necolas/normalize.css */ + +/* html { font-family: sans-serif; @@ -57,6 +59,7 @@ template { display: none; } +*/ a { @@ -69,6 +72,7 @@ a:hover outline: 0; } +/* abbr[title] { border-bottom: 1px dotted; @@ -91,7 +95,9 @@ h1 margin: .67em 0; } +*/ +/* mark { color: #000; @@ -123,6 +129,7 @@ sub { bottom: -.25em; } +*/ img { @@ -366,17 +373,22 @@ th } } +/* * { box-sizing: border-box; } +*/ +/* *:before, *:after { box-sizing: border-box; } +*/ +/* html { font-size: 10px; @@ -393,6 +405,7 @@ body color: #333; background-color: #fff; } +*/ input, button, @@ -404,6 +417,7 @@ textarea line-height: inherit; } +/* a { text-decoration: none; @@ -422,6 +436,8 @@ a:focus outline: 5px auto -webkit-focus-ring-color; outline-offset: -2px; } +*/ + figure { @@ -511,6 +527,7 @@ hr cursor: pointer; } +/* h1, h2, h3, @@ -560,7 +577,10 @@ h6 .small, color: #777; } +*/ + +/* h1, .h1, h2, @@ -586,7 +606,9 @@ h3 .small, { font-size: 65%; } +*/ +/* h4, .h4, h5, @@ -612,12 +634,15 @@ h6 .small, { font-size: 75%; } +*/ +/* h1, .h1 { font-size: 36px; } +*/ h2, .h2 @@ -637,6 +662,7 @@ h4, font-size: 18px; } +/* h5, .h5 { @@ -648,6 +674,7 @@ h6, { font-size: 12px; } +*/ p { @@ -853,6 +880,7 @@ a.bg-danger:focus border-bottom: 1px solid #eee; } +/* ul, ol { @@ -888,6 +916,9 @@ ol ol padding-right: 5px; padding-left: 5px; } +*/ + + dl { @@ -5976,7 +6007,7 @@ a.badge:focus { padding: 9px; - color: #333; + /*color: #333;*/ } a.thumbnail:hover, @@ -8769,6 +8800,7 @@ button.close border-top: 1px solid #e5eaf9; } + .content { position: relative; @@ -8788,6 +8820,8 @@ button.close -ms-flex: 1 1 auto; flex: 1 1 auto; } + + @media screen and (max-width: 998px) { .content @@ -9042,6 +9076,7 @@ button.close max-width: 300px; } +/* .header { padding: 20px 20px; @@ -9532,7 +9567,9 @@ button.close display: none; } } +*/ +/* .scrolltop { position: fixed; @@ -9603,17 +9640,16 @@ button.close opacity: 1; } +*/ + +/* body { font-family: Roboto, 'Helvetica Neue', Helvetica, 'Open Sans', Arial, sans-serif; font-size: 15px; line-height: 1.5; - /*display: -webkit-box;*/ - /*display: -ms-flexbox;*/ - /*display: flex;*/ - min-height: 100vh; background: #eee; @@ -9672,6 +9708,8 @@ p a:hover { text-decoration: none; } +*/ + .breadcrumb { @@ -10106,6 +10144,7 @@ p a:hover { display: block; } + @media screen and (max-width: 992px) { .quiz__box diff --git a/content/en/docs/tutorials/kubernetes-basics/update/update-intro.html b/content/en/docs/tutorials/kubernetes-basics/update/update-intro.html index 0febbcee99658..aa6f9b406345e 100644 --- a/content/en/docs/tutorials/kubernetes-basics/update/update-intro.html +++ b/content/en/docs/tutorials/kubernetes-basics/update/update-intro.html @@ -9,8 +9,8 @@ - - + +
diff --git a/content/en/partners/_index.html b/content/en/partners/_index.html index a3da8daa570aa..3e76bffbc60df 100644 --- a/content/en/partners/_index.html +++ b/content/en/partners/_index.html @@ -7,7 +7,7 @@ ---
-
+
Kubernetes works with partners to create a strong, vibrant codebase that supports a spectrum of complementary platforms.
diff --git a/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md b/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md new file mode 100644 index 0000000000000..ab20d640d6dab --- /dev/null +++ b/content/id/docs/concepts/configuration/organize-cluster-access-kubeconfig.md @@ -0,0 +1,165 @@ +--- +title: Mengatur Akses Kluster Menggunakan Berkas kubeconfig +content_template: templates/concept +weight: 60 +--- + +{{% capture overview %}} + +Gunakan berkas kubeconfig untuk mengatur informasi mengenai kluster, pengguna, +_namespace_, dan mekanisme autentikasi. Perintah `kubectl` menggunakan berkas +kubeconfig untuk mencari informasi yang dibutuhkan untuk memilih kluster dan +berkomunikasi dengan API server dari suatu kluster. + +{{< note >}} +Sebuah berkas yang digunakan untuk mengatur akses pada kluster disebut dengan +berkas kubeconfig. Ini cara yang umum digunakan untuk mereferensikan berkas +konfigurasi. Ini tidak berarti ada berkas dengan nama `kubeconfig`. +{{< /note >}} + +Secara _default_, `kubectl` mencari berkas dengan nama `config` pada direktori +`$HOME/.kube`. Kamu bisa mengatur lokasi berkas kubeconfig dengan mengatur +nilai `KUBECONFIG` pada variabel _environment_ atau dengan mengatur menggunakan +tanda [`--kubeconfig`](/docs/reference/generated/kubectl/kubectl/). + +Instruksi langkah demi langkah untuk membuat dan menentukan berkas kubeconfig, +bisa mengacu pada [Mengatur Akses Pada Beberapa Kluster] +(/docs/tasks/access-application-cluster/configure-access-multiple-clusters). + +{{% /capture %}} + + +{{% capture body %}} + +## Mendukung beberapa kluster, pengguna, dan mekanisme autentikasi + +Misalkan kamu memiliki beberapa kluster, pengguna serta komponen dapat melakukan +autentikasi dengan berbagai cara. Sebagai contoh: + +- Kubelet yang berjalan dapat melakukan autentikasi dengan menggunakan sertifikat +- Pengguna bisa melakukan autentikasi dengan menggunakan token +- Administrator bisa memiliki beberapa sertifikat yang diberikan kepada pengguna +individu. + +Dengan berkas kubeconfig, kamu bisa mengatur kluster, pengguna, dan _namespace_. +Kamu juga bisa menentukan konteks untuk mempercepat dan mempermudah perpindahan +antara kluster dan _namespace_. + +## Konteks + +Sebuah elemen konteks pada berkas kubeconfig digunakan untuk mengelompokkan +parameter akses dengan nama yang mudah. Setiap konteks akan memiliki 3 parameter: +kluster, pengguna, dan _namespace_. Secara _default_, perintah `kubectl` menggunakan +parameter dari konteks yang aktif untuk berkomunikasi dengan kluster. + +Untuk memilih konteks yang aktif, bisa menggunakan perintah berikut: +``` +kubectl config use-context +``` + +## Variabel _environment_ KUBECONFIG + +Variabel _environment_ `KUBECONFIG` berisikan beberapa berkas kubeconfig. Untuk +Linux dan Mac, beberapa berkas tersebut dipisahkan dengan tanda titik dua (:). +Untuk Windows, dipisahkan dengan menggunakan tanda titik koma (;). Variabel +_environment_ `KUBECONFIG` tidak diwajibkan untuk ada. Jika variabel _environment_ +`KUBECONFIG` tidak ada, maka `kubectl` akan menggunakan berkas kubeconfig pada +`$HOME/.kube/config`. + +Jika variabel _environment_ `KUBECONFIG` ternyata ada, maka `kubectl` akan menggunakan +konfigurasi yang merupakan hasil gabungan dari berkas-berkas yang terdapat pada +variabel _environment_ `KUBECONFIG`. 
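Sebagai contoh, berikut sketsa singkat penggunaan variabel _environment_ `KUBECONFIG` pada Linux atau Mac (nama dan lokasi berkas hanyalah asumsi untuk ilustrasi):

```shell
# Sketsa: dua berkas kubeconfig digabungkan; gunakan tanda titik dua (:) sebagai pemisah.
export KUBECONFIG="$HOME/.kube/config:$HOME/.kube/config-demo"
# Menampilkan konfigurasi hasil gabungan dari kedua berkas tersebut
kubectl config view
```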
+ +## Menggabungkan berkas-berkas kubeconfig + +Untuk melihat konfigurasimu, gunakan perintah berikut ini: + +```shell +kubectl config view +``` + +Seperti yang dijelaskan sebelumnya, hasil perintah diatas bisa berasal dari sebuah +berkas kubeconfig, atau bisa juga merupakan hasil gabungan dari beberapa berkas kubeconfig. + +Berikut adalah aturan yang digunakan `kubectl` ketika menggabungkan beberapa berkas +kubeconfig: + +1. Jika menggunakan tanda `--kubeconfig`, maka akan menggunakan berkas yang ditentukan. + Tidak digabungkan. Hanya 1 tanda `--kubeconfig` yang diperbolehkan. + + Sebaliknya, jika variabel _environment_ `KUBECONFIG` digunakan, maka akan menggunakan + ini sebagai berkas-berkas yang akan digabungkan. Penggabungan berkas-berkas yang terdapat + pada variabel _environment_ `KUBECONFIG` akan mengikuti aturan sebagai berikut: + + * Mengabaikan berkas tanpa nama. + * Mengeluarkan pesan kesalahan untuk berkas dengan isi yang tidak dapat dideserialisasi. + * Berkas pertama yang menentukan nilai atau _key_ pada _map_ maka akan digunakan + pada _map_ tersebut. + * Tidak pernah mengubah nilai atau _key_ dari suatu _map_. + Contoh: Pertahankan konteks pada berkas pertama yang mengatur `current-context`. + Contoh: Jika terdapat dua berkas yang menentukan nilai `red-user`, maka hanya gunakan + nilai `red-user` dari berkas pertama. + Meskipun berkas kedua tidak memiliki entri yang bertentangan pada `red-user`, + abaikan mereka. + + Beberapa contoh pengaturan variabel _environment_ `KUBECONFIG`, bisa melihat pada + [pengaturan vaiabel _environment_ KUBECONFIG](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable). + + Sebaliknya, bisa menggunakan berkas kubeconfig _default_, `$HOME/.kube/config`, + tanpa melakukan penggabungan. + +1. Konteks ditentukan oleh yang pertama sesuai dari pilihan berikut: + + 1. Menggunakan tanda `--context` pada perintah + 1. Menggunakan nilai `current-context` dari hasil gabungan berkas kubeconfig. + + Konteks yang kosong masih diperbolehkan pada tahap ini. + +1. Menentukan kluster dan pengguna. Pada tahap ini, mungkin akan ada atau tidak ada konteks. + Menentukan kluster dan pengguna berdasarkan yang pertama sesuai dengan pilihan berikut, + yang mana akan dijalankan dua kali: sekali untuk pengguna dan sekali untuk kluster: + + 1. Jika ada, maka gunakan tanda pada perintah: `--user` atau `--cluster`. + 1. Jika konteks tidak kosong, maka pengguna dan kluster didapat dari konteks. + + Pengguna dan kluster masih diperbolehkan kosong pada tahap ini. + +1. Menentukan informasi kluster sebenarnya yang akan digunakan. Pada tahap ini, mungkin + akan ada atau tidak ada informasi kluster. Membentuk informasi kluster berdasarkan urutan + berikut dan yang pertama sesuai akan digunakan: + + 1. Jika ada, maka gunakan tanda pada perintah: `--server`, `--certificate-authority`, `--insecure-skip-tls-verify`. + 1. Jika terdapat atribut informasi kluster dari hasil gabungan berkas kubeconfig, + maka gunakan itu. + 1. Jika tidak terdapat informasi mengenai lokasi server, maka dianggap gagal. + +1. Menentukan informasi pengguna sebenarnya yang akan digunakan. Membentuk informasi + pengguna dengan aturan yang sama dengan pembentukan informasi kluster, namun hanya + diperbolehkan ada satu teknik autentikasi untuk setiap pengguna: + + 1. Jika ada, gunakan tanda pada perintah: `--client-certificate`, `--client-key`, `--username`, `--password`, `--token`. + 1. Menggunakan _field_ `user` dari hasil gabungan berkas kubeconfig. 
+ 1. Jika terdapat dua teknik yang bertentangan, maka dianggap gagal. + +1. Untuk setiap informasi yang masih belum terisi, akan menggunakan nilai `default` dan + kemungkinan akan meminta informasi autentikasi. + +## Referensi berkas + +Referensi _file_ dan _path_ pada berkas kubeconfig adalah bernilai relatif terhadap +lokasi dari berkas kubeconfig. +Referensi _file_ pada perintah adalah relatif terhadap direktori kerja saat ini. +Dalam `$HOME/.kube/config`, _relative path_ akan disimpan secara relatif, dan +_absolute path_ akan disimpan secara mutlak. + +{{% /capture %}} + + +{{% capture whatsnext %}} + +* [Mengatur Akses Pada Beberapa Kluster](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) +* [`kubectl config`](/docs/reference/generated/kubectl/kubectl-commands#config) + +{{% /capture %}} + + diff --git a/content/id/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md b/content/id/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md new file mode 100644 index 0000000000000..2e1058baf7b7e --- /dev/null +++ b/content/id/docs/concepts/extend-kubernetes/poseidon-firmament-alternate-scheduler.md @@ -0,0 +1,114 @@ +--- +title: Poseidon-Firmament - Sebuah Penjadwal Alternatif +content_template: templates/concept +weight: 80 +--- + +{{% capture overview %}} + +**Rilis saat ini dari Penjadwal Poseidon-Firmament adalah rilis alpha .** + +Penjadwal Poseidon-Firmament adalah penjadwal alternatif yang dapat digunakan bersama penjadwal Kubernetes bawaan. + +{{% /capture %}} + +{{% capture body %}} + + +## Pengenalan + +Poseidon adalah sebuah layanan yang berperan sebagai pemersatu antara [Penjadwal Firmament](https://github.com/Huawei-PaaS/firmament) dengan Kubernetes. Penjadwal Poseidon-Firmament menambah kapabilitas penjadwal Kubernetes saat ini. Penjadwal ini menggabungkan kemampuan penjadwalan berbasis grafik jaringan grafis (_flow network graph_) baru bersama penjadwal Kubernetes bawaan. Penjadwal Firmament memodelkan beban-beban kerja dan kluster-kluster sebagai jaringan aliran dan menjalankan optimisasi aliran biaya-minimum kepada jaringan ini untuk membuat keputusan penjadwalan. + +Penjadwal ini memodelkan masalah penjadwalan sebagai optimasi berbasis batasan atas grafik jaringan aliran. Hal ini dicapai dengan mengurangi penjadwalan ke masalah optimisasi biaya-minimum aliran-maksimum. Penjadwal Poseidon-Firmament secara dinamis memperbaiki penempatan beban kerja. + +Penjadwal Poseidon-Firmament berjalan bersamaan dengan penjadwal Kubernetes bawaan sebagai penjadwal alternatif, sehingga beberapa penjadwal dapat berjalan secara bersamaan. + +## Keuntungan Utama + +### Penjadwalan grafik jaringan (_network graph_) berbasis penjadwalan Poseidon-Firmament memberikan beberapa keuntungan utama sebagai berikut: + +- Beban kerja (Pod) dijadwalkan secara kolektif untuk memungkinkan penjadwalan dalam skala besar. +- Berdasarkan hasil tes kinerja yang ekstensif, skala Poseidon-Firmament jauh lebih baik daripada penjadwal bawaan Kubernetes dilihat dari jumlah node meningkat dalam sebuah kluster. Hal ini disebabkan oleh fakta bahwa Poseidon-Firmament mampu mengamortisasi lebih banyak pekerjaan di seluruh beban kerja. +- Penjadwal Poseidon-Firmament mengungguli penjadwal bawaan Kubernetes dengan margin lebar ketika menyangkut jumlah kinerja _throughput_ untuk skenario di mana kebutuhan sumber daya komputasi agak seragam di seluruh pekerjaan (Replicaset / Deployment / Job). 
Angka kinerja _throughput_ _end-to-end_ penjadwal Poseidon-Firmament , termasuk waktu _bind_, secara konsisten menjadi lebih baik seiring jumlah Node dalam sebuah kluster meningkat. Misalnya, untuk kluster 2.700 Node (ditampilkan dalam grafik [di sini](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/benchmark/README.md)), penjadwal Poseidon-Firmament berhasil mencapai 7X atau lebih _throughput_ _end-to-end_ yang lebih besar dibandingkan dengan penjadwal bawaan Kubernetes, yang mencakup waktu _bind_. +- Tersedianya pembatasan aturan yang kompleks. +- Penjadwalan dalam Poseidon-Firmament bersifat dinamis; ini membuat sumber daya klaster dalam keadaan optimal secara global selama setiap berjalannya penjadwalan. +- Pemanfaatan sumber daya yang sangat efisien. + +## Penjadwal Poseidon-Firmament - Bagaimana cara kerjanya + +Sebagai bagian dari pendukung penjadwal-penjadwal Kubernetes, setiap Pod baru biasanya dijadwalkan oleh penjadwal bawaan. Kubernetes dapat diinstruksikan untuk menggunakan penjadwal lain dengan menentukan nama penjadwal _custom_ lain ("poseidon" dalam kasus ini) di _field_ **schedulerName** dari PodSpec pada saat pembuatan pod. Dalam kasus ini, penjadwal bawaan akan mengabaikan Pod itu dan memungkinkan penjadwal Poseidon untuk menjadwalkan Pod pada Node yang relevan. + +```yaml +apiVersion: v1 +kind: Pod + +... +spec: + schedulerName: poseidon +``` + +{{< note >}} +Untuk detail tentang desain proyek ini, lihat [dokumen desain](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/design/README.md). +{{< /note >}} + +## Kemungkinan Skenario Kasus Penggunaan - Kapan menggunakannya + +Seperti yang disebutkan sebelumnya, penjadwal Poseidon-Firmament memungkinkan lingkungan penjadwalan dengan _throughput_ yang sangat tinggi bahkan pada ukuran kluster dengan beban kerja besar, dikarenakan pendekatan penjadwalannya yang sekaligus dalam jumlah besar, dibandingkan dengan pendekatan bawaan _pod-at-a-time_ Kubernetes. Dalam pengujian ekstensif kami, kami telah mengamati manfaat _throughput_ substansial selama kebutuhan sumber daya (CPU / Memori) untuk Pod yang masuk seragam di seluruh tugas (Replicaset / Deployment / Job), terutama karena amortisasi pekerjaan yang efisien di seluruh tugas. + +Meskipun penjadwal Poseidon-Firmament mampu menjadwalkan berbagai jenis beban kerja, seperti layanan-layanan, _batch_, dll., berikut ini adalah beberapa kasus penggunaan yang paling unggul: + +1. Untuk pekerjaan "Big Data / AI" yang terdiri dari sejumlah besar tugas, manfaat dari _throughput_ luar biasa. +2. Pekerjaan layanan atau _batch job_ di mana kebutuhan sumber dayanya seragam di seluruh pekerjaan (Replicaset / Deployment / Job). + +## Tahap Proyek Saat Ini + +- **Rilis Alpha - Repo Inkubasi.** di https://github.com/kubernetes-sigs/poseidon. +- Saat ini, penjadwal Poseidon-Firmament **tidak memberikan dukungan untuk ketersediaan tinggi**, implementasi kami mengasumsikan bahwa penjadwal tidak mungkin gagal. [Dokumen desain](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/design/README.md) menjelaskan cara-cara yang memungkinkan untuk mengaktifkan ketersediaan tinggi, tetapi kami membiarkannya untuk pekerjaan mendatang. +- Kami **tidak mengetahui adanya _production deployment_** dari penjadwal Poseidon-Firmament saat ini. +- Poseidon-Firmament didukung dari rilis Kubernetes 1.6 dan bekerja dengan semua rilis berikutnya. +- Proses rilis untuk _repo_ Poseidon dan Firmament berjalan secara serentak. 
Rilis Poseidon saat ini dapat ditemukan [di sini](https://github.com/kubernetes-sigs/poseidon/releases) dan rilis Firmament yang sesuai dapat ditemukan [di sini](https://github.com/Huawei-PaaS/firmament/releases). + +## Matriks Perbandingan Fitur + +| Fitur | Penjadwal Bawaan Kubernetes | Penjadwal Poseidon-Firmament | Catatan | +|--- |--- |--- |--- | +|_Node Affinity_/_Anti-Affinity_|Y|Y|| +| _Pod Affinity_ / _Anti-Affinity_ - termasuk dukungan untuk simetri _anti-affinity_ Pod | Y | Y | Saat ini penjadwal bawaan mengungguli penjadwal Poseidon-Firmament Pod dalam segi fungsionalitas _affinity_/_anti-affinity_. Kami sedang berupaya menyelesaikan ini. | +|_Taints_ & _Toleration_|Y|Y|| +| Kemampuan Penjadwalan Dasar sesuai dengan sumber daya komputasi yang tersedia (CPU & Memori) pada sebuah Node | Y | Y** | Tidak semua Predikat & Prioritas sudah didukung saat ini. | +| _Throughput_ ekstrim pada skala besar | Y** | Y | Pendekatan penjadwalan massal mengukur atau meningkatkan penempatan beban kerja. Manfaat _throughput_ substansial menggunakan penjadwal Firmament selama persyaratan sumber daya (CPU / Memori) untuk Pod yang masuk seragam di seluruh Replicaset / Deployment / Job. Hal ini terutama disebabkan oleh amortisasi pekerjaan yang efisien di seluruh Replicaset / Deployment / Job. 1) Untuk pekerjaan "Big Data / AI" yang terdiri dari jumlah tugas yang besar, manfaat _throughput_ yang luar biasa. 2) Manfaat _throughput_ substansial juga untuk skenario layanan atau sekumpulan pekerjaan di mana persyaratan sumber daya beban kerja seragam di seluruh Replicaset / Deployment / Job. | +| Penjadwalan Optimal | Penjadwalan _Pod-by-Pod_, memproses satu Pod pada satu waktu (dapat mengakibatkan penjadwalan sub-optimal) | Penjadwalan Massal (Penjadwalan optimal) | Penjadwal bawaan _Pod-by-Pod_ Kubernetes dapat menetapkan tugas ke mesin sub-optimal. Sebaliknya, Firmament mempertimbangkan semua tugas yang tidak terjadwal pada saat yang bersamaan bersama dengan batasan lunak dan kerasnya. | +| Penghindaran Gangguan Kolokasi | N | N** | Direncanakan di Poseidon-Firmament. | +| _Pre-emption_ Prioritas | Y | N** | Tersedia secara parsial pada Poseidon-Firmament, dibandingkan dengan dukungan ekstensif di penjadwal bawaan Kubernetes. | +| Penjadwalan Ulang yang Inheren | N | Y** | Penjadwal Poseidon-Firmament mendukung penjadwalan ulang beban kerja. Dalam setiap penjadwalan, penjadwal Poseidon-Firmament mempertimbangkan semua Pod, termasuk Pod yang sedang berjalan, dan sebagai hasilnya dapat melakukan migrasi atau mengeluarkan Pod - sebuah lingkungan penjadwalan yang optimal secara global. | +| Penjadwalan Berkelompok | N | Y || +| Dukungan untuk Penjadwalan Volume Persisten Pra-terikat | Y | Y || +| Dukungan untuk Volume Lokal & Penjadwalan _Binding_ Volume Persisten Dinamis | Y | N** | Direncanakan. | +| Ketersediaan Tinggi | Y | N** | Direncanakan. | +| Penjadwalan berbasis metrik _real-time_ | N | Y** | Awalnya didukung menggunakan Heapster (sekarang tidak digunakan lagi) untuk menempatkan Pod menggunakan statistik penggunaan kluster aktual ketimbang reservasi. Rencananya akan dialihkan ke "server metrik". | +| Dukungan untuk _Max-Pod_ per Node | Y | Y | Penjadwal Poseidon-Firmament secara mulus berdampingan dengan penjadwal bawaan Kubernetes. +| Dukungan untuk Penyimpanan _Ephemeral_, selain CPU / Memori | Y | Y || + + +## Instalasi + +Untuk instalasi Poseidon dalam-kluster, silakan mulai dari [Petunjuk Instalasi](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/install/README.md). 
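Setelah instalasi, sketsa singkat berikut (nama berkas manifes hanyalah contoh) dapat digunakan untuk memastikan sebuah Pod yang memakai `schedulerName: poseidon` benar-benar dijadwalkan oleh penjadwal tersebut:

```shell
# Sketsa: terapkan manifes Pod dengan schedulerName: poseidon (nama berkas hanyalah contoh),
# lalu periksa event "Scheduled"; kolom SOURCE menunjukkan penjadwal yang menempatkan Pod.
kubectl apply -f pod-poseidon.yaml
kubectl get events -o wide --field-selector reason=Scheduled
```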
+ +## Pengembangan + +Untuk developer, silakan merujuk ke [Instruksi _Setup_ Developer](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/devel/README.md). + +## Hasil Pengujian Kinerja _Throughput_ Terbaru + +Penjadwal _pod-by-pod_, seperti penjadwal bawaan Kubernetes, biasanya memproses satu Pod pada satu waktu. Penjadwal ini memiliki kelemahan penting berikut: + +1. Penjadwal berkomitmen untuk penempatan Pod lebih awal dan membatasi pilihan untuk Pod lain yang menunggu untuk ditempatkan. +2. Ada peluang terbatas untuk amortisasi pekerjaan lintas Pod karena mereka dipertimbangkan untuk ditempatkan secara individual. + +Kelemahan dari penjadwal _pod-by-pod_ ini diatasi dengan penjadwalan secara terkumpul atau dalam jumlah banyak secara bersamaan di penjadwal Poseidon-Firmament. Memproses beberapa Pod dalam satu kumpulan memungkinkan penjadwal untuk bersama-sama mempertimbangkan penempatan mereka, dan dengan demikian untuk menemukan untung-rugi terbaik untuk seluruh kumpulan ketimbang satu Pod saja. Pada saat yang sama, amortisasi berfungsi lintas Pod yang menghasilkan _throughput_ yang jauh lebih tinggi. + +{{< note >}} + Silakan merujuk ke [hasil _benchmark_ terbaru](https://github.com/kubernetes-sigs/poseidon/blob/master/docs/benchmark/README.md) untuk hasil uji perbandingan kinerja _throughput_ terperinci antara penjadwal Poseidon-Firmament dan Penjadwal bawaan Kubernetes. +{{< /note >}} + +{{% /capture %}} diff --git a/content/id/docs/concepts/overview/working-with-objects/object-management.md b/content/id/docs/concepts/overview/working-with-objects/object-management.md new file mode 100644 index 0000000000000..dd61ebe04a5f2 --- /dev/null +++ b/content/id/docs/concepts/overview/working-with-objects/object-management.md @@ -0,0 +1,185 @@ +--- +title: Pengaturan Objek Kubernetes +content_template: templates/concept +weight: 15 +--- + +{{% capture overview %}} +Perangkat `kubectl` mendukung beberapa cara untuk membuat dan mengatur objek-objek Kubernetes. +Laman ini menggambarkan berbagai macam metodenya. Baca [Kubectl gitbook](https://kubectl.docs.kubernetes.io) +untuk penjelasan pengaturan objek dengan Kubectl secara detail. +{{% /capture %}} + +{{% capture body %}} + +## Metode pengaturan + +{{< warning >}} +Sebuah objek Kubernetes hanya boleh diatur dengan menggunakan satu metode saja. Mengkombinasikan +beberapa metode untuk objek yang sama dapat menghasilkan perilaku yang tidak diinginkan. +{{< /warning >}} + +| Metode pengaturan | Dijalankan pada | _Environment_ yang disarankan | Jumlah penulis yang didukung | Tingkat kesulitan mempelajari | +|----------------------------------|----------------------|--------------------------------|-------------------------------|-------------------------------| +| Perintah imperatif | Objek _live_ | Proyek pengembangan (_dev_) | 1+ | Terendah | +| Konfigurasi objek imperatif | Berkas individu | Proyek produksi (_prod_) | 1 | Sedang | +| Konfigurasi objek deklaratif | Direktori berkas | Proyek produksi (_prod_) | 1+ | Tertinggi | + +## Perintah imperatif + +Ketika menggunakan perintah-perintah imperatif, seorang pengguna menjalankan operasi secara langsung +pada objek-objek _live_ dalam sebuah kluster. Pengguna menjalankan operasi tersebut melalui +argumen atau _flag_ pada perintah `kubectl`. + +Ini merupakan cara yang paling mudah untuk memulai atau menjalankan tugas "sekali jalan" pada sebuah kluster. 
+Karena metode ini dijalankan secara langsung pada objek _live_, tidak ada _history_ yang menjelaskan konfigurasi-konfigurasi terkait sebelumnya. + +### Contoh + +Menjalankan sebuah instans Container nginx dengan membuat suatu objek Deployment: + +```sh +kubectl run nginx --image nginx +``` + +Melakukan hal yang sama menggunakan sintaks yang berbeda: + +```sh +kubectl create deployment nginx --image nginx +``` + +### Kelebihan dan kekurangan + +Beberapa kelebihan metode ini dibandingkan metode konfigurasi objek: + +- Sederhana, mudah dipelajari dan diingat. +- Hanya memerlukan satu langkah untuk membuat perubahan pada kluster. + +Beberapa kekurangan metode ini dibandingkan metode konfigurasi objek: + +- Tidak terintegrasi dengan proses peninjauan (_review_) perubahan. +- Tidak menyediakan jejak audit yang terkait dengan perubahan. +- Tidak menyediakan sumber _record_ kecuali dari apa yang _live_ terlihat. +- Tidak menyediakan templat untuk membuat objek-objek baru. + +## Konfigurasi objek imperatif + +Pada konfigurasi objek imperatif, perintah kubectl menetapkan jenis operasi +(_create_, _replace_, etc.), _flag-flag_ pilihan dan minimal satu nama berkas. +Berkas ini harus berisi definisi lengkap dari objek tersebut +dalam bentuk YAML atau JSON. + +Lihat [referensi API](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) +untuk info lebih detail mengenai definisi objek. + +{{< warning >}} +Perintah imperatif `replace` menggantikan spek yang sudah ada dengan spek yang baru, +membuang semua perubahan terhadap objek tersebut yang tidak didefinisikan pada berkas konfigurasi. +Metode ini sebaiknya tidak dilakukan pada tipe sumber daya yang spek-nya diperbarui +secara independen di luar berkas konfigurasi. Service dengan tipe `LoadBalancer`, sebagai contoh, +memiliki _field_ `externalIPs` yang diperbarui secara independen di luar konfigurasi, dilakukan +oleh kluster. +{{< /warning >}} + +### Contoh + +Membuat objek yang didefinisikan pada sebuah berkas konfigurasi: + +```sh +kubectl create -f nginx.yaml +``` + +Menghapus objek-objek yang didefinisikan pada dua berkas konfigurasi: + +```sh +kubectl delete -f nginx.yaml -f redis.yaml +``` + +Memperbarui objek yang didefinisikan pada sebuah berkas konfigurasi dengan +menimpa konfigurasi _live_: + +```sh +kubectl replace -f nginx.yaml +``` + +### Kelebihan dan kekurangan + +Beberapa kelebihan dibandingkan metode perintah imperatif: + +- Konfigurasi objek dapat disimpan pada suatu sistem kontrol kode seperti Git. +- Konfigurasi objek dapat diintegrasikan dengan proses-proses, misalnya peninjauan (_review_) perubahan sebelum _push_ dan jejak audit. +- Konfigurasi objek dapat menyediakan templat untuk membuat objek-objek baru. + +Beberapa kekurangan dibandingkan metode perintah imperatif: + +- Konfigurasi objek memerlukan pemahaman yang mendasar soal skema objek. +- Konfigurasi objek memerlukan langkah tambahan untuk menulis berkas YAML. + +Beberapa kelebihan dibandingkan metode konfigurasi objek deklaratif: + +- Konfigurasi objek imperatif memiliki perilaku yang lebih sederhana dan mudah dimengerti. +- Sejak Kubernetes versi 1.5, konfigurasi objek imperatif sudah lebih stabil. + +Beberapa kekurangan dibandingkan metode konfigurasi objek deklaratif: + +- Konfigurasi objek imperatif bekerja dengan baik untuk berkas-berkas, namun tidak untuk direktori. +- Pembaruan untuk objek-objek _live_ harus diterapkan pada berkas-berkas konfigurasi, jika tidak, hasil perubahan akan hilang pada penggantian berikutnya. 
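Sehubungan dengan kekurangan terakhir di atas, salah satu pola yang umum (hanya sketsa, dengan nama berkas dan objek mengikuti contoh sebelumnya) adalah mengekspor keadaan objek _live_ ke berkas konfigurasi sebelum melakukan `replace`:

```shell
# Sketsa: simpan dulu keadaan objek live ke berkas konfigurasi,
# sunting berkas tersebut, lalu lakukan penggantian.
kubectl get deployment nginx -o yaml > nginx.yaml
# ...sunting nginx.yaml sesuai kebutuhan...
kubectl replace -f nginx.yaml
```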
+ +## Konfigurasi objek deklaratif + +Ketika menggunakan konfigurasi objek deklaratif, seorang pengguna beroperasi pada berkas-berkas +konfigurasi objek yang disimpan secara lokal, namun pengguna tidak mendefinisikan operasi +yang akan dilakukan pada berkas-berkas tersebut. Operasi _create_, _update_, dan _delete_ +akan dideteksi secara otomatis per-objek dengan `kubectl`. Hal ini memungkinkan penerapan +melalui direktori, dimana operasi yang berbeda mungkin diperlukan untuk objek-objek yang berbeda. + +{{< note >}} +Konfigurasi objek deklaratif mempertahankan perubahan yang dibuat oleh penulis lainnya, bahkan +jika perubahan tidak digabungkan (_merge_) kembali pada berkas konfigurasi objek. Hal ini +bisa terjadi dengan menggunakan operasi API `patch` supaya hanya perbedaannya saja yang ditulis, +daripada menggunakan operasi API `replace` untuk menggantikan seluruh konfigurasi objek. +{{< /note >}} + +### Contoh + +Melakukan pemrosesan pada semua berkas konfigurasi objek di direktori `configs`, dan melakukan +_create_ atau _patch_ untuk objek-objek _live_. Kamu dapat terlebih dahulu melakukan `diff` untuk +melihat perubahan-perubahan apa saja yang akan dilakukan, dan kemudian terapkan: + +```sh +kubectl diff -f configs/ +kubectl apply -f configs/ +``` + +Melakukan pemrosesan direktori secara rekursif: + +```sh +kubectl diff -R -f configs/ +kubectl apply -R -f configs/ +``` + +### Kelebihan dan kekurangan + +Beberapa kelebihan dibandingkan konfigurasi objek imperatif: + +- Perubahan-perubahan yang dilakukan secara langsung pada objek-objek _live_ akan dipertahankan, bahkan jika perubahan tersebut tidak digabungkan kembali pada berkas-berkas konfigurasi. +- Konfigurasi objek deklaratif memiliki dukungan yang lebih baik dalam mengoperasikan direktori dan secara otomatis mendeteksi tipe operasi (_create_, _patch_, _delete_) per-objek. + +Beberapa kekurangan dibandingkan konfigurasi objek imperatif: + +- Konfigurasi objek deklaratif lebih sulit untuk di-_debug_ dan hasilnya lebih sulit dimengerti untuk perilaku yang tidak diinginkan. +- Pembaruan sebagian menggunakan _diff_ menghasilkan operasi _merge_ dan _patch_ yang rumit. + +{{% /capture %}} + +{{% capture whatsnext %}} + +- [Mengatur Objek Kubernetes menggunakan Perintah Imperatif](/docs/tasks/manage-kubernetes-objects/imperative-command/) +- [Mengatur Objek Kubernetes menggunakan Konfigurasi Objek (Imperatif)](/docs/tasks/manage-kubernetes-objects/imperative-config/) +- [Mengatur Objek Kubernetes menggunakan Konfigurasi Objek (Deklaratif)](/docs/tasks/manage-kubernetes-objects/declarative-config/) +- [Mengatur Objek Kubernetes menggunakan Kustomize (Deklaratif)](/docs/tasks/manage-kubernetes-objects/kustomization/) +- [Referensi Perintah Kubectl](/docs/reference/generated/kubectl/kubectl-commands/) +- [Kubectl Gitbook](https://kubectl.docs.kubernetes.io) +- [Referensi API Kubernetes](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/) + +{{% /capture %}} diff --git a/content/zh/docs/concepts/services-networking/service.md b/content/zh/docs/concepts/services-networking/service.md index f0484315cc8b5..0915c40d756ac 100644 --- a/content/zh/docs/concepts/services-networking/service.md +++ b/content/zh/docs/concepts/services-networking/service.md @@ -364,7 +364,7 @@ iptables mode for kube-proxy became the default. Kubernetes v1.8 added ipvs proxy mode. 
--> -###版本兼容性 +### 版本兼容性 从Kubernetes v1.0开始,您已经可以使用 [用户空间代理模式](#proxy-mode-userspace)。 Kubernetes v1.1添加了 iptables 模式代理,在 Kubernetes v1.2 中,kube-proxy 的 iptables 模式成为默认设置。 diff --git a/layouts/_default/baseof.html b/layouts/_default/baseof.html index 440655af41748..d23a1141a3a37 100644 --- a/layouts/_default/baseof.html +++ b/layouts/_default/baseof.html @@ -8,20 +8,22 @@ {{ block "hero" . }}
-
+

{{ block "hero-title" . }}{{ .Params.bigheader | default .Title }}{{ end }}

{{ .Params.abstract }}
{{ block "hero-more" . }}{{ end }} -
+
{{ block "post-hero" . }}{{ end }} {{ end }} - -
- {{ block "main" . }}{{ end }} -
+ +
+
+ {{ block "main" . }}{{ end }} +
+
{{ partialCached "footer.html" . }} {{ partialCached "footer-scripts.html" . }} diff --git a/layouts/blog/baseof.html b/layouts/blog/baseof.html index 85095f152c165..c774f688f66e9 100644 --- a/layouts/blog/baseof.html +++ b/layouts/blog/baseof.html @@ -11,29 +11,31 @@

{{ .Title }}

-
-
-
- {{ block "main" . }}{{ end }} -
-
-
- {{ with site.Home.OutputFormats.Get "rss" -}} - -
- {{ end -}} -
-
-
-
-
-
-
+
+
+
+
+ {{ block "main" . }}{{ end }} +
+
+
+ {{ with site.Home.OutputFormats.Get "rss" -}} + +
+ {{ end -}} +
+
+
+
+
+
+
+
+ {{ partialCached "blog/archive.html" . }}
- {{ partialCached "blog/archive.html" . }}
-
+
{{ partialCached "footer.html" . }} {{ partialCached "footer-scripts.html" . }} diff --git a/layouts/case-studies/list.html b/layouts/case-studies/list.html index 1dbd6351a4a90..f8e723c8198d4 100644 --- a/layouts/case-studies/list.html +++ b/layouts/case-studies/list.html @@ -10,7 +10,7 @@ {{ end }} {{ $featured := (where $pages "Params.featured" true).ByWeight | first 4 }}
-
+
{{ range $featured }} @@ -18,13 +18,13 @@ {{ end }}
-
+
{{ $featuredVideos := where .Pages ".Params.video" "!=" nil }} {{ with $featuredVideos }} {{ with index $featuredVideos 0 }}
-
+

"{{ .Params.quote | html }}"

{{ $img := .Resources.GetMatch "video.png" }} @@ -32,7 +32,7 @@

"{{ .Params.quote | html }}"

{{ with $small }}{{ .Title }}{{ end }}
-
+
@@ -41,7 +41,7 @@

"{{ .Params.quote | html }}"

{{ end }} {{ end }}
-
+

{{ .Title }}

{{ range $pages.ByTitle }} @@ -60,7 +60,7 @@

{{ .Title }}

{{ end }} {{ T
-
+
{{ .Content }} {{ end }} diff --git a/layouts/docs/baseof.html b/layouts/docs/baseof.html index 11342585f1fff..cb3071f5f573f 100644 --- a/layouts/docs/baseof.html +++ b/layouts/docs/baseof.html @@ -13,16 +13,18 @@ {{ end }} {{ block "announcement" . }}{{ partial "announcement.html" . }}{{ end }} {{ block "deprecation" . }}{{ partial "deprecation-warning.html" . }}{{ end }} -
- {{ block "side-menu" . }}{{ end }} -
- {{ block "content" . }}{{ end }} - - {{ partial "feedback.html" . }} - - {{ partial "git-info.html" . }} -
-
+
+
+ {{ block "side-menu" . }}{{ end }} +
+ {{ block "content" . }}{{ end }} + + {{ partial "feedback.html" . }} + + {{ partial "git-info.html" . }} +
+
+
{{ partialCached "footer.html" . }} {{ partialCached "footer-scripts.html" . }}