diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 613797246b41b..8c6c0026944e3 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -10,18 +10,19 @@ aliases: - sftim sig-docs-website-owners: # Admins for overall website - divya-mohan0209 + - katcosgrove - natalisucks - reylejano + - salaxander - sftim - tengqm - - drewhagen # RT 1.30 Docs Lead - - katcosgrove # RT 1.30 Lead sig-docs-localization-owners: # Admins for localization content - a-mccarthy - divya-mohan0209 - natalisucks - nate-double-u - reylejano + - salaxander - sftim - seokho-son - tengqm @@ -31,9 +32,21 @@ aliases: - natalisucks - nate-double-u - reylejano + - salaxander - sftim - seokho-son - tengqm + sig-docs-bn-owners: # Admins for Bengali content + - asem-hamid + - Imtiaz1234 + - mitul3737 + - rajibmitra + sig-docs-bn-reviews: # PR reviews for Bengali content + - asem-hamid + - Imtiaz1234 + - mitul3737 + - rajibmitra + - sajibAdhi sig-docs-de-owners: # Admins for German content - bene2k1 - rlenferink @@ -44,47 +57,43 @@ aliases: - celestehorgan - dipesh-rawat - divya-mohan0209 + - katcosgrove - natalisucks - nate-double-u - reylejano + - salaxander - sftim - tengqm sig-docs-en-reviews: # PR reviews for English content - celestehorgan - dipesh-rawat - divya-mohan0209 + - katcosgrove - kbhawkey - mengjiao-liu - mickeyboxell - natalisucks - nate-double-u - reylejano + - salaxander - sftim - shannonxtreme - tengqm - windsonsea sig-docs-es-owners: # Admins for Spanish content - - 92nqb - electrocucaracha - krol3 - - raelga - ramrodo sig-docs-es-reviews: # PR reviews for Spanish content - - 92nqb - electrocucaracha - jossemarGT - krol3 - - raelga - ramrodo sig-docs-fr-owners: # Admins for French content - - awkif - - feloy - perriea - rekcah78 - remyleone sig-docs-fr-reviews: # PR reviews for French content - - awkif - - feloy - perriea - rekcah78 - remyleone @@ -121,6 +130,7 @@ aliases: - atoato88 - bells17 - kakts + - Okabe-Junya - t-inu sig-docs-ko-owners: # Admins for Korean content - gochist @@ -140,6 +150,7 @@ aliases: - divya-mohan0209 - natalisucks - reylejano + - salaxander - sftim - tengqm sig-docs-zh-owners: # Admins for Chinese content @@ -173,18 +184,14 @@ aliases: sig-docs-pt-owners: # Admins for Portuguese content - devlware - edsoncelio - - femrtnz - jcjesus - stormqueen1990 - - yagonobre sig-docs-pt-reviews: # PR reviews for Portugese content - devlware - edsoncelio - - femrtnz - jcjesus - mrerlison - stormqueen1990 - - yagonobre sig-docs-vi-owners: # Admins for Vietnamese content - huynguyennovem - truongnh1992 diff --git a/README-bn.md b/README-bn.md new file mode 100644 index 0000000000000..765d05e2afabe --- /dev/null +++ b/README-bn.md @@ -0,0 +1,210 @@ +# কুবারনেটিস ডকুমেন্টেশন + +[![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) + +এই রিপোজিটোরিতে [কুবারনেটিস ওয়েবসাইট এবং ডকুমেন্টেশন](https://kubernetes.io/) তৈরি করার জন্য প্রয়োজনীয় সমস্ত উপাদান রয়েছে। আমরা খুবই আনন্দিত যে আপনি অবদান রাখতে চান! 
+ +- [ডকুমেন্টেশন এ অবদান](#contributing-to-the-docs) +- [স্থানীয়করণ ReadMeগুলো](#localization-readmemds) + +## এই রিপোজিটোরি ব্যবহার + +আপনি [Hugo (বর্ধিত সংস্করণ)](https://gohugo.io/) ব্যবহার করে স্থানীয়ভাবে ওয়েবসাইটটি চালাতে পারেন, অথবা আপনি এটি একটি কন্টেইনার রানটাইমে চালাতে পারেন। আমরা দৃঢ়ভাবে কন্টেইনার রানটাইম ব্যবহার করার পরামর্শ দিই, কারণ এটি লাইভ ওয়েবসাইটের সাথে ডিপ্লয়মেন্টের ধারাবাহিকতা দেয়। + +## পূর্বশর্ত + +এই রিপোজিটোরিটি ব্যবহার করার জন্য, আপনাকে লোকাল সিস্টেম বা, ডিভাইস এ নিম্নলিখিত জিনিস ইনস্টল করতে হবে: + +- [npm](https://www.npmjs.com/) +- [Go](https://go.dev/) +- [Hugo (বর্ধিত সংস্করণ)](https://gohugo.io/) +- একটি কন্টেইনার রানটাইম, যেমন [Docker](https://www.docker.com/). + +> [!NOTE] +[`netlify.toml`](netlify.toml#L11) ফাইলে `HUGO_VERSION` এনভায়রনমেন্ট ভেরিয়েবল দ্বারা নির্দিষ্ট করা Hugo বর্ধিত সংস্করণ ইনস্টল করা নিশ্চিত করুন৷ + +আপনি কাজ শুরু করার আগে, দরকারি জিনিসগুলো ইনস্টল করুন। রিপোজিটোরি ক্লোন(clone) করুন এবং ডিরেক্টরিতে(directory) প্রবেশ করুন: + +```bash +git clone https://github.com/kubernetes/website.git +cd website +``` + +কুবারনেটিস ওয়েবসাইটটি [Docsy Hugo থিম](https://github.com/google/docsy#readme) ব্যবহার করে। এমনকি যদি আপনি একটি কন্টেইনারে ওয়েবসাইট চালানোর পরিকল্পনা করেন, আমরা দৃঢ়ভাবে নিম্নলিখিতগুলি চালিয়ে সাবমডিউল এবং অন্যান্য প্রয়োজনীয় জিনিসগুলো পুল(pull) করার পরামর্শ দিই: + +### Windows +```powershell +# fetch submodule dependencies +git submodule update --init --recursive --depth 1 +``` + +### Linux / other Unix +```bash +# fetch submodule dependencies +make module-init +``` + +## একটি কন্টেইনার ব্যবহার করে ওয়েবসাইট চালানো + +একটি কন্টেইনারে সাইটটি তৈরি করতে, নিম্নলিখিতটি চালান: + +```bash +# You can set $CONTAINER_ENGINE to the name of any Docker-like container tool +make container-serve +``` + +আপনি যদি ত্রুটি দেখতে পান, তাহলে সম্ভবত এর অর্থ হলো Hugo কন্টেইনারে যথেষ্ট কম্পিউটিং রিসোর্স নেই। এটি সমাধান করতে, আপনার মেশিনে Docker এর জন্য অনুমোদিত CPU এবং মেমরি ব্যবহারের পরিমাণ বাড়ান ([MacOS](https://docs.docker.com/desktop/settings/mac/) এবং [Windows](https://docs.docker.com/desktop/settings/windows/))। + +ওয়েবসাইটটি দেখতে -এ আপনার ব্রাউজার খুলুন। আপনি সোর্স ফাইলগুলোতে পরিবর্তন করার সাথে সাথে, Hugo ওয়েবসাইট আপডেট করে এবং একটি ব্রাউজার রিফ্রেশ করতে বাধ্য করে। + +## Hugo ব্যবহার করে লোকাল ডিভাইস এ ওয়েবসাইট চালানো + +দরকারি জিনিসগুলো ইনস্টল করতে, স্থানীয়ভাবে সাইট তৈরি এবং পরীক্ষা করতে, চালান: + +- For macOS and Linux + ```bash + npm ci + make serve + ``` +- For Windows (PowerShell) + ```powershell + npm ci + hugo.exe server --buildFuture --environment development + ``` + +এটি পোর্ট 1313-এ স্থানীয় Hugo সার্ভার শুরু করবে। ওয়েবসাইট দেখতে -এ আপনার ব্রাউজার খুলুন। আপনি সোর্স ফাইলগুলোতে পরিবর্তন করার সাথে সাথে, Hugo ওয়েবসাইট আপডেট হবে এবং একটি ব্রাউজার রিফ্রেশ করতে বাধ্য করে। + +## API রেফারেন্স পৃষ্ঠা তৈরি করা + +`content/en/docs/reference/kubernetes-api` এ অবস্থিত API রেফারেন্স পৃষ্ঠাগুলো ব্যবহার করে Swagger স্পেসিফিকেশন থেকে তৈরি করা হয়েছে, যা OpenAPI স্পেসিফিকেশন নামেও পরিচিত। + +একটি নতুন কুবারনেটিস রিলিজের জন্য রেফারেন্স পৃষ্ঠাগুলো আপডেট করতে এই পদক্ষেপগুলো অনুসরণ করুন: + +1. `api-ref-generator` সাবমডিউল পুল (Pull) করুন: + + ```bash + git submodule update --init --recursive --depth 1 + ``` + +2. Swagger স্পেসিফিকেশন আপডেট করুন: + + ```bash + curl 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/swagger.json' > api-ref-assets/api/swagger.json + ``` + +3. `api-ref-assets/config/`-এ, নতুন রিলিজের পরিবর্তনগুলো প্রতিফলিত করতে `toc.yaml` এবং `fields.yaml` ফাইলগুলোকে হালনাগাদ করে নিন। + +4. 
পরবর্তী, পৃষ্ঠাগুলো তৈরি করুন: + + ```bash + make api-reference + ``` + + আপনি একটি কন্টেইনার থেকে সাইট তৈরি এবং পরিবেশন করে স্থানীয়ভাবে ফলাফল পরীক্ষা করতে পারেন: + + ```bash + make container-serve + ``` + + একটি ওয়েব ব্রাউজারে, API রেফারেন্স দেখতে এ যান। + +5. যখন নতুন চুক্তির সমস্ত পরিবর্তন কনফিগারেশন ফাইল `toc.yaml` এবং `fields.yaml`-এ প্রতিফলিত হয়, তখন নতুন তৈরি করা API রেফারেন্স পৃষ্ঠাগুলোর সাথে একটি পুল রিকোয়েস্ট তৈরি করুন। + +## সমস্যা সমাধান (Troubleshooting) + +### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version + +প্রযুক্তিগত কারণে Hugo কে দুই সেট বাইনারিতে পাঠানো হয়। বর্তমান ওয়েবসাইটটি শুধুমাত্র **Hugo Extended** সংস্করণের উপর ভিত্তি করে চলে। [রিলিজ পৃষ্ঠা](https://github.com/gohugoio/hugo/releases) নামের মধ্যে `বর্ধিত(extended)` সহ সংরক্ষণাগারগুলো খুঁজুন। নিশ্চিত করতে, `hugo version` চালান এবং `extended` শব্দটি সন্ধান করুন। + +### অনেকগুলো খোলা ফাইলের জন্য macOS সমস্যা সমাধান করা + +আপনি যদি macOS-এ `make serve` চালান এবং নিম্নলিখিত ত্রুটি পান: + +```bash +ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files +make: *** [serve] Error 1 +``` + +খোলা ফাইলগুলির জন্য বর্তমান সীমা পরীক্ষা করার চেষ্টা করুন: + +`launchctl limit maxfiles` + +তারপর নিম্নলিখিত কমান্ডগুলি চালান ( থেকে নেয়া): + +```shell +#!/bin/sh + +# These are the original gist links, linking to my gists now. +# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxfiles.plist +# curl -O https://gist.githubusercontent.com/a2ikm/761c2ab02b7b3935679e55af5d81786a/raw/ab644cb92f216c019a2f032bbf25e258b01d87f9/limit.maxproc.plist + +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxfiles.plist +curl -O https://gist.githubusercontent.com/tombigel/d503800a282fcadbee14b537735d202c/raw/ed73cacf82906fdde59976a0c8248cce8b44f906/limit.maxproc.plist + +sudo mv limit.maxfiles.plist /Library/LaunchDaemons +sudo mv limit.maxproc.plist /Library/LaunchDaemons + +sudo chown root:wheel /Library/LaunchDaemons/limit.maxfiles.plist +sudo chown root:wheel /Library/LaunchDaemons/limit.maxproc.plist + +sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist +``` + +এটি Catalina পাশাপাশি Mojave macOS এর জন্য কাজ করে। + +## SIG Docs এর সাথে জড়িত হন + +[কমিউনিটি পৃষ্ঠা](https://github.com/kubernetes/community/tree/master/sig-docs#meetings) থেকে SIG Docs কুবারনেটিস কমিউনিটি এবং মিটিং সম্পর্কে আরও জানুন। + +এছাড়াও আপনি এই প্রকল্পের রক্ষণাবেক্ষণকারীদের কাছে পৌঁছাতে পারেন: + +- [Slack](https://kubernetes.slack.com/messages/sig-docs) + - [এই Slack এর জন্য একটি আমন্ত্রণ পান](https://slack.k8s.io/) +- [মেইলিং তালিকা](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) + +## Docs এ অবদান রাখুন + +আপনি আপনার GitHub অ্যাকাউন্টে এই রিপোজিটোরি এর একটি অনুলিপি তৈরি করতে স্ক্রিনের উপরের ডানদিকে **Fork** বোতামে ক্লিক করতে পারেন। এই অনুলিপিটিকে _ফর্ক(fork)_ বলা হয়। আপনার ফর্কটিতে আপনি যে কোনো পরিবর্তন করতে চান এবং আপনি যখন সেই পরিবর্তনগুলো আমাদের কাছে পাঠাতে প্রস্তুত হন, তখন আপনার ফর্কে যান এবং এটি সম্পর্কে আমাদের জানাতে একটি নতুন পুল রিকোয়েস্ট (Pull request) তৈরি করুন৷ + +একবার আপনার পুল রিকোয়েস্ট তৈরি হয়ে গেলে, একজন কুবারনেটিস পর্যালোচক স্পষ্ট, কার্যকর প্রতিক্রিয়া প্রদানের দায়িত্ব নেবেন। পুল রিকোয়েস্টের মালিক হিসাবে, **কুবারনেটিস পর্যালোচক আপনাকে যে প্রতিক্রিয়া প্রদান করেছেন তা সমাধান করার জন্য আপনার পুল রিকোয়েস্ট 
সংশোধন করার দায়িত্ব আপনার।** + +এছাড়াও, মনে রাখবেন যে আপনার কাছে একাধিক কুবারনেটিস পর্যালোচক আপনাকে প্রতিক্রিয়া প্রদান করতে পারেন বা আপনি একজন কুবারনেটিস পর্যালোচকের কাছ থেকে প্রতিক্রিয়া পেতে পারেন যা আপনাকে প্রতিক্রিয়া প্রদানের জন্য প্রাথমিকভাবে নির্ধারিত একটি থেকে আলাদা। + +তদুপরি, কিছু ক্ষেত্রে, আপনার একজন পর্যালোচক প্রয়োজনের সময় একজন কুবারনেটিস টেকনিকাল পর্যালোচনাকারীর কাছ থেকে প্রযুক্তিগত পর্যালোচনা চাইতে পারেন। পর্যালোচকরা যথাসময়ে প্রতিক্রিয়া প্রদানের জন্য তাদের যথাসাধ্য চেষ্টা করবেন কিন্তু প্রতিক্রিয়ার সময় পরিস্থিতির উপর ভিত্তি করে পরিবর্তিত হতে পারে। + +কুবারনেটিস ডকুমেন্টেশনে অবদান সম্পর্কে আরও তথ্যের জন্য, দেখুন: + +- [কুবারনেটিস ডক্সে অবদান রাখুন](https://kubernetes.io/docs/contribute/) +- [পৃষ্ঠা বিষয়বস্তুর প্রকার](https://kubernetes.io/docs/contribute/style/page-content-types/) +- [ডকুমেন্টেশন শৈলী গাইড](https://kubernetes.io/docs/contribute/style/style-guide/) +- [কুবারনেটিস ডকুমেন্টেশন স্থানীয়করণ](https://kubernetes.io/docs/contribute/localization/) +- [কুবারনেটিস ডক্সের পরিচিতি](https://www.youtube.com/watch?v=pprMgmNzDcw) + +### নতুন অবদানকারী অ্যাম্বাসেডর + +অবদান রাখার সময় আপনার যদি যেকোনো সময়ে সাহায্যের প্রয়োজন হয়, [নতুন কন্ট্রিবিউটর অ্যাম্বাসেডর](https://kubernetes.io/docs/contribute/advanced/#serve-as-a-new-contributor-ambassador) যোগাযোগের একটি ভালো জায়গা। . এগুলো হলো SIG ডক্স অনুমোদনকারীযাদের দায়িত্ব গুলোর মধ্যে নতুন অবদানকারীদের পরামর্শ দেওয়া এবং তাদের প্রথম কয়েকটি পুল রিকোয়েস্টের মাধ্যমে তাদের সাহায্য করা অন্তর্ভুক্ত৷ নতুন কন্ট্রিবিউটর অ্যাম্বাসেডরদের সাথে যোগাযোগ করার সবচেয়ে ভালো জায়গা হবে [Kubernetes Slack](https://slack.k8s.io/)। SIG ডক্সের জন্য বর্তমান নতুন অবদানকারী অ্যাম্বাসেডর: + +| Name | Slack | GitHub | +| -------------------------- | -------------------------- | -------------------------- | +| Arsh Sharma | @arsh | @RinkiyaKeDad | + +## Localization READMEs + +| Language | Language | +| -------------------------- | -------------------------- | +| [Bengali](README-bn.md) | [Korean](README-ko.md) | +| [Chinese](README-zh.md) | [Polish](README-pl.md) | +| [French](README-fr.md) | [Portuguese](README-pt.md) | +| [German](README-de.md) | [Russian](README-ru.md) | +| [Hindi](README-hi.md) | [Spanish](README-es.md) | +| [Indonesian](README-id.md) | [Ukrainian](README-uk.md) | +| [Italian](README-it.md) | [Vietnamese](README-vi.md) | +| [Japanese](README-ja.md) | | + +## কোড অফ কন্ডাক্ট + +কুবারনেটিস কমিউনিটিয়ের অংশগ্রহণ [CNCF কোড অফ কন্ডাক্ট](https://github.com/cncf/foundation/blob/main/code-of-conduct.md) দ্বারা পরিচালিত হয়। + +## ধন্যবাদ + +কুবারনেটিস কমিউনিটিয়ের অংশগ্রহণে উন্নতি লাভ করে, এবং আমরা আমাদের ওয়েবসাইট এবং আমাদের ডকুমেন্টেশনে আপনার অবদানের প্রশংসা করি! diff --git a/README-ja.md b/README-ja.md index 047d1c9ceb7fe..a796164244e1a 100644 --- a/README-ja.md +++ b/README-ja.md @@ -2,14 +2,14 @@ [![Netlify Status](https://api.netlify.com/api/v1/badges/be93b718-a6df-402a-b4a4-855ba186c97d/deploy-status)](https://app.netlify.com/sites/kubernetes-io-main-staging/deploys) [![GitHub release](https://img.shields.io/github/release/kubernetes/website.svg)](https://github.com/kubernetes/website/releases/latest) -このリポジトリには、[KubernetesのWebサイトとドキュメント](https://kubernetes.io/)をビルドするために必要な全アセットが格納されています。貢献に興味を持っていただきありがとうございます! +このリポジトリには、[KubernetesのWebサイトとドキュメント](https://kubernetes.io/)をビルドするために必要な全アセットが格納されています。あなたの貢献をお待ちしています! 
- [ドキュメントに貢献する](#contributing-to-the-docs) - [翻訳された`README.md`一覧](#localization-readmemds) -# リポジトリの使い方 +## リポジトリの使い方 -Hugo(Extended version)を使用してWebサイトをローカルで実行することも、コンテナランタイムで実行することもできます。コンテナランタイムを使用することを強くお勧めします。これにより、本番Webサイトとのデプロイメントの一貫性が得られます。 +Hugo(Extended version)を使用してWebサイトをローカルで実行することも、コンテナランタイムで実行することもできます。コンテナランタイムを使用することを強くお勧めします。これにより、本番Webサイトとのデプロイの一貫性が得られます。 ## 前提条件 @@ -17,75 +17,92 @@ Hugo(Extended version)を使用してWebサイトをローカルで実行する - [npm](https://www.npmjs.com/) - [Go](https://go.dev/) -- [Hugo(Extended version)](https://gohugo.io/) +- [Hugo (Extended version)](https://gohugo.io/) - [Docker](https://www.docker.com/)などのコンテナランタイム -開始する前に、依存関係をインストールしてください。リポジトリのクローンを作成し、ディレクトリに移動します。 +> [!NOTE] +[`netlify.toml`](netlify.toml#L11)の`HUGO_VERSION`環境変数で指定されたHugo extended versionをインストールしてください。 -``` +始める前に、依存関係をインストールしてください。リポジトリをクローンし、ディレクトリに移動します。 + +```bash git clone https://github.com/kubernetes/website.git cd website ``` -KubernetesのWebサイトではDocsyというHugoテーマを使用しています。コンテナでWebサイトを実行する場合でも、以下を実行して、サブモジュールおよびその他の開発依存関係をプルすることを強くお勧めします。 +KubernetesのWebサイトでは[DocsyというHugoテーマ](https://github.com/google/docsy#readme)を使用しています。コンテナでWebサイトを実行する場合でも、以下を実行して、サブモジュールおよびその他の依存関係を取得することを強くお勧めします。 -``` -# pull in the Docsy submodule +### Windows + +```powershell +# サブモジュールの依存関係を取得 git submodule update --init --recursive --depth 1 ``` +### Linux / other Unix + +```bash +# サブモジュールの依存関係を取得 +make module-init +``` + ## コンテナを使ってウェブサイトを動かす コンテナ内でサイトを構築するには、以下を実行してコンテナイメージを構築し、実行します。 -``` -make container-image +```bash +# 環境変数$CONTAINER_ENGINEを設定することで、Docker以外のコンテナランタイムを使用することもできます make container-serve ``` -お使いのブラウザにて http://localhost:1313 にアクセスしてください。リポジトリ内のソースファイルに変更を加えると、HugoがWebサイトの内容を更新してブラウザに反映します。 +エラーが発生した場合はhugoコンテナの計算リソースが不足しています。これを解決するには、使用しているマシン([MacOS](https://docs.docker.com/desktop/settings/mac/)と[Windows](https://docs.docker.com/desktop/settings/windows/))でDockerが使用できるCPUとメモリを増やしてください。 + +ブラウザでにアクセスしてください。リポジトリ内のソースファイルに変更を加えると、HugoがWebサイトの内容を更新してブラウザに反映します。 ## Hugoを使ってローカル環境でWebサイトを動かす -[`netlify.toml`](netlify.toml#L10)ファイルに記述されている`HUGO_VERSION`と同じExtended versionのHugoをインストールするようにしてください。 +ローカルで依存関係をインストールし、サイトを構築してテストするには、次のコマンドを実行します。 -ローカルでサイトを構築してテストするには、次のコマンドを実行します。 +- For macOS and Linux + ```bash + npm ci + make serve + ``` +- For Windows (PowerShell) + ```powershell + npm ci + hugo.exe server --buildFuture --environment development + ``` -```bash -# install dependencies -npm ci -make serve -``` - -これで、Hugoのサーバーが1313番ポートを使って開始します。お使いのブラウザにて http://localhost:1313 にアクセスしてください。リポジトリ内のソースファイルに変更を加えると、HugoがWebサイトの内容を更新してブラウザに反映します。 +これで、Hugoのサーバーが1313番ポートを使って起動します。使用しているブラウザでにアクセスしてください。リポジトリ内のソースファイルに変更を加えると、HugoがWebサイトの内容を更新してブラウザに反映します。 ## API reference pagesをビルドする -`content/en/docs/reference/kubernetes-api`に配置されているAPIリファレンスページはを使ってSwagger仕様書からビルドされています。 +`content/ja/docs/reference/kubernetes-api`に配置されているAPIリファレンスページはを使ってSwagger Specification (OpenAPI Specification)からビルドされています。 新しいKubernetesリリースのためにリファレンスページをアップデートするには、次の手順を実行します: -1. `api-ref-generator`サブモジュールをプルする: +1. `api-ref-generator`サブモジュールを取得します: ```bash git submodule update --init --recursive --depth 1 ``` -2. Swagger仕様書を更新する: +2. Swagger Specificationを更新します: ```bash curl 'https://raw.githubusercontent.com/kubernetes/kubernetes/master/api/openapi-spec/swagger.json' > api-ref-assets/api/swagger.json ``` -3. 新しいリリースの変更を反映するため、`api-ref-assets/config/`で`toc.yaml`と`fields.yaml`を適用する。 +3. `api-ref-assets/config/`内の`toc.yaml`と`fields.yaml`を新しいリリースの変更に合わせます。 -4. 次に、ページをビルドする: +4. 
次に、ページをビルドします: ```bash make api-reference ``` - コンテナイメージからサイトを作成・サーブする事でローカルで結果をテストすることができます: + コンテナイメージからサイトを作成、サーブする事でローカルで結果をテストすることができます: ```bash make container-image @@ -94,19 +111,19 @@ make serve APIリファレンスを見るために、ブラウザでを開いてください。 -5. 新しいコントラクトのすべての変更が設定ファイル`toc.yaml`と`fields.yaml`に反映されたら、新しく生成されたAPIリファレンスページとともにPull Requestを作成します。 +5. 新しいコントラクトのすべての変更が設定ファイル`toc.yaml`と`fields.yaml`に反映されたら、新しく生成されたAPIリファレンスのページとともにPull Requestを作成します。 ## トラブルシューティング ### error: failed to transform resource: TOCSS: failed to transform "scss/main.scss" (text/x-scss): this feature is not available in your current Hugo version -Hugoは、技術的な理由から2種類のバイナリがリリースされています。現在のウェブサイトは**Hugo Extended**バージョンのみに基づいて運営されています。[リリースページ](https://github.com/gohugoio/hugo/releases)で名前に「extended」が含まれるアーカイブを探します。確認するには、`hugo version`を実行し、「extended」という単語を探します。 +Hugoは、技術的な理由から2種類のバイナリがリリースされています。現在のウェブサイトは**Hugo Extended**バージョンのみに基づいて運営されています。[リリースページ](https://github.com/gohugoio/hugo/releases)で名前に`extended`が含まれるアーカイブを探します。確認するには、`hugo version`を実行し、`extended`という単語を探します。 ### macOSにてtoo many open filesというエラーが表示される macOS上で`make serve`を実行した際に以下のエラーが表示される場合 -``` +```bash ERROR 2020/08/01 19:09:18 Error: listen tcp 127.0.0.1:1313: socket: too many open files make: *** [serve] Error 1 ``` @@ -115,9 +132,9 @@ OS上で同時に開けるファイルの上限を確認してください。 `launchctl limit maxfiles` -続いて、以下のコマンドを実行します(https://gist.github.com/tombigel/d503800a282fcadbee14b537735d202c より引用)。 +続いて、以下のコマンドを実行します(より引用)。 -``` +```shell #!/bin/sh # These are the original gist links, linking to my gists now. @@ -140,31 +157,40 @@ sudo launchctl load -w /Library/LaunchDaemons/limit.maxfiles.plist ## SIG Docsに参加する -[コミュニティのページ](https://github.com/kubernetes/community/tree/master/sig-docs#meetings)をご覧になることで、SIG Docs Kubernetesコミュニティとの関わり方を学ぶことができます。 +[コミュニティのページ](https://github.com/kubernetes/community/tree/master/sig-docs#meetings)を確認することで、SIG Docs Kubernetesコミュニティとの関わり方を学ぶことができます。 本プロジェクトのメンテナーには以下の方法で連絡することができます: -- [Slack](https://kubernetes.slack.com/messages/kubernetes-docs-ja) +- [Slack #kubernetes-docs-ja チャンネル](https://kubernetes.slack.com/messages/kubernetes-docs-ja) - [メーリングリスト](https://groups.google.com/forum/#!forum/kubernetes-sig-docs) ## ドキュメントに貢献する {#contributing-to-the-docs} -GitHubの画面右上にある**Fork**ボタンをクリックすると、お使いのGitHubアカウントに紐付いた本リポジトリのコピーが作成され、このコピーのことを*フォーク*と呼びます。フォークリポジトリの中ではお好きなように変更を加えていただいて構いません。加えた変更をこのリポジトリに追加したい任意のタイミングにて、フォークリポジトリからPull Reqeustを作成してください。 +GitHubの画面右上にある**Fork**ボタンをクリックすると、GitHubアカウントに紐付いた本リポジトリのコピーが作成されます。このコピーのことを*フォーク*と呼びます。フォークリポジトリの中では好きなように変更を加えることができます。加えた変更をこのリポジトリに反映したい好きなタイミングで、フォークリポジトリからPull Reqeustを作成してください。 + +Pull Requestが作成されると、レビュー担当者が責任を持って明確かつ実用的なフィードバックを返します。Pull Requestの所有者は作成者であるため、**自分自身で作成したPull Requestを編集し、フィードバックに対応するのはあなたの責任です。** -Pull Requestが作成されると、レビュー担当者が責任を持って明確かつ実用的なフィードバックを返します。Pull Requestの所有者は作成者であるため、**ご自身で作成したPull Requestを編集し、フィードバックに対応するのはご自身の役目です。** +また、状況によっては2人以上のレビュアーからフィードバックが返されたり、アサインされていないレビュアーからのフィードバックが来ることがある点も留意してください。 -また、状況によっては2人以上のレビュアーからフィードバックが返されたり、アサインされていないレビュー担当者からのフィードバックが来ることがある点もご注意ください。 +さらに、特定のケースにおいては、レビュアーがKubernetesの技術的なレビュアーに対してレビューを依頼することもあります。レビュー担当者はタイムリーにフィードバックを提供するために最善を尽くしますが、応答時間は状況に応じて異なる場合があります。 -さらに、特定のケースにおいては、レビュー担当者がKubernetesの技術的なレビュアーに対してレビューを依頼することもあります。レビュー担当者はタイムリーにフィードバックを提供するために最善を尽くしますが、応答時間は状況に応じて異なる場合があります。 +> [!NOTE] +ローカライゼーションにおいては、技術的なレビューを行うことはありません。技術的なレビューは英語版のドキュメントに対してのみ行われます。 -Kubernetesのドキュメントへの貢献に関する詳細については以下のページをご覧ください: +Kubernetesのドキュメントへの貢献に関する詳細については以下のページを確認してください: + +> [!NOTE] 
+日本語のローカライゼーションを行う際は、[Kubernetesのドキュメントを翻訳する](https://kubernetes.io/ja/docs/contribute/localization/)が参照すべきガイドとなります。 * [Kubernetesのドキュメントへの貢献](https://kubernetes.io/ja/docs/contribute/) * [ページコンテントタイプ](https://kubernetes.io/docs/contribute/style/page-content-types/) * [ドキュメントのスタイルガイド](https://kubernetes.io/docs/contribute/style/style-guide/) * [Kubernetesドキュメントの翻訳方法](https://kubernetes.io/docs/contribute/localization/) -### New Contributor Ambassadors +### 新たなコントリビューターのためのアンバサダー + +> [!NOTE] +日本語のローカライゼーションに関する質問は、[Slack #kubernetes-docs-ja チャンネル](https://kubernetes.slack.com/messages/kubernetes-docs-ja)にてお気軽にお尋ねください。 コントリビュートする時に何か助けが必要なら、[New Contributor Ambassadors](https://kubernetes.io/docs/contribute/advanced/#serve-as-a-new-contributor-ambassador)に聞いてみると良いでしょう。彼らはSIG Docsのapproverで、最初の数回のPull Requestを通して新しいコントリビューターを指導し助けることを責務としています。New Contributors Ambassadorsにコンタクトするには、[Kubernetes Slack](https://slack.k8s.io)が最適な場所です。現在のSIG DocsのNew Contributor Ambassadorは次の通りです: @@ -186,7 +212,7 @@ Kubernetesのドキュメントへの貢献に関する詳細については以 ### 行動規範 -Kubernetesコミュニティへの参加については、[CNCFの行動規範](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)によって管理されています。 +Kubernetesコミュニティへの参加については、[CNCFの行動規範](https://github.com/cncf/foundation/blob/main/code-of-conduct-languages/ja.md)によって管理されています。 ## ありがとうございます! diff --git a/README.md b/README.md index 6e05028efde82..d3117991cb501 100644 --- a/README.md +++ b/README.md @@ -192,13 +192,14 @@ If you need help at any point when contributing, the [New Contributor Ambassador | Language | Language | | -------------------------- | -------------------------- | -| [Chinese](README-zh.md) | [Korean](README-ko.md) | -| [French](README-fr.md) | [Polish](README-pl.md) | -| [German](README-de.md) | [Portuguese](README-pt.md) | -| [Hindi](README-hi.md) | [Russian](README-ru.md) | -| [Indonesian](README-id.md) | [Spanish](README-es.md) | -| [Italian](README-it.md) | [Ukrainian](README-uk.md) | -| [Japanese](README-ja.md) | [Vietnamese](README-vi.md) | +| [Bengali](README-bn.md) | [Korean](README-ko.md) | +| [Chinese](README-zh.md) | [Polish](README-pl.md) | +| [French](README-fr.md) | [Portuguese](README-pt.md) | +| [German](README-de.md) | [Russian](README-ru.md) | +| [Hindi](README-hi.md) | [Spanish](README-es.md) | +| [Indonesian](README-id.md) | [Ukrainian](README-uk.md) | +| [Italian](README-it.md) | [Vietnamese](README-vi.md) | +| [Japanese](README-ja.md) | | ## Code of conduct diff --git a/SECURITY_CONTACTS b/SECURITY_CONTACTS index 45a06ee063d91..8641869b25aa5 100644 --- a/SECURITY_CONTACTS +++ b/SECURITY_CONTACTS @@ -17,3 +17,5 @@ tengqm onlydole kbhawkey natalisucks +salaxander +katcosgrove diff --git a/static/images/copycode.svg b/assets/images/copycode.svg similarity index 100% rename from static/images/copycode.svg rename to assets/images/copycode.svg diff --git a/static/images/favicon.png b/assets/images/kubernetes.png similarity index 100% rename from static/images/favicon.png rename to assets/images/kubernetes.png diff --git a/assets/scss/_base.scss b/assets/scss/_base.scss index c432b9d26addc..64e84295ff476 100644 --- a/assets/scss/_base.scss +++ b/assets/scss/_base.scss @@ -77,10 +77,10 @@ footer { font-size: 1rem; border: 0px; - } - - .button:hover { - background-color: darken($blue, 10%); + &:hover { + background-color: darken($blue, 10%); + color: white; + } } #cellophane { @@ -104,6 +104,11 @@ main { text-decoration: none; font-size: 1rem; border: 0px; + + &:hover { + background-color: darken($blue, 10%); + color: white; + } } } @@ -554,7 +559,7 @@ 
section#cncf { #desktopKCButton:hover{ background-color: #ffffff; - color: #3371e3; + color: #326ce5; transition: 150ms; } diff --git a/assets/scss/_custom.scss b/assets/scss/_custom.scss index 934371db72061..5f8e91f6f21af 100644 --- a/assets/scss/_custom.scss +++ b/assets/scss/_custom.scss @@ -135,7 +135,7 @@ body.td-404 main .error-details { height: 44px; background-repeat: no-repeat; background-size: contain; - background-image: url("/images/favicon.png"); + background-image: url("/images/logo-header.png"); } #hamburger { @@ -497,14 +497,33 @@ body { border-left-width: calc(max(0.5em, 4px)); border-top-left-radius: calc(max(0.5em, 4px)); border-bottom-left-radius: calc(max(0.5em, 4px)); + padding-top: 0.75rem; } - .alert.callout.caution { + .alert.alert-caution { border-left-color: #f0ad4e; } - .alert.callout.note { + .alert.alert-info { border-left-color: #428bca; + h4, h4.alert-heading { + color: #000; + display: block; + float: left; + font-size: 1rem; + padding: 0; + padding-right: 0.5rem; + margin: 0; + line-height: 1.5; + font-weight: bolder; + } + } + .alert.alert-caution { + border-left-color: #f0ad4e; + h4, h4.alert-heading { + font-size: 1em; + font-weight: bold; + } } - .alert.callout.warning { + .alert.alert-warning { border-left-color: #d9534f; } .alert.third-party-content { @@ -728,7 +747,7 @@ body.cid-partners { line-height: 40px; color: #ffffff; font-size: 16px; - background-color: #3371e3; + background-color: #326ce5; text-decoration: none; } @@ -850,6 +869,44 @@ body.cid-community > #deprecation-warning > .deprecation-warning > * { background-color: inherit; } +body.cid-code-of-conduct main { + max-width: calc(min(90vw, 100em)); + padding-top: 3rem; + padding-left: 0.5em; + padding-right: 0.5em; + margin-left: auto; + margin-right: auto; + + #cncf-code-of-conduct { + margin-top: 4rem; + margin-bottom: 4rem; + padding-left: 4rem; + + > h2, h3, h4, h5 { + color: #0662EE; + } + + > h2:first-child { + margin-top: 0.25em; + margin-bottom: 1em; + } + } + + > hr { + margin-top: 4rem; + margin-bottom: 4rem; + } + + > hr:last-of-type ~ * { + text-align: center; + font-size: 1.15rem; + } + + > *:last-child { + margin-bottom: 4rem; + } +} + #caseStudies body > #deprecation-warning > .deprecation-warning, body.cid-casestudies > #deprecation-warning > .deprecation-warning { color: inherit; background: inherit; @@ -1296,10 +1353,11 @@ div.alert > em.javascript-required { flex-grow: 1; overflow-x: hidden; width: auto; -} -.search-bar:focus-within { - border: 2.5px solid rgba(47, 135, 223, 0.7); + &:focus-within { + outline: 1.5px solid rgba(47, 135, 223, 0.7); + border: 1px solid rgba(47, 135, 223, 0.7); + } } .search-bar i.search-icon { @@ -1313,3 +1371,46 @@ div.alert > em.javascript-required { outline: none; padding: .5em 0 .5em 0; } + +/* CSS for 'figure' full-screen display */ + +/* Define styles for full-screen overlay */ +.figure-fullscreen-overlay { + position: fixed; + inset: 0; + z-index: 9999; + background-color: rgba(255, 255, 255, 0.95); /* White background with some transparency */ + display: flex; + justify-content: center; + align-items: center; + padding: calc(5% + 20px); + box-sizing: border-box; +} + +/* CSS class to scale the image when zoomed */ +.figure-zoomed { + transform: scale(1.2); +} + +/* Define styles for full-screen image */ +.figure-fullscreen-img { + max-width: 100%; + max-height: 100%; + object-fit: contain; /* Maintain aspect ratio and fit within the container */ +} + +/* Define styles for close button */ +.figure-close-button { + position: 
absolute; + top: 1%; + right: 2%; + cursor: pointer; + font-size: calc(5vw + 10px); + color: #333; +} + +.code-sample > .copy-code-icon { + cursor: pointer; + text-align: right; + padding: 0.2rem; +} \ No newline at end of file diff --git a/assets/scss/_skin.scss b/assets/scss/_skin.scss index 32c94b9dc521a..d8b69d1218027 100644 --- a/assets/scss/_skin.scss +++ b/assets/scss/_skin.scss @@ -1,4 +1,4 @@ -$blue: #3371e3; +$blue: #326ce5; $light-grey: #f7f7f7; $dark-grey: #303030; $medium-grey: #4c4c4c; diff --git a/assets/scss/_variables_project.scss b/assets/scss/_variables_project.scss index fd1dddd6075f4..386881e9ff8b1 100644 --- a/assets/scss/_variables_project.scss +++ b/assets/scss/_variables_project.scss @@ -12,7 +12,7 @@ Add styles or override variables from the theme here. */ @import "tablet"; @import "desktop"; -$primary: #3371e3; +$primary: #326ce5; // tooltip $tooltip-bg: #555; diff --git a/content/bn/OWNERS b/content/bn/OWNERS new file mode 100644 index 0000000000000..7edc7c9b3b641 --- /dev/null +++ b/content/bn/OWNERS @@ -0,0 +1,14 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# This is the localization project for Bengali. +# Teams and members are visible at https://github.com/orgs/kubernetes/teams. + +reviewers: +- sig-docs-bn-reviews + +approvers: +- sig-docs-bn-owners + +labels: +- area/localization +- language/bn diff --git a/content/bn/_common-resources/images/blocks.svg b/content/bn/_common-resources/images/blocks.svg new file mode 100644 index 0000000000000..623019b1e9d98 --- /dev/null +++ b/content/bn/_common-resources/images/blocks.svg @@ -0,0 +1 @@ +kubernetes_icons diff --git a/content/bn/_common-resources/images/flower.svg b/content/bn/_common-resources/images/flower.svg new file mode 100644 index 0000000000000..334ae9c8b4b80 --- /dev/null +++ b/content/bn/_common-resources/images/flower.svg @@ -0,0 +1 @@ +kubernetes_icons diff --git a/content/bn/_common-resources/images/kub_video_banner_homepage.jpg b/content/bn/_common-resources/images/kub_video_banner_homepage.jpg new file mode 100644 index 0000000000000..e40d92a50363b Binary files /dev/null and b/content/bn/_common-resources/images/kub_video_banner_homepage.jpg differ diff --git a/content/bn/_common-resources/images/scalable.svg b/content/bn/_common-resources/images/scalable.svg new file mode 100644 index 0000000000000..900eef34ff10a --- /dev/null +++ b/content/bn/_common-resources/images/scalable.svg @@ -0,0 +1 @@ +kubernetes_icons diff --git a/content/bn/_common-resources/images/suitcase.svg b/content/bn/_common-resources/images/suitcase.svg new file mode 100644 index 0000000000000..687a444bc2981 --- /dev/null +++ b/content/bn/_common-resources/images/suitcase.svg @@ -0,0 +1 @@ +kubernetes_icons diff --git a/content/bn/_common-resources/index.md b/content/bn/_common-resources/index.md new file mode 100644 index 0000000000000..ca03031f1ee91 --- /dev/null +++ b/content/bn/_common-resources/index.md @@ -0,0 +1,3 @@ +--- +headless: true +--- diff --git a/content/bn/_index.html b/content/bn/_index.html new file mode 100644 index 0000000000000..999713808f0e3 --- /dev/null +++ b/content/bn/_index.html @@ -0,0 +1,65 @@ +--- +title: "প্রোডাকশন-গ্রেড কন্টেইনার অর্কেস্ট্রেশন" +abstract: "স্বয়ংক্রিয় কন্টেইনার ডিপ্লয়মেন্ট, স্কেলিং এবং ব্যবস্থাপনা" +cid: home +sitemap: + priority: 1.0 +--- + +{{< site-searchbar >}} + +{{< blocks/section id="oceanNodes" >}} +{{% blocks/feature image="flower" %}} +[কুবারনেটিস]({{< relref "/docs/concepts/overview/" >}}), K8s নামেও পরিচিত, কনটেইনারাইজড অ্যাপ্লিকেশনের স্বয়ংক্রিয় 
ডিপ্লয়মেন্ট, স্কেলিং এবং পরিচালনার জন্য একটি ওপেন-সোর্স সিস্টেম। + +এটি সহজ ব্যবস্থাপনা এবং আবিষ্কারের জন্য লজিক্যাল ইউনিটে একটি অ্যাপ্লিকেশন তৈরি করে এমন কন্টেইনারগুলিকে গোষ্ঠীভুক্ত করে। কুবারনেটিস [Google-এ প্রোডাকশন ওয়ার্কলোড চালানোর 15 বছরের অভিজ্ঞতার ভিত্তিতে](http://queue.acm.org/detail.cfm?id=2898444) তৈরি করে, কমিউনিটির সেরা ধারণা এবং অনুশীলনের সাথে মিলিত ভাবে। +{{% /blocks/feature %}} + +{{% blocks/feature image="scalable" %}} +#### বিশ্বব্যাপী স্কেল + +Google সপ্তাহে বিলিয়ন কন্টেইনার চালানোর জন্য যে নীতিতে ডিজাইন প্রয়োগ করে, সেই একই নীতিতে কুবারনেটিস ডিজাইন করা হয়, ফলস্বরূপ কুবারনেটিস ব্যবহারকারীরা অপারেশন টিম না বাড়িয়ে স্কেল করতে পারে। + +{{% /blocks/feature %}} + +{{% blocks/feature image="blocks" %}} +#### কখনই আউটগ্রো করবে না + +স্থানীয়ভাবে পরীক্ষা করা হোক বা বিশ্বব্যাপী এন্টারপ্রাইজ চালানো হোক না কেন, আপনার প্রয়োজনীয়তা যত জটিলই হোক না কেন আপনার অ্যাপ্লিকেশনগুলিকে ধারাবাহিকভাবে এবং সহজে সরবরাহ করতে কুবারনেটিসের নমনীয়তা আপনার সাথে বৃদ্ধি পায়। + +{{% /blocks/feature %}} + +{{% blocks/feature image="suitcase" %}} +#### যে কোন জায়গায় K8s চালান + +কুবারনেটিস হল ওপেন সোর্স যা আপনাকে অন-প্রিমিসেস, হাইব্রিড বা পাবলিক ক্লাউড অবকাঠামোর সুবিধা নেওয়ার স্বাধীনতা দেয়, যাতে আপনি সহজেই কাজের চাপগুলি যেখানে আপনার কাছে গুরুত্বপূর্ণ সেখানে স্থানান্তর করতে পারেন। + +কুবারনেটিস ডাউনলোড করতে, [ডাউনলোড](/bn/releases/download/) বিভাগে যান। + +{{% /blocks/feature %}} + +{{< /blocks/section >}} + +{{< blocks/section id="video" background-image="kub_video_banner_homepage" >}} +
+<div class="light-text">
+<h2>150+ মাইক্রোসার্ভিস কুবারনেটিসে স্থানান্তরিত করার চ্যালেঞ্জ</h2>
+<p>সারাহ ওয়েলস দ্বারা, অপারেশনস এবং নির্ভরযোগ্যতার জন্য প্রযুক্তিগত পরিচালক, ফিনান্সিয়াল টাইমস</p>
+</div>
+<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/" button id="desktopKCButton">
+১৯-২২ মার্চ, ২০২৪-এ KubeCon + CloudNativeCon ইউরোপে যোগ দিন
+</a>
+<a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-north-america/" button id="desktopKCButton">
+১২-১৫ নভেম্বর, ২০২৪-এ KubeCon + CloudNativeCon উত্তর আমেরিকাতে যোগ দিন
+</a>
+{{< /blocks/section >}} + +{{< blocks/kubernetes-features >}} + +{{< blocks/case-studies >}} diff --git a/content/bn/blog/_index.md b/content/bn/blog/_index.md new file mode 100644 index 0000000000000..feaefbf8a39e6 --- /dev/null +++ b/content/bn/blog/_index.md @@ -0,0 +1,14 @@ +--- +title: কুবারনেটিস ব্লগ +linkTitle: ব্লগ +menu: + main: + title: "ব্লগ" + weight: 20 +--- +{{< comment >}} + +ব্লগে অবদান সম্পর্কে তথ্যের জন্য, দেখুন +https://kubernetes.io/docs/contribute/new-content/blogs-case-studies/#write-a-blog-post + +{{< /comment >}} diff --git a/content/bn/blog/_posts/2023-12-18-read-write-once-pod-access-mode-ga.md b/content/bn/blog/_posts/2023-12-18-read-write-once-pod-access-mode-ga.md new file mode 100644 index 0000000000000..03366c8c9fba0 --- /dev/null +++ b/content/bn/blog/_posts/2023-12-18-read-write-once-pod-access-mode-ga.md @@ -0,0 +1,99 @@ +--- +layout: blog +title: "কুবারনেটিস 1.29: পারসিস্টেন্টভলিউম গ্র্যাজুয়েটদের জন্য একক পড অ্যাক্সেস মোড" +date: 2023-12-18 +slug: read-write-once-pod-access-mode-ga +author: > + Chris Henzie (Google) +--- + +Kubernetes v1.29 প্রকাশের সাথে, ReadWriteOncePod ভলিউম অ্যাক্সেস মোড +সবার জন্য গ্র্যাজুয়েট হয়েছে: এটি কুবারনেটিস এর স্থিতিশীল API এর অংশ। +এই ব্লগ পোস্টে, আমি এই অ্যাক্সেস মোড এবং এটি কী করে তা আরও ঘনিষ্ঠভাবে দেখব। + +## `ReadWriteOncePod` কি? + +`ReadWriteOncePod` হলো +[পারসিস্টেন্টভলিউম(PersistentVolumes (PVs))](/docs/concepts/storage/persistent-volumes/#persistent-volumes) এবং +[পারসিস্টেন্টভলিউমক্লেমস(PersistentVolumeClaims (PVCs))](/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) +এর জন্য একটি অ্যাক্সেস মোড যা কুবারনেটিস v1.22-এ চালু করা হয়েছে। এই অ্যাক্সেস মোড +আপনাকে ক্লাস্টারে একটি একক পডে ভলিউম অ্যাক্সেস সীমাবদ্ধ করতে সক্ষম করে, এটি নিশ্চিত +করে যে একটি সময়ে শুধুমাত্র একটি পড ভলিউমে লিখতে পারে। এটি স্টেটফুল ওয়ার্কলোডগুলির +জন্য বিশেষভাবে উপযোগী হতে পারে যার জন্য স্টোরেজে একক-লেখকের অ্যাক্সেস প্রয়োজন। + +অ্যাক্সেস মোড এবং `ReadWriteOncePod` কীভাবে কাজ করে সে সম্পর্কে আরও প্রসঙ্গের জন্য পড়ুন +[অ্যাক্সেস মোডগুলি কী এবং কেন সেগুলি গুরুত্বপূর্ণ?](/blog/2021/09/13/read-write-once-pod-access-mode-alpha/#what-are-access-modes-and-why-are-they-important) +2021 থেকে পারসিস্টেন্টভলিউম নিবন্ধের জন্য একক পড অ্যাক্সেস মোড প্রবর্তন করা হয়েছে । + +## কিভাবে আমি `ReadWriteOncePod` ব্যবহার শুরু করতে পারি? + +ReadWriteOncePod ভলিউম অ্যাক্সেস মোড ডিফল্টরূপে কুবারনেটিস ভার্সন v1.27 +এবং তার পরে উপলব্ধ। কুবারনেটিস v1.29 এবং পরবর্তীতে, কুবারনেটিস API +সর্বদা এই অ্যাক্সেস মোডকে স্বীকৃতি দেয়। + +মনে রাখবেন যে `ReadWriteOncePod` +[শুধুমাত্র CSI ভলিউমগুলির জন্য সাপোর্টেড](/docs/concepts/storage/persistent-volumes/#access-modes), +এবং এই বৈশিষ্ট্যটি ব্যবহার করার আগে, আপনাকে নিম্নলিখিত +[CSI সাইডকারগুলিকে](https://kubernetes-csi.github.io/docs/sidecar-containers.html) +এই ভার্সনগুলিতে বা তার বেশি আপডেট করতে হবে: + +- [csi-provisioner:v3.0.0+](https://github.com/kubernetes-csi/external-provisioner/releases/tag/v3.0.0) +- [csi-attacher:v3.3.0+](https://github.com/kubernetes-csi/external-attacher/releases/tag/v3.3.0) +- [csi-resizer:v1.3.0+](https://github.com/kubernetes-csi/external-resizer/releases/tag/v1.3.0) + +`ReadWriteOncePod` ব্যবহার শুরু করতে, আপনাকে `ReadWriteOncePod` +অ্যাক্সেস মোড সহ একটি PVC তৈরি করতে হবে: + +```yaml +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: single-writer-only +spec: + accessModes: + - ReadWriteOncePod # Allows only a single pod to access single-writer-only. 
+ resources: + requests: + storage: 1Gi +``` + +যদি আপনার স্টোরেজ প্লাগইন +[ডায়নামিক প্রভিশনিং সাপোর্টে করে](/docs/concepts/storage/dynamic-provisioning/), +তাহলে `ReadWriteOncePod` অ্যাক্সেস মোড প্রয়োগ করে নতুন +পারসিস্টেন্টভলিউম তৈরি করা হবে। + +`ReadWriteOncePod` ব্যবহার করার জন্য বিদ্যমান ভলিউম স্থানান্তরিত করার বিশদ বিবরণের জন্য +[বিদ্যমান পারসিস্টেন্টভলিউম স্থানান্তর করা](/blog/2021/09/13/read-write-once-pod-access-mode-alpha/#migrating-existing-persistentvolumes) পড়ুন । + +## আমি কীভাবে আরও শিখতে পারি? + +`ReadWriteOncePod` অ্যাক্সেস মোড এবং CSI স্পেক পরিবর্তনের প্রেরণা সম্পর্কে +আরও বিশদ বিবরণের জন্য অনুগ্রহ করে ব্লগ পোস্টগুলি +[alpha](/blog/2021/09/13/read-write-once-pod-access-mode-alpha), +[beta](/blog/2023/04/20/read-write-once-pod-access-mode-beta), এবং +[KEP-2485](https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/2485-read-write-once-pod-pv-access-mode/README.md) দেখুন৷ + +## আমি কিভাবে জড়িত হতে পারি? + +[কুবারনেটিস #csi স্ল্যাক চ্যানেল](https://kubernetes.slack.com/messages/csi) এবং +যে কোনো স্ট্যান্ডার্ড +[SIG স্টোরেজ কমিউনিকেশন চ্যানেল](https://github.com/kubernetes/community/blob/master/sig-storage/README.md#contact) +হলো SIG স্টোরেজ এবং CSI টিমের কাছে পৌঁছানোর দুর্দান্ত পদ্ধতি। + +নিম্নলিখিত ব্যক্তিদের বিশেষ ধন্যবাদ যাদের চিন্তাশীল পর্যালোচনা এবং প্রতিক্রিয়া এই বৈশিষ্ট্যটি গঠনে সহায়তা করেছে: + +* Abdullah Gharaibeh (ahg-g) +* Aldo Culquicondor (alculquicondor) +* Antonio Ojea (aojea) +* David Eads (deads2k) +* Jan Šafránek (jsafrane) +* Joe Betz (jpbetz) +* Kante Yin (kerthcet) +* Michelle Au (msau42) +* Tim Bannister (sftim) +* Xing Yang (xing-yang) + +আপনি যদি CSI বা কুবারনেটিস স্টোরেজ সিস্টেমের যেকোন অংশের ডিজাইন +এবং বিকাশের সাথে জড়িত হতে আগ্রহী হন, তাহলে +[কুবারনেটিস স্টোরেজ স্পেশাল ইন্টারেস্ট গ্রুপে](https://github.com/kubernetes/community/tree/master/sig-storage) (Special Interest Group(SIG)) যোগ দিন। +আমরা দ্রুত বৃদ্ধি করছি এবং সবসময় নতুন অবদানকারীদের স্বাগত জানাই। diff --git a/content/bn/case-studies/_index.html b/content/bn/case-studies/_index.html new file mode 100644 index 0000000000000..48b93d73c77b2 --- /dev/null +++ b/content/bn/case-studies/_index.html @@ -0,0 +1,13 @@ +--- +title: কেস স্টাডিজ +linkTitle: কেস স্টাডিজ +bigheader: কুবারনেটিস ব্যবহারকারীদের কেস স্টাডিজ +abstract: কুবারনেটিস ব্যবহারকারীদের একটি সংগ্রহ যারা প্রোডাকশনে এটি ব্যবহার করে. +layout: basic +class: gridPage +body_class: caseStudies +cid: caseStudies +menu: + main: + weight: 60 +--- diff --git a/content/bn/case-studies/adform/adform_featured_logo.png b/content/bn/case-studies/adform/adform_featured_logo.png new file mode 100644 index 0000000000000..cd0fa7b6c9c4e Binary files /dev/null and b/content/bn/case-studies/adform/adform_featured_logo.png differ diff --git a/content/bn/case-studies/adform/adform_featured_logo.svg b/content/bn/case-studies/adform/adform_featured_logo.svg new file mode 100644 index 0000000000000..b31ef3235a471 --- /dev/null +++ b/content/bn/case-studies/adform/adform_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/adform/index.html b/content/bn/case-studies/adform/index.html new file mode 100644 index 0000000000000..0be118b04673a --- /dev/null +++ b/content/bn/case-studies/adform/index.html @@ -0,0 +1,86 @@ +--- +title: Adform Case Study +linkTitle: Adform +case_study_styles: true +cid: caseStudies +logo: adform_featured_logo.png +draft: false +featured: true +weight: 47 +quote: > + Kubernetes enabled the self-healing and immutable infrastructure. 
We can do faster releases, so our developers are really happy. They can ship our features faster than before, and that makes our clients happier. + +new_case_study_styles: true +heading_background: /images/case-studies/adform/banner1.jpg +heading_title_logo: /images/adform_logo.png +subheading: > + Improving Performance and Morale with Cloud Native +case_study_details: + - Company: AdForm + - Location: Copenhagen, Denmark + - Industry: Adtech +--- + +

+<h2>Challenge</h2>
+
+<p>Adform's mission is to provide a secure and transparent full stack of advertising technology to enable digital ads across devices. The company has a large infrastructure: OpenStack-based private clouds running on 1,100 physical servers in 7 data centers around the world, 3 of which were opened in the past year. With the company's growth, the infrastructure team felt that "our private cloud was not really flexible enough," says IT System Engineer Edgaras Apšega. "The biggest pain point is that our developers need to maintain their virtual machines, so rolling out technology and new software takes time. We were really struggling with our releases, and we didn't have self-healing infrastructure."</p>
+
+<h2>Solution</h2>
+
+<p>The team, which had already been using Prometheus for monitoring, embraced Kubernetes and cloud native practices in 2017. "To start our Kubernetes journey, we had to adapt all our software, so we had to choose newer frameworks," says Apšega. "We also adopted the microservices way, so observability is much better because you can inspect the bug or the services separately."</p>
+
+<h2>Impact</h2>
+
+<p>"Kubernetes helps our business a lot because our features are coming to market faster," says Apšega. The release process went from several hours to several minutes. Autoscaling has been at least 6 times faster than the semi-manual VM bootstrapping and application deployment required before. The team estimates that the company has experienced cost savings of 4-5x due to less hardware and fewer man hours needed to set up the hardware and virtual machines, metrics, and logging. Utilization of the hardware resources has been reduced as well, with containers notching 2-3 times more efficiency over virtual machines. "The deployments are very easy because developers just push the code and it automatically appears on Kubernetes," says Apšega. Prometheus has also had a positive impact: "It provides high availability for metrics and alerting. We monitor everything starting from hardware to applications. Having all the metrics in Grafana dashboards provides great insight on your systems."</p>
+ +{{< case-studies/quote author="Edgaras Apšega, IT Systems Engineer, Adform" >}} +"Kubernetes enabled the self-healing and immutable infrastructure. We can do faster releases, so our developers are really happy. They can ship our features faster than before, and that makes our clients happier." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Adform made headlines last year when it detected the HyphBot ad fraud network that was costing some businesses hundreds of thousands of dollars a day. +{{< /case-studies/lead >}} + +

+<p>With its mission to provide a secure and transparent full stack of advertising technology to enable an open internet, Adform published a white paper revealing what it did—and others could too—to limit customers' exposure to the scam.</p>
+
+<p>In that same spirit, Adform is sharing its cloud native journey. "When you see that everyone shares their best practices, it inspires you to contribute back to the project," says IT Systems Engineer Edgaras Apšega.</p>
+
+<p>The company has a large infrastructure: OpenStack-based private clouds running on 1,100 physical servers in their own seven data centers around the world, three of which were opened in the past year. With the company's growth, the infrastructure team felt that "our private cloud was not really flexible enough," says Apšega. "The biggest pain point is that our developers need to maintain their virtual machines, so rolling out technology and new software really takes time. We were really struggling with our releases, and we didn't have self-healing infrastructure."</p>
+ +{{< case-studies/quote + image="/images/case-studies/adform/banner3.jpg" + author="Edgaras Apšega, IT Systems Engineer, Adform" +>}} +"The fact that Cloud Native Computing Foundation incubated Kubernetes was a really big point for us because it was vendor neutral. And we can see that a community really gathers around it. Everyone shares their experiences, their knowledge, and the fact that it's open source, you can contribute." +{{< /case-studies/quote >}} + +

+<p>The team, which had already been using Prometheus for monitoring, embraced Kubernetes, microservices, and cloud native practices. "The fact that Cloud Native Computing Foundation incubated Kubernetes was a really big point for us because it was vendor neutral," says Apšega. "And we can see that a community really gathers around it."</p>
+
+<p>A proof of concept project was started, with a Kubernetes cluster running on bare metal in the data center. When developers saw how quickly containers could be spun up compared to the virtual machine process, "they wanted to ship their containers in production right away, and we were still doing proof of concept," says IT Systems Engineer Andrius Cibulskis.</p>
+
+<p>Of course, a lot of work still had to be done. "First of all, we had to learn Kubernetes, see all of the moving parts, how they glue together," says Apšega. "Second of all, the whole CI/CD part had to be redone, and our DevOps team had to invest more man hours to implement it. And third is that developers had to rewrite the code, and they're still doing it."</p>
+
+<p>The first production cluster was launched in the spring of 2018, and is now up to 20 physical machines dedicated for pods throughout three data centers, with plans for separate clusters in the other four data centers. The user-facing Adform application platform, data distribution platform, and back ends are now all running on Kubernetes. "Many APIs for critical applications are being developed for Kubernetes," says Apšega. "Teams are rewriting their applications to .NET core, because it supports containers, and preparing to move to Kubernetes. And new applications, by default, go in containers."</p>
+ +{{< case-studies/quote + image="/images/case-studies/adform/banner4.jpg" + author="Andrius Cibulskis, IT Systems Engineer, Adform" +>}} +"Releases are really nice for them, because they just push their code to Git and that's it. They don't have to worry about their virtual machines anymore." +{{< /case-studies/quote >}} + +

+<p>This big push has been driven by the real impact that these new practices have had. "Kubernetes helps our business a lot because our features are coming to market faster," says Apšega. "The deployments are very easy because developers just push the code and it automatically appears on Kubernetes." The release process went from several hours to several minutes. Autoscaling is at least six times faster than the semi-manual VM bootstrapping and application deployment required before.</p>
+
+<p>The team estimates that the company has experienced cost savings of 4-5x due to less hardware and fewer man hours needed to set up the hardware and virtual machines, metrics, and logging. Utilization of the hardware resources has been reduced as well, with containers notching two to three times more efficiency over virtual machines.</p>
+
+<p>Prometheus has also had a positive impact: "It provides high availability for metrics and alerting," says Apšega. "We monitor everything starting from hardware to applications. Having all the metrics in Grafana dashboards provides great insight on our systems."</p>
+ +{{< case-studies/quote author="Edgaras Apšega, IT Systems Engineer, Adform" >}} +"I think that our company just started our cloud native journey. It seems like a huge road ahead, but we're really happy that we joined it." +{{< /case-studies/quote >}} + +

+<p>All of these benefits have trickled down to individual team members, whose working lives have been changed for the better. "They used to have to get up at night to re-start some services, and now Kubernetes handles all of that," says Apšega. Adds Cibulskis: "Releases are really nice for them, because they just push their code to Git and that's it. They don't have to worry about their virtual machines anymore." Even the security teams have been impacted. "Security teams are always not happy," says Apšega, "and now they're happy because they can easily inspect the containers."</p>
+
+<p>The company plans to remain in the data centers for now, "mostly because we want to keep all the data, to not share it in any way," says Cibulskis, "and it's cheaper at our scale." But, Apšega says, the possibility of using a hybrid cloud for computing is intriguing: "One of the projects we're interested in is the Virtual Kubelet that lets you spin up the working nodes on different clouds to do some computing."</p>
+
+<p>Apšega, Cibulskis and their colleagues are keeping tabs on how the cloud native ecosystem develops, and are excited to contribute where they can. "I think that our company just started our cloud native journey," says Apšega. "It seems like a huge road ahead, but we're really happy that we joined it."</p>
\ No newline at end of file diff --git a/content/bn/case-studies/adidas/adidas-featured.svg b/content/bn/case-studies/adidas/adidas-featured.svg new file mode 100644 index 0000000000000..a53d4675522fe --- /dev/null +++ b/content/bn/case-studies/adidas/adidas-featured.svg @@ -0,0 +1 @@ +kubernetes.io-54664 \ No newline at end of file diff --git a/content/bn/case-studies/adidas/index.html b/content/bn/case-studies/adidas/index.html new file mode 100644 index 0000000000000..f656698f70b9b --- /dev/null +++ b/content/bn/case-studies/adidas/index.html @@ -0,0 +1,78 @@ +--- +title: adidas Case Study +linkTitle: adidas +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/adidas/banner1.png +heading_title_text: adidas +use_gradient_overlay: true +subheading: > + Staying True to Its Culture, adidas Got 40% of Its Most Impactful Systems Running on Kubernetes in a Year +case_study_details: + - Company: adidas + - Location: Herzogenaurach, Germany + - Industry: Fashion +--- + +

+<h2>Challenge</h2>
+
+<p>In recent years, the adidas team was happy with its software choices from a technology perspective—but accessing all of the tools was a problem. For instance, "just to get a developer VM, you had to send a request form, give the purpose, give the title of the project, who's responsible, give the internal cost center a call so that they can do recharges," says Daniel Eichten, Senior Director of Platform Engineering. "The best case is you got your machine in half an hour. Worst case is half a week or sometimes even a week."</p>
+
+<h2>Solution</h2>
+
+<p>To improve the process, "we started from the developer point of view," and looked for ways to shorten the time it took to get a project up and running and into the adidas infrastructure, says Senior Director of Platform Engineering Fernando Cornago. They found the solution with containerization, agile development, continuous delivery, and a cloud native platform that includes Kubernetes and Prometheus.</p>
+
+<h2>Impact</h2>
+
+<p>Just six months after the project began, 100% of the adidas e-commerce site was running on Kubernetes. Load time for the e-commerce site was reduced by half. Releases went from every 4-6 weeks to 3-4 times a day. With 4,000 pods, 200 nodes, and 80,000 builds per month, adidas is now running 40% of its most critical, impactful systems on its cloud native platform.</p>
+ +{{< case-studies/quote + image="/images/case-studies/adidas/banner2.png" + author="FERNANDO CORNAGO, SENIOR DIRECTOR OF PLATFORM ENGINEERING AT ADIDAS" +>}} +"For me, Kubernetes is a platform made by engineers for engineers. It's relieving the development team from tasks that they don't want to do, but at the same time giving the visibility of what is behind the curtain, so they can also control it." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +In recent years, the adidas team was happy with its software choices from a technology perspective—but accessing all of the tools was a problem. +{{< /case-studies/lead >}} + +

+<p>For engineers at adidas, says Daniel Eichten, Senior Director of Platform Engineering, "it felt like being an artist with your hands tied behind your back, and you're supposed to paint something."</p>
+
+<p>For instance, "just to get a developer VM, you had to send a request form, give the purpose, give the title of the project, who's responsible, give the internal cost center a call so that they can do recharges," says Eichten. "Eventually, after a ton of approvals, then the provisioning of the machine happened within minutes, and then the best case is you got your machine in half an hour. Worst case is half a week or sometimes even a week."</p>
+
+<p>To improve the process, "we started from the developer point of view," and looked for ways to shorten the time it took to get a project up and running and into the adidas infrastructure, says Senior Director of Platform Engineering Fernando Cornago.</p>
+{{< case-studies/quote author="DANIEL EICHTEN, SENIOR DIRECTOR OF PLATFORM ENGINEERING AT ADIDAS" >}}
+"I call our cloud native platform the field of dreams. We built it, and we never anticipated that people would come and just love it."
+{{< /case-studies/quote >}}
+
+<p>"We were engineers before," adds Eichten. "We know what a typical engineer needs, is craving for, what he or she doesn't want to take care of. For us it was pretty clear. We filled the gaps that no one wants to take care of, and we make the stuff that is usually painful as painless as possible." The goals: to improve speed, operability, and observability.</p>
+
+<p>Cornago and Eichten found the solution with containerization, agile development, continuous delivery, and a cloud native platform that includes Kubernetes and Prometheus. "Choosing Kubernetes was pretty clear," says Eichten. "Day zero, deciding, easy. Day one, installing, configuring, easy. Day two, keeping it up and running even with small workloads, if something goes wrong, you don't know how these things work in detail, you're lost. For day two problems, we needed a partner who's helping us."</p>
+
+<p>In early 2017, adidas chose Giant Swarm to consult, install, configure, and run all of its Kubernetes clusters in AWS and on premise. "There is no competitive edge over our competitors like Puma or Nike in running and operating a Kubernetes cluster," says Eichten. "Our competitive edge is that we teach our internal engineers how to build cool e-comm stores that are fast, that are resilient, that are running perfectly."</p>
+ +{{< case-studies/quote + image="/images/case-studies/adidas/banner3.png" + author="DANIEL EICHTEN, SENIOR DIRECTOR OF PLATFORM ENGINEERING AT ADIDAS" +>}} +"There is no competitive edge over our competitors like Puma or Nike in running and operating a Kubernetes cluster. Our competitive edge is that we teach our internal engineers how to build cool e-comm stores that are fast, that are resilient, that are running perfectly." +{{< /case-studies/quote >}} + +

Adds Cornago: "For me, our Kubernetes platform is made by engineers for engineers. It's relieving the development team from tasks that they don't want to do, but at the same time giving the visibility of what is behind the curtain, so they can also control it."

+ +

Case in point: For Cyber Week, the team has to create a lot of custom metrics. In November 2017, "because we used the same Prometheus that we use for monitoring the cluster, we really filled the Prometheus database, and we were not able to reduce the retention period [enough]," says Cornago. So during the freeze period before the peak shopping week, five engineers from the platform team worked with five engineers from the e-comm team to figure out a federated solution that was implemented in two days.

+ +

In addition to being ready for Cyber Week—100% of the adidas e-commerce site was running on Kubernetes then, just six months after the project began—the cloud native stack has had other impressive results. Load time for the e-commerce site was reduced by half. Releases went from every 4-6 weeks to 3-4 times a day. With 4,000 pods, 200 nodes, and 80,000 builds per month, adidas is now running 40% of its most critical, impactful systems on its cloud native platform.

+ +

And adoption has spread quickly among adidas's 300-strong engineering corps. "I call our cloud native platform the field of dreams," says Eichten. "We built it, and we never anticipated that people would come and just love it."

+ +

For one thing, "everybody who can touch a line of code" has spent one full week onboarding and learning the platform with members of the 35-person platform engineering team, says Cornago. "We try to spend 50% of our time sitting with the teams, because this is the only way to understand how our platform is being used. And this is how the teams will feel safe that there is someone on the other side of the wall, also feeling the pain."


Additionally, Cornago and Eichten took advantage of the fact that as a fashion athletic wear brand, adidas has sports and competition in its DNA. "Top-down mandates don't work at adidas, but gamification works," says Cornago. "So this year we had a DevOps Cup competition. Every team created new technical capabilities and had a hypothesis of how this affected business value. We announced the winner at a big internal tech summit with more than 600 people. It's been really, really useful for the teams."


So if they had any advice for other companies looking to start a cloud native journey, it would be this: "There is no one-size-fits-all for all companies," says Cornago. "Apply your company's culture to everything that you do."

\ No newline at end of file
diff --git a/content/bn/case-studies/amadeus/amadeus_featured.png b/content/bn/case-studies/amadeus/amadeus_featured.png
new file mode 100644
index 0000000000000..d23d7b0163854
Binary files /dev/null and b/content/bn/case-studies/amadeus/amadeus_featured.png differ
diff --git a/content/bn/case-studies/amadeus/amadeus_featured.svg b/content/bn/case-studies/amadeus/amadeus_featured.svg
new file mode 100644
index 0000000000000..9d0c40b8e18c3
--- /dev/null
+++ b/content/bn/case-studies/amadeus/amadeus_featured.svg
@@ -0,0 +1 @@
+kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/amadeus/amadeus_logo.png b/content/bn/case-studies/amadeus/amadeus_logo.png
new file mode 100644
index 0000000000000..6191c7f6819f2
Binary files /dev/null and b/content/bn/case-studies/amadeus/amadeus_logo.png differ
diff --git a/content/bn/case-studies/amadeus/index.html b/content/bn/case-studies/amadeus/index.html
new file mode 100644
index 0000000000000..c6976f0c0d5f5
--- /dev/null
+++ b/content/bn/case-studies/amadeus/index.html
@@ -0,0 +1,84 @@
---
title: Amadeus Case Study
case_study_styles: true
cid: caseStudies

new_case_study_styles: true
heading_background: /images/case-studies/amadeus/banner1.jpg
heading_title_logo: /images/amadeus_logo.png
subheading: >
  Another Technical Evolution for a 30-Year-Old Company
case_study_details:
  - Company: Amadeus IT Group
  - Location: Madrid, Spain
  - Industry: Travel Technology
---

Challenge


In the past few years, Amadeus, which provides IT solutions to the travel industry around the world, found itself in need of a new platform for the 5,000 services supported by its service-oriented architecture. The 30-year-old company operates its own data center in Germany, and there were growing demands internally and externally for solutions that needed to be geographically dispersed. And more generally, "we had objectives of being even more highly available," says Eric Mountain, Senior Expert, Distributed Systems at Amadeus. Among the company's goals: to increase automation in managing its infrastructure, optimize the distribution of workloads, use data center resources more efficiently, and adopt new technologies more easily.


Solution


Mountain has been overseeing the company's migration to Kubernetes, using OpenShift Container Platform, Red Hat's enterprise container platform.


Impact


One of the first projects the team deployed in Kubernetes was the Amadeus Airline Cloud Availability solution, which helps manage ever-increasing flight-search volume. "It's now handling in production several thousand transactions per second, and it's deployed in multiple data centers throughout the world," says Mountain. "It's not a migration of an existing workload; it's a whole new workload that we couldn't have done otherwise. [This platform] gives us access to market opportunities that we didn't have before."

{{< case-studies/quote author="Eric Mountain, Senior Expert, Distributed Systems at Amadeus IT Group" >}}
"We want multi-data center capabilities, and we want them for our mainstream system as well. We didn't think that we could achieve them with our existing system. We need new automation, things that Kubernetes and OpenShift bring."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
In his two decades at Amadeus, Eric Mountain has been the migrations guy.
{{< /case-studies/lead >}}

Back in the day, he worked on the company's move from Unix to Linux, and now he's overseeing the journey to cloud native. "Technology just keeps changing, and we embrace it," he says. "We are celebrating our 30 years this year, and we continue evolving and innovating to stay cost-efficient and enhance everyone's travel experience, without interrupting workflows for the customers who depend on our technology."


That was the challenge that Amadeus—which provides IT solutions to the travel industry around the world, from flight searches to hotel bookings to customer feedback—faced in 2014. The technology team realized it was in need of a new platform for the 5,000 services supported by its service-oriented architecture.


The tipping point occurred when they began receiving many requests, internally and externally, for solutions that needed to be geographically outside the company's main data center in Germany. "Some requests were for running our applications on customer premises," Mountain says. "There were also new services we were looking to offer that required response time to the order of a few hundred milliseconds, which we couldn't achieve with transatlantic traffic. Or at least, not without eating into a considerable portion of the time available to our applications for them to process individual queries."


More generally, the company was interested in leveling up on high availability, increasing automation in managing infrastructure, optimizing the distribution of workloads and using data center resources more efficiently. "We have thousands and thousands of servers," says Mountain. "These servers are assigned roles, so even if the setup is highly automated, the machine still has a given role. It's wasteful on many levels. For instance, an application doesn't necessarily use the machine very optimally. Virtualization can help a bit, but it's not a silver bullet. If that machine breaks, you still want to repair it because it has that role and you can't simply say, 'Well, I'll bring in another machine and give it that role.' It's not fast. It's not efficient. So we wanted the next level of automation."

{{< case-studies/quote image="/images/case-studies/amadeus/banner3.jpg" >}}
"We hope that if we build on what others have built, what we do might actually be upstream-able. As Kubernetes and OpenShift progress, we see that we are indeed able to remove some of the additional layers we implemented to compensate for gaps we perceived earlier."
{{< /case-studies/quote >}}

While mainly a C++ and Java shop, Amadeus also wanted to be able to adopt new technologies more easily. Some of its developers had started using languages like Python and databases like Couchbase, but Mountain wanted still more options, he says, "in order to better adapt our technical solutions to the products we offer, and open up entirely new possibilities to our developers." Working with recent technologies and cool new things would also make it easier to attract new talent.


All of those needs led Mountain and his team on a search for a new platform. "We did a set of studies and proofs of concept over a fairly short period, and we considered many technologies," he says. "In the end, we were left with three choices: build everything on premise, build on top of Kubernetes whatever happens to be missing from our point of view, or go with OpenShift and build whatever remains there."


The team decided against building everything themselves—though they'd done that sort of thing in the past—because "people were already inventing things that looked good," says Mountain.


Ultimately, they went with OpenShift Container Platform, Red Hat's Kubernetes-based enterprise offering, instead of building on top of Kubernetes because "there was a lot of synergy between what we wanted and the way Red Hat was anticipating going with OpenShift," says Mountain. "They were clearly developing Kubernetes, and developing certain things ahead of time in OpenShift, which were important to us, such as more security."


The hope was that those particular features would eventually be built into Kubernetes, and, in the case of security, Mountain feels that has happened. "We realize that there's always a certain amount of automation that we will probably have to develop ourselves to compensate for certain gaps," says Mountain. "The less we do that, the better for us. We hope that if we build on what others have built, what we do might actually be upstream-able. As Kubernetes and OpenShift progress, we see that we are indeed able to remove some of the additional layers we implemented to compensate for gaps we perceived earlier."

{{< case-studies/quote image="/images/case-studies/amadeus/banner4.jpg" >}}
"It's not a migration of an existing workload; it's a whole new workload that we couldn't have done otherwise. [This platform] gives us access to market opportunities that we didn't have before."
{{< /case-studies/quote >}}

The first project the team tackled was one that they knew had to run outside the data center in Germany. Because of the project's needs, "We couldn't rely only on the built-in Kubernetes service discovery; we had to layer on top of that an extra service discovery level that allows us to load balance at the operation level within our system," says Mountain. They also built a stream dedicated to monitoring, which at the time wasn't offered in the Kubernetes or OpenShift ecosystem. Now that Prometheus and other products are available, Mountain says the company will likely re-evaluate their monitoring system: "We obviously always like to leverage what Kubernetes and OpenShift can offer."
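
For context, the built-in discovery Mountain refers to resolves a Service name to pod endpoints over DNS; it knows nothing about application-level operations, which is the gap Amadeus filled with its extra layer. A minimal sketch of that built-in mechanism, with an invented service name:

```yaml
# Illustrative headless Service: DNS for "booking-engine" returns the
# individual pod IPs and clients pick an endpoint themselves. Routing
# by operation type, as Amadeus needed, requires logic above this layer.
apiVersion: v1
kind: Service
metadata:
  name: booking-engine        # invented name
spec:
  clusterIP: None             # headless: no virtual IP, per-pod DNS records
  selector:
    app: booking-engine
  ports:
    - name: http
      port: 8080
```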


The second project ended up going into production first: the Amadeus Airline Cloud Availability solution, which helps manage ever-increasing flight-search volume and was deployed in public cloud. Launched in early 2016, it is "now handling in production several thousand transactions per second, and it's deployed in multiple data centers throughout the world," says Mountain. "It's not a migration of an existing workload; it's a whole new workload that we couldn't have done otherwise. [This platform] gives us access to market opportunities that we didn't have before."


Having been through this kind of technical evolution more than once, Mountain has advice on how to handle the cultural changes. "That's one aspect that we can tackle progressively," he says. "We have to go on supplying our customers with new features on our pre-existing products, and we have to keep existing products working. So we can't simply do absolutely everything from one day to the next. And we mustn't sell it that way."


The first order of business, then, is to pick one or two applications to demonstrate that the technology works. Rather than choosing a high-impact, high-risk project, Mountain's team selected a smaller application that was representative of all the company's other applications in its complexity: "We just made sure we picked something that's complex enough, and we showed that it can be done."

{{< case-studies/quote >}}
"The bottom line is we want these multi-data center capabilities, and we want them as well for our mainstream system," he says. "And we don't think that we can implement them with our previous system. We need the new automation, homogeneity, and scale that Kubernetes and OpenShift bring."
{{< /case-studies/quote >}}

Next comes convincing people. "On the operations side and on the R&D side, there will be people who say quite rightly, 'There is a system, and it works, so why change?'" Mountain says. "The only thing that really convinces people is showing them the value." For Amadeus, people realized that the Airline Cloud Availability product could not have been made available on the public cloud with the company's existing system. The question then became, he says, "Do we go into a full-blown migration? Is that something that is justified?"


"The bottom line is we want these multi-data center capabilities, and we want them as well for our mainstream system," he says. "And we don't think that we can implement them with our previous system. We need the new automation, homogeneity, and scale that Kubernetes and OpenShift bring."


So how do you get everyone on board? "Make sure you have good links between your R&D and your operations," he says. "Also make sure you're going to talk early on to the investors and stakeholders. Figure out what it is that they will be expecting from you, that will convince them or not, that this is the right way for your company."


His other advice is simply to make the technology available for people to try it. "Kubernetes and OpenShift Origin are open source software, so there's no complicated license key for the evaluation period and you're not limited to 30 days," he points out. "Just go and get it running." Along with that, he adds, "You've got to be prepared to rethink how you do things. Of course making your applications as cloud native as possible is how you'll reap the most benefits: 12 factors, CI/CD, which is continuous integration, continuous delivery, but also continuous deployment."


And while they explore that aspect of the technology, Mountain and his team will likely be practicing what he preaches to others taking the cloud native journey. "See what happens when you break it, because it's important to understand the limits of the system," he says. Or rather, he notes, the advantages of it. "Breaking things on Kube is actually one of the nice things about it—it recovers. It's the only real way that you'll see that you might be able to do things."

\ No newline at end of file
diff --git a/content/bn/case-studies/ancestry/ancestry_featured.png b/content/bn/case-studies/ancestry/ancestry_featured.png
new file mode 100644
index 0000000000000..6d63daae32139
Binary files /dev/null and b/content/bn/case-studies/ancestry/ancestry_featured.png differ
diff --git a/content/bn/case-studies/ancestry/ancestry_featured.svg b/content/bn/case-studies/ancestry/ancestry_featured.svg
new file mode 100644
index 0000000000000..9a3e80186b0be
--- /dev/null
+++ b/content/bn/case-studies/ancestry/ancestry_featured.svg
@@ -0,0 +1 @@
+kubernetes.io-logos-ancestry
\ No newline at end of file
diff --git a/content/bn/case-studies/ancestry/ancestry_logo.png b/content/bn/case-studies/ancestry/ancestry_logo.png
new file mode 100644
index 0000000000000..5fbade8decbc1
Binary files /dev/null and b/content/bn/case-studies/ancestry/ancestry_logo.png differ
diff --git a/content/bn/case-studies/ancestry/index.html b/content/bn/case-studies/ancestry/index.html
new file mode 100644
index 0000000000000..7cc171544379f
--- /dev/null
+++ b/content/bn/case-studies/ancestry/index.html
@@ -0,0 +1,92 @@
---
title: Ancestry Case Study
case_study_styles: true
cid: caseStudies

new_case_study_styles: true
heading_background: /images/case-studies/ancestry/banner1.jpg
heading_title_logo: /images/ancestry_logo.png
subheading: >
  Digging Into the Past With New Technology
case_study_details:
  - Company: Ancestry
  - Location: Lehi, Utah
  - Industry: Internet Company, Online Services
---

Challenge


Ancestry, the global leader in family history and consumer genomics, uses sophisticated engineering and technology to help everyone, everywhere discover the story of what led to them. The company has spent more than 30 years innovating and building products and technologies that, at their core, result in real and emotional human responses. Ancestry currently serves more than 2.6 million paying subscribers, holds 20 billion historical records and 90 million family trees, and counts more than four million people in its AncestryDNA network, making it the largest consumer genomics DNA network in the world. The company's popular website, ancestry.com, was working with big data long before the term was popularized. The site was built on hundreds of services and technologies and a traditional deployment methodology. "It's worked well for us in the past," says Paul MacKay, software engineer and architect at Ancestry, "but it had become quite cumbersome in its processing and time-consuming. As a primarily online service, we are constantly looking for ways to accelerate and be more agile in delivering our solutions and our products."


Solution


The company is transitioning to cloud native infrastructure, using Docker containerization, Kubernetes orchestration and Prometheus for cluster monitoring.


Impact


"Every single product, every decision we make at Ancestry, focuses on delighting our customers with intimate, sometimes life-changing discoveries about themselves and their families," says MacKay. "As the company continues to grow, the increased productivity gains from using Kubernetes has helped Ancestry make customer discoveries faster. With the move to Dockerization for example, instead of taking between 20 to 50 minutes to deploy a new piece of code, we can now deploy in under a minute for much of our code. We've truly experienced significant time savings in addition to the various features and benefits from cloud native and Kubernetes-type technologies."

{{< case-studies/quote author="PAUL MACKAY, SOFTWARE ENGINEER AND ARCHITECT AT ANCESTRY" >}}
"At a certain point, you have to step back if you're going to push a new technology and get key thought leaders with engineers within the organization to become your champions for new technology adoption. At training sessions, the development teams were always the ones that were saying, 'Kubernetes saved our time tremendously; it's an enabler. It really is incredible.'"
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
It started with a Shaky Leaf.
{{< /case-studies/lead >}}

Since its introduction a decade ago, the Shaky Leaf icon has become one of Ancestry's signature features, signaling to users that there's a helpful hint they can use to find out more about their family tree.


So when the company decided to begin moving its infrastructure to cloud native technology, the first service that was launched on Kubernetes, the open source platform for managing application containers across clusters of hosts, was this hint system. Think of it as Amazon's recommended products, but instead of recommending products the company recommends records, stories, or familial connections. "It was a very important part of the site," says Ancestry software engineer and architect Paul MacKay, "but also small enough for a pilot project that we knew we could handle in a very appropriate, secure way."


And when it went live smoothly in early 2016, "our deployment time for this service literally was cut down from 50 minutes to 2 or 5 minutes," MacKay adds. "The development team was just thrilled because we're focused on supplying a great experience for our customers. And that means features, it means stability, it means all those things that we need for a first-in-class type operation."


The stability of that Shaky Leaf was a signal for MacKay and his team that their decision to embrace cloud native technologies was the right one for the company. With a private data center, Ancestry built its website (which launched in 1996) on hundreds of services and technologies and a traditional deployment methodology. "It worked well for us in the past, but the sum of the legacy systems became quite cumbersome in its processing and was time-consuming," says MacKay. "We were looking for other ways to accelerate, to be more agile in delivering our solutions and our products."

{{< case-studies/quote image="/images/case-studies/ancestry/banner3.jpg" >}}
"And when it [Kubernetes] went live smoothly in early 2016, 'our deployment time for this service literally was cut down from 50 minutes to 2 or 5 minutes,' MacKay adds. 'The development team was just thrilled because we're focused on supplying a great experience for our customers. And that means features, it means stability, it means all those things that we need for a first-in-class type operation.'"
{{< /case-studies/quote >}}

That need led them in 2015 to explore containerization. Ancestry engineers had already been using technology like Java and Python on Linux, so part of the decision was about making the infrastructure more Linux-friendly. They quickly decided that they wanted to go with Docker for containerization, "but it always comes down to the orchestration part of it to make it really work," says MacKay.


His team looked at orchestration platforms offered by Docker Compose, Mesos and OpenStack, and even started to prototype some homegrown solutions. And then they started hearing rumblings of the imminent release of Kubernetes v1.0. "At the forefront, we were looking at the secret store, so we didn't have to manage that all ourselves, the config maps, the methodology of seamless deployment strategy," he says. "We found that how Kubernetes had done their resources, their types, their labels and just their interface was so much further advanced than the other things we had seen. It was a feature fit."

{{< case-studies/lead >}}
Plus, MacKay says, "I just believed in the confidence that comes with the history that Google has with containerization. So we started out right on the leading edge of it. And we haven't looked back since."
{{< /case-studies/lead >}}

Which is not to say that adopting a new technology hasn't come with some challenges. "Change is hard," says MacKay. "Not because the technology is hard or that the technology is not good. It's just that people like to do things like they had done [before]. You have the early adopters and you have those who are coming in later. It was a learning experience on both sides."


Figuring out the best deployment operations for Ancestry was a big part of the work it took to adopt cloud native infrastructure. "We want to make sure the process is easy and also controlled in the manner that allows us the highest degree of security that we demand and our customers demand," says MacKay. "With Kubernetes and other products, there are some good solutions, but a little bit of glue is needed to bring it into corporate processes and governances. It's like having a set of gloves that are generic, but when you really do want to grab something you have to make it so it's customized to you. That's what we had to do."


Their best practices include allowing their developers to deploy into development stage and production, but then controlling the aspects that need governance and auditing, such as secrets. They found that having one namespace per service is useful for achieving that containment of secrets and config maps. And for their needs, having one container per pod makes it easier to manage and to have a smaller unit of deployment.
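
As a rough sketch of that containment pattern (the service name, image, and secret are invented for illustration), each service gets its own namespace, and its secrets are visible only from within it:

```yaml
# Illustrative only: one namespace per service scopes that service's
# secrets and config maps to the service itself.
apiVersion: v1
kind: Namespace
metadata:
  name: hints                  # hypothetical service name
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hints
  namespace: hints
spec:
  replicas: 3
  selector:
    matchLabels:
      app: hints
  template:
    metadata:
      labels:
        app: hints
    spec:
      containers:              # a single container per pod
        - name: hints
          image: registry.example.com/hints:1.0.0   # placeholder image
          envFrom:
            - secretRef:
                name: hints-secrets   # resolvable only inside this namespace
```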

{{< case-studies/quote image="/images/case-studies/ancestry/banner4.jpg" >}}
"The success of Ancestry's first deployment of the hint system on Kubernetes helped create momentum for greater adoption of the technology."
{{< /case-studies/quote >}}

With that process established, the time spent on deployment was cut down to under a minute for some services. "As programmers, we have what's called REPL: read, evaluate, print, and loop, but with Kubernetes, we have CDEL: compile, deploy, execute, and loop," says MacKay. "It's a very quick loop back and a great benefit to understand that when our services are deployed in production, they're the same as what we tested in the pre-production environments. The approach of cloud native for Ancestry provides us a better ability to scale and to accommodate the business needs as work loads occur."


The success of Ancestry's first deployment of the hint system on Kubernetes helped create momentum for greater adoption of the technology. "Engineers like to code, they like to do features, they don't like to sit around waiting for things to be deployed and worrying about scaling up and out and down," says MacKay. "After a while the engineers became our champions. At training sessions, the development teams were always the ones saying, 'Kubernetes saved our time tremendously; it's an enabler; it really is incredible.' Over time, we were able to convince our management that this was a transition that the industry is making and that we needed to be a part of it."


A year later, Ancestry has transitioned a good number of applications to Kubernetes. "We have many different services that make up the rich environment that [the website] has from both the DNA side and the family history side," says MacKay. "We have front-end stacks, back-end stacks and back-end processing type stacks that are in the cluster."


The company continues to weigh which services it will move forward to Kubernetes, which ones will be kept as is, and which will be replaced in the future and thus don't have to be moved over. MacKay estimates that the company is "approaching halfway on those features that are going forward. We don't have to do a lot of convincing anymore. It's more of an issue of timing with getting product management and engineering staff the knowledge and information that they need."

{{< case-studies/quote >}}
"... 'I believe in Kubernetes. I believe in containerization. I think if we can get there and establish ourselves in that world, we will be further along and far better off being agile and all the things we talk about, and it'll go forward.'"
{{< /case-studies/quote >}}

Looking ahead, MacKay sees Ancestry maximizing the benefits of Kubernetes in 2017. "We're very close to having everything that should be or could be in a Linux-friendly world in Kubernetes by the end of the year," he says, adding that he's looking forward to features such as federation and horizontal pod autoscaling that are currently in the works. "Kubernetes has been very wonderful for us and we continue to ride the wave."


That wave, he points out, has everything to do with the vibrant Kubernetes community, which has grown by leaps and bounds since Ancestry joined it as an early adopter. "This is just a very rough way of judging it, but on Slack in June 2015, there were maybe 500 on there," MacKay says. "The last time I looked there were maybe 8,500 just on the Slack channel. There are so many major companies and different kinds of companies involved now. It's the variety of contributors, the number of contributors, the incredibly competent and friendly community."


As much as he and his team at Ancestry have benefited from what he calls "the goodness and the technical abilities of many" in the community, they've also contributed information about best practices, logged bug issues and participated in the open source conversation. And they've been active in attending meetups to help educate and give back to the local tech community in Utah. Says MacKay: "We're trying to give back as far as our experience goes, rather than just code."


When he meets with companies considering adopting cloud native infrastructure, the best advice he has to give from Ancestry's Kubernetes journey is this: "Start small, but with hard problems," he says. And "you need a patron who understands the vision of containerization, to help you tackle the political as well as other technical roadblocks that can occur when change is needed."


With the changes that MacKay's team has led over the past year and a half, cloud native will be part of Ancestry's technological genealogy for years to come. MacKay has been such a champion of the technology that he says people have jokingly accused him of having a Kubernetes tattoo.


"I really don't," he says with a laugh. "But I'm passionate. I'm not exclusive to any technology; I use whatever I need that's out there that makes us great. If it's something else, I'll use it. But right now I believe in Kubernetes. I believe in containerization. I think if we can get there and establish ourselves in that world, we will be further along and far better off being agile and all the things we talk about, and it'll go forward."


He pauses. "So, yeah, I guess you can say I'm an evangelist for Kubernetes," he says. "But I'm not getting a tattoo!"

\ No newline at end of file
diff --git a/content/bn/case-studies/ant-financial/ant-financial_featured_logo.png b/content/bn/case-studies/ant-financial/ant-financial_featured_logo.png
new file mode 100644
index 0000000000000..cb4034502734d
Binary files /dev/null and b/content/bn/case-studies/ant-financial/ant-financial_featured_logo.png differ
diff --git a/content/bn/case-studies/ant-financial/ant-financial_featured_logo.svg b/content/bn/case-studies/ant-financial/ant-financial_featured_logo.svg
new file mode 100644
index 0000000000000..4eb8a51127ea2
--- /dev/null
+++ b/content/bn/case-studies/ant-financial/ant-financial_featured_logo.svg
@@ -0,0 +1 @@
+kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/ant-financial/index.html b/content/bn/case-studies/ant-financial/index.html
new file mode 100644
index 0000000000000..8dfcf18eb9b5e
--- /dev/null
+++ b/content/bn/case-studies/ant-financial/index.html
@@ -0,0 +1,82 @@
---
title: Ant Financial Case Study
linkTitle: ant-financial
case_study_styles: true
cid: caseStudies
featured: false

new_case_study_styles: true
heading_background: /images/case-studies/antfinancial/banner1.jpg
heading_title_logo: /images/antfinancial_logo.png
subheading: >
  Ant Financial's Hypergrowth Strategy Using Kubernetes
case_study_details:
  - Company: Ant Financial
  - Location: Hangzhou, China
  - Industry: Financial Services
---

Challenge


Officially founded in October 2014, Ant Financial originated from Alipay, the world's largest online payment platform that launched in 2004. The company also offers numerous other services leveraging technology innovation. With the volume of transactions Alipay handles for its 900+ million users worldwide (through its local and global partners)—256,000 transactions per second at the peak of Double 11 Singles Day 2017, and total gross merchandise value of $31 billion for Singles Day 2018—not to mention that of its other services, Ant Financial faces "data processing challenge in a whole new way," says Haojie Hang, who is responsible for Product Management for the Storage and Compute Group. "We see three major problems of operating at that scale: how to provide real-time compute, storage, and processing capability, for instance to make real-time recommendations for fraud detection; how to provide intelligence on top of this data, because there's too much data and then we're not getting enough insight; and how to apply security in the application level, in the middleware level, the system level, even the chip level." In order to provide reliable and consistent services to its customers, Ant Financial embraced containers in early 2014, and soon needed an orchestration solution for the tens-of-thousands-of-node clusters in its data centers.


Solution


After investigating several technologies, the team chose Kubernetes for orchestration, as well as a number of other CNCF projects, including Prometheus, OpenTracing, etcd and CoreDNS. "In late 2016, we decided that Kubernetes will be the de facto standard," says Hang. "Looking back, we made the right bet on the right technology. But then we needed to move the production workload from the legacy infrastructure to the latest Kubernetes-enabled platform, and that took some time, because we are very careful in terms of reliability and consistency." All core financial systems were containerized by November 2017, and the migration to Kubernetes is ongoing.


Impact


"We've seen at least tenfold in improvement in terms of the operations with cloud native technology, which means you can have tenfold increase in terms of output," says Hang. Ant also provides its fully integrated financial cloud platform to business partners around the world, and hopes to power the next generation of digital banking with deep experience in service innovation and technology expertise. Hang says the team hasn't begun to focus on optimizing the Kubernetes platform, either: "Because we're still in the hyper growth stage, we're not in a mode where we do cost saving yet."

{{< case-studies/quote author="HAOJIE HANG, PRODUCT MANAGEMENT, ANT FINANCIAL" >}}
"In late 2016, we decided that Kubernetes will be the de facto standard. Looking back, we made the right bet on the right technology."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
A spinoff of the multinational conglomerate Alibaba, Ant Financial boasts a $150+ billion valuation and the scale to match. The fintech startup, launched in 2014, comprises Alipay, the world's largest online payment platform, and numerous other services leveraging technology innovation.
{{< /case-studies/lead >}}

And the volume of transactions that Alipay handles for over 900 million users worldwide (through its local and global partners) is staggering: 256,000 per second at the peak of Double 11 Singles Day 2017, and total gross merchandise value of $31 billion for Singles Day 2018. With the mission of "bringing the world equal opportunities," Ant Financial is dedicated to creating an open, shared credit system and financial services platform through technology innovations.


Combine that with the operations of its other properties—such as the Huabei online credit system, Jiebei lending service, and the 350-million-user Ant Forest green energy mobile app—and Ant Financial faces "data processing challenge in a whole new way," says Haojie Hang, who is responsible for Product Management for the Storage and Compute Group. "We see three major problems of operating at that scale: how to provide real-time compute, storage, and processing capability, for instance to make real-time recommendations for fraud detection; how to provide intelligence on top of this data, because there's too much data and we're not getting enough insight; and how to apply security in the application level, in the middleware level, the system level, even the chip level."


To address those challenges and provide reliable and consistent services to its customers, Ant Financial embraced Docker containerization in 2014. But they soon realized that they needed an orchestration solution for some tens-of-thousands-of-node clusters in the company's data centers.

{{< case-studies/quote
    image="/images/case-studies/antfinancial/banner3.jpg"
    author="RANGER YU, GLOBAL TECHNOLOGY PARTNERSHIP & DEVELOPMENT, ANT FINANCIAL"
>}}
"On Double 11 this year, we had plenty of nodes on Kubernetes, but compared to the whole scale of our infrastructure, this is still in progress."
{{< /case-studies/quote >}}

The team investigated several technologies, including Docker Swarm and Mesos. "We did a lot of POCs, but we're very careful in terms of production systems, because we want to make sure we don't lose any data," says Hang. "You cannot afford to have a service downtime for one minute; even one second has a very, very big impact. We operate every day under pressure to provide reliable and consistent services to consumers and businesses in China and globally."


Ultimately, Hang says Ant chose Kubernetes because it checked all the boxes: a strong community, technology that "will be relevant in the next three to five years," and a good match for the company's engineering talent. "In late 2016, we decided that Kubernetes will be the de facto standard," says Hang. "Looking back, we made the right bet on the right technology. But then we needed to move the production workload from the legacy infrastructure to the latest Kubernetes-enabled platform. We spent a lot of time learning and then training our people to build applications on Kubernetes well."


All core financial systems were containerized by November 2017, and the migration to Kubernetes is ongoing. Ant's platform also leverages a number of other CNCF projects, including Prometheus, OpenTracing, etcd and CoreDNS. "On Double 11 this year, we had plenty of nodes on Kubernetes, but compared to the whole scale of our infrastructure, this is still in progress," says Ranger Yu, Global Technology Partnership & Development.

{{< case-studies/quote
    image="/images/case-studies/antfinancial/banner4.jpg"
    author="HAOJIE HANG, PRODUCT MANAGEMENT, ANT FINANCIAL"
>}}
"We're very grateful for CNCF and this amazing technology, which we need as we continue to scale globally. We're definitely embracing the community and open sourcing more in the future."
{{< /case-studies/quote >}}

Still, there has already been an impact. "Cloud native technology has benefited us greatly in terms of efficiency," says Hang. "In general, we want to make sure our infrastructure is nimble and flexible enough for the work that could happen tomorrow. That's the goal. And with cloud native technology, we've seen at least tenfold improvement in operations, which means you can have tenfold increase in terms of output. Let's say you are operating 10 nodes with one person. With cloud native, tomorrow you can have 100 nodes."


Ant also provides its financial cloud platform to partners around the world, and hopes to power the next generation of digital banking with deep experience in service innovation and technology expertise. Hang says the team hasn't begun to focus on optimizing the Kubernetes platform, either: "Because we're still in the hyper growth stage, we're not in a mode where we do cost-saving yet."


The CNCF community has also been a valuable asset during Ant Financial's move to cloud native. "If you are applying a new technology, it's very good to have a community to discuss technical problems with other users," says Hang. "We're very grateful for CNCF and this amazing technology, which we need as we continue to scale globally. We're definitely embracing the community and open sourcing more in the future."

{{< case-studies/quote
    image="/images/case-studies/antfinancial/banner4.jpg"
    author="HAOJIE HANG, PRODUCT MANAGEMENT, ANT FINANCIAL"
>}}
"In China, we are the North Star in terms of innovation in financial and other related services," says Hang. "We definitely want to make sure we're still leading in the next 5 to 10 years with our investment in technology."
{{< /case-studies/quote >}}

In fact, the company has already started to open source some of its cloud native middleware. "We are going to be very proactive about that," says Yu. "CNCF provided a platform so everyone can plug in or contribute components. This is very good open source governance."


Looking ahead, the Ant team will continue to evaluate many other CNCF projects. Building a service mesh community in China, the team has brought together many China-based companies and developers to discuss the potential of that technology. "Service mesh is very attractive for Chinese developers and end users because we have a lot of legacy systems running now, and it's an ideal mid-layer to glue everything together, both new and legacy," says Hang. "For new technologies, we look very closely at whether they will last."


At Ant, Kubernetes passed that test with flying colors, and the team hopes other companies will follow suit. "In China, we are the North Star in terms of innovation in financial and other related services," says Hang. "We definitely want to make sure we're still leading in the next 5 to 10 years with our investment in technology."

\ No newline at end of file
diff --git a/content/bn/case-studies/appdirect/appdirect_featured_logo.png b/content/bn/case-studies/appdirect/appdirect_featured_logo.png
new file mode 100644
index 0000000000000..724a8a75684f0
Binary files /dev/null and b/content/bn/case-studies/appdirect/appdirect_featured_logo.png differ
diff --git a/content/bn/case-studies/appdirect/appdirect_featured_logo.svg b/content/bn/case-studies/appdirect/appdirect_featured_logo.svg
new file mode 100644
index 0000000000000..36fcba1abba36
--- /dev/null
+++ b/content/bn/case-studies/appdirect/appdirect_featured_logo.svg
@@ -0,0 +1 @@
+kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/appdirect/index.html b/content/bn/case-studies/appdirect/index.html
new file mode 100644
index 0000000000000..0a20dee2ac97a
--- /dev/null
+++ b/content/bn/case-studies/appdirect/index.html
@@ -0,0 +1,85 @@
---
title: AppDirect Case Study
linkTitle: AppDirect
case_study_styles: true
cid: caseStudies
logo: appdirect_featured_logo.png
featured: true
weight: 4
quote: >
  We made the right decisions at the right time. Kubernetes and the cloud native technologies are now seen as the de facto ecosystem.

new_case_study_styles: true
heading_background: /images/case-studies/appdirect/banner1.jpg
heading_title_logo: /images/appdirect_logo.png
subheading: >
  AppDirect: How AppDirect Supported the 10x Growth of Its Engineering Staff with Kubernetes
case_study_details:
  - Company: AppDirect
  - Location: San Francisco, California
  - Industry: Software
---

Challenge


AppDirect provides an end-to-end commerce platform for cloud-based products and services. When Director of Software Development Pierre-Alexandre Lacerte began working there in 2014, the company had a monolith application deployed on a "tomcat infrastructure, and the whole release process was complex for what it should be," he says. "There were a lot of manual steps involved, with one engineer building a feature, then another team picking up the change. So you had bottlenecks in the pipeline to ship a feature to production." At the same time, the engineering team was growing, and the company realized it needed a better infrastructure to both support that growth and increase velocity.


Solution


"My idea was: Let's create an environment where teams can deploy their services faster, and they will say, 'Okay, I don't want to build in the monolith anymore. I want to build a service,'" says Lacerte. They considered and prototyped several different technologies before deciding to adopt Kubernetes in early 2016. Lacerte's team has also integrated Prometheus monitoring into the platform; tracing is next. Today, AppDirect has more than 50 microservices in production and 15 Kubernetes clusters deployed on AWS and on premise around the world.


Impact


The Kubernetes platform has helped support the engineering team's 10x growth over the past few years. Coupled with the fact that they were continually adding new features, Lacerte says, "I think our velocity would have slowed down a lot if we didn't have this new infrastructure." Moving to Kubernetes and services has meant that deployments have become much faster due to less dependency on custom-made, brittle shell scripts with SCP commands. Time to deploy a new version has shrunk from 4 hours to a few minutes. Additionally, the company invested a lot of effort to make things self-service for developers. "Onboarding a new service doesn't require Jira tickets or meeting with three different teams," says Lacerte. Today, the company sees 1,600 deployments per week, compared to 1-30 before. The company also achieved cost savings by moving its marketplace and billing monoliths to Kubernetes from legacy EC2 hosts as well as by leveraging autoscaling, as traffic is higher during business hours.

{{< case-studies/quote author="Alexandre Gervais, Staff Software Developer, AppDirect" >}}
"It was an immense engineering culture shift, but the benefits are undeniable in terms of scale and speed."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
With its end-to-end commerce platform for cloud-based products and services, AppDirect has been helping organizations such as Comcast and GoDaddy simplify the digital supply chain since 2009.
{{< /case-studies/lead >}}

When Director of Software Development Pierre-Alexandre Lacerte started working there in 2014, the company had a monolith application deployed on a "tomcat infrastructure, and the whole release process was complex for what it should be," he says. "There were a lot of manual steps involved, with one engineer building a feature then creating a pull request, and a QA or another engineer validating the feature. Then it gets merged and someone else will take care of the deployment. So we had bottlenecks in the pipeline to ship a feature to production."


At the same time, the engineering team of 40 was growing, and the company wanted to add an increasing number of features to its products. As a member of the platform team, Lacerte began hearing from multiple teams that wanted to deploy applications using different frameworks and languages, from Node.js to Spring Boot Java. He soon realized that in order to both support growth and increase velocity, the company needed a better infrastructure, and a system in which teams are autonomous, can do their own deploys, and be responsible for their services in production.

{{< case-studies/quote
    image="/images/case-studies/appdirect/banner3.jpg"
    author="Alexandre Gervais, Staff Software Developer, AppDirect"
>}}
"We made the right decisions at the right time. Kubernetes and the cloud native technologies are now seen as the de facto ecosystem. We know where to focus our efforts in order to tackle the new wave of challenges we face as we scale out. The community is so active and vibrant, which is a great complement to our awesome internal team."
{{< /case-studies/quote >}}

From the beginning, Lacerte says, "My idea was: Let's create an environment where teams can deploy their services faster, and they will say, 'Okay, I don't want to build in the monolith anymore. I want to build a service.'" (Lacerte left the company in 2019.)


Working with the operations team, Lacerte's group got more control and access to the company's AWS infrastructure, and started prototyping several orchestration technologies. "Back then, Kubernetes was a little underground, unknown," he says. "But we looked at the community, the number of pull requests, the velocity on GitHub, and we saw it was getting traction. And we found that it was much easier for us to manage than the other technologies."


They spun up the first few services on Kubernetes using Chef and Terraform provisioning, and as more services were added, more automation was, too. "We have clusters around the world—in Korea, in Australia, in Germany, and in the U.S.," says Lacerte. "Automation is critical for us." They're now largely using Kops, and are looking at managed Kubernetes offerings from several cloud providers.


Today, though the monolith still exists, there are fewer and fewer commits and features. All teams are deploying on the new infrastructure, and services are the norm. AppDirect now has more than 50 microservices in production and 15 Kubernetes clusters deployed on AWS and on premise around the world.


Lacerte's strategy ultimately worked because of the very real impact the Kubernetes platform has had to deployment time. Due to less dependency on custom-made, brittle shell scripts with SCP commands, time to deploy a new version has shrunk from 4 hours to a few minutes. Additionally, the company invested a lot of effort to make things self-service for developers. "Onboarding a new service doesn't require Jira tickets or meeting with three different teams," says Lacerte. Today, the company sees 1,600 deployments per week, compared to 1-30 before.

{{< case-studies/quote
    image="/images/case-studies/appdirect/banner4.jpg"
    author="Pierre-Alexandre Lacerte, Director of Software Development, AppDirect"
>}}
"I think our velocity would have slowed down a lot if we didn't have this new infrastructure."
{{< /case-studies/quote >}}

Additionally, the Kubernetes platform has helped support the engineering team's 10x growth over the past few years. "Ownership, a core value of AppDirect, reflects in our ability to ship services independently of our monolith code base," says Staff Software Developer Alexandre Gervais, who worked with Lacerte on the initiative. "Small teams now own critical parts of our business domain model, and they operate in their decoupled domain of expertise, with limited knowledge of the entire codebase. This reduces and isolates some of the complexity." Coupled with the fact that they were continually adding new features, Lacerte says, "I think our velocity would have slowed down a lot if we didn't have this new infrastructure."


The company also achieved cost savings by moving its marketplace and billing monoliths to Kubernetes from legacy EC2 hosts as well as by leveraging autoscaling, as traffic is higher during business hours.
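
A sketch of what business-hours autoscaling can look like with a HorizontalPodAutoscaler; the workload name and thresholds below are invented, not AppDirect's actual settings:

```yaml
# Scales a hypothetical marketplace Deployment on CPU usage, so replicas
# grow for the business-hours peak and shrink again overnight.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: marketplace
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: marketplace
  minReplicas: 2               # overnight floor
  maxReplicas: 20              # daytime ceiling
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
```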


AppDirect's cloud native stack also includes gRPC and Fluentd, and the team is currently working on setting up OpenCensus. The platform already has Prometheus integrated, so "when teams deploy their service, they have their notifications, alerts and configurations," says Lacerte. "For example, in the test environment, I want to get a message on Slack, and in production, I want a Slack message and I also want to get paged. We have integration with PagerDuty. Teams have more ownership on their services."
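
The routing Lacerte describes maps naturally onto Alertmanager configuration. A minimal sketch, with invented receiver names and an assumed `environment` label set by the alert rules:

```yaml
# Test alerts go to Slack only; production alerts also page.
global:
  slack_api_url: "https://hooks.slack.com/services/REPLACE/ME"  # placeholder webhook
route:
  receiver: slack-only                  # default for non-production alerts
  routes:
    - match:
        environment: production         # assumed label on production alerts
      receiver: slack-and-pagerduty
receivers:
  - name: slack-only
    slack_configs:
      - channel: "#team-alerts"
  - name: slack-and-pagerduty
    slack_configs:
      - channel: "#team-alerts"
    pagerduty_configs:
      - service_key: "<pagerduty-integration-key>"   # placeholder key
```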

{{< case-studies/quote author="Pierre-Alexandre Lacerte, Director of Software Development, AppDirect" >}}
"We moved from a culture limited to 'pushing code in a branch' to exciting new responsibilities outside of the code base: deployment of features and configurations; monitoring of application and business metrics; and on-call support in case of outages. It was an immense engineering culture shift, but the benefits are undeniable in terms of scale and speed."
{{< /case-studies/quote >}}

That of course also means more responsibility. "We asked engineers to expand their horizons," says Gervais. "We moved from a culture limited to 'pushing code in a branch' to exciting new responsibilities outside of the code base: deployment of features and configurations; monitoring of application and business metrics; and on-call support in case of outages. It was an immense engineering culture shift, but the benefits are undeniable in terms of scale and speed."


As the engineering ranks continue to grow, the platform team has a new challenge: making sure that the Kubernetes platform is accessible and easily utilized by everyone. "How can we make sure that when we add more people to our team that they are efficient, productive, and know how to ramp up on the platform?" Lacerte says. "So we have the evangelists, the documentation, some project examples. We do demos, we have AMA sessions. We're trying different strategies to get everyone's attention."


Three and a half years into their Kubernetes journey, Gervais feels AppDirect "made the right decisions at the right time," he says. "Kubernetes and the cloud native technologies are now seen as the de facto ecosystem. We know where to focus our efforts in order to tackle the new wave of challenges we face as we scale out. The community is so active and vibrant, which is a great complement to our awesome internal team. Going forward, our focus will really be geared towards benefiting from the ecosystem by providing added business value in our day-to-day operations."

\ No newline at end of file
diff --git a/content/bn/case-studies/babylon/babylon_featured_logo.png b/content/bn/case-studies/babylon/babylon_featured_logo.png
new file mode 100644
index 0000000000000..8b158b5ed3c43
Binary files /dev/null and b/content/bn/case-studies/babylon/babylon_featured_logo.png differ
diff --git a/content/bn/case-studies/babylon/babylon_featured_logo.svg b/content/bn/case-studies/babylon/babylon_featured_logo.svg
new file mode 100644
index 0000000000000..e84da19268e33
--- /dev/null
+++ b/content/bn/case-studies/babylon/babylon_featured_logo.svg
@@ -0,0 +1 @@
+babylon_featured_logo
\ No newline at end of file
diff --git a/content/bn/case-studies/babylon/index.html b/content/bn/case-studies/babylon/index.html
new file mode 100644
index 0000000000000..156d6e0178af2
--- /dev/null
+++ b/content/bn/case-studies/babylon/index.html
@@ -0,0 +1,84 @@
---
title: Babylon Case Study
linkTitle: Babylon
case_study_styles: true
cid: caseStudies
logo: babylon_featured_logo.svg
featured: true
weight: 1
quote: >
  Kubernetes is a great platform for machine learning because it comes with all the scheduling and scalability that you need.

new_case_study_styles: true
heading_background: /images/case-studies/babylon/banner4.jpg
heading_title_text: Babylon
use_gradient_overlay: true
subheading: >
  How Cloud Native Is Enabling Babylon's Medical AI Innovations
case_study_details:
  - Company: Babylon
  - Location: United Kingdom
  - Industry: AI, Healthcare
---

Challenge


A large number of Babylon's products leverage machine learning and artificial intelligence, and in 2019, there wasn't enough computing power in-house to run a particular experiment. The company was also growing (from 100 to 1,600 employees in three years) and planning expansion into other countries.


Solution


Babylon had migrated its user-facing applications to a Kubernetes platform in 2018, so the infrastructure team turned to Kubeflow, a toolkit for machine learning on Kubernetes. "We tried to create a Kubernetes core server, we deployed Kubeflow, and we orchestrated the whole experiment, which ended up being a really good success," says AI Infrastructure Lead Jérémie Vallée. The team began building a self-service AI training platform on top of Kubernetes.


Impact


Instead of waiting hours or days to be able to compute, teams can get access instantaneously. Clinical validations used to take 10 hours; now they are done in under 20 minutes. The portability of the cloud native platform has also enabled Babylon to expand into other countries.

{{< case-studies/quote
    image="/images/case-studies/babylon/banner1.jpg"
    author="JÉRÉMIE VALLÉE, AI INFRASTRUCTURE LEAD AT BABYLON"
>}}
"Kubernetes is a great platform for machine learning because it comes with all the scheduling and scalability that you need."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
Babylon's mission is to put accessible and affordable healthcare services in the hands of every person on earth.
{{< /case-studies/lead >}}

Since its launch in the U.K. in 2013, the startup has facilitated millions of digital consultations around the world. In the U.K., patients were typically waiting a week or two for a doctor's appointment. Through Babylon's NHS service, GP at Hand—which has more than 75,000 registered patients—39% get an appointment through their phone within 30 minutes, and 89% within 6 hours.


That's just the start. "We try to combine different types of technology with the medical expertise that we have in-house to build products that will help patients manage and understand their health, and also help doctors be more efficient at what they do," says Jérémie Vallée, AI Infrastructure Lead at Babylon.


A large number of these products leverage machine learning and artificial intelligence, and in 2019, researchers hit a pain point. "We have some servers in-house where our researchers were doing a lot of AI experiments and some training of models, and we came to a point where we didn't have enough compute in-house to run a particular experiment," says Vallée.


Babylon had migrated its user-facing applications to a Kubernetes platform in 2018, "and we had a lot of Kubernetes knowledge thanks to the migration," he adds. To optimize some of the models that had been created, the team turned to Kubeflow, a toolkit for machine learning on Kubernetes. "We tried to create a Kubernetes core server, we deployed Kubeflow, and we orchestrated the whole experiment, which ended up being a really good success," he says.
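
Kubeflow expresses training runs as Kubernetes custom resources that its operators schedule across the cluster. A minimal sketch of a distributed TensorFlow job (the name, image, and sizes are invented, not Babylon's actual experiment):

```yaml
# A TFJob asks Kubeflow's training operator to run four GPU workers;
# Kubernetes handles placement, restarts, and scaling of the pods.
apiVersion: kubeflow.org/v1
kind: TFJob
metadata:
  name: model-experiment       # invented name
  namespace: research          # invented namespace
spec:
  tfReplicaSpecs:
    Worker:
      replicas: 4
      template:
        spec:
          containers:
            - name: tensorflow # TFJob expects this container name
              image: registry.example.com/train:latest   # placeholder image
              resources:
                limits:
                  nvidia.com/gpu: 1
```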


Based on that experience, Vallée's team was tasked with building a self-service platform to help Babylon's AI teams become more efficient, and by extension help get products to market faster. The main requirements: (1) the ability to give researchers and engineers access to the compute they needed, regardless of the size of the experiments they may need to run; (2) a way to provide teams with the best tools that they needed to do their work, on demand and in a centralized way; and (3) the training platform had to be close to the data that was being managed, because of the company's expansion into different countries.

{{< case-studies/quote author="CAROLINE HARGROVE, CHIEF TECHNOLOGY OFFICER AT BABYLON" >}}
"Delivering a self-service platform where users are empowered to run their own workload has enabled our data scientist community to do hyperparameter tuning and general algorithm development without any cloud skill and without the help of platform engineers, thus accelerating our innovation."
{{< /case-studies/quote >}}

Kubernetes was an enabler on every count. "Kubernetes is a great platform for machine learning because it comes with all the scheduling and scalability that you need," says Vallée. The need to keep data in every country in which Babylon operates requires a multi-region, multi-cloud strategy, and some countries might not even have a public cloud provider at all. "We wanted to make this platform portable so that we can run training jobs anywhere," he says. "Kubernetes offered a base layer that allows you to deploy the platform outside of the cloud provider, and then deploy whatever tooling you need. That was a very good selling point for us."


Once the team decided to build the Babylon AI Research platform on top of Kubernetes, they referred to the Cloud Native Landscape to build out the stack: Prometheus and Grafana for monitoring; an Istio service mesh to control the network on the training platform and control what access all of the workflows would have; Helm to deploy the stack; and Flux to manage the GitOps part of the pipeline.
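
Babylon's manifests aren't published in the article, but the GitOps piece that Flux provides can be sketched in a few lines: a declarative resource checked into Git describes a Helm release, and Flux keeps the cluster reconciled to it. Everything below—namespace, chart version, values—is illustrative rather than Babylon's actual configuration:

```yaml
# Hypothetical sketch: a Flux (Helm Operator) HelmRelease for the monitoring
# stack. Flux watches Git and reconciles the cluster to this definition.
apiVersion: helm.fluxcd.io/v1
kind: HelmRelease
metadata:
  name: prometheus
  namespace: monitoring
spec:
  releaseName: prometheus
  chart:
    repository: https://prometheus-community.github.io/helm-charts
    name: prometheus
    version: 15.10.1   # illustrative pin; changing it in Git rolls the cluster
  values:
    server:
      retention: 15d
```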


The cloud native AI platform has had a huge impact at Babylon. The first research projects run on the platform mostly involved machine learning and natural language processing. These experiments required a huge amount of compute—1600 CPU, 3.2 TB RAM—which was much more than Babylon had in-house. Plus, access to compute used to take hours, or sometimes even days, depending on how busy the platform team was. "Now, with Kubernetes and the self-service platform that we provide, it's pretty much instantaneous," says Vallée.
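
To put those numbers in Kubernetes terms, an experiment of that size could be declared as a single batch Job. The name and image below are hypothetical, but the sizing matches the figures above (50 workers × 32 CPUs = 1,600 CPUs; 50 × 64 GiB ≈ 3.2 TB of RAM):

```yaml
# Illustrative only: a fan-out training Job sized like the experiment above.
apiVersion: batch/v1
kind: Job
metadata:
  name: nlp-training                 # hypothetical name
spec:
  parallelism: 50                    # 50 workers, scheduled wherever capacity exists
  completions: 50
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: trainer
          image: registry.example.com/research/trainer:latest  # hypothetical image
          resources:
            requests:
              cpu: "32"              # 50 x 32 = 1,600 CPUs in total
              memory: 64Gi           # 50 x 64Gi ≈ 3.2 TB in total
            limits:
              cpu: "32"
              memory: 64Gi
```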


Another important type of work that's done on the platform is clinical validation for new applications such as Babylon's Symptom Checker, which calculates the probability of a disease given the evidence input by the user. "Being in healthcare, we want all of our models to be safe before they're going to hit production," says Vallée. Using Argo for GitOps "enabled us to scale the process massively."
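
The article doesn't detail which parts of the Argo project Babylon uses beyond "Argo for GitOps," but a validation run expressed as an Argo Workflow could look roughly like this (the image and arguments are invented for illustration):

```yaml
# Hypothetical sketch: one clinical-validation run as an Argo Workflow.
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: clinical-validation-   # Argo appends a unique suffix per run
spec:
  entrypoint: validate
  templates:
    - name: validate
      container:
        image: registry.example.com/research/validator:latest  # hypothetical image
        args: ["--model=symptom-checker", "--dataset=validation-cases"]
```

Because each run is just another resource submitted to the cluster, many validations can run in parallel—the property credited below for scaling the process.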

{{< case-studies/quote
    image="/images/case-studies/babylon/banner2.jpg"
    author="JEAN MARIE FERDEGUE, DIRECTOR OF PLATFORM OPERATIONS AT BABYLON"
>}}
"Giving a Kubernetes-based platform to our data scientists has meant increased security, increased innovation through empowerment, and a more affordable health service as our cloud engineers are building an experience that is used by hundreds on a daily basis, rather than supporting specific bespoke use cases."
{{< /case-studies/quote >}}

Researchers used to have to wait up to 10 hours to get results on new versions of their models. With Kubernetes, that time is now down to under 20 minutes. Plus, previously they could only run one clinical validation at a time, now they can run many parallel ones if they need to—a huge benefit considering that in the past three years, Babylon has grown from 100 to 1,600 employees.


"Delivering a self-service platform where users are empowered to run their own workload has enabled our data scientist community to do hyper parameter tuning and general algorithm development without any cloud skill and without the help of platform engineers, thus accelerating our innovation," says Chief Technology Officer Caroline Hargrove.


Adds Director of Platform Operations Jean Marie Ferdegue: "Giving a Kubernetes-based platform to our data scientists has meant increased security, increased innovation through empowerment, and a more affordable health service as our cloud engineers are building an experience that is used by hundreds on a daily basis, rather than supporting specific bespoke use cases."


Plus, as Babylon continues to expand, "it will be very easy to onboard new countries," says Vallée. "Fifteen months ago when we deployed this platform, we had one big environment in the U.K., but now we have one in Canada, we have one in Asia, and we have one coming in the U.S. This is one of the things that Kubernetes and the other cloud native projects have enabled for us."


Babylon's road map for cloud native involves onboarding all of the company's AI efforts to the platform. Increasingly, that includes AI services of care. "I think this is going to be an interesting field where AI and healthcare meet," Vallée says. "It's kind of a complex problem and there's a lot of issues around this. So with our platform, we want to say, 'What can we do to make this less painful for our developers and machine learning engineers?'"

\ No newline at end of file
diff --git a/content/bn/case-studies/blablacar/blablacar_featured.png b/content/bn/case-studies/blablacar/blablacar_featured.png
new file mode 100644
index 0000000000000..cfe37257b99e7
Binary files /dev/null and b/content/bn/case-studies/blablacar/blablacar_featured.png differ
diff --git a/content/bn/case-studies/blablacar/blablacar_featured.svg b/content/bn/case-studies/blablacar/blablacar_featured.svg
new file mode 100644
index 0000000000000..5b887f24a8722
--- /dev/null
+++ b/content/bn/case-studies/blablacar/blablacar_featured.svg
@@ -0,0 +1 @@
kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/blablacar/blablacar_logo.png b/content/bn/case-studies/blablacar/blablacar_logo.png
new file mode 100644
index 0000000000000..14606e036002e
Binary files /dev/null and b/content/bn/case-studies/blablacar/blablacar_logo.png differ
diff --git a/content/bn/case-studies/blablacar/index.html b/content/bn/case-studies/blablacar/index.html
new file mode 100644
index 0000000000000..ab3a011e52783
--- /dev/null
+++ b/content/bn/case-studies/blablacar/index.html
@@ -0,0 +1,85 @@
---
title: BlaBlaCar Case Study
case_study_styles: true
cid: caseStudies

new_case_study_styles: true
heading_background: /images/case-studies/blablacar/banner1.jpg
heading_title_logo: /images/blablacar_logo.png
subheading: >
  Turning to Containerization to Support Millions of Rideshares
case_study_details:
  - Company: BlaBlaCar
  - Location: Paris, France
  - Industry: Ridesharing Company
---

Challenge


The world's largest long-distance carpooling community, BlaBlaCar, connects 40 million members across 22 countries. The company has been experiencing exponential growth since 2012 and needed its infrastructure to keep up. "When you're thinking about doubling the number of servers, you start thinking, 'What should I do to be more efficient?'" says Simon Lallemand, Infrastructure Engineer at BlaBlaCar. "The answer is not to hire more and more people just to deal with the servers and installation." The team knew they had to scale the platform, but wanted to stay on their own bare metal servers.


Solution


Opting not to shift to cloud virtualization or use a private cloud on their own servers, the BlaBlaCar team became early adopters of containerization, using the CoreOS runtime rkt, initially deployed with the fleet cluster manager. Last year, the company switched to Kubernetes orchestration, and now also uses Prometheus for monitoring.


Impact


"Before using containers, it would take sometimes a day, sometimes two, just to create a new service," says Lallemand. "With all the tooling that we made around the containers, copying a new service now is a matter of minutes. It's really a huge gain. We are better at capacity planning in our data center because we have fewer constraints due to this abstraction between the services and the hardware we run on. For the developers, it also means they can focus only on the features that they're developing, and not on the infrastructure."

{{< case-studies/quote author="Simon Lallemand, Infrastructure Engineer at BlaBlaCar" >}}
"When you're switching to this cloud-native model and running everything in containers, you have to make sure that at any moment you can reboot without any downtime and without losing traffic. [With Kubernetes] our infrastructure is much more resilient and we have better availability than before."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
For the 40 million users of BlaBlaCar, it's easy to find strangers headed in the same direction to share rides and costs. You can even choose how much "bla bla" chatter you want from a long-distance ride mate.
{{< /case-studies/lead >}}

Behind the scenes, though, the infrastructure was falling woefully behind the rider community's exponential growth. Founded in 2006, the company hit its current stride around 2012. "Our infrastructure was very traditional," says Infrastructure Engineer Simon Lallemand, who began working at the company in 2014. "In the beginning, it was a bit chaotic because we had to [grow] fast. But then comes the time when you have to design things to make it manageable."


By 2015, the company had about 50 bare metal servers. The team was using a MySQL database and PHP, but, Lallemand says, "it was a very static way." They also used the configuration management system Chef, but with little automation in the process. "When you're thinking about doubling the number of servers, you start thinking, 'What should I do to be more efficient?'" says Lallemand. "The answer is not to hire more and more people just to deal with the servers and installation."


Instead, BlaBlaCar began its cloud-native journey but wasn't sure which route to take. "We could either decide to go into cloud virtualization or even use a private cloud on our own servers," says Lallemand. "But going into the cloud meant we had to make a lot of changes in our application work, and we were just not ready to make the switch from on premise to the cloud." They wanted to keep the great performance they got on bare metal, so they didn't want to go to virtualization on premises.


The solution: containerization. This was early 2015 and containers were still relatively new. "It was a bold move at the time," says Lallemand. "We decided that the next servers that we would buy in the new data center would all be the same model, so we could outsource the maintenance of the servers. And we decided to go with containers and with CoreOS Container Linux as an abstraction for this hardware. It seemed future-proof to go with containers because we could see what companies were already doing with containers."

{{< case-studies/quote image="/images/case-studies/blablacar/banner3.jpg" >}}
"With all the tooling that we made around the containers, copying a new service is a matter of minutes. It's a huge gain. For the developers, it means they can focus only on the features that they're developing and not on the infrastructure or the hour they would test their code, or the hour that it would get deployed."
{{< /case-studies/quote >}}

Next, they needed to choose a runtime for the containers, but "there were very few deployments in production at that time," says Lallemand. They experimented with Docker but decided to go with rkt. Lallemand explains that for BlaBlaCar, it was "much simpler to integrate things that are on rkt." At the time, the project was still pre-v1.0, so "we could speak with the developers of rkt and give them feedback. It was an advantage." Plus, he notes, rkt was very stable, even at this early stage.


Once those decisions were made that summer, the company came up with a plan for implementation. First, they formed a task force to create a workflow that would be tested by three of the 10 members on Lallemand's team. But they took care to run regular workshops with all 10 members to make sure everyone was on board. "When you're focused on your product sometimes you forget if it's really user friendly, whether other people can manage to create containers too," Lallemand says. "So we did a lot of iterations to find a good workflow."


After establishing the workflow, Lallemand says with a smile that "we had this strange idea that we should try the most difficult thing first. Because if it works, it will work for everything." So the first project the team decided to containerize was the database. "Nobody did that at the time, and there were really no existing tools for what we wanted to do, including building container images," he says. So the team created their own tools, such as dgr, which builds container images so that the whole team has a common framework to build on the same images with the same standards. They also revamped the service-discovery tools Nerve and Synapse; their versions, Go-Nerve and Go-Synapse, were written in Go and built to be more efficient and include new features. All of these tools were open-sourced.


At the same time, the company was working to migrate its entire platform to containers with a deadline set for Christmas 2015. With all the work being done in parallel, BlaBlaCar was able to get about 80 percent of its production into containers by its deadline with live traffic running on containers during December. (It's now at 100 percent.) "It's a really busy time for traffic," says Lallemand. "We knew that by using those new servers with containers, it would help us handle the traffic."


In the middle of that peak season for carpooling, everything worked well. "The biggest impact that we had was for the deployment of new services," says Lallemand. "Before using containers, we had to first deploy a new server and create configurations with Chef. It would take sometimes a day, sometimes two, just to create a new service. And with all the tooling that we made around the containers, copying a new service is a matter of minutes. So it's really a huge gain. For the developers, it means they can focus only on the features that they're developing and not on the infrastructure or the hour they would test their code, or the hour that it would get deployed."

{{< case-studies/quote image="/images/case-studies/blablacar/banner4.jpg" >}}
"We realized that there was a really strong community around it [Kubernetes], which meant we would not have to maintain a lot of tools of our own," says Lallemand. "It was better if we could contribute to some bigger project like Kubernetes."
{{< /case-studies/quote >}}

In order to meet their self-imposed deadline, one of the decisions they made was to not do any "orchestration magic" for containers in the first production alignment. Instead, they used the basic fleet tool from CoreOS to deploy their containers. (They did build a tool called GGN, which they've open-sourced, to make it more manageable for their system engineers to use.)


Still, the team knew that they'd want more orchestration. "Our tool was doing a pretty good job, but at some point you want to give more autonomy to the developer team," Lallemand says. "We also realized that we don't want to be the single point of contact for developers when they want to launch new services." By the summer of 2016, they found their answer in Kubernetes, which had just begun supporting rkt implementation.


After discussing their needs with their contacts at CoreOS and Google, they were convinced that Kubernetes would work for BlaBlaCar. "We realized that there was a really strong community around it, which meant we would not have to maintain a lot of tools of our own," says Lallemand. "It was better if we could contribute to some bigger project like Kubernetes." They also started using Prometheus, as they were looking for "service-oriented monitoring that could be updated nightly." Production on Kubernetes began in December 2016. "We like to do crazy stuff around Christmas," he adds with a laugh.


BlaBlaCar now has about 3,000 pods, with 1,200 of them running on Kubernetes. Lallemand leads a "foundations team" of 25 members who take care of the networks, databases and systems for about 100 developers. There have been some challenges getting to this point. "The rkt implementation is still not 100 percent finished," Lallemand points out. "It's really good, but there are some features still missing. We have questions about how we do things with stateful services, like databases. We know how we will be migrating some of the services; some of the others are a bit more complicated to deal with. But the Kubernetes community is making a lot of progress on that part."


The team is particularly happy that they're now able to plan capacity better in the company's data center. "We have fewer constraints since we have this abstraction between the services and the hardware we run on," says Lallemand. "If we lose a server because there's a hardware problem on it, we just move the containers onto another server. It's much more efficient. We do that by just changing a line in the configuration file. And with Kubernetes, it should be automatic, so we would have nothing to do."

{{< case-studies/quote >}}
"If we lose a server because there's a hardware problem on it, we just move the containers onto another server. It's much more efficient. We do that by just changing a line in the configuration file. With Kubernetes, it should be automatic, so we would have nothing to do."
{{< /case-studies/quote >}}

And these advances ultimately trickle down to BlaBlaCar's users. "We have improved availability overall on our website," says Lallemand. "When you're switching to this cloud-native model with running everything in containers, you have to make sure that you can at any moment reboot a server or a data container without any downtime, without losing traffic. So now our infrastructure is much more resilient and we have better availability than before."
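
Kubernetes lets a team state that guarantee declaratively. A PodDisruptionBudget—sketched here with a hypothetical service name—tells the cluster how many pods of a service must stay up while nodes are drained and rebooted:

```yaml
# Illustrative: keep at least 80% of this service's pods running during
# voluntary disruptions such as a node drain before a reboot.
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: core-api-pdb        # hypothetical service
spec:
  minAvailable: "80%"
  selector:
    matchLabels:
      app: core-api
```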


Within BlaBlaCar's technology department, the cloud-native journey has created some profound changes. Lallemand thinks that the regular meetings during the conception stage and the training sessions during implementation helped. "After that everybody took part in the migration process," he says. "Then we split the organization into different 'tribes'—teams that gather developers, product managers, data analysts, all the different jobs, to work on a specific part of the product. Before, they were organized by function. The idea is to give all these tribes access to the infrastructure directly in a self-service way without having to ask. These people are really autonomous. They have responsibility of that part of the product, and they can make decisions faster."


This DevOps transformation turned out to be a positive one for the company's staffers. "The team was very excited about the DevOps transformation because it was new, and we were working to make things more reliable, more future-proof," says Lallemand. "We like doing things that very few people are doing, other than the internet giants."


With these changes already making an impact, BlaBlaCar is looking to split up more and more of its application into services. "I don't say microservices because they're not so micro," Lallemand says. "If we can split the responsibilities between the development teams, it would be easier to manage and more reliable, because we can easily add and remove services if one fails. You can handle it easily, instead of adding a big monolith that we still have."


When Lallemand speaks to other European companies curious about what BlaBlaCar has done with its infrastructure, he tells them to come along for the ride. "I tell them that it's such a pleasure to deal with the infrastructure that we have today compared to what we had before," he says. "They just need to keep in mind their real motive, whether it's flexibility in development or reliability or so on, and then go step by step towards reaching those objectives. That's what we've done. It's important not to do technology for the sake of technology. Do it for a purpose. Our focus was on helping the developers."

\ No newline at end of file
diff --git a/content/bn/case-studies/blackrock/blackrock_featured.png b/content/bn/case-studies/blackrock/blackrock_featured.png
new file mode 100644
index 0000000000000..3898b88c9fa43
Binary files /dev/null and b/content/bn/case-studies/blackrock/blackrock_featured.png differ
diff --git a/content/bn/case-studies/blackrock/blackrock_featured.svg b/content/bn/case-studies/blackrock/blackrock_featured.svg
new file mode 100644
index 0000000000000..f98ea323d73e1
--- /dev/null
+++ b/content/bn/case-studies/blackrock/blackrock_featured.svg
@@ -0,0 +1 @@
kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/blackrock/blackrock_logo.png b/content/bn/case-studies/blackrock/blackrock_logo.png
new file mode 100644
index 0000000000000..51e914a63b259
Binary files /dev/null and b/content/bn/case-studies/blackrock/blackrock_logo.png differ
diff --git a/content/bn/case-studies/blackrock/index.html b/content/bn/case-studies/blackrock/index.html
new file mode 100644
index 0000000000000..50cc5ebc97e31
--- /dev/null
+++ b/content/bn/case-studies/blackrock/index.html
@@ -0,0 +1,83 @@
---
title: BlackRock Case Study
case_study_styles: true
cid: caseStudies

new_case_study_styles: true
heading_background: /images/case-studies/blackrock/banner1.jpg
heading_title_logo: /images/blackrock_logo.png
subheading: >
  Rolling Out Kubernetes in Production in 100 Days
case_study_details:
  - Company: BlackRock
  - Location: New York, NY
  - Industry: Financial Services
---

Challenge


The world's largest asset manager, BlackRock operates a very controlled static deployment scheme, which has allowed for scalability over the years. But in their data science division, there was a need for more dynamic access to resources. "We want to be able to give every investor access to data science, meaning Python notebooks, or even something much more advanced, like a MapReduce engine based on Spark," says Michael Francis, a Managing Director in BlackRock's Product Group, which runs the company's investment management platform. "Managing complex Python installations on users' desktops is really hard because everyone ends up with slightly different environments. We have existing environments that do these things, but we needed to make it real, expansive and scalable. Being able to spin that up on demand, tear it down, make that much more dynamic, became a critical thought process for us. It's not so much that we had to solve our main core production problem, it's how do we extend that? How do we evolve?"


Solution


Drawing from what they learned during a pilot done last year using Docker environments, Francis put together a cross-sectional team of 20 to build an investor research web app using Kubernetes with the goal of getting it into production within one quarter.


Impact


"Our goal was: How do you give people tools rapidly without having to install them on their desktop?" says Francis. And the team hit the goal within 100 days. Francis is pleased with the results and says, "We're going to use this infrastructure for lots of other application workloads as time goes on. It's not just data science; it's this style of application that needs the dynamism. But I think we're 6-12 months away from making a [large scale] decision. We need to gain experience of running the system in production, we need to understand failure modes and how best to manage operational issues. What's interesting is that just having this technology there is changing the way our developers are starting to think about their future development."

{{< case-studies/quote author="Michael Francis, Managing Director, BlackRock" >}}
"My message to other enterprises like us is you can actually integrate Kubernetes into an existing, well-orchestrated machinery. You don't have to throw out everything you do. And using Kubernetes made a complex problem significantly easier."
{{< /case-studies/quote >}}

One of the management objectives for BlackRock's Product Group employees in 2017 was to "build cool stuff." Led by Managing Director Michael Francis, a cross-sectional group of 20 did just that: They rolled out a full production Kubernetes environment and released a new investor research web app on it. In 100 days.


For a company that's the world's largest asset manager, "just equipment procurement can take 100 days sometimes, let alone from inception to delivery," says Karl Wieman, a Senior System Administrator. "It was an aggressive schedule. But it moved the dial." In fact, the project achieved two goals: It solved a business problem (creating the needed web app) as well as provided real-world, in-production experience with Kubernetes, a cloud-native technology that the company was eager to explore. "It's not so much that we had to solve our main core production problem, it's how do we extend that? How do we evolve?" says Francis. The ultimate success of this project, beyond delivering the app, lies in the fact that "we've managed to integrate a radically new thought process into a controlled infrastructure that we didn't want to change."


After all, in its three decades of existence, BlackRock has "a very well-established environment for managing our compute resources," says Francis. "We manage large cluster processes on machines, so we do a lot of orchestration and management for our main production processes in a way that's very cloudish in concept. We're able to manage them in a very controlled, static deployment scheme, and that has given us a huge amount of scalability."


Though that works well for the core production, the company has found that some data science workloads require more dynamic access to resources. "It's a very bursty process," says Francis, who is head of data for the company's Aladdin investment management platform division.


Aladdin, which connects the people, information and technology needed for money management in real time, is used internally and is also sold as a platform to other asset managers and insurance companies. "We want to be able to give every investor access to data science, meaning Python notebooks, or even something much more advanced, like a MapReduce engine based on Spark," says Francis. But "managing complex Python installations on users' desktops is really hard because everyone ends up with slightly different environments. Docker allows us to flatten that environment."

{{< case-studies/quote image="/images/case-studies/blackrock/banner3.jpg" >}}
"We manage large cluster processes on machines, so we do a lot of orchestration and management for our main production processes in a way that's very cloudish in concept. We're able to manage them in a very controlled, static deployment scheme, and that has given us a huge amount of scalability."
{{< /case-studies/quote >}}

Still, challenges remain. "If you have a shared cluster, you get this storming herd problem where everyone wants to do the same thing at the same time," says Francis. "You could put limits on it, but you'd have to build an infrastructure to define limits for our processes, and the Python notebooks weren't really designed for that. We have existing environments that do these things, but we needed to make it real, expansive, and scalable. Being able to spin that up on demand, tear it down, and make that much more dynamic, became a critical thought process for us."
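
On Kubernetes, that kind of guardrail is declarative rather than something a team has to build: a per-namespace ResourceQuota caps what a shared research environment can claim in total. The names and figures here are illustrative, not BlackRock's actual settings:

```yaml
# Illustrative: cap the total resources all notebooks in one namespace can request.
apiVersion: v1
kind: ResourceQuota
metadata:
  name: research-quota
  namespace: notebooks      # hypothetical shared namespace
spec:
  hard:
    requests.cpu: "200"
    requests.memory: 400Gi
    pods: "50"
```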


Made up of managers from technology, infrastructure, production operations, development and information security, Francis's team was able to look at the problem holistically and come up with a solution that made sense for BlackRock. "Our initial straw man was that we were going to build everything using Ansible and run it all using some completely different distributed environment," says Francis. "That would have been absolutely the wrong thing to do. Had we gone off on our own as the dev team and developed this solution, it would have been a very different product. And it would have been very expensive. We would not have gone down the route of running under our existing orchestration system. Because we don't understand it. These guys [in operations and infrastructure] understand it. Having the multidisciplinary team allowed us to get to the right solutions and that actually meant we didn't build anywhere near the amount we thought we were going to end up building."


In search of a solution in which they could manage usage on a user-by-user level, Francis's team gravitated to Red Hat's OpenShift Kubernetes offering. The company had already experimented with other cloud-native environments, but the team liked that Kubernetes was open source, and "we felt the winds were blowing in the direction of Kubernetes long term," says Francis. "Typically we make technology choices that we believe are going to be here in 5-10 years' time, in some form. And right now, in this space, Kubernetes feels like the one that's going to be there." Adds Uri Morris, Vice President of Production Operations: "When you see that the non-Google committers to Kubernetes overtook the Google committers, that's an indicator of the momentum."


Once that decision was made, the major challenge was figuring out how to make Kubernetes work within BlackRock's existing framework. "It's about understanding how we can operate, manage and support a platform like this, in addition to tacking it onto our existing technology platform," says Project Manager Michael Maskallis. "All the controls we have in place, the change management process, the software development lifecycle, onboarding processes we go through—how can we do all these things?"


The first (anticipated) speed bump was working around issues behind BlackRock's corporate firewalls. "One of our challenges is there are no firewalls in most open source software," says Francis. "So almost all install scripts fail in some bizarre way, and pulling down packages doesn't necessarily work." The team ran into these types of problems using Minikube and did a few small pushes back to the open source project.

{{< case-studies/quote image="/images/case-studies/blackrock/banner4.jpg" >}}
"Typically we make technology choices that we believe are going to be here in 5-10 years' time, in some form. And right now, in this space, Kubernetes feels like the one that's going to be there."
{{< /case-studies/quote >}}

There were also questions about service discovery. "You can think of Aladdin as a cloud of services with APIs between them that allows us to build applications rapidly," says Francis. "It's all on a proprietary message bus, which gives us all sorts of advantages but at the same time, how does that play in a third party [platform]?"


Another issue they had to navigate was that in BlackRock's existing system, the messaging protocol has different instances in the different development, test and production environments. While Kubernetes enables a more DevOps-style model, it didn't make sense for BlackRock. "I think what we are very proud of is that the ability for us to push into production is still incredibly rapid in this [new] infrastructure, but we have the control points in place, and we didn't have to disrupt everything," says Francis. "A lot of the cost of this development was thinking how best to leverage our internal tools. So it was less costly than we actually thought it was going to be."


The project leveraged tools associated with the messaging bus, for example. "The way that the Kubernetes cluster will talk to our internal messaging platform is through a gateway program, and this gateway program already has built-in checks and throttles," says Morris. "We can use them to control and potentially throttle the requests coming in from Kubernetes's very elastic infrastructure to the production infrastructure. We'll continue to go in that direction. It enables us to scale as we need to from the operational perspective."


The solution also had to be complementary with BlackRock's centralized operational support team structure. "The core infrastructure components of Kubernetes are hooked into our existing orchestration framework, which means that anyone in our support team has both control and visibility to the cluster using the existing operational tools," Morris explains. "That means that I don't need to hire more people."


With those points established, the team created a procedure for the project: "We rolled this out first to a development environment, then moved on to a testing environment and then eventually to two production environments, in that sequential order," says Maskallis. "That drove a lot of our learning curve. We have all these moving parts, the software components on the infrastructure side, the software components with Kubernetes directly, the interconnectivity with the rest of the environment that we operate here at BlackRock, and how we connect all these pieces. If we came across issues, we fixed them, and then moved on to the different environments to replicate that until we eventually ended up in our production environment where this particular cluster is supposed to live."


The team had weekly one-hour working sessions with all the members (who are located around the world) participating, and smaller breakout or deep-dive meetings focusing on specific technical details. Possible solutions would be reported back to the group and debated the following week. "I think what made it a successful experiment was people had to work to learn, and they shared their experiences with others," says Vice President and Software Developer Fouad Semaan. Then, Francis says, "We gave our engineers the space to do what they're good at. This hasn't been top-down."

{{< case-studies/quote >}}
"The core infrastructure components of Kubernetes are hooked into our existing orchestration framework, which means that anyone in our support team has both control and visibility to the cluster using the existing operational tools. That means that I don't need to hire more people."
{{< /case-studies/quote >}}

They were led by one key axiom: To stay focused and avoid scope creep. This meant that they wouldn't use features that weren't in the core of Kubernetes and Docker. But if there was a real need, they'd build the features themselves. Luckily, Francis says, "Because of the rapidity of the development, a lot of things we thought we would have to build ourselves have been rolled into the core product. [The package manager Helm is one example]. People have similar problems."


By the end of the 100 days, the app was up and running for internal BlackRock users. The initial capacity of 30 users was hit within hours, and quickly increased to 150. "People were immediately all over it," says Francis. In the next phase of this project, they are planning to scale up the cluster to have more capacity.


Even more importantly, they now have in-production experience with Kubernetes that they can continue to build on—and a complete framework for rolling out new applications. "We're going to use this infrastructure for lots of other application workloads as time goes on. It's not just data science; it's this style of application that needs the dynamism," says Francis. "Is it the right place to move our core production processes onto? It might be. We're not at a point where we can say yes or no, but we felt that having real production experience with something like Kubernetes at some form and scale would allow us to understand that. I think we're 6-12 months away from making a [large scale] decision. We need to gain experience of running the system in production, we need to understand failure modes and how best to manage operational issues."


For other big companies considering a project like this, Francis says commitment and dedication are key: "We got the signoff from [senior management] from day one, with the commitment that we were able to get the right people. If I had to isolate what makes something complex like this succeed, I would say senior hands-on people who can actually drive it make a huge difference." With that in place, he adds, "My message to other enterprises like us is you can actually integrate Kubernetes into an existing, well-orchestrated machinery. You don't have to throw out everything you do. And using Kubernetes made a complex problem significantly easier."

\ No newline at end of file
diff --git a/content/bn/case-studies/booking-com/booking.com_featured_logo.png b/content/bn/case-studies/booking-com/booking.com_featured_logo.png
new file mode 100644
index 0000000000000..623ca67345be5
Binary files /dev/null and b/content/bn/case-studies/booking-com/booking.com_featured_logo.png differ
diff --git a/content/bn/case-studies/booking-com/booking.com_featured_logo.svg b/content/bn/case-studies/booking-com/booking.com_featured_logo.svg
new file mode 100644
index 0000000000000..0b245c27001af
--- /dev/null
+++ b/content/bn/case-studies/booking-com/booking.com_featured_logo.svg
@@ -0,0 +1 @@
booking.com_featured_logo
\ No newline at end of file
diff --git a/content/bn/case-studies/booking-com/index.html b/content/bn/case-studies/booking-com/index.html
new file mode 100644
index 0000000000000..5278cd0baba8f
--- /dev/null
+++ b/content/bn/case-studies/booking-com/index.html
@@ -0,0 +1,86 @@
---
title: Booking.com Case Study
linkTitle: Booking.com
case_study_styles: true
cid: caseStudies
logo: booking.com_featured_logo.png
featured: true
weight: 3
quote: >
  We realized that we needed to learn Kubernetes better in order to fully use the potential of it. At that point, we made the shift to build our own Kubernetes platform.

new_case_study_styles: true
heading_background: /images/case-studies/booking/banner1.jpg
heading_title_text: Booking.com
use_gradient_overlay: true
subheading: >
  After Learning the Ropes with a Kubernetes Distribution, Booking.com Built a Platform of Its Own
case_study_details:
  - Company: Booking.com
  - Location: Netherlands
  - Industry: Travel
---

Challenge


In 2016, Booking.com migrated to an OpenShift platform, which gave product developers faster access to infrastructure. But because Kubernetes was abstracted away from the developers, the infrastructure team became a "knowledge bottleneck" when challenges arose. Trying to scale that support wasn't sustainable.


Solution


After a year operating OpenShift, the platform team decided to build its own vanilla Kubernetes platform—and ask developers to learn some Kubernetes in order to use it. "This is not a magical platform," says Ben Tyler, Principal Developer, B Platform Track. "We're not claiming that you can just use it with your eyes closed. Developers need to do some learning, and we're going to do everything we can to make sure they have access to that knowledge."


Impact


Despite the learning curve, there's been a great uptick in adoption of the new Kubernetes platform. Before containers, creating a new service could take a couple of days if the developers understood Puppet, or weeks if they didn't. On the new platform, it can take as few as 10 minutes. About 500 new services were built on the platform in the first 8 months.

{{< case-studies/quote
    image="/images/case-studies/booking/banner2.jpg"
    author="BEN TYLER, PRINCIPAL DEVELOPER, B PLATFORM TRACK AT BOOKING.COM"
>}}
"As our users learn Kubernetes and become more sophisticated Kubernetes users, they put pressure on us to provide a better, more native Kubernetes experience, which is great. It's a super healthy dynamic."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
Booking.com has a long history with Kubernetes: In 2015, a team at the travel platform prototyped a container platform based on Mesos and Marathon.
{{< /case-studies/lead >}}

Impressed by what the technology offered, but in need of enterprise features at its scale—the site handles more than 1.5 million room-night reservations a day on average—the team decided to adopt an OpenShift platform.


This platform, which was wrapped in a Heroku-style, high-level CLI interface, "was definitely popular with our product developers," says Ben Tyler, Principal Developer, B Platform Track. "We gave them faster access to infrastructure."


But, he adds, "anytime something went slightly off the rails, developers didn't have any of the knowledge required to support themselves."


And after a year of operating this platform, the infrastructure team found that it had become "a knowledge bottleneck," he says. "Most of the developers who used it did not know it was Kubernetes underneath. An application failure and a platform failure both looked like failures of that Heroku-style tool."


Scaling the necessary support did not seem feasible or sustainable, so the platform team needed a new solution. The understanding of Kubernetes that they had gained operating the OpenShift platform gave them confidence to build a vanilla Kubernetes platform of their own and customize it to suit the company's needs.

{{< case-studies/quote author="EDUARD IACOBOAIA, SENIOR SYSTEM ADMINISTRATOR, B PLATFORM TRACK AT BOOKING.COM" >}}
"For entering the landscape, OpenShift was definitely very helpful. It shows you what the technology can do, and it makes it easy for you to use it. After we spent some time on it, we realized that we needed to learn Kubernetes better in order to fully use the potential of it. At that point, we made the shift to build our own Kubernetes platform. We definitely benefit in the long term for taking that step and investing the time in gaining that knowledge."
{{< /case-studies/quote >}}

"For entering the landscape, OpenShift was definitely very helpful," says Eduard Iacoboaia, Senior System Administrator, B Platform Track. "It shows you what the technology can do, and it makes it easy for you to use it. After we spent some time on it, we realized that we needed to learn Kubernetes better in order to fully use the potential of it. At that point, we made the shift to build our own Kubernetes platform. We definitely benefit in the long term for taking that step and investing the time in gaining that knowledge."


Iacoboaia's team had customized a lot of OpenShift tools to make them work at Booking.com, and "those integration points were kind of fragile," he says. "We spent much more time understanding all the components of Kubernetes, how they work, how they interact with each other." That research led the team to switch from OpenShift's built-in Ansible playbooks to Puppet deployments, which are used for the rest of Booking's infrastructure. The control plane was also moved from inside the cluster onto bare metal, as the company runs tens of thousands of bare-metal servers and a large infrastructure for running applications on bare metal. (Booking runs Kubernetes in multiple clusters in multiple data centers across the various regions where it has compute.) "We decided to keep it as simple as possible and to also use the tools that we know best," says Iacoboaia.


The other big change was that product engineers would have to learn Kubernetes in order to onboard. "This is not a magical platform," says Tyler. "We're not claiming that you can just use it with your eyes closed. Developers need to do some learning, and we're going to do everything we can to make sure they have access to that knowledge." That includes trainings, blog posts, videos, and Udemy courses.


Despite the learning curve, there's been a great uptick in adoption of the new Kubernetes platform. "I think the reason we've been able to strike this bargain successfully is that we're not asking them to learn a proprietary app system," says Tyler. "We're asking them to learn something that's open source, where the knowledge is transferable. They're investing in their own careers by learning Kubernetes."


One clear sign that this strategy has been a success is that in the support channel, when users have questions, other product engineers are jumping in to respond. "I haven't seen that kind of community engagement around a particular platform product internally before," says Tyler. "It helps a lot that it's visibly an ecosystem standard outside of the company, so people feel value in investing in that knowledge and sharing it with others, which is really, really powerful."

{{< case-studies/quote
    image="/images/case-studies/booking/banner3.jpg"
    author="BEN TYLER, PRINCIPAL DEVELOPER, B PLATFORM TRACK AT BOOKING.COM"
>}}
"We have a tutorial. You follow the tutorial. Your code is running. Then, it's business-logic time. The time to gain access to resources is decreased enormously."
{{< /case-studies/quote >}}

There's other quantifiable evidence too: Before containers, creating a new service could take a couple of days if the developers understood Puppet, or weeks if they didn't. On the new platform, it takes 10 minutes. "We have a tutorial. You follow the tutorial. Your code is running. Then, it's business-logic time," says Tyler. "The time to gain access to resources is decreased enormously." About 500 new services were built in the first 8 months on the platform, with hundreds of releases per day.


The platform offers different "layers of contracts, so to speak," says Tyler. "At the very base, it's just Kubernetes. If you're a pro Kubernetes user, here's a Kubernetes API, just like you get from GKE or AKS. We're trying to be a provider on that same level. But our whole job inside the company is to be a bigger value add than just vanilla infrastructure, so we provide a set of base images for our main stacks, Perl and Java."


And "as our users learn Kubernetes and become more sophisticated Kubernetes users, they put pressure on us to provide a better, more native Kubernetes experience, which is great," says Tyler. "It's a super healthy dynamic."


The platform also includes other CNCF technologies, such as Envoy, Helm, and Prometheus. Most of the critical service traffic for Booking.com is routed through Envoy, and Prometheus is used primarily to monitor infrastructure components. Helm is consumed as a packaging standard. The team also developed and open sourced Shipper, an extension for Kubernetes to add more complex rollout strategies and multi-cluster orchestration.
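
Booking.com's configuration isn't public, but monitoring infrastructure components with Prometheus on Kubernetes typically leans on Prometheus's built-in service discovery; a minimal fragment of a scrape configuration looks something like this:

```yaml
# Illustrative prometheus.yml fragment: Prometheus asks the Kubernetes API
# which nodes exist and scrapes each one it discovers.
scrape_configs:
  - job_name: kubernetes-nodes
    kubernetes_sd_configs:
      - role: node
```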


To be sure, there have been internal discussions about the wisdom of building a Kubernetes platform from the ground up. "This is not really our core competency—Kubernetes and travel, they're kind of far apart, right?" says Tyler. "But we've made a couple of bets on CNCF components that have worked out really well for us. Envoy and Kubernetes, in particular, have been really beneficial to our organization. We were able to customize them, either because we could look at the source code or because they had extension points, and we were able to get value out of them very quickly without having to change any paradigms internally."

\ No newline at end of file
diff --git a/content/bn/case-studies/booz-allen/booz-allen-featured-logo.png b/content/bn/case-studies/booz-allen/booz-allen-featured-logo.png
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/content/bn/case-studies/booz-allen/booz-allen-featured-logo.svg b/content/bn/case-studies/booz-allen/booz-allen-featured-logo.svg
new file mode 100644
index 0000000000000..3ce58c68f7858
--- /dev/null
+++ b/content/bn/case-studies/booz-allen/booz-allen-featured-logo.svg
@@ -0,0 +1 @@
booz-allen-featured
\ No newline at end of file
diff --git a/content/bn/case-studies/booz-allen/booz-allen_featured_logo.png b/content/bn/case-studies/booz-allen/booz-allen_featured_logo.png
new file mode 100644
index 0000000000000..f9bc64ba3bd2b
Binary files /dev/null and b/content/bn/case-studies/booz-allen/booz-allen_featured_logo.png differ
diff --git a/content/bn/case-studies/booz-allen/index.html b/content/bn/case-studies/booz-allen/index.html
new file mode 100644
index 0000000000000..ae4adef42f7d1
--- /dev/null
+++ b/content/bn/case-studies/booz-allen/index.html
@@ -0,0 +1,80 @@
---
title: Booz Allen Case Study
linkTitle: Booz Allen Hamilton
case_study_styles: true
cid: caseStudies
logo: booz-allen-featured-logo.svg
featured: true
weight: 2
quote: >
  Kubernetes is a great solution for us. It allows us to rapidly iterate on our clients' demands.

new_case_study_styles: true
heading_background: /images/case-studies/booz-allen/banner4.jpg
heading_title_text: Booz Allen Hamilton
use_gradient_overlay: true
subheading: >
  How Booz Allen Hamilton Is Helping Modernize the Federal Government with Kubernetes
case_study_details:
  - Company: Booz Allen Hamilton
  - Location: United States
  - Industry: Government
---

Challenge


In 2017, Booz Allen Hamilton's Strategic Innovation Group worked with the federal government to relaunch the decade-old recreation.gov website, which provides information and real-time booking for more than 100,000 campsites and facilities on federal lands across the country. The infrastructure needed to be agile, reliable, and scalable—as well as repeatable for the other federal agencies that are among Booz Allen Hamilton's customers.


Solution


"The only way that we thought we could be successful with this problem across all the different agencies is to create a microservice architecture and containers, so that we could be very dynamic and very agile to any given agency for whatever requirements that they may have," says Booz Allen Hamilton Senior Lead Technologist Martin Folkoff. To meet those requirements, Folkoff's team looked to Kubernetes for orchestration.


Impact


With the recreation.gov Kubernetes platform, changes can be implemented in about 30 minutes, compared to the multiple hours or even days legacy government applications require to review the code, get approval, and deploy the fix. Recreation.gov deploys to production on average 10 times a day. With monitoring, security, and logging built in, developers can create and publish new services to production within a week. Additionally, Folkoff says, "supporting the large, existing monoliths in the government is extremely expensive," and migrating into a more modern platform has resulted in perhaps 50% cost savings.

{{< case-studies/quote
    image="/images/case-studies/booz-allen/banner2.jpg"
    author="JOSH BOYD, CHIEF TECHNOLOGIST AT BOOZ ALLEN HAMILTON"
>}}
"When there's a regulatory change in an agency, or a legislative change in Congress, or an executive order that changes the way you do business, how do I deploy that and get that out to the people who need it rapidly? At the end of the day, that's the problem we're trying to help the government solve with tools like Kubernetes."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
The White House launched an IT modernization effort in 2017, and in addition to improving cybersecurity and shifting to the public cloud and a consolidated IT model, "the federal government is looking to provide a better experience to citizens in every way that we interact with the government through every channel," says Booz Allen Hamilton Senior Lead Technologist Martin Folkoff.
{{< /case-studies/lead >}}

To that end, Folkoff's Strategic Innovation Group worked with the federal government last year to relaunch the decade-old recreation.gov website, which provides information and real-time booking for more than 100,000 campsites and facilities on federal lands across the country.


The infrastructure needed to be agile, reliable, and scalable—as well as repeatable for the other federal agencies that are among Booz Allen Hamilton's customers. "The only way that we thought we could be successful with this problem across all the different agencies is to create a microservice architecture, so that we could be very dynamic and very agile to any given agency for whatever requirements that they may have," says Folkoff.

{{< case-studies/quote author="MARTIN FOLKOFF, SENIOR LEAD TECHNOLOGIST AT BOOZ ALLEN HAMILTON" >}}
"With CNCF, there's a lot of focus on scale, and so there's a lot of comfort knowing that as the project grows, we're going to be comfortable using that tool set."
{{< /case-studies/quote >}}

Booz Allen Hamilton, which has provided consulting services to the federal government for more than a century, introduced microservices, Docker containers, and AWS to its federal agency clients about five years ago. The next logical step was Kubernetes for orchestration. "Knowing that we had to be really agile and really reliable and scalable, we felt that the only technology that we know that can enable those kinds of things are the ones the CNCF provides," Folkoff says. "One of the things that is always important for the government is to make sure that the things that we build really endure. Using technology that is supported across multiple different companies and has strong governance gives people a lot of confidence."


Kubernetes was also aligned with the government's open source and IT modernization initiatives, so there has been an uptick in its usage at federal agencies over the past two years. "Now that Kubernetes is becoming offered as a service by the cloud providers like AWS and Microsoft, we're starting to see even more interest," says Chief Technologist Josh Boyd. Adds Folkoff: "With CNCF, there's a lot of focus on scale, and so there's a lot of comfort knowing that as the project grows, we're going to be comfortable using that tool set."


The greenfield recreation.gov project allowed the team to build a new Kubernetes-enabled site running on AWS, and the migration lasted only a week, during which the old site didn't take bookings. "For the actual transition, we just swapped a DNS server, and it only took about 35 seconds between the old site being down and our new site being up and available," Folkoff adds.

{{< case-studies/quote
    image="/images/case-studies/booz-allen/banner1.png"
    author="MARTIN FOLKOFF, SENIOR LEAD TECHNOLOGIST AT BOOZ ALLEN HAMILTON"
>}}
"Kubernetes alone enables a dramatic reduction in cost as resources are prioritized to the day's event."
{{< /case-studies/quote >}}

In addition to its work with the Department of Interior for recreation.gov, Booz Allen Hamilton has brought Kubernetes to various Defense, Intelligence, and civilian agencies. Says Boyd: "When there's a regulatory change in an agency, or a legislative change in Congress, or an executive order that changes the way you do business, how do I deploy that and get that out to the people who need it rapidly? At the end of the day, that's the problem we're trying to help the government solve with tools like Kubernetes."


For recreation.gov, the impact was clear and immediate. With the Kubernetes platform, Folkoff says, "if a new requirement for a permit comes out, we have the ability to design and develop and implement that completely independently of reserving a campsite. It provides a much better experience to users." Today, changes can be implemented in about 30 minutes, compared to the multiple hours or even days legacy government applications require to review the code, get approval, and deploy the fix. Recreation.gov deploys to production on average 10 times a day.


Developer velocity has been improved. "When I want to do monitoring or security or logging, I don't have to do anything to my services or my application to enable that anymore," says Boyd. "I get all of this magic just by being on the Kubernetes platform." With all of those things built in, developers can create and publish new services to production within one week.


Additionally, Folkoff says, "supporting the large, existing monoliths in the government is extremely expensive," and migrating into a more modern platform has resulted in perhaps 50% cost savings. "Kubernetes alone enables a dramatic reduction in cost as resources are prioritized to the day's event," he says. "For example, during a popular campsite release, camping-related services are scaled out while permit services are scaled down."
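
That event-driven shape maps naturally onto Kubernetes autoscaling. As an illustrative sketch (the service name and thresholds are invented), a HorizontalPodAutoscaler scales the camping services out under load while quieter services sit at their floor:

```yaml
# Illustrative: grow the camping API with CPU load during a release event.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: camping-api         # hypothetical service
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: camping-api
  minReplicas: 2            # quiet services stay at their floor...
  maxReplicas: 30           # ...while busy ones scale out
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
```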


So far, "Kubernetes is a great solution for us," says Folkoff. "It allows us to rapidly iterate on our clients' demands." Looking ahead, the team sees further adoption of the Kubernetes platform across federal agencies. Says Boyd: "You get the ability for the rapid delivery of business value for your customers. You now have observability into everything that you're doing. You don't have these onesies and twosies unicorn servers anymore. Now everything that you deploy is deployed in the same way, it's all instrumented the same way, and it's all built and deployed the same way through our CI/CD processes."


They also see a push toward re-platforming. "There's still a lot of legacy workloads out there," says Boyd. "We've got the new challenges of greenfield development and integration with legacy systems, but also that brown field of 'Hey, how do I take this legacy monolith and get it onto a platform where now it's instrumented with all the magic of the Kubernetes platform without having to do a whole lot to my application?' I think re-platforming is a pretty big use case for the government right now."


And given the success that they've had with Kubernetes so far, Boyd says, "I think at this point that technology is becoming pretty easy to sell." Adds Folkoff: "People are really excited about being able to deploy, scale, be reliable, and do cheaper maintenance of all of this."

\ No newline at end of file
diff --git a/content/bn/case-studies/bose/bose_featured_logo.png b/content/bn/case-studies/bose/bose_featured_logo.png
new file mode 100644
index 0000000000000..d4af69ed7275b
Binary files /dev/null and b/content/bn/case-studies/bose/bose_featured_logo.png differ
diff --git a/content/bn/case-studies/bose/bose_featured_logo.svg b/content/bn/case-studies/bose/bose_featured_logo.svg
new file mode 100644
index 0000000000000..58b2add6144b8
--- /dev/null
+++ b/content/bn/case-studies/bose/bose_featured_logo.svg
@@ -0,0 +1 @@
+kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/bose/index.html b/content/bn/case-studies/bose/index.html
new file mode 100644
index 0000000000000..c8e06616643be
--- /dev/null
+++ b/content/bn/case-studies/bose/index.html
@@ -0,0 +1,87 @@
+---
+title: Bose Case Study
+linkTitle: Bose
+case_study_styles: true
+cid: caseStudies
+logo: bose_featured_logo.png
+featured: false
+weight: 2
+quote: >
+  The CNCF Landscape quickly explains what's going on in all the different areas from storage to cloud providers to automation and so forth. This is our shopping cart to build a cloud infrastructure. We can go choose from the different aisles.
+
+new_case_study_styles: true
+heading_background: /images/case-studies/bose/banner1.jpg
+heading_title_logo: /images/bose_logo.png
+subheading: >
+  Bose: Supporting Rapid Development for Millions of IoT Products With Kubernetes
+case_study_details:
+  - Company: Bose Corporation
+  - Location: Framingham, Massachusetts
+  - Industry: Consumer Electronics
+---

Challenge


A household name in high-quality audio equipment, Bose has offered connected products for more than five years, and as that demand grew, the infrastructure had to change to support it. "We needed to provide a mechanism for developers to rapidly prototype and deploy services all the way to production pretty fast," says Lead Cloud Engineer Josh West. In 2016, the company decided to start building a platform from scratch. The primary goal: "To be one to two steps ahead of the different product groups so that we are never scrambling to catch up with their scale," says Cloud Architecture Manager Dylan O'Mahony.


Solution


From the beginning, the team knew it wanted a microservices architecture. After evaluating and prototyping a couple of orchestration solutions, the team decided to adopt Kubernetes for its scaled IoT Platform-as-a-Service running on AWS. The platform, which also incorporated Prometheus monitoring, launched in production in 2017, serving over 3 million connected products from the get-go. Bose has since adopted a number of other CNCF technologies, including Fluentd, CoreDNS, Jaeger, and OpenTracing.


Impact


With about 100 engineers onboarded, the platform is now enabling 30,000 non-production deployments across dozens of microservices per year. In 2018, there were 1250+ production deployments. Just one production cluster holds 1,800 namespaces and 340 worker nodes. "We had a brand new service taken from concept through coding and deployment all the way to production, including hardening, security testing and so forth, in less than two and a half weeks," says O'Mahony.

{{< case-studies/quote author="Josh West, Lead Cloud Engineer, Bose" >}}
"At Bose we're building an IoT platform that has enabled our physical products. If it weren't for Kubernetes and the rest of the CNCF projects being free open source software with such a strong community, we would never have achieved scale, or even gotten to launch on schedule."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
A household name in high-quality audio equipment, Bose has offered connected products for more than five years, and as that demand grew, the infrastructure had to change to support it.
{{< /case-studies/lead >}}

"We needed to provide a mechanism for developers to rapidly prototype and deploy services all the way to production pretty fast," says Lead Cloud Engineer Josh West. "There were a lot of cloud capabilities we wanted to provide to support our audio equipment and experiences."


In 2016, the company decided to start building an IoT platform from scratch. The primary goal: "To be one to two steps ahead of the different product groups so that we are never scrambling to catch up with their scale," says Cloud Architecture Manager Dylan O'Mahony. "If they release a new connected product, we want to be already well ahead of being able to handle whatever scale that they're going to throw at us."


From the beginning, the team knew it wanted a microservices architecture and platform as a service. After evaluating and prototyping orchestration solutions, including Mesos and Docker Swarm, the team decided to adopt Kubernetes for its platform running on AWS. Kubernetes was still in 1.5, but already the technology could do much of what the team wanted and needed for the present and the future. For West, that meant having storage and network handled. O'Mahony points to Kubernetes' portability in case Bose decides to go multi-cloud.


"Bose is a company that looks out for the long term," says West. "Going with a quick commercial off-the-shelf solution might've worked for that point in time, but it would not have carried us forward, which is what we needed from Kubernetes and the CNCF."

{{< case-studies/quote
    image="/images/case-studies/bose/banner3.jpg"
    author="Dylan O'Mahony, Cloud Architecture Manager, Bose"
>}}
"Everybody on the team thinks in terms of automation, leaning out the processes, getting things done as quickly as possible. When you step back and look at what it means for a 50-plus-year-old speaker company to have that sort of culture, it really is quite incredible, and I think the tools that we use and the foundation that we've built with them is a huge piece of that."
{{< /case-studies/quote >}}

The team spent time working on choosing tooling to make the experience easier for developers. "Our developers interact with tools provided by our Ops team, and the Ops team run all of their tooling on top of Kubernetes," says O'Mahony. "We try not to make direct Kubernetes access the only way. In fact, ideally, our developers wouldn't even need to know that they're running on Kubernetes."


The platform, which also incorporated Prometheus monitoring from the beginning, backdoored its way into production in 2017, serving over 3 million connected products from the get-go. "Even though the speakers and the products that we were designing this platform for were still quite a ways away from being launched, we did have some connected speakers on the market," says O'Mahony. "We basically started to point certain features of those speakers and the apps that go with those speakers to this platform."


Today, just one of Bose's production clusters holds 1,800 namespaces/discrete services and 340 nodes. With about 100 engineers now onboarded, the platform infrastructure is enabling 30,000 non-production deployments across dozens of microservices per year. In 2018, there were 1250+ production deployments. It's a staggering improvement over some of Bose's previous deployment processes, which supported far fewer deployments and services.

{{< case-studies/quote
    image="/images/case-studies/bose/banner4.jpg"
    author="Josh West, Lead Cloud Engineer, Bose"
>}}
"The CNCF Landscape quickly explains what's going on in all the different areas from storage to cloud providers to automation and so forth. This is our shopping cart to build a cloud infrastructure. We can go choose from the different aisles."
{{< /case-studies/quote >}}

"We had a brand new service deployed from concept through coding and deployment all the way to production, including hardening, security testing and so forth, in less than two and a half weeks," says O'Mahony. "Everybody thinks in terms of automation, leaning out the processes, getting things done as quickly as possible. When you step back and look at what it means for a 50-plus-year-old speaker company to have that sort of culture, it really is quite incredible, and I think the tools that we use and the foundation that we've built is a huge piece of that."


Many of those technologies—such as Fluentd, CoreDNS, Jaeger, and OpenTracing—come from the CNCF Landscape, which West and O'Mahony have relied upon throughout Bose's cloud native journey. "The CNCF Landscape quickly explains what's going on in all the different areas from storage to cloud providers to automation and so forth," says West. "This is our shopping cart to build a cloud infrastructure. We can go choose from the different aisles."


And, he adds, "If it weren't for Kubernetes and the rest of the CNCF projects being free open source software with such a strong community, we would never have achieved scale, or even gotten to launch on schedule."


Another benefit of going cloud native: "We are even attracting much more talent into Bose because we're so involved with the CNCF Landscape," says West. (Yes, they're hiring.) "It's just enabled so many people to do so many great things and really brought Bose into the future of cloud."

{{< case-studies/quote author="Dylan O'Mahony, Cloud Architecture Manager, Bose" >}}
"We have a lot going on to support many more of our business units at Bose in addition to the consumer electronics division, which we currently do. It's only because of the cloud native landscape and the tools and the features that are available that we can provide such a fantastic cloud platform for all the developers and divisions that are trying to enable some pretty amazing experiences."
{{< /case-studies/quote >}}

In the coming year, the team wants to work on service mesh and serverless, as well as expansion around the world. "Getting our latency down by going multi-region is going to be a big focus for us," says O'Mahony. "In order to make sure that our customers in Japan, Australia, and everywhere else are having a good experience, we want to have points of presence closer to them. It's never been done at Bose before."


That won't stop them, because the team is all about lofty goals. "We want to get to billions of connected products!" says West. "We have a lot going on to support many more of our business units at Bose in addition to the consumer electronics division, which we currently do. It's only because of the cloud native landscape and the tools and the features that are available that we can provide such a fantastic cloud platform for all the developers and divisions that are trying to enable some pretty amazing experiences."


In fact, given the scale the platform is already supporting, says O'Mahony, "doing anything other than Kubernetes, I think, would be folly at this point."

\ No newline at end of file
diff --git a/content/bn/case-studies/box/box_featured.png b/content/bn/case-studies/box/box_featured.png
new file mode 100644
index 0000000000000..fc6dec602af17
Binary files /dev/null and b/content/bn/case-studies/box/box_featured.png differ
diff --git a/content/bn/case-studies/box/box_featured.svg b/content/bn/case-studies/box/box_featured.svg
new file mode 100644
index 0000000000000..2b4fb6552b610
--- /dev/null
+++ b/content/bn/case-studies/box/box_featured.svg
@@ -0,0 +1 @@
+kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/box/box_logo.png b/content/bn/case-studies/box/box_logo.png
new file mode 100644
index 0000000000000..b401dec6248c6
Binary files /dev/null and b/content/bn/case-studies/box/box_logo.png differ
diff --git a/content/bn/case-studies/box/box_small.png b/content/bn/case-studies/box/box_small.png
new file mode 100644
index 0000000000000..105b66a5832bb
Binary files /dev/null and b/content/bn/case-studies/box/box_small.png differ
diff --git a/content/bn/case-studies/box/box_small.svg b/content/bn/case-studies/box/box_small.svg
new file mode 100644
index 0000000000000..7030785935fc1
--- /dev/null
+++ b/content/bn/case-studies/box/box_small.svg
@@ -0,0 +1 @@
+box
\ No newline at end of file
diff --git a/content/bn/case-studies/box/index.html b/content/bn/case-studies/box/index.html
new file mode 100644
index 0000000000000..a1e2beefc3f62
--- /dev/null
+++ b/content/bn/case-studies/box/index.html
@@ -0,0 +1,100 @@
+---
+title: Box Case Study
+case_study_styles: true
+cid: caseStudies
+video: https://www.youtube.com/embed/of45hYbkIZs?autoplay=1
+quote: >
+  Kubernetes has the opportunity to be the new cloud platform. The amount of innovation that's going to come from being able to standardize on Kubernetes as a platform is incredibly exciting - more exciting than anything I've seen in the last 10 years of working on the cloud.
+
+new_case_study_styles: true
+heading_background: /images/case-studies/box/banner1.jpg
+heading_title_logo: /images/box_logo.png
+subheading: >
+  An Early Adopter Envisions a New Cloud Platform
+case_study_details:
+  - Company: Box
+  - Location: Redwood City, California
+  - Industry: Technology
+---

Challenge


Founded in 2005, the enterprise content management company allows its more than 50 million users to manage content in the cloud. Box was built primarily with bare metal inside the company's own data centers, with a monolithic PHP code base. As the company was expanding globally, it needed to focus on "how we run our workload across many different cloud infrastructures from bare metal to public cloud," says Sam Ghods, Cofounder and Services Architect of Box. "It's been a huge challenge because different clouds, especially bare metal, have very different interfaces."


Solution


Over the past couple of years, Box has been decomposing its infrastructure into microservices, and became an early adopter of, as well as contributor to, Kubernetes container orchestration. Kubernetes, Ghods says, has allowed Box's developers to "target a universal set of concepts that are portable across all clouds."


Impact


"Before Kubernetes," Ghods says, "our infrastructure was so antiquated it was taking us more than six months to deploy a new microservice. Today, a new microservice takes less than five days to deploy. And we're working on getting it to an hour."

{{< case-studies/quote author="SAM GHODS, CO-FOUNDER AND SERVICES ARCHITECT OF BOX" >}}
"We looked at a lot of different options, but Kubernetes really stood out....the fact that on day one it was designed to run on bare metal just as well as Google Cloud meant that we could actually migrate to it inside of our data centers, and then use those same tools and concepts to run across public cloud providers as well."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
In the summer of 2014, Box was feeling the pain of a decade's worth of hardware and software infrastructure that wasn't keeping up with the company's needs.
{{< /case-studies/lead >}}

A platform that allows its more than 50 million users (including governments and big businesses like General Electric) to manage and share content in the cloud, Box was originally a PHP monolith of millions of lines of code built exclusively with bare metal inside of its own data centers. It had already begun to slowly chip away at the monolith, decomposing it into microservices. And "as we've been expanding into regions around the globe, and as the public cloud wars have been heating up, we've been focusing a lot more on figuring out how we run our workload across many different environments and many different cloud infrastructure providers," says Box Cofounder and Services Architect Sam Ghods. "It's been a huge challenge thus far because all these different providers, especially bare metal, have very different interfaces and ways in which you work with them."


Box's cloud native journey accelerated that June, when Ghods attended DockerCon. The company had come to the realization that it could no longer run its applications only off bare metal, and was researching containerizing with Docker, virtualizing with OpenStack, and supporting public cloud.


At that conference, Google announced the release of its Kubernetes container management system, and Ghods was won over. "We looked at a lot of different options, but Kubernetes really stood out, especially because of the incredibly strong team of Borg veterans and the vision of having a completely infrastructure-agnostic way of being able to run cloud software," he says, referencing Google's internal container orchestrator Borg. "The fact that on day one it was designed to run on bare metal just as well as Google Cloud meant that we could actually migrate to it inside of our data centers, and then use those same tools and concepts to run across public cloud providers as well."


Another plus: Ghods liked that Kubernetes has a universal set of API objects like pod, service, replica set and deployment object, which created a consistent surface to build tooling against. "Even PaaS layers like OpenShift or Deis that build on top of Kubernetes still treat those objects as first-class principles," he says. "We were excited about having these abstractions shared across the entire ecosystem, which would result in a lot more momentum than we saw in other potential solutions."


Box deployed Kubernetes in a cluster in a production data center just six months later. Kubernetes was then still pre-beta, on version 0.11. They started small: The very first thing Ghods's team ran on Kubernetes was a Box API checker that confirms Box is up. "That was just to write and deploy some software to get the whole pipeline functioning," he says. Next came some daemons that process jobs, which was "nice and safe because if they experienced any interruptions, we wouldn't fail synchronous incoming requests from customers."
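An uptime checker like that first workload is a natural fit for a Kubernetes CronJob today. CronJobs didn't exist in v0.11, so this is a modern, hypothetical sketch (the endpoint is invented):

```bash
kubectl apply -f - <<'EOF'
apiVersion: batch/v1
kind: CronJob
metadata:
  name: api-checker
spec:
  schedule: "*/1 * * * *"   # probe once a minute
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: Never
          containers:
          - name: check
            image: curlimages/curl:8.7.1
            # -f makes curl exit non-zero on HTTP errors, failing the job
            args: ["-fsS", "https://api.example.com/health"]
EOF
```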

{{< case-studies/quote image="/images/case-studies/box/banner3.jpg">}}
"As we've been expanding into regions around the globe, and as the public cloud wars have been heating up, we've been focusing a lot more on figuring out how we [can have Kubernetes help] run our workload across many different environments and many different cloud infrastructure providers."
{{< /case-studies/quote >}}

The first live service, which the team could route to and ask for information, was launched a few months later. At that point, Ghods says, "We were comfortable with the stability of the Kubernetes cluster. We started to port some services over, then we would increase the cluster size and port a few more, and that's ended up at about 100 servers in each data center that are dedicated purely to Kubernetes. And that's going to be expanding a lot over the next 12 months, probably to many hundreds if not thousands."


While observing teams who began to use Kubernetes for their microservices, "we immediately saw an uptick in the number of microservices being released," Ghods notes. "There was clearly a pent-up demand for a better way of building software through microservices, and the increase in agility helped our developers be more productive and make better architectural choices."

{{< case-studies/lead >}}
"There was clearly a pent-up demand for a better way of building software through microservices, and the increase in agility helped our developers be more productive and make better architectural choices."
{{< /case-studies/lead >}}

Ghods reflects that as early adopters, Box had a different journey from what companies experience now. "We were definitely lock step with waiting for certain things to stabilize or features to get released," he says. "In the early days we were doing a lot of contributions [to components such as kubectl apply] and waiting for Kubernetes to release each of them, and then we'd upgrade, contribute more, and go back and forth several times. The entire project took about 18 months from our first real deployment on Kubernetes to having general availability. If we did that exact same thing today, it would probably be no more than six."


In any case, Box didn't have to make too many modifications to Kubernetes for it to work for the company. "The vast majority of the work our team has done to implement Kubernetes at Box has been making it work inside of our existing (and often legacy) infrastructure," says Ghods, "such as upgrading our base operating system from RHEL6 to RHEL7 or integrating it into Nagios, our monitoring infrastructure. But overall Kubernetes has been remarkably flexible with fitting into many of our constraints, and we've been running it very successfully on our bare metal infrastructure."


Perhaps the bigger challenge for Box was a cultural one. "Kubernetes, and cloud native in general, represents a pretty big paradigm shift, and it's not very incremental," Ghods says. "We're essentially making this pitch that Kubernetes is going to solve everything because it does things the right way and everything is just suddenly better. But it's important to keep in mind that it's not nearly as proven as many other solutions out there. You can't say how long this or that company took to do it because there just aren't that many yet. Our team had to really fight for resources because our project was a bit of a moonshot."

{{< case-studies/quote image="/images/case-studies/box/banner4.jpg">}}
"The vast majority of the work our team has done to implement Kubernetes at Box has been making it work inside of our existing [and often legacy] infrastructure....overall Kubernetes has been remarkably flexible with fitting into many of our constraints, and we've been running it very successfully on our bare metal infrastructure."
{{< /case-studies/quote >}}

Having learned from experience, Ghods offers these two pieces of advice for companies going through similar challenges:

{{< case-studies/lead >}}
1. Deliver early and often.
{{< /case-studies/lead >}}

Service discovery was a huge problem for Box, and the team had to decide whether to build an interim solution or wait for Kubernetes to natively satisfy Box's unique requirements. After much debate, "we just started focusing on delivering something that works, and then dealing with potentially migrating to a more native solution later," Ghods says. "The above-all-else target for the team should always be to serve real production use cases on the infrastructure, no matter how trivial. This helps keep the momentum going both for the team itself and for the organizational perception of the project."

{{< case-studies/lead >}}
2. Keep an open mind about what your company has to abstract away from developers and what it doesn't.
{{< /case-studies/lead >}}

Early on, the team built an abstraction on top of Docker files to help ensure that images had the right security updates. This turned out to be superfluous work, since container images are considered immutable and you can easily scan them post-build to ensure they do not contain vulnerabilities. Because managing infrastructure through containerization is such a discontinuous leap, it's better to start by interacting directly with the native tools and learning their unique advantages and caveats. An abstraction should be built only after a practical need for it arises.


In the end, the impact has been powerful. "Before Kubernetes," Ghods says, "our infrastructure was so antiquated it was taking us more than six months to deploy a new microservice. Now a new microservice takes less than five days to deploy. And we're working on getting it to an hour. Granted, much of that six months was due to how broken our systems were, but bare metal is intrinsically a difficult platform to support unless you have a system like Kubernetes to help manage it."


By Ghods's estimate, Box is still several years away from his goal of being a 90-plus percent Kubernetes shop. "We're very far along on having a mission-critical, stable Kubernetes deployment that provides a lot of value," he says. "Right now about five percent of all of our compute runs on Kubernetes, and I think in the next six months we'll likely be between 20 and 50 percent. We're working hard on enabling all stateless service use cases, and will shift our focus to stateful services after that."

{{< case-studies/quote >}}
"Ghods predicts that Kubernetes has the opportunity to be the new cloud platform. '...because it's a never-before-seen level of automation and intelligence surrounding infrastructure that is portable and agnostic to every way you can run your infrastructure.'"
{{< /case-studies/quote >}}

In fact, that's what he envisions across the industry: Ghods predicts that Kubernetes has the opportunity to be the new cloud platform. Kubernetes provides an API consistent across different cloud platforms including bare metal, and "I don't think people have seen the full potential of what's possible when you can program against one single interface," he says. "The same way AWS changed infrastructure so that you don't have to think about servers or cabinets or networking equipment anymore, Kubernetes enables you to focus exclusively on the containers that you're running, which is pretty exciting. That's the vision."


Ghods points to projects that are already in development or recently released for Kubernetes as a cloud platform: cluster federation, the Dashboard UI, and CoreOS's etcd operator. "I honestly believe it's the most exciting thing I've seen in cloud infrastructure," he says, "because it's a never-before-seen level of automation and intelligence surrounding infrastructure that is portable and agnostic to every way you can run your infrastructure."


Box, with its early decision to use bare metal, embarked on its Kubernetes journey out of necessity. But Ghods says that even if companies don't have to be agnostic about cloud providers today, Kubernetes may soon become the industry standard, as more and more tooling and extensions are built around the API.


"The same way it doesn't make sense to deviate from Linux because it's such a standard," Ghods says, "I think Kubernetes is going down the same path. It is still early days—the documentation still needs work and the user experience for writing and publishing specs to the Kubernetes clusters is still rough. When you're on the cutting edge you can expect to bleed a little. But the bottom line is, this is where the industry is going. Three to five years from now it's really going to be shocking if you run your infrastructure any other way."

\ No newline at end of file
diff --git a/content/bn/case-studies/box/video.png b/content/bn/case-studies/box/video.png
new file mode 100644
index 0000000000000..4c61e7440fc48
Binary files /dev/null and b/content/bn/case-studies/box/video.png differ
diff --git a/content/bn/case-studies/buffer/buffer_featured.png b/content/bn/case-studies/buffer/buffer_featured.png
new file mode 100644
index 0000000000000..cd6aaba4ca6a0
Binary files /dev/null and b/content/bn/case-studies/buffer/buffer_featured.png differ
diff --git a/content/bn/case-studies/buffer/buffer_featured.svg b/content/bn/case-studies/buffer/buffer_featured.svg
new file mode 100644
index 0000000000000..b8e321f31d633
--- /dev/null
+++ b/content/bn/case-studies/buffer/buffer_featured.svg
@@ -0,0 +1 @@
+kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/buffer/buffer_logo.png b/content/bn/case-studies/buffer/buffer_logo.png
new file mode 100644
index 0000000000000..1b4b3b7e525d0
Binary files /dev/null and b/content/bn/case-studies/buffer/buffer_logo.png differ
diff --git a/content/bn/case-studies/buffer/index.html b/content/bn/case-studies/buffer/index.html
new file mode 100644
index 0000000000000..8cc55fca92b3d
--- /dev/null
+++ b/content/bn/case-studies/buffer/index.html
@@ -0,0 +1,83 @@
+---
+title: Buffer Case Study
+case_study_styles: true
+cid: caseStudies
+
+new_case_study_styles: true
+heading_background: /images/case-studies/buffer/banner3.jpg
+heading_title_logo: /images/buffer.png
+subheading: >
+  Making Deployments Easy for a Small, Distributed Team
+case_study_details:
+  - Company: Buffer
+  - Location: Around the World
+  - Industry: Social Media Technology
+---

Challenge


With a small but fully distributed team of 80 working across almost a dozen time zones, Buffer—which offers social media management to agencies and marketers—was looking to solve its "classic monolithic code base problem," says Architect Dan Farrelly. "We wanted to have the kind of liquid infrastructure where a developer could create an app and deploy it and scale it horizontally as necessary."


Solution


Embracing containerization, Buffer moved its infrastructure from Amazon Web Services' Elastic Beanstalk to Docker on AWS, orchestrated with Kubernetes.


Impact


The new system "leveled up our ability with deployment and rolling out new changes," says Farrelly. "Building something on your computer and knowing that it's going to work has shortened things up a lot. Our feedback cycles are a lot faster now too."

{{< case-studies/quote author="DAN FARRELLY, BUFFER ARCHITECT" >}}
"It's amazing that we can use the Kubernetes solution off the shelf with our team. And it just keeps getting better. Before we even know that we need something, it's there in the next release or it's coming in the next few months."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
Dan Farrelly uses a carpentry analogy to explain the problem his company, Buffer, began having as its team of developers grew over the past few years.
{{< /case-studies/lead >}}

"If you're building a table by yourself, it's fine," the company's architect says. "If you bring in a second person to work on the table, maybe that person can start sanding the legs while you're sanding the top. But when you bring a third or fourth person in, someone should probably work on a different table." Needing to work on more and more different tables led Buffer on a path toward microservices and containerization made possible by Kubernetes.


Since around 2012, Buffer had already been using Elastic Beanstalk, the orchestration service for deploying infrastructure offered by Amazon Web Services. "We were deploying a single monolithic PHP application, and it was the same application across five or six environments," says Farrelly. "We were very much a product-driven company. It was all about shipping new features quickly and getting things out the door, and if something was not broken, we didn't spend too much time on it. If things were getting a little bit slow, we'd maybe use a faster server or just scale up one instance, and it would be good enough. We'd move on."


But things came to a head in 2016. With the growing number of committers on staff, Farrelly and Buffer's then-CTO, Sunil Sadasivan, decided it was time to re-architect and rethink their infrastructure. "It was a classic monolithic code base problem," says Farrelly.

Some of the company's team was already successfully using Docker in their development environment, but the only application running on Docker in production was a marketing website that didn't see real user traffic. They wanted to go further with Docker, and the next step was looking at options for orchestration.

{{< case-studies/quote image="/images/case-studies/buffer/banner1.jpg" >}}
And all the things Kubernetes did well suited Buffer's needs. "We wanted to have the kind of liquid infrastructure where a developer could create an app and deploy it and scale it horizontally as necessary," says Farrelly. "We quickly used some scripts to set up a couple of test clusters, we built some small proof-of-concept applications in containers, and we deployed things within an hour. We had very little experience in running containers in production. It was amazing how quickly we could get a handle on it [Kubernetes]."
{{< /case-studies/quote >}}

First they considered Mesosphere, DC/OS and Amazon Elastic Container Service (which their data systems team was already using for some data pipeline jobs). While they were impressed by these offerings, they ultimately went with Kubernetes. "We run on AWS still, so spinning up, creating services and creating load balancers on demand for us without having to configure them manually was a great way for our team to get into this," says Farrelly. "We didn't need to figure out how to configure this or that, especially coming from a former Elastic Beanstalk environment that gave us an automatically-configured load balancer. I really liked Kubernetes' controls of the command line. It just took care of ports. It was a lot more flexible. Kubernetes was designed for doing what it does, so it does it very well."

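The "load balancers on demand" behavior Farrelly liked comes from the Service API: on AWS, a Service of type LoadBalancer provisions an ELB automatically. A sketch with a hypothetical deployment name:

```bash
# Expose an existing deployment; on AWS, Kubernetes provisions the ELB itself
kubectl expose deployment/buffer-api --port=80 --target-port=8080 \
  --type=LoadBalancer
# The EXTERNAL-IP column fills in with the ELB hostname once it's ready
kubectl get service buffer-api
```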

And all the things Kubernetes did well suited Buffer's needs. "We wanted to have the kind of liquid infrastructure where a developer could create an app and deploy it and scale it horizontally as necessary," says Farrelly. "We quickly used some scripts to set up a couple of test clusters, we built some small proof-of-concept applications in containers, and we deployed things within an hour. We had very little experience in running containers in production. It was amazing how quickly we could get a handle on it [Kubernetes]."


Above all, it provided a powerful solution for one of the company's most distinguishing characteristics: their remote team that's spread across a dozen different time zones. "The people with deep knowledge of our infrastructure live in time zones different from our peak traffic time zones, and most of our product engineers live in other places," says Farrelly. "So we really wanted something where anybody could get a grasp of the system early on and utilize it, and not have to worry that the deploy engineer is asleep. Otherwise people would sit around for 12 to 24 hours for something. It's been really cool to see people moving much faster."


With a relatively small engineering team—just 25 people, and only a handful working on infrastructure, with the majority front-end developers—Buffer needed "something robust for them to deploy whatever they wanted," says Farrelly. Before, "it was only a couple of people who knew how to set up everything in the old way. With this system, it was easy to review documentation and get something out extremely quickly. It lowers the bar for us to get everything in production. We don't have the big team to build all these tools or manage the infrastructure like other larger companies might."

{{< case-studies/quote image="/images/case-studies/buffer/banner4.jpg" >}}
"In our old way of working, the feedback loop was a lot longer, and it was delicate because if you deployed something, the risk was high to potentially break something else," Farrelly says. "With the kind of deploys that we built around Kubernetes, we were able to detect bugs and fix them, and get them deployed super fast. The second someone is fixing [a bug], it's out the door."
{{< /case-studies/quote >}}

To help with this, Buffer developers wrote a deploy bot that wraps the Kubernetes deploy process and can be used by every team. "Before, our data analysts would update, say, a Python analysis script and have to wait for the lead on that team to click the button and deploy it," Farrelly explains. "Now our data analysts can make a change, enter a Slack command, '/deploy,' and it goes out instantly. They don't need to wait on these slow turnaround times. They don't even know where it's running; it doesn't matter."

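The case study doesn't show the bot's internals, but the core of a wrapper like that can be a few lines around a rolling image update. A hypothetical sketch of what a "/deploy" handler might run:

```bash
#!/usr/bin/env bash
# Hypothetical /deploy handler: roll a deployment to the tag passed from Slack
set -euo pipefail
SERVICE="$1"   # e.g. "analysis-scripts"
TAG="$2"       # e.g. "v1.4.2"

kubectl set image "deployment/${SERVICE}" \
  "${SERVICE}=registry.example.com/${SERVICE}:${TAG}"
# Block until the rollout finishes so the bot can report success or failure
kubectl rollout status "deployment/${SERVICE}" --timeout=120s
```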

One of the first applications the team built from scratch using Kubernetes was a new image resizing service. As a social media management tool that allows marketing teams to collaborate on posts and send updates across multiple social media profiles and networks, Buffer has to be able to resize photographs as needed to meet the varying limitations of size and format posed by different social networks. "We always had these hacked together solutions," says Farrelly.


To create this new service, one of the senior product engineers was assigned to learn Docker and Kubernetes, then build the service, test it, deploy it and monitor it—which he was able to do relatively quickly. "In our old way of working, the feedback loop was a lot longer, and it was delicate because if you deployed something, the risk was high to potentially break something else," Farrelly says. "With the kind of deploys that we built around Kubernetes, we were able to detect bugs and fix them, and get them deployed super fast. The second someone is fixing [a bug], it's out the door."


Plus, unlike with their old system, they could scale things horizontally with one command. "As we rolled it out," Farrelly says, "we could anticipate and just click a button. This allowed us to deal with the demand that our users were placing on the system and easily scale it to handle it."


Another thing they weren't able to do before was a canary deploy. This new capability "made us so much more confident in deploying big changes," says Farrelly. "Before, it took a lot of testing, which is still good, but it was also a lot of 'fingers crossed.' And this is something that gets run 800,000 times a day, the core of our business. If it doesn't work, our business doesn't work. In a Kubernetes world, I can do a canary deploy to test it for 1 percent and I can shut it down very quickly if it isn't working. This has leveled up our ability to deploy and roll out new changes quickly while reducing risk."
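A 1 percent canary of the kind Farrelly describes can be approximated with two deployments behind a single Service, sized 99:1. A sketch with hypothetical names:

```bash
# Both deployments share the label the Service selects on (e.g. app=resizer),
# so traffic splits roughly in proportion to replica counts.
kubectl scale deployment/resizer-stable --replicas=99
kubectl scale deployment/resizer-canary --replicas=1   # ~1% of requests
# If the canary misbehaves, removing it is one command:
kubectl scale deployment/resizer-canary --replicas=0
```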

{{< case-studies/quote >}}
"If you want to run containers in production, with nearly the power that Google uses internally, this [Kubernetes] is a great way to do that," Farrelly says. "We're a relatively small team that's actually running Kubernetes, and we've never run anything like it before. So it's more approachable than you might think. That's the one big thing that I tell people who are experimenting with it. Pick a couple of things, roll it out, kick the tires on this for a couple of months and see how much it can handle. You start learning a lot this way."
{{< /case-studies/quote >}}

By October 2016, 54 percent of Buffer's traffic was going through their Kubernetes cluster. "There's a lot of our legacy functionality that still runs alright, and those parts might move to Kubernetes or stay in our old setup forever," says Farrelly. But the company made the commitment at that time that going forward, "all new development, all new features, will be running on Kubernetes."


The plan for 2017 is to move all the legacy applications to a new Kubernetes cluster, and run everything they've pulled out of their old infrastructure, plus the new services they're developing in Kubernetes, on another cluster. "I want to bring all the benefits that we've seen on our early services to everyone on the team," says Farrelly.

{{< case-studies/lead >}}
For Buffer's engineers, it's an exciting process. "Every time we're deploying a new service, we need to figure out: OK, what's the architecture? How do these services communicate? What's the best way to build this service?" Farrelly says. "And then we use the different features that Kubernetes has to glue all the pieces together. It's enabling us to experiment as we're learning how to design a service-oriented architecture. Before, we just wouldn't have been able to do it. This is actually giving us a blank white board so we can do whatever we want on it."
{{< /case-studies/lead >}}

Part of that blank slate is the flexibility that Kubernetes offers should the time come when Buffer may want or need to change its cloud. "It's cloud agnostic so maybe one day we could switch to Google or somewhere else," Farrelly says. "We're very deep in Amazon but it's nice to know we could move away if we need to."


At this point, the team at Buffer can't imagine running their infrastructure any other way—and they're happy to spread the word. "If you want to run containers in production, with nearly the power that Google uses internally, this [Kubernetes] is a great way to do that," Farrelly says. "We're a relatively small team that's actually running Kubernetes, and we've never run anything like it before. So it's more approachable than you might think. That's the one big thing that I tell people who are experimenting with it. Pick a couple of things, roll it out, kick the tires on this for a couple of months and see how much it can handle. You start learning a lot this way."

\ No newline at end of file
diff --git a/content/bn/case-studies/capital-one/capitalone_featured_logo.png b/content/bn/case-studies/capital-one/capitalone_featured_logo.png
new file mode 100644
index 0000000000000..f57c7697e36fd
Binary files /dev/null and b/content/bn/case-studies/capital-one/capitalone_featured_logo.png differ
diff --git a/content/bn/case-studies/capital-one/capitalone_featured_logo.svg b/content/bn/case-studies/capital-one/capitalone_featured_logo.svg
new file mode 100644
index 0000000000000..124adae9af21e
--- /dev/null
+++ b/content/bn/case-studies/capital-one/capitalone_featured_logo.svg
@@ -0,0 +1 @@
+kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/capital-one/index.html b/content/bn/case-studies/capital-one/index.html
new file mode 100644
index 0000000000000..145b1e0b04b66
--- /dev/null
+++ b/content/bn/case-studies/capital-one/index.html
@@ -0,0 +1,62 @@
+---
+title: Capital One Case Study
+case_study_styles: true
+cid: caseStudies
+
+new_case_study_styles: true
+heading_background: /images/case-studies/capitalone/banner1.jpg
+heading_title_logo: /images/capitalone-logo.png
+subheading: >
+  Supporting Fast Decisioning Applications with Kubernetes
+case_study_details:
+  - Company: Capital One
+  - Location: McLean, Virginia
+  - Industry: Retail banking
+---

Challenge


The team set out to build a provisioning platform for Capital One applications deployed on AWS that use streaming, big-data decisioning, and machine learning. One of these applications handles millions of transactions a day; some deal with critical functions like fraud detection and credit decisioning. The key considerations: resilience and speed—as well as full rehydration of the cluster from base AMIs.


Solution


The decision to run Kubernetes "is very strategic for us," says John Swift, Senior Director Software Engineering. "We use Kubernetes as a substrate or an operating system, if you will. There's a degree of affinity in our product development."


Impact


"Kubernetes is a significant productivity multiplier," says Lead Software Engineer Keith Gasser, adding that to run the platform without Kubernetes would "easily see our costs triple, quadruple what they are now for the amount of pure AWS expense." Time to market has been improved as well: "Now, a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer." Deployments increased by several orders of magnitude. Plus, the rehydration/cluster-rebuild process, which took a significant part of a day to do manually, now takes a couple hours with Kubernetes automation and declarative configuration.

{{< case-studies/quote author="Jamil Jadallah, Scrum Master" >}}
"With the scalability, the management, the coordination, Kubernetes really empowers us and gives us more time back than we had before."
{{< /case-studies/quote >}}

As a top 10 U.S. retail bank, Capital One has applications that handle millions of transactions a day. Big-data decisioning—for fraud detection, credit approvals and beyond—is core to the business. To support the teams that build applications with those functions for the bank, the cloud team led by Senior Director Software Engineering John Swift embraced Kubernetes for its provisioning platform. "Kubernetes and its entire ecosystem are very strategic for us," says Swift. "We use Kubernetes as a substrate or an operating system, if you will. There's a degree of affinity in our product development."


Almost two years ago, the team embarked on this journey by first working with Docker. Then came Kubernetes. "We wanted to put streaming services into Kubernetes as one feature of the workloads for fast decisioning, and to be able to do batch alongside it," says Lead Software Engineer Keith Gasser. "Once the data is streamed and batched, there are so many tool sets in Flink that we use for decisioning. We want to provide the tools in the same ecosystem, in a consistent way, rather than have a large custom snowflake ecosystem where every tool needs its own custom deployment. Kubernetes gives us the ability to bring all of these together, so the richness of the open source and even the license community dealing with big data can be corralled."

{{< case-studies/quote image="/images/case-studies/capitalone/banner3.jpg" >}}
"We want to provide the tools in the same ecosystem, in a consistent way, rather than have a large custom snowflake ecosystem where every tool needs its own custom deployment. Kubernetes gives us the ability to bring all of these together, so the richness of the open source and even the license community dealing with big data can be corralled."
{{< /case-studies/quote >}}

In this first year, the impact has already been great. "Time to market is really huge for us," says Gasser. "Especially with fraud, you have to be very nimble in the way you respond to threats in the marketplace—being able to add and push new rules, detect new patterns of behavior, detect anomalies in account and transaction flows." With Kubernetes, "a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer. Kubernetes is a manifold productivity multiplier."


Teams now have the tools to be autonomous in their deployments, and as a result, deployments have increased by two orders of magnitude. "And that was with just seven dedicated resources, without needing a whole group sitting there watching everything," says Scrum Master Jamil Jadallah. "That's a huge cost savings. With the scalability, the management, the coordination, Kubernetes really empowers us and gives us more time back than we had before."

{{< case-studies/quote image="/images/case-studies/capitalone/banner4.jpg" >}}
With Kubernetes, "a team can come to us and we can have them up and running with a basic decisioning app in a fortnight, which before would have taken a whole quarter, if not longer. Kubernetes is a manifold productivity multiplier."
{{< /case-studies/quote >}}

Kubernetes has also been a great time-saver for Capital One's required periodic "rehydration" of clusters from base AMIs. To minimize the attack vulnerability profile for applications in the cloud, "Our entire clusters get rebuilt from scratch periodically, with new fresh instances and virtual server images that are patched with the latest and greatest security patches," says Gasser. This process used to take the better part of a day, and personnel, to do manually. It's now a quick Kubernetes job.

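Because the cluster's state is declarative, rehydration reduces to swapping the nodes out from under the workloads. A sketch of the node-replacement half, assuming fresh, patched instances have already joined and old nodes carry a hypothetical lifecycle=old label:

```bash
# Evict workloads from each old node; pods reschedule onto the fresh nodes
for node in $(kubectl get nodes -l lifecycle=old \
    -o jsonpath='{.items[*].metadata.name}'); do
  kubectl drain "$node" --ignore-daemonsets --delete-emptydir-data
done
# The drained instances can then be terminated, leaving only rehydrated nodes
```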

Savings extend to both capital and operating expenses. "It takes very little to get into Kubernetes because it's all open source," Gasser points out. "We went the DIY route for building our cluster, and we definitely like the flexibility of being able to embrace the latest from the community immediately without waiting for a downstream company to do it. There's capex related to those licenses that we don't have to pay for. Moreover, there's capex savings for us from some of the proprietary software that we get to sunset in our particular domain. So that goes onto our ledger in a positive way as well." (Some of those open source technologies include Prometheus, Fluentd, gRPC, Istio, CNI, and Envoy.)

{{< case-studies/quote >}}
"If we had to do all of this without Kubernetes, on underlying cloud services, I could easily see our costs triple, quadruple what they are now for the amount of pure AWS expense. That doesn't account for personnel to deploy and maintain all the additional infrastructure."
{{< /case-studies/quote >}}

And on the opex side, Gasser says, the savings are high. "We run dozens of services, we have scores of pods, many daemon sets, and since we're data-driven, we take advantage of EBS-backed volume claims for all of our stateful services. If we had to do all of this without Kubernetes, on underlying cloud services, I could easily see our costs triple, quadruple what they are now for the amount of pure AWS expense. That doesn't account for personnel to deploy and maintain all the additional infrastructure."


The team is confident that the benefits will continue to multiply—without a steep learning curve for the engineers being exposed to the new technology. "As we onboard additional tenants in this ecosystem, I think the need for folks to understand Kubernetes may not necessarily go up. In fact, I think it goes down, and that's good," says Gasser. "Because that really demonstrates the scalability of the technology. You start to reap the benefits, and they can concentrate on all the features they need to build for great decisioning in the business—fraud decisions, credit decisions—and not have to worry about, 'Is my AWS server broken? Is my pod not running?'"

\ No newline at end of file
diff --git a/content/bn/case-studies/cern/cern_featured_logo.png b/content/bn/case-studies/cern/cern_featured_logo.png
new file mode 100644
index 0000000000000..b873b828b1d41
Binary files /dev/null and b/content/bn/case-studies/cern/cern_featured_logo.png differ
diff --git a/content/bn/case-studies/cern/cern_logo.svg b/content/bn/case-studies/cern/cern_logo.svg
new file mode 100644
index 0000000000000..c242aafc136b0
--- /dev/null
+++ b/content/bn/case-studies/cern/cern_logo.svg
@@ -0,0 +1 @@
+cern
\ No newline at end of file
diff --git a/content/bn/case-studies/cern/index.html b/content/bn/case-studies/cern/index.html
new file mode 100644
index 0000000000000..2fdd4a27a9c3a
--- /dev/null
+++ b/content/bn/case-studies/cern/index.html
@@ -0,0 +1,81 @@
+---
+title: CERN Case Study
+linkTitle: cern
+case_study_styles: true
+cid: caseStudies
+logo: cern_featured_logo.png
+
+new_case_study_styles: true
+heading_background: /images/case-studies/cern/banner1.jpg
+heading_title_text: CERN
+subheading: >
+  CERN: Processing Petabytes of Data More Efficiently with Kubernetes
+case_study_details:
+  - Company: CERN
+  - Location: Geneva, Switzerland
+  - Industry: Particle physics research
+---

Challenge


At CERN, the European Organization for Nuclear Research, physicists conduct experiments to learn about fundamental science. In its particle accelerators, "we accelerate protons to very high energy, close to the speed of light, and we make the two beams of protons collide," says CERN Software Engineer Ricardo Rocha. "The end result is a lot of data that we have to process." CERN currently stores 330 petabytes of data in its data centers, and an upgrade of its accelerators expected in the next few years will drive that number up by 10x. Additionally, the organization experiences extreme peaks in its workloads during periods prior to big conferences, and needs its infrastructure to scale to those peaks. "We want to have a more hybrid infrastructure, where we have our on premise infrastructure but can make use of public clouds temporarily when these peaks come up," says Rocha. "We've been looking to new technologies that can help improve our efficiency in our infrastructure so that we can dedicate more of our resources to the actual processing of the data."


Solution


CERN's technology team embraced containerization and cloud native practices, choosing Kubernetes for orchestration, Helm for deployment, Prometheus for monitoring, and CoreDNS for DNS resolution inside the clusters. Kubernetes federation has allowed the organization to run some production workloads both on premise and in public clouds.


Impact


"Kubernetes gives us the full automation of the application," says Rocha. "It comes with built-in monitoring and logging for all the applications and the workloads that deploy in Kubernetes. This is a massive simplification of our current deployments." The time to deploy a new cluster for a complex distributed storage system has gone from more than 3 hours to less than 15 minutes. Adding new nodes to a cluster used to take more than an hour; now it takes less than 2 minutes. The time it takes to autoscale replicas for system components has decreased from more than an hour to less than 2 minutes. Initially, virtualization gave 20% overhead, but with tuning this was reduced to ~5%. Moving to Kubernetes on bare metal would get this to 0%. Not having to host virtual machines is expected to also get 10% of memory capacity back.

{{< case-studies/quote author="Ricardo Rocha, Software Engineer, CERN" >}}
"Kubernetes is something we can relate to very much because it's naturally distributed. What it gives us is a uniform API across heterogeneous resources to define our workloads. This is something we struggled with a lot in the past when we want to expand our resources outside our infrastructure."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
With a mission of researching fundamental science, and a stable of extremely large machines, the European Organization for Nuclear Research (CERN) operates at what can only be described as hyperscale.
{{< /case-studies/lead >}}

Experiments are conducted in particle accelerators, the biggest of which is 27 kilometers in circumference. "We accelerate protons to very high energy, to close to the speed of light, and we make the two beams of protons collide in well-defined places," says CERN Software Engineer Ricardo Rocha. "We build experiments around these places where we do the collisions. The end result is a lot of data that we have to process."


And he does mean a lot: CERN currently stores and processes 330 petabytes of data—gathered from 4,300 projects and 3,300 users—using 10,000 hypervisors and 320,000 cores in its data centers.


Over the years, the CERN technology department has built a large computing infrastructure, based on OpenStack private clouds, to help the organization's physicists analyze and treat all this data. The organization experiences extreme peaks in its workloads. "Very often, just before conferences, physicists want to do an enormous amount of extra analysis to publish their papers, and we have to scale to these peaks, which means overcommitting resources in some cases," says Rocha. "We want to have a more hybrid infrastructure, where we have our on premise infrastructure but can make use of public clouds temporarily when these peaks come up."


Additionally, a few years ago, CERN announced that it would be doing a big upgrade of its accelerators, which will mean a ten-fold increase in the amount of data that can be collected. "So we've been looking to new technologies that can help improve our efficiency in our infrastructure, so that we can dedicate more of our resources to the actual processing of the data," says Rocha.

{{< case-studies/quote
    image="/images/case-studies/cern/banner3.jpg"
    author="Ricardo Rocha, Software Engineer, CERN"
>}}
"Before, the tendency was always: 'I need this, I get a couple of developers, and I implement it.' Right now it's 'I need this, I'm sure other people also need this, so I'll go and ask around.' The CNCF is a good source because there's a very large catalog of applications available. It's very hard right now to justify developing a new product in-house. There is really no real reason to keep doing that. It's much easier for us to try it out, and if we see it's a good solution, we try to reach out to the community and start working with that community."
{{< /case-studies/quote >}}

Rocha's team started looking at Kubernetes and containerization in the second half of 2015. "We've been using distributed infrastructures for decades now," says Rocha. "Kubernetes is something we can relate to very much because it's naturally distributed. What it gives us is a uniform API across heterogeneous resources to define our workloads. This is something we struggled with a lot in the past when we want to expand our resources outside our infrastructure."

+ +

The team created a prototype system for users to deploy their own Kubernetes cluster in CERN's infrastructure, and spent six months validating the use cases and making sure that Kubernetes integrated with CERN's internal systems. The main use case is batch workloads, which represent more than 80% of resource usage at CERN. (A single project that does most of the physics data processing and analysis consumes 250,000 cores on its own.) "This is something where the investment in simplification of the deployment, logging, and monitoring pays off very quickly," says Rocha. Other use cases include Spark-based data analysis and machine learning to improve physics analysis. "The fact that most of these technologies integrate very well with Kubernetes makes our lives easier," he adds.

+ +

The system went into production in October 2016, also using Helm for deployment, Prometheus for monitoring, and CoreDNS for DNS resolution within the cluster. "One thing that Kubernetes gives us is the full automation of the application," says Rocha. "So it comes with built-in monitoring and logging for all the applications and the workloads that deploy in Kubernetes. This is a massive simplification of our current deployments." The time to deploy a new cluster for a complex distributed storage system has gone from more than 3 hours to less than 15 minutes.

+ +

Adding new nodes to a cluster used to take more than an hour; now it takes less than 2 minutes. The time it takes to autoscale replicas for system components has decreased from more than an hour to less than 2 minutes.

+ +{{< case-studies/quote + image="/images/case-studies/cern/banner4.jpg" + author="Ricardo Rocha, Software Engineer, CERN" +>}} +"With Kubernetes, there's a well-established technology and a big community that we can contribute to. It allows us to do our physics analysis without having to focus so much on the lower level software. This is just exciting. We are looking forward to keep contributing to the community and collaborating with everyone." +{{< /case-studies/quote >}} + +

Rocha points out that the metric used in the particle accelerators may be events per second, but in reality "it's how fast and how much of the data we can process that actually counts." And efficiency has certainly improved with Kubernetes. Initially, virtualization gave 20% overhead, but with tuning this was reduced to ~5%. Moving to Kubernetes on bare metal would get this to 0%. Not having to host virtual machines is also expected to reclaim 10% of memory capacity.

+ +

Kubernetes federation, which CERN has been using for a portion of its production workloads since February 2018, has allowed the organization to adopt a hybrid cloud strategy. And it was remarkably simple to do. "We had a summer intern working on federation," says Rocha. "For many years, I've been developing distributed computing software, which took like a decade and a lot of effort from a lot of people to stabilize and make sure it works. And for our intern, in a couple of days he was able to demo to me and my team that we had a cluster at CERN and a few clusters outside in public clouds that were federated together and that we could submit workloads to. This was shocking for us. It really shows the power of using this kind of well-established technologies."

+ +

With such results, adoption of Kubernetes has made rapid gains at CERN, and the team is eager to give back to the community. "If we look back into the '90s and early 2000s, there were not a lot of companies focusing on systems that have to scale to this kind of size, storing petabytes of data, analyzing petabytes of data," says Rocha. "The fact that Kubernetes is supported by such a wide community and different backgrounds, it motivates us to contribute back."

+ +{{< case-studies/quote author="Ricardo Rocha, Software Engineer, CERN" >}} +This means that the physicist can build his or her analysis and publish it in a repository, share it with colleagues, and in 10 years redo the same analysis with new data. If we looked back even 10 years, this was just a dream." +{{< /case-studies/quote >}} + +

These new technologies aren't just enabling infrastructure improvements. CERN also uses the Kubernetes-based Reana/Recast platform for reusable analysis, which is "the ability to define physics analysis as a set of workflows that are fully containerized in one single entry point," says Rocha. "This means that the physicist can build his or her analysis and publish it in a repository, share it with colleagues, and in 10 years redo the same analysis with new data. If we looked back even 10 years, this was just a dream."

+ +

All of these things have changed the culture at CERN considerably. A decade ago, "The tendency was always: 'I need this, I get a couple of developers, and I implement it,'" says Rocha. "Right now it's 'I need this, I'm sure other people also need this, so I'll go and ask around.' The CNCF is a good source because there's a very large catalog of applications available. It's very hard right now to justify developing a new product in-house. There is really no real reason to keep doing that. It's much easier for us to try it out, and if we see it's a good solution, we try to reach out to the community and start working with that community."

\ No newline at end of file diff --git a/content/bn/case-studies/chinaunicom/chinaunicom_featured_logo.png b/content/bn/case-studies/chinaunicom/chinaunicom_featured_logo.png new file mode 100644 index 0000000000000..f90ff1e509c85 Binary files /dev/null and b/content/bn/case-studies/chinaunicom/chinaunicom_featured_logo.png differ diff --git a/content/bn/case-studies/chinaunicom/chinaunicom_featured_logo.svg b/content/bn/case-studies/chinaunicom/chinaunicom_featured_logo.svg new file mode 100644 index 0000000000000..aae1978cf2129 --- /dev/null +++ b/content/bn/case-studies/chinaunicom/chinaunicom_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/chinaunicom/index.html b/content/bn/case-studies/chinaunicom/index.html new file mode 100644 index 0000000000000..ee6cfdcabd7a2 --- /dev/null +++ b/content/bn/case-studies/chinaunicom/index.html @@ -0,0 +1,77 @@ +--- +title: China Unicom Case Study +linkTitle: chinaunicom +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/chinaunicom/banner1.jpg +heading_title_logo: /images/chinaunicom_logo.png +subheading: > + China Unicom: How China Unicom Leveraged Kubernetes to Boost Efficiency and Lower IT Costs +case_study_details: + - Company: China Unicom + - Location: Beijing, China + - Industry: Telecom +--- + +

Challenge

+ +

China Unicom is one of the top three telecom operators in China, and to serve its 300 million users, the company runs several data centers with thousands of servers in each, using Docker containerization and VMware and OpenStack infrastructure since 2016. Unfortunately, "the resource utilization rate was relatively low," says Chengyu Zhang, Group Leader of Platform Technology R&D, "and we didn't have a cloud platform to accommodate our hundreds of applications." Formerly an entirely state-owned company, China Unicom has in recent years taken private investment from BAT (Baidu, Alibaba, Tencent) and JD.com, and is now focusing on internal development using open source technology, rather than commercial products. As such, Zhang's China Unicom Lab team began looking for open source orchestration for its cloud infrastructure.

+ +

Solution

+ +

Because of its rapid growth and mature open source community, Kubernetes was a natural choice for China Unicom. The company's Kubernetes-enabled cloud platform now hosts 50 microservices and all new development going forward. "Kubernetes has improved our experience using cloud infrastructure," says Zhang. "There is currently no alternative technology that can replace it." China Unicom also uses Istio for its microservice framework, along with Envoy, CoreDNS, and Fluentd.

+ +

Impact

+ +

At China Unicom, Kubernetes has improved both operational and development efficiency. Resource utilization has increased by 20-50%, lowering IT infrastructure costs, and deployment time has gone from a couple of hours to 5-10 minutes. "This is mainly because of the self-healing and scalability, so we can increase our efficiency in operation and maintenance," Zhang says. "For example, we currently have only five people maintaining our multiple systems. We could never imagine we can achieve this scalability in such a short time."

+ +{{< case-studies/quote author="Chengyu Zhang, Group Leader of Platform Technology R&D, China Unicom" >}} +"Kubernetes has improved our experience using cloud infrastructure. There is currently no alternative technology that can replace it." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +With more than 300 million users, China Unicom is one of the country's top three telecom operators. +{{< /case-studies/lead >}} + +

Behind the scenes, the company runs multiple data centers with thousands of servers in each, using Docker containerization and VMware and OpenStack infrastructure since 2016. Unfortunately, "the resource utilization rate was relatively low," says Chengyu Zhang, Group Leader of Platform Technology R&D, "and we didn't have a cloud platform to accommodate our hundreds of applications."

+ +

Zhang's team, which is responsible for new technology, R&D and platforms, set out to find an IT management solution. Formerly an entirely state-owned company, China Unicom has in recent years taken private investment from BAT (Baidu, Alibaba, Tencent) and JD.com, and is now focusing on homegrown development using open source technology, rather than commercial products. For that reason, the team began looking for open source orchestration for its cloud infrastructure.

+ +{{< case-studies/quote + image="/images/case-studies/chinaunicom/banner3.jpg" + author="Chengyu Zhang, Group Leader of Platform Technology R&D, China Unicom" +>}} +"We could never imagine we can achieve this scalability in such a short time." +{{< /case-studies/quote >}} + +

Though China Unicom was already using Mesos for a core telecom operator system, the team felt that Kubernetes was a natural choice for the new cloud platform. "The main reason was that it has a mature community," says Zhang. "It grows very rapidly, and so we can learn a lot from others' best practices." China Unicom also uses Istio for its microservice framework, along with Envoy, CoreDNS, and Fluentd.

+ +

The company's Kubernetes-enabled cloud platform now hosts 50 microservices and all new development going forward. China Unicom developers can easily leverage the technology through APIs, without doing the development work themselves. The cloud platform provides 20-30 services connected to the company's data center PaaS platform, and supports functions such as big data analysis for internal users in the branch offices across the 31 provinces in China.

+ +

"Kubernetes has improved our experience using cloud infrastructure," says Zhang. "There is currently no alternative technology that can replace it."

+ +{{< case-studies/quote + image="/images/case-studies/chinaunicom/banner4.jpg" + author="Jie Jia, Member of Platform Technology R&D, China Unicom" +>}} +"This technology is relatively complicated, but as long as developers get used to it, they can enjoy all the benefits." +{{< /case-studies/quote >}} + +

In fact, Kubernetes has boosted both operational and development efficiency at China Unicom. Resource utilization has increased by 20-50%, lowering IT infrastructure costs, and deployment time has gone from a couple of hours to 5-10 minutes. "This is mainly because of the self-healing and scalability of Kubernetes, so we can increase our efficiency in operation and maintenance," Zhang says. "For example, we currently have only five people maintaining our multiple systems."

+ +

With the wins China Unicom has experienced with Kubernetes, Zhang and his team are eager to give back to the community. That starts with participating in meetups and conferences, and offering advice to other companies that are considering a similar path. "Especially for those companies who have had traditional cloud computing system, I really recommend them to join the cloud native computing community," says Zhang.

+ +{{< case-studies/quote author="Jie Jia, Member of Platform Technology R&D, China Unicom" >}} +"Companies can use the managed services offered by companies like Rancher, because they have already customized this technology, you can easily leverage this technology." +{{< /case-studies/quote >}} + +

Platform Technology R&D team member Jie Jia adds that though "this technology is relatively complicated, as long as developers get used to it, they can enjoy all the benefits." And Zhang points out that in his own experience with virtual machine clouds, "Kubernetes and these cloud native technologies are relatively simpler."

+ +

Plus, "companies can use the managed services offered by companies like Rancher, because they have already customized this technology," says Jia. "You can easily leverage this technology."

+ +

Looking ahead, China Unicom plans to develop more applications on Kubernetes, focusing on big data and machine learning. The team is continuing to optimize the cloud platform that it built, and hopes to pass the conformance test to join CNCF's Certified Kubernetes Conformance Program. They're also hoping to someday contribute code back to the community.

+ +

If that sounds ambitious, it's because the results they've gotten from adopting Kubernetes have been beyond even their greatest expectations. Says Zhang: "We could never imagine we can achieve this scalability in such a short time."

\ No newline at end of file diff --git a/content/bn/case-studies/city-of-montreal/city-of-montreal_featured_logo.png b/content/bn/case-studies/city-of-montreal/city-of-montreal_featured_logo.png new file mode 100644 index 0000000000000..be2af029f0bdc Binary files /dev/null and b/content/bn/case-studies/city-of-montreal/city-of-montreal_featured_logo.png differ diff --git a/content/bn/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg b/content/bn/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg new file mode 100644 index 0000000000000..44ac9b0b1d9fa --- /dev/null +++ b/content/bn/case-studies/city-of-montreal/city-of-montreal_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/city-of-montreal/index.html b/content/bn/case-studies/city-of-montreal/index.html new file mode 100644 index 0000000000000..e115ef666e0b2 --- /dev/null +++ b/content/bn/case-studies/city-of-montreal/index.html @@ -0,0 +1,81 @@ +--- +title: City of Montreal Case Study +linkTitle: city-of-montreal +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/montreal/banner1.jpg +heading_title_logo: /images/montreal_logo.png +subheading: > + City of Montréal - How the City of Montréal Is Modernizing Its 30-Year-Old, Siloed Architecture with Kubernetes +case_study_details: + - Company: City of Montréal + - Location: Montréal, Québec, Canada + - Industry: Government +--- + +

Challenge

+ +

Like many governments, Montréal has a number of legacy systems, and "we have systems that are older than some developers working here," says the city's CTO, Jean-Martin Thibault. "We have mainframes, all flavors of Windows, various flavors of Linux, old and new Oracle systems, Sun servers, all kinds of databases. Like all big corporations, some of the most important systems, like Budget and Human Resources, were developed on mainframes in-house over the past 30 years." There are over 1,000 applications in all, and most of them were running on different ecosystems. In 2015, a new management team decided to break down those silos, and invest in IT in order to move toward a more integrated governance for the city. They needed to figure out how to modernize the architecture.

+ +

Solution

+ +

The first step was containerization. The team started with a small Docker farm with four or five servers, with Rancher providing access to the Docker containers and their logs, and Jenkins to deploy. "We based our effort on the new trends; we understood the benefits of immutability and deployments without downtime and such things," says Solutions Architect Marc Khouzam. They soon realized they needed orchestration as well, and opted for Kubernetes. Says Enterprise Architect Morgan Martinet: "Kubernetes offered concepts on how you would describe an architecture for any kind of application, and based on those concepts, deploy what's required to run the infrastructure. It was becoming a de facto standard."

+ +

Impact

+ +

The time to market has improved drastically, from many months to a few weeks. Deployments went from months to hours. "In the past, you would have to ask for virtual machines, and that alone could take weeks, easily," says Thibault. "Now you don't even have to ask for anything. You just create your project and it gets deployed." Kubernetes has also improved the efficiency of how the city uses its compute resources: "Before, the 200 application components we currently run on Kubernetes would have required hundreds of virtual machines, and now, if we're talking about a single environment of production, we are able to run them on 8 machines, counting the masters of Kubernetes," says Martinet. And it's all done with a small team of just 5 people operating the Kubernetes clusters.

+ +{{< case-studies/quote author="JEAN-MARTIN THIBAULT, CTO, CITY OF MONTRÉAL" >}} +"We realized the limitations of having a non-orchestrated Docker environment. Kubernetes came to the rescue, bringing in all these features that make it a lot easier to manage and give a lot more benefits to the users." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +The second biggest municipality in Canada, Montréal has a large number of legacy systems keeping the government running. And while they don't quite date back to the city's founding in 1642, "we have systems that are older than some developers working here," jokes the city's CTO, Jean-Martin Thibault. +{{< /case-studies/lead >}} + +

"We have mainframes, all flavors of Windows, various flavors of Linux, old and new Oracle systems, Sun servers, all kinds of databases. Some of the most important systems, like Budget and Human Resources, were developed on mainframes in-house over the past 30 years."

+ +

In recent years, that fact became a big pain point. There are over 1,000 applications in all, running on almost as many different ecosystems. In 2015, a new city management team decided to break down those silos, and invest in IT in order to move toward a more integrated governance. "The organization was siloed, so as a result the architecture was siloed," says Thibault. "Once we got integrated into one IT team, we decided to redo an overall enterprise architecture."

+ +

The first step to modernize the architecture was containerization. "We based our effort on the new trends; we understood the benefits of immutability and deployments without downtime and such things," says Solutions Architect Marc Khouzam. The team started with a small Docker farm with four or five servers, with Rancher providing access to the Docker containers and their logs, and Jenkins for deployment.

+ +{{< case-studies/quote + image="/images/case-studies/montreal/banner3.jpg" + author="MARC KHOUZAM, SOLUTIONS ARCHITECT, CITY OF MONTRÉAL" +>}} +"Getting a project running in Kubernetes is entirely dependent on how long you need to program the actual software. It's no longer dependent on deployment. Deployment is so fast that it's negligible." +{{< /case-studies/quote >}} + +

But this Docker farm setup had some limitations, including the lack of self-healing and dynamic scaling based on traffic, and the effort required to optimize server resources and scale to multiple instances of the same container. The team soon realized they needed orchestration as well. "Kubernetes came to the rescue," says Thibault, "bringing in all these features that make it a lot easier to manage and give a lot more benefits to the users."

+ +

The team had evaluated several orchestration solutions, but Kubernetes stood out because it addressed all of the pain points. (They were also inspired by Yahoo! Japan's use case, which the team members felt came close to their vision.) "Kubernetes offered concepts on how you would describe an architecture for any kind of application, and based on those concepts, deploy what's required to run the infrastructure," says Enterprise Architect Morgan Martinet. "It was becoming a de facto standard. It also promised portability across cloud providers. The choice of Kubernetes now gives us many options such as running clusters in-house or in any IaaS provider, or even using Kubernetes-as-a-service in any of the major cloud providers."

+ +

Another important factor in the decision was vendor neutrality. "As a government entity, it is essential for us to be neutral in our selection of products and providers," says Thibault. "The independence of the Cloud Native Computing Foundation from any company provides this."

+ +{{< case-studies/quote + image="/images/case-studies/montreal/banner4.jpg" + author="MORGAN MARTINET, ENTERPRISE ARCHITECT, CITY OF MONTRÉAL" +>}} +"Kubernetes has been great. It's been stable, and it provides us with elasticity, resilience, and robustness. While re-architecting for Kubernetes, we also benefited from the monitoring and logging aspects, with centralized logging, Prometheus logging, and Grafana dashboards. We have enhanced visibility of what's being deployed." +{{< /case-studies/quote >}} + +

The Kubernetes implementation began with the deployment of a small cluster using an internal Ansible playbook, which was soon replaced by the Kismatic distribution. Given the complexity they saw in operating a Kubernetes platform, they decided to provide development groups with an automated CI/CD solution based on Helm. "An integrated CI/CD solution on Kubernetes standardized how the various development teams designed and deployed their solutions, but allowed them to remain independent," says Khouzam.

+ +

During the re-architecting process, the team also added Prometheus for monitoring and alerting, Fluentd for logging, and Grafana for visualization. "We have enhanced visibility of what's being deployed," says Martinet. Adds Khouzam: "The big benefit is we can track anything, even things that don't run inside the Kubernetes cluster. It's our way to unify our monitoring effort."
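Khouzam's point about tracking targets that live outside the cluster is easy to picture with a Prometheus scrape configuration. The sketch below is hypothetical, not the city's actual setup; the job names and the `legacy-host` address are invented for illustration. One Prometheus server discovers pods through the Kubernetes API while also scraping a legacy service by static address:

```yaml
# prometheus.yml -- hypothetical sketch, not the city's actual configuration
global:
  scrape_interval: 30s              # how often Prometheus pulls metrics

scrape_configs:
  # Workloads inside the cluster, discovered via the Kubernetes API
  - job_name: kubernetes-pods
    kubernetes_sd_configs:
      - role: pod

  # A service running outside Kubernetes, scraped at a static address
  - job_name: legacy-vm-service     # invented name for illustration
    static_configs:
      - targets: ['legacy-host.example.internal:9100']
```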

+ +

All together, the cloud native solution has had a positive impact on velocity as well as administrative overhead. With standardization, code generation, automatic deployments into Kubernetes, and standardized monitoring through Prometheus, the time to market has improved drastically, from many months to a few weeks. Deployments went from months and weeks of planning down to hours. "In the past, you would have to ask for virtual machines, and that alone could take weeks to properly provision," says Thibault. Plus, for dedicated systems, experts often had to be brought in to install them with their own recipes, which could take weeks and months.

+ +

Now, says Khouzam, "we can deploy pretty much any application that's been Dockerized without any help from anybody. Getting a project running in Kubernetes is entirely dependent on how long you need to program the actual software. It's no longer dependent on deployment. Deployment is so fast that it's negligible."

+ +{{< case-studies/quote author="MORGAN MARTINET, ENTERPRISE ARCHITECT, CITY OF MONTRÉAL">}} +"We're working with the market when possible, to put pressure on our vendors to support Kubernetes, because it's a much easier solution to manage" +{{< /case-studies/quote >}} + +

Kubernetes has also improved the efficiency of how the city uses its compute resources: "Before, the 200 application components we currently run in Kubernetes would have required hundreds of virtual machines, and now, if we're talking about a single environment of production, we are able to run them on 8 machines, counting the masters of Kubernetes," says Martinet. And it's all done with a small team of just five people operating the Kubernetes clusters. Adds Martinet: "It's a dramatic improvement no matter what you measure."

+ +

So it should come as no surprise that the team's strategy going forward is to target Kubernetes as much as they can. "If something can't run inside Kubernetes, we'll wait for it," says Thibault. That means they haven't moved any of the city's Windows systems onto Kubernetes, though it's something they would like to do. "We're working with the market when possible, to put pressure on our vendors to support Kubernetes, because it's a much easier solution to manage," says Martinet.

+ +

Thibault sees a near future where 60% of the city's workloads are running on a Kubernetes platform—basically any and all of the use cases that they can get to work there. "It's so much more efficient than the way we used to do things," he says. "There's no looking back."

\ No newline at end of file diff --git a/content/bn/case-studies/crowdfire/crowdfire_featured_logo.png b/content/bn/case-studies/crowdfire/crowdfire_featured_logo.png new file mode 100644 index 0000000000000..ef84b16ea06c7 Binary files /dev/null and b/content/bn/case-studies/crowdfire/crowdfire_featured_logo.png differ diff --git a/content/bn/case-studies/crowdfire/crowdfire_featured_logo.svg b/content/bn/case-studies/crowdfire/crowdfire_featured_logo.svg new file mode 100644 index 0000000000000..a4f020161a784 --- /dev/null +++ b/content/bn/case-studies/crowdfire/crowdfire_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/crowdfire/index.html b/content/bn/case-studies/crowdfire/index.html new file mode 100644 index 0000000000000..d369be1a1ab56 --- /dev/null +++ b/content/bn/case-studies/crowdfire/index.html @@ -0,0 +1,85 @@ +--- +title: Crowdfire Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/crowdfire/banner1.jpg +heading_title_logo: /images/crowdfire_logo.png +subheading: > + How to Keep Iterating a Fast-Growing App With a Cloud-Native Approach +case_study_details: + - Company: Crowdfire + - Location: Mumbai, India + - Industry: Social Media Software +--- + +

Challenge

+ +

Crowdfire helps content creators create their content anywhere on the Internet and publish it everywhere else in the right format. Since its launch in 2010, it has grown to 16 million users. The product began as a monolith app running on Google App Engine, and in 2015, the company began a transformation to microservices running on Amazon Web Services Elastic Beanstalk. "It was okay for our use cases initially, but as the number of services, development teams and scale increased, the deploy times, self-healing capabilities and resource utilization started to become problems for us," says Software Engineer Amanpreet Singh, who leads the infrastructure team for Crowdfire.

+ +

Solution

+ +

"We realized that we needed a more cloud-native approach to deal with these issues," says Singh. The team decided to implement a custom setup of Kubernetes based on Terraform and Ansible.

+ +

Impact

+ +

"Kubernetes has helped us reduce the deployment time from 15 minutes to less than a minute," says Singh. "Due to Kubernetes's self-healing nature, the operations team doesn't need to do any manual intervention in case of a node or pod failure." Plus, he says, "Dev-Prod parity has improved since developers can experiment with options in dev/staging clusters, and when it's finalized, they just commit the config changes in the respective code repositories. These changes automatically get replicated on the production cluster via CI/CD pipelines."

+ +{{< case-studies/quote author="Amanpreet Singh, Software Engineer at Crowdfire" >}} +"In the 15 months that we've been using Kubernetes, it has been amazing for us. It enabled us to iterate quickly, increase development speed, and continuously deliver new features and bug fixes to our users, while keeping our operational costs and infrastructure management overhead under control." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +"If you build it, they will come." +{{< /case-studies/lead >}} + +

For most content creators, only half of that movie quote may ring true. Sure, platforms like WordPress, YouTube and Shopify have made it simple for almost anyone to start publishing new content online, but attracting an audience isn't as easy. Crowdfire "helps users publish their content to all possible places where their audience exists," says Amanpreet Singh, a Software Engineer at the company based in Mumbai, India. Crowdfire has gained more than 16 million users—from bloggers and artists to makers and small businesses—since its launch in 2010.

+ +

With that kind of growth—and a high demand from users for new features and continuous improvements—the Crowdfire team struggled to keep up behind the scenes. In 2015, they moved their monolith Java application to Amazon Web Services Elastic Beanstalk and started breaking it down into microservices.

+ +

It was a good first step, but the team soon realized they needed to go further down the cloud-native path, which would lead them to Kubernetes. "It was okay for our use cases initially, but as the number of services and development teams increased and we scaled further, deploy times, self-healing capabilities and resource utilization started to become problematic," says Singh, who leads the infrastructure team at Crowdfire. "We realized that we needed a more cloud-native approach to deal with these issues."

+ +

As he looked around for solutions, Singh had a checklist of what Crowdfire needed. "We wanted to keep some things separate so they could be shipped independent of other things; this would help remove blockers and let different teams work at their own pace," he says. "We also make a lot of data-driven decisions, so shipping a feature and its iterations quickly was a must."

+ +

Kubernetes checked all the boxes and then some. "One of the best things was the built-in service discovery," he says. "When you have a bunch of microservices that need to call each other, having internal DNS readily available and service IPs and ports automatically set as environment variables help a lot." Plus, he adds, "Kubernetes's opinionated approach made it easier to get started."
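For readers unfamiliar with the mechanism Singh is praising, here is a minimal sketch; the `user-api` name and ports are illustrative, not Crowdfire's actual services. Given a Service like this, Kubernetes serves a DNS name such as `user-api.default.svc.cluster.local`, and pods started after the Service exists see environment variables like `USER_API_SERVICE_HOST` and `USER_API_SERVICE_PORT`:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: user-api            # illustrative name
spec:
  selector:
    app: user-api           # traffic is routed to pods carrying this label
  ports:
    - port: 80              # stable port other microservices call
      targetPort: 8080      # port the backing containers listen on
```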

+ +{{< case-studies/quote image="/images/case-studies/crowdfire/banner3.jpg" >}} +"We realized that we needed a more cloud-native approach to deal with these issues," says Singh. The team decided to implement a custom setup of Kubernetes based on Terraform and Ansible." +{{< /case-studies/quote >}} + +

There was another compelling business reason for the cloud-native approach. "In today's world of ever-changing business requirements, using cloud native technology provides a variety of options to choose from—even the ability to run services in a hybrid cloud environment," says Singh. "Businesses can keep services in a region closest to the users, and thus benefit from high-availability and resiliency."

+ +

So in February 2016, Singh set up a test Kubernetes cluster using the kube-up scripts provided. "I explored the features and was able to deploy an application pretty easily," he says. "However, it seemed like a black box since I didn't understand the components completely, and had no idea what the kube-up script did under the hood. So when it broke, it was hard to find the issue and fix it."

+ +

To get a better understanding, Singh dove into the internals of Kubernetes, reading the docs and even some of the code. And he looked to the Kubernetes community for more insight. "I used to stay up a little late every night (a lot of users were active only when it's night here in India) and would try to answer questions on the Kubernetes community Slack from users who were getting started," he says. "I would also follow other conversations closely. I must admit I was able to avoid a lot of issues in our setup because I knew others had faced the same issues."

+ +

Based on the knowledge he gained, Singh decided to implement a custom setup of Kubernetes based on Terraform and Ansible. "I wrote Terraform to launch Kubernetes master and nodes (Auto Scaling Groups) and an Ansible playbook to install the required components," he says. (The company recently switched to using prebaked AMIs to make the node bringup faster, and is planning to change its networking layer.)

+ +{{< case-studies/quote image="/images/case-studies/crowdfire/banner4.jpg" >}} +"Kubernetes helped us reduce the deployment time from 15 minutes to less than a minute. Due to Kubernetes's self-healing nature, the operations team doesn't need to do any manual intervention in case of a node or pod failure." +{{< /case-studies/quote >}} + +

First, the team migrated a few staging services from Elastic Beanstalk to the new Kubernetes staging cluster, and then set up a production cluster a month later to deploy some services. The results were convincing. "By the end of March 2016, we established that all the new services must be deployed on Kubernetes," says Singh. "Kubernetes helped us reduce the deployment time from 15 minutes to less than a minute. Due to Kubernetes's self-healing nature, the operations team doesn't need to do any manual intervention in case of a node or pod failure." On top of that, he says, "Dev-Prod parity has improved since developers can experiment with options in dev/staging clusters, and when it's finalized, they just commit the config changes in the respective code repositories. These changes automatically get replicated on the production cluster via CI/CD pipelines. This brings more visibility into the changes being made, and keeping an audit trail."

+ +

Over the next six months, the team worked on migrating all the services from Elastic Beanstalk to Kubernetes, except for the few that were deprecated and would soon be terminated anyway. The services were moved one at a time, and their performance was monitored for two to three days each. Today, "We're completely migrated and we run all new services on Kubernetes," says Singh.

+ +

The impact has been considerable: With Kubernetes, the company has experienced a 90% cost savings on Elastic Load Balancer, which is now only used for their public, user-facing services. Their EC2 operating expenses have decreased by as much as 50%.

+ +

All 30 engineers at Crowdfire were onboarded at once. "I gave an internal talk where I shared the basic components and demoed the usage of kubectl," says Singh. "Everyone was excited and happy about using Kubernetes. Developers have more control and visibility into their applications running in production now. Most of all, they're happy with the low deploy times and self-healing services."

+ +

And they're much more productive, too. "Where we used to do about 5 deployments per day," says Singh, "now we're doing 30+ production and 50+ staging deployments almost every day."

+ +{{< case-studies/quote >}} +The impact has been considerable: With Kubernetes, the company has experienced a 90% cost savings on Elastic Load Balancer, which is now only used for their public, user-facing services. Their EC2 operating expenses have decreased by as much as 50%. +{{< /case-studies/quote >}} + +

Singh notes that almost all of the engineers interact with the staging cluster on a daily basis, and that has created a cultural change at Crowdfire. "Developers are more aware of the cloud infrastructure now," he says. "They've started following cloud best practices like better health checks, structured logs to stdout [standard output], and config via files or environment variables."
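The "config via files or environment variables" practice Singh mentions maps naturally onto ConfigMaps. A minimal sketch, with invented names and keys (not Crowdfire's real configuration): each key in the ConfigMap surfaces inside the container as an environment variable.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config                  # illustrative name
data:
  LOG_LEVEL: "info"
  FEATURE_FLAGS: "new-editor"
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
    - name: app
      image: example/app:1.0        # placeholder image
      envFrom:
        - configMapRef:
            name: app-config        # each key above becomes an env var
```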

+ +

With Crowdfire's commitment to Kubernetes, Singh is looking to expand the company's cloud-native stack. The team already uses Prometheus for monitoring, and he says he is evaluating Linkerd and Envoy Proxy as a way to "get more metrics about request latencies and failures, and handle them better." Other CNCF projects, including OpenTracing and gRPC, are also on his radar.

+ +

Singh has found that the cloud-native community is growing in India, too, particularly in Bangalore. "A lot of startups and new companies are starting to run their infrastructure on Kubernetes," he says.

+ +

And when people ask him about Crowdfire's experience, he has this advice to offer: "Kubernetes is a great piece of technology, but it might not be right for you, especially if you have just one or two services or your app isn't easy to run in a containerized environment," he says. "Assess your situation and the value that Kubernetes provides before going all in. If you do decide to use Kubernetes, make sure you understand the components that run under the hood and what role they play in smoothly running the cluster. Another thing to consider is if your apps are 'Kubernetes-ready,' meaning if they have proper health checks and handle termination signals to shut down gracefully."
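What "Kubernetes-ready" looks like in practice can be sketched in a pod spec; the paths, ports, and image here are illustrative. The probes let the cluster gate traffic and restart hung containers, while the grace period gives the application time to handle SIGTERM and drain in-flight work before it is killed:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web                          # illustrative name
spec:
  terminationGracePeriodSeconds: 30  # window between SIGTERM and SIGKILL
  containers:
    - name: app
      image: example/app:1.0         # placeholder image
      readinessProbe:                # keep pod out of load balancing until ready
        httpGet:
          path: /healthz
          port: 8080
      livenessProbe:                 # restart the container if it stops answering
        httpGet:
          path: /healthz
          port: 8080
        initialDelaySeconds: 10
```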

+ +

And if your company fits that profile, go for it. Crowdfire clearly did—and is now reaping the benefits. "In the 15 months that we've been using Kubernetes, it has been amazing for us," says Singh. "It enabled us to iterate quickly, increase development speed and continuously deliver new features and bug fixes to our users, while keeping our operational costs and infrastructure management overhead under control."

\ No newline at end of file diff --git a/content/bn/case-studies/daocloud/daocloud_featured_logo.png b/content/bn/case-studies/daocloud/daocloud_featured_logo.png new file mode 100644 index 0000000000000..ca6fbcdc44174 Binary files /dev/null and b/content/bn/case-studies/daocloud/daocloud_featured_logo.png differ diff --git a/content/bn/case-studies/daocloud/daocloud_featured_logo.svg b/content/bn/case-studies/daocloud/daocloud_featured_logo.svg new file mode 100644 index 0000000000000..2d05acd730ab8 --- /dev/null +++ b/content/bn/case-studies/daocloud/daocloud_featured_logo.svg @@ -0,0 +1 @@ +DaoCloud_logo \ No newline at end of file diff --git a/content/bn/case-studies/daocloud/index.html b/content/bn/case-studies/daocloud/index.html new file mode 100644 index 0000000000000..0344b3241eac5 --- /dev/null +++ b/content/bn/case-studies/daocloud/index.html @@ -0,0 +1,114 @@ +--- +title: DaoCloud Case Study +linkTitle: DaoCloud +case_study_styles: true +cid: caseStudies +logo: daocloud_featured_logo.svg + +css: /css/style_daocloud.css +new_case_study_styles: true +heading_background: /images/case-studies/daocloud/banner1.jpg +heading_title_logo: /images/daocloud-light.svg +subheading: > + Seek Global Optimal Solutions for Digital World +case_study_details: + - Company: DaoCloud + - Location: Shanghai, China + - Industry: Cloud Native +--- + +

Challenges

+ +

DaoCloud, founded in 2014, is an innovation leader in the field of cloud native. It holds independent intellectual property rights to the core technologies behind its open cloud platform, which is built to power the digital transformation of enterprises.

+ +

DaoCloud has been engaged in cloud native since its inception. As containerization is crucial for cloud native business, a cloud platform that does not have containers as infrastructure is unlikely to attract its potential users. Therefore, the first challenge confronting DaoCloud is how to efficiently manage and schedule numerous containers while maintaining stable connectivity between them.

+ +

As cloud native technology gains momentum, cloud native solutions have proliferated like mushrooms after rain. Having more choices is not always a good thing, however: selecting from so many products in a way that maximizes overall benefit and minimizes cost is challenging and demanding. So another obstacle ahead of DaoCloud is how to pick the best performer in each field and combine them into one platform that achieves a global optimum for cloud native.

+ +

Solutions

+ +

As the de facto standard for container orchestration, Kubernetes is undoubtedly the preferred container solution. Paco Xu, head of the Open Source and Advanced Development team at DaoCloud, stated, "Kubernetes is a fundamental tool in the current container ecosystem. Most services or applications are deployed and managed in Kubernetes clusters."

+ +

Regarding finding the global optimal solutions for cloud native technology, Peter Pan, R&D Vice President of DaoCloud, believes that "the right way is to focus on Kubernetes, coordinate relevant best practices and advanced technologies, and build a widely applicable platform."

+ +

Results

+ +

In the process of embracing cloud native technology, DaoCloud continues to learn from Kubernetes and other excellent CNCF open source projects. It has formed a product architecture centered on DaoCloud Enterprise, a platform for cloud native applications. Using Kubernetes and other cutting-edge cloud native technologies as a foundation, DaoCloud provides solid cloud native solutions for military, finance, manufacturing, energy, government, and retail clients. It helps promote digital transformation of many companies, such as SPD Bank, Huatai Securities, Fullgoal Fund, SAIC Motor, Haier, Fudan University, Watsons, Genius Auto Finance, State Grid Corporation of China, etc.

+ +{{< case-studies/quote + image="/images/case-studies/daocloud/banner2.jpg" + author="Kebe Liu, Service Mesh Expert, DaoCloud" +>}} +"As DaoCloud Enterprise becomes more powerful and attracts more users, some customers need to use Kubernetes instead of Swarm for application orchestration. We, as providers, need to meet the needs of our users." +{{< /case-studies/quote >}} + +

DaoCloud was founded to help traditional enterprises move their applications to the cloud and realize digital transformation. The first product released after the company's establishment, DaoCloud Enterprise 1.0, is a Docker-based container engine platform that can easily build images and run them in containers.

+ +

However, as applications and containers increased in number, coordinating and scheduling these containers became a bottleneck that restricted product performance. DaoCloud Enterprise 2.0 used Docker Swarm to manage containers, but the increasingly complex container scheduling system gradually outgrew the capabilities of Docker Swarm.

+ +

Fortunately, Kubernetes began to stand out at this time. It rapidly grew into the industrial standard for container orchestration with its competitive rich functions, stable performance, timely community support, and strong compatibility. Paco Xu said, "Enterprise container platforms need container orchestration to standardize the process of moving to the cloud. Kubernetes was accepted as the de facto standard for container orchestration around 2016 and 2017. Our products started to support it in 2017."

+ +

After thorough comparisons and evaluations, DaoCloud Enterprise 2.8, debuted in 2017, officially adopted Kubernetes (v1.6.7) as its container orchestration tool. Since then, DaoCloud Enterprise 3.0 (2018) used Kubernetes v1.10, and DaoCloud Enterprise 4.0 (2021) adopted Kubernetes v1.18. The latest version, DaoCloud Enterprise 5.0 (2022), supports Kubernetes v1.23 to v1.26.

+ +

Kubernetes has been an integral part of these four releases over six years, which speaks volumes about how well it has served DaoCloud Enterprise. Through its own experience, DaoCloud has demonstrated that Kubernetes is the right choice for container orchestration, and it remains a committed supporter of the project.

+ +{{< case-studies/quote + image="/images/case-studies/daocloud/banner3.jpg" + author="Ting Ye, Vice President of Product Innovation, DaoCloud" +>}} +"Kubernetes is the cornerstone for refining our products towards world-class software." +{{< /case-studies/quote >}} + +

Kubernetes helped our product and research teams automate the test, build, check, and release processes, ensuring the quality of deliverables. It also helped us build collaborative systems for product requirements and definition, multilingual product materials, and debugging, improving the efficiency of intra- and inter-department collaboration.

+ +

On the one hand, Kubernetes makes our products more performant and competitive. DaoCloud integrates relevant practices and technologies around Kubernetes to polish its flagship offering – DaoCloud Enterprise. The latest version, 5.0, released in 2022, covers application stores, application delivery, microservice governance, observability, data services, multi-cloud management, cloud-edge collaboration, and other functions. DaoCloud Enterprise 5.0 is a comprehensive integration of cloud native technologies.

+ +

DaoCloud deployed a Kubernetes platform for SPD Bank, improving its application deployment efficiency by 82%, shortening its delivery cycle from half a year to one month, and promoting its transaction success rate to 99.999%.

+ +

For Sichuan Tianfu Bank, scaling time was reduced from several hours to an average of 2 minutes, the product iteration cycle was shortened from two months to two weeks, and application rollout time was cut by 76.76%.

+ +

For a joint-venture carmaker, the delivery cycle shortened from two months to one or two weeks, the success rate of application deployment increased by 53%, and application rollout became ten times more efficient. For a multinational retailer, application deployment issues were reduced by 46%, and fault location efficiency rose by more than 90%.

+ +

For a large-scale securities firm, its business procedure efficiency was enhanced by 30%, and resource costs were lowered by about 35%.

+ +

With this product, Fullgoal Fund shortened its middleware deployment time from hours to minutes, improved middleware operation and maintenance capabilities by 50%, containerization by 60%, and resource utilization by 40%.

+ +

On the other hand, our product development is also based on Kubernetes. DaoCloud deployed GitLab on Kubernetes and established a product development process of "GitLab -> PR -> Auto Tests -> Builds & Releases", which significantly improved our development efficiency, reduced repetitive tests, and enabled automatic release of applications. This approach greatly reduces operation and maintenance costs, enabling technicians to invest more time and energy in product development to offer better cloud native products.
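As a rough illustration of the "GitLab -> PR -> Auto Tests -> Builds & Releases" flow described above, a pipeline of that shape might be expressed in a `.gitlab-ci.yml` like the following. This is a hypothetical sketch, not DaoCloud's actual pipeline; the registry address, image names, and commands are invented:

```yaml
# .gitlab-ci.yml -- hypothetical sketch of a test/build/release flow
stages:
  - test
  - build
  - release

unit-tests:
  stage: test
  script:
    - make test                     # runs on every merge request

build-image:
  stage: build
  script:
    - docker build -t registry.example.com/app:$CI_COMMIT_SHORT_SHA .
    - docker push registry.example.com/app:$CI_COMMIT_SHORT_SHA

release:
  stage: release
  rules:
    - if: $CI_COMMIT_TAG            # release only when a tag is pushed
  script:
    - helm upgrade --install app ./chart --set image.tag=$CI_COMMIT_TAG
```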

+ +{{< case-studies/quote + image="/images/case-studies/daocloud/banner4.jpg" + author="Paco Xu, Head of Open Source & Advanced Development Team, DaoCloud" +>}} +"Our developers actively contribute to open source projects and build technical expertise. DaoCloud has established a remarkable presence in the Kubernetes and Istio communities." +{{< /case-studies/quote >}} + +

DaoCloud is deeply involved in contributing to Kubernetes and other cloud native open source projects. Our participation and contributions in these communities continue to grow. In 2022, DaoCloud was ranked third globally in terms of cumulative contribution to Kubernetes (data from Stackalytics as of January 5, 2023).

+ +

In August 2022, Kubernetes officially organized an interview with community contributors, and four outstanding contributors from the Asia-Pacific region were invited. Half of them came from DaoCloud, namely Shiming Zhang and Paco Xu. Both are Reviewers of SIG Node. Furthermore, at KubeCon + CloudNativeCon North America 2022, Kante Yin from DaoCloud won the 2022 Contributor Award of Kubernetes.

+ +

In addition, DaoCloud continues to practice its cloud native beliefs and contribute to the Kubernetes ecosystem by sharing the source code of several excellent projects, including Clusterpedia, Kubean, CloudTTY, KLTS, Merbridge, HwameiStor, Spiderpool, and KWOK, on GitHub.

+ +

In particular:

+ +
+
• Clusterpedia: Designed for resource synchronization across clusters, Clusterpedia is compatible with Kubernetes OpenAPIs and offers a powerful search function for quick and effective retrieval of all resources in clusters.
• Kubean: With Kubean, it's possible to quickly create production-ready Kubernetes clusters and integrate clusters from other providers.
• CloudTTY: CloudTTY is a web terminal and cloud shell operator for Kubernetes cloud native environments, allowing for management of Kubernetes clusters on a web page from anywhere and at any time.
• KLTS: Providing long-term free maintenance for earlier versions of Kubernetes, KLTS ensures stability and support for older Kubernetes deployments. Additionally, Piraeus is an easy and secure storage solution for Kubernetes with high performance and availability.
• KWOK: Short for Kubernetes WithOut Kubelet, KWOK is a toolkit that enables the setup of a cluster of thousands of nodes in seconds. All nodes are simulated to behave like real ones, resulting in low resource usage that makes it easy to experiment on a laptop (see the sketch after this list).
+
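To give a flavor of how KWOK simulates nodes: its documentation registers fake nodes as ordinary Node objects carrying a KWOK annotation and taint, roughly as below. The node name is illustrative, and details may vary across KWOK versions:

```yaml
apiVersion: v1
kind: Node
metadata:
  name: kwok-node-0                  # illustrative name
  annotations:
    kwok.x-k8s.io/node: fake         # tells the KWOK controller to manage this node
  labels:
    type: kwok
spec:
  taints:                            # keeps ordinary workloads off simulated nodes
    - key: kwok.x-k8s.io/node
      value: fake
      effect: NoSchedule
```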

DaoCloud utilizes its practical experience across industries to contribute to Kubernetes-related open source projects, with the aim of making cloud native technologies, represented by Kubernetes, function better in production environments.

+ +{{< case-studies/quote + image="/images/case-studies/daocloud/banner5.jpg" + author="Song Zheng, Technology GM, DaoCloud" +>}} +"DaoCloud, as one of the first cloud native technology training partners certified by CNCF, will continue to carry out trainings to help more companies find their best ways for going to the cloud." +{{< /case-studies/quote >}} + +

Enterprise users need a global optimal solution, which can be understood as an inclusive platform that can maximize the advantages of multi-cloud management, application delivery, observability, cloud-edge collaboration, microservice governance, application store, and data services. In today's cloud native ecosystem, these functions cannot be achieved without Kubernetes as the underlying container orchestration tool. Therefore, Kubernetes is crucial to DaoCloud's mission of finding the optimal solution in the digital world, and all future product development will continue to be based on Kubernetes.

+ +

DaoCloud has always attached great importance to Kubernetes training and promotion. In 2017, the company took the lead in passing CNCF's Certified Kubernetes Conformance Program with its flagship product, DaoCloud Enterprise. In 2018, it became a CNCF-certified Kubernetes service provider and training partner.

+ +

On November 18, 2022, the "Kubernetes Community Days" event was successfully held in Chengdu, organized by CNCF, DaoCloud, Huawei Cloud, Sichuan Tianfu Bank, and OPPO. The event brought together end-users, contributors, and technical experts from open-source communities to share best practices and innovative ideas about Kubernetes and cloud native. In the future, DaoCloud will continue to contribute to Kubernetes projects, and expand the influence of Kubernetes through project training, community contributions and other activities.

\ No newline at end of file diff --git a/content/bn/case-studies/denso/denso_featured_logo.svg b/content/bn/case-studies/denso/denso_featured_logo.svg new file mode 100644 index 0000000000000..e2b26b2c8c30c --- /dev/null +++ b/content/bn/case-studies/denso/denso_featured_logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/bn/case-studies/denso/index.html b/content/bn/case-studies/denso/index.html new file mode 100644 index 0000000000000..e42a7854e3ee9 --- /dev/null +++ b/content/bn/case-studies/denso/index.html @@ -0,0 +1,82 @@ +--- +title: Denso Case Study +linkTitle: Denso +case_study_styles: true +cid: caseStudies +logo: denso_featured_logo.svg +featured: true +weight: 4 +quote: > + We got Kubernetes experts involved on our team, and it dramatically accelerated development speed. + +new_case_study_styles: true +heading_background: /images/case-studies/denso/banner2.jpg +heading_title_text: Denso +use_gradient_overlay: true +subheading: > + How DENSO Is Fueling Development on the Vehicle Edge with Kubernetes +case_study_details: + - Company: Denso + - Location: Japan + - Industry: Automotive, Edge +--- + +

Challenge

+ +

DENSO Corporation is one of the biggest automotive components suppliers in the world. With the advent of connected cars, the company launched a Digital Innovation Department to expand into software, working on vehicle edge and vehicle cloud products. But there were several technical challenges to creating an integrated vehicle edge/cloud platform: "the amount of computing resources, the occasional lack of mobile signal, and an enormous number of distributed vehicles," says R&D Product Manager Seiichi Koizumi.

+ +

Solution

+ +

Koizumi's team realized that because mobility services evolve every day, they needed the flexibility of the cloud native ecosystem for their platform. After considering other orchestrators, DENSO went with Kubernetes for orchestration and added Prometheus, Fluentd, Envoy, Istio, and Helm to the platform. Today, DENSO is using a vehicle edge computer, a private Kubernetes cloud, and managed Kubernetes (GKE, EKS, AKS).

+ +

Impact

+ +

Critical layer features can take 2-3 years to implement in the traditional, waterfall model of development at DENSO. With the Kubernetes platform and agile methods, there's a 2-month development cycle for non-critical software. Now, ten new applications are released a year, and a new prototype is introduced every week. "By utilizing Kubernetes managed services, such as GKE/EKS/AKS, we can unify the environment and simplify our maintenance operation," says Koizumi.

+ +{{< case-studies/quote + image="/images/case-studies/denso/banner1.png" + author="SEIICHI KOIZUMI, R&D PRODUCT MANAGER, DIGITAL INNOVATION DEPARTMENT AT DENSO" +>}} +"Another disruptive innovation is coming, so to survive in this situation, we need to change our culture." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Spun off from Toyota in 1949, DENSO Corporation is one of the top automotive suppliers in the world today, with consolidated net revenue of $48.3 billion. +{{< /case-studies/lead >}} + +

The company's mission is "contributing to a better world by creating value together with a vision for the future"—and part of that vision in recent years has been development on the vehicle edge and vehicle cloud.

+ +

With the advent of connected cars, DENSO established a Digital Innovation Department to expand its business beyond the critical layer of the engine, braking systems, and other automotive parts into the non-critical analytics and entertainment layer. Comparing connected cars to smartphones, R&D Product Manager Seiichi Koizumi says DENSO wants the ability to quickly and easily develop and install apps for the "blank slate" of the car, and iterate them based on the driver's preferences. Thus "we need a flexible application platform," he says.

+ +

But working on vehicle edge and vehicle cloud products meant there were several technical challenges: "the amount of computing resources, the occasional lack of mobile signal, and an enormous number of distributed vehicles," says Koizumi. "We are tackling these challenges to create an integrated vehicle edge/cloud platform."

+ +{{< case-studies/quote author="SEIICHI KOIZUMI, R&D PRODUCT MANAGER, DIGITAL INNOVATION DEPARTMENT AT DENSO" >}} +"We got Kubernetes experts involved on our team, and it dramatically accelerated development speed." +{{< /case-studies/quote >}} + +

Koizumi's team realized that because mobility services evolve every day, they needed the flexibility of the cloud native ecosystem for their platform. As they evaluated technologies, they were guided by these criteria: because their service-enabler business needed to support multiple cloud and on-premises environments, the solution had to be cloud agnostic, with no vendor lock-in and open governance, and it had to support an edge-cloud integrated environment.

+ +

After considering other orchestrators, DENSO went with Kubernetes for orchestration and added Prometheus, Fluentd, Envoy, Istio, and Helm to the platform. During implementation, the team used "design thinking to clarify use cases and their value proposition," says Koizumi. Next, an agile development team worked on a POC, then an MVP, in DevOps style. "Even in the development phase, we are keeping a channel to end users," he adds.

+ +

One lesson learned during this process was the value of bringing in experts. "We tried to learn Kubernetes and cloud native technologies from scratch, but it took more time than expected," says Koizumi. "We got Kubernetes experts involved on our team, and it dramatically accelerated development speed."

+ +{{< case-studies/quote + image="/images/case-studies/denso/banner4.jpg" + author="SEIICHI KOIZUMI, R&D PRODUCT MANAGER, DIGITAL INNOVATION DEPARTMENT AT DENSO" +>}} +"By utilizing Kubernetes managed services, such as GKE/EKS/AKS, we can unify the environment and simplify our maintenance operation." +{{< /case-studies/quote >}} + +

Today, DENSO is using a vehicle edge computer, a private Kubernetes cloud, and managed Kubernetes on GKE, EKS, and AKS. "We are developing a vehicle edge/cloud integrated platform based on a microservice and service mesh architecture," says Koizumi. "We extend cloud into multiple vehicle edges and manage it as a unified platform."

+ +

Cloud native has enabled DENSO to deliver applications via its new dash cam, which has a secure connection that collects data to the cloud. "It's like a smartphone," he says. "We are installing new applications and getting the data through the cloud, and we can keep updating new applications all through the dash cam."

+ +

The unified cloud native platform, combined with agile development, has had a positive impact on productivity. Critical layer features—those involving engines or braking systems, for example—can take 2-3 years to implement at DENSO, because of the time needed to test safety, but also because of the traditional, waterfall model of development. With the Kubernetes platform and agile methods, there's a 2-month development cycle for non-critical software. Now, ten new applications are released a year, and with the department's scrum-style development, a new prototype is introduced every week.

+ +

Application portability has also led to greater developer efficiency. "There's no need to care about differences in the multi-cloud platform anymore," says Koizumi. Now, "we are also trying to have the same portability between vehicle edge and cloud platform."

+ +

Another improvement: Automotive Tier-1 suppliers like DENSO always have multiple Tier-2 suppliers. "To provide automotive-grade high-availability services, we tried to do the same thing on a multi-cloud platform," says Koizumi. Before Kubernetes, maintaining two different systems simultaneously was difficult. "By utilizing Kubernetes managed services, such as GKE/EKS/AKS, we can unify the environment and simplify our maintenance operation," he says.

+ +

Cloud native has also profoundly changed the culture at DENSO. The Digital Innovation Department is known as "Noah's Ark," and it has grown from 2 members to 70—with plans to more than double in the next year. The way they operate is completely different from the traditional Japanese automotive culture. But just as the company embraced change brought by hybrid cars in the past decade, Koizumi says, they're doing it again now, as technology companies have moved into the connected car space. "Another disruptive innovation is coming," he says, "so to survive in this situation, we need to change our culture."

+ +

Looking ahead, Koizumi and his team are expecting serverless and zero-trust security architecture to be important enhancements of Kubernetes. They are glad DENSO has come along for the ride. "Mobility service businesses require agility and flexibility," he says. "DENSO is trying to bring cloud native flexibility into the vehicle infrastructure."

\ No newline at end of file diff --git a/content/bn/case-studies/golfnow/golfnow_featured.png b/content/bn/case-studies/golfnow/golfnow_featured.png new file mode 100644 index 0000000000000..0b99ac3b8f8f8 Binary files /dev/null and b/content/bn/case-studies/golfnow/golfnow_featured.png differ diff --git a/content/bn/case-studies/golfnow/golfnow_featured.svg b/content/bn/case-studies/golfnow/golfnow_featured.svg new file mode 100644 index 0000000000000..b5b42d6fcdc08 --- /dev/null +++ b/content/bn/case-studies/golfnow/golfnow_featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/golfnow/golfnow_logo.png b/content/bn/case-studies/golfnow/golfnow_logo.png new file mode 100644 index 0000000000000..dbeb127b02a27 Binary files /dev/null and b/content/bn/case-studies/golfnow/golfnow_logo.png differ diff --git a/content/bn/case-studies/golfnow/index.html b/content/bn/case-studies/golfnow/index.html new file mode 100644 index 0000000000000..4f731436cf7c2 --- /dev/null +++ b/content/bn/case-studies/golfnow/index.html @@ -0,0 +1,89 @@ +--- +title: GolfNow Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/golfnow/banner1.jpg +heading_title_logo: /images/golfnow_logo.png +subheading: > + Saving Time and Money with Cloud Native Infrastructure +case_study_details: + - Company: GolfNow + - Location: Orlando, Florida + - Industry: Golf Industry Technology and Services Provider +--- + +

Challenge

+ +

A member of the NBC Sports Group, GolfNow is the golf industry's technology and services leader, managing 10 different products, as well as the largest e-commerce tee time marketplace in the world. As its business began expanding rapidly and globally, GolfNow's monolithic application became problematic. "We kept growing our infrastructure vertically rather than horizontally, and the cost of doing business became problematic," says Sheriff Mohamed, GolfNow's Director, Architecture. "We wanted the ability to more easily expand globally."

+ +

Solution

+ +

Turning to microservices and containerization, GolfNow began moving its applications and databases from third-party services to its own clusters running on Docker and Kubernetes.

+ +

Impact

+ +

The results were immediate. While maintaining the same capacity—and beyond, during peak periods—GolfNow saw its infrastructure costs for the first application virtually cut in half.

+ +{{< case-studies/quote author="SHERIFF MOHAMED, DIRECTOR, ARCHITECTURE AT GOLFNOW" >}} +"With our growth we obviously needed to expand our infrastructure, and we kept growing vertically rather than horizontally. We were basically wasting money and doubling the cost of our infrastructure." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +It's not every day that you can say you've slashed an operating expense by half. +{{< /case-studies/lead >}} + +

But Sheriff Mohamed and Josh Chandler did just that when they helped lead their company, GolfNow, on a journey from a monolithic to a containerized, cloud native infrastructure managed by Kubernetes.

+ +

A top-performing business within the NBC Sports Group, GolfNow is a technology and services company with the largest tee time marketplace in the world. GolfNow serves 5 million active golfers across 10 different products. In recent years, the business had grown so fast that the infrastructure supporting their giant monolithic application (written in C#.NET and backed by the SQL Server database management system) could not keep up. "With our growth we obviously needed to expand our infrastructure, and we kept growing vertically rather than horizontally," says Sheriff, GolfNow's Director, Architecture. "Our costs were growing exponentially. And on top of that, we had to build a Disaster Recovery (DR) environment, which then meant we'd have to copy exactly what we had in our original data center to another data center that was just the standby. We were basically wasting money and doubling the cost of our infrastructure."

+ +

In moving just the first of GolfNow's important applications—a booking engine for golf courses and B2B marketing platform—from third-party services to their own Kubernetes environment, "our bill went down drastically," says Sheriff.

+ +

The path to those stellar results began in late 2014. In order to support GolfNow's global growth, the team decided that the company needed to have multiple data centers and the ability to quickly and easily re-route traffic as needed. "From there we knew that we needed to go in a direction of breaking things apart, microservices, and containerization," says Sheriff. "At the time we were trying to get away from C#.NET and SQL Server since they didn't run very well on Linux, where everything container-based was running smoothly."

+ +

To that end, the team shifted to working with Node.js, the open-source, cross-platform JavaScript runtime environment for developing tools and applications, and MongoDB, the open-source database program. At the time, Docker, the platform for deploying applications in containers, was still new. But once the team began experimenting with it, Sheriff says, "we realized that was the way we wanted to go, especially since that's the way the industry is heading."

+ +{{< case-studies/quote image="/images/case-studies/golfnow/banner3.jpg" >}} +"The team migrated the rest of the application into their Kubernetes cluster. And the impact was immediate: On top of cutting monthly costs by a large percentage, says Sheriff, 'Running at the same capacity and during our peak time, we were able to horizontally grow. Since we were using our VMs more efficiently with containers, we didn't have to pay extra money at all.'" +{{< /case-studies/quote >}} + +

GolfNow's dev team ran an "internal, low-key" proof of concept and were won over. "We really liked how easy it was to be able to pass containers around to each other and have them up and running in no time, exactly the way it was running on my machine," says Sheriff. "Because that is always the biggest gripe that Ops has with developers, right? 'It worked on my machine!' But then we started getting to the point of, 'How do we make sure that these things stay up and running?'"

+ +

That led the team on a quest to find the right orchestration system for the company's needs. Sheriff says the first few options they tried were either too heavy or "didn't feel quite right." In late summer 2015, they discovered the just-released Kubernetes, which Sheriff immediately liked for its ease of use. "We did another proof of concept," he says, "and Kubernetes won because of the fact that the community backing was there, built on top of what Google had already done."

+ +

But before they could go with Kubernetes, NBC, GolfNow's parent company, also asked them to comparison shop with another company. Sheriff and his team liked the competing company's platform user interface, but didn't like that its platform would not allow containers to run natively on Docker. With no clear decision in sight, Sheriff's VP at GolfNow, Steve McElwee, set up a three-month trial during which a GolfNow team (consisting of Sheriff and Josh, who's now Lead Architect, Open Platforms) would build out a Kubernetes environment, and a large NBC team would build out one with the other company's platform.

+ +

"We spun up the cluster and we tried to get everything to run the way we wanted it to run," Sheriff says. "The biggest thing that we took away from it is that not only did we want our applications to run within Kubernetes and Docker, we also wanted our databases to run there. We literally wanted our entire infrastructure to run within Kubernetes."

+ +

At the time there was nothing in the community to help them get Kafka and MongoDB clusters running within a Kubernetes and Docker environment, so Sheriff and Josh figured it out on their own, taking a full month to get it right. "Everything started rolling from there," Sheriff says. "We were able to get all our applications connected, and we finished our side of the proof of concept a month in advance. My VP was like, 'Alright, it's over. Kubernetes wins.'"

+ +

The next step, beginning in January 2016, was getting everything working in production. The team focused first on one application that was already written in Node.js and MongoDB. A booking engine for golf courses and B2B marketing platform, the application was already going in the microservice direction but wasn't quite finished yet. At the time, it was running on Heroku, Compose, and other third-party services—resulting in a large monthly bill.

+ +{{< case-studies/quote image="/images/case-studies/golfnow/banner4.jpg" >}} +"'The time I spent actually moving the applications was under 30 seconds! We can move data centers in just incredible amounts of time. If you haven't come from the Kubernetes world you wouldn't believe me.' Sheriff puts it in these terms: 'Before Kubernetes I wasn't sleeping at night, literally. I was woken up all the time, because things were down. After Kubernetes, I've been sleeping at night.'" +{{< /case-studies/quote >}} + +

"The goal was to take all of that out and put it within this new platform we've created with Kubernetes on Google Compute Engine (GCE)," says Sheriff. "So we ended up building piece by piece, in parallel, what was out in Heroku and Compose, in our Kubernetes cluster. Then, literally, just switched configs in the background. So in Heroku we had the app running hitting a Compose database. We'd take the config, change it and make it hit the database that was running in our cluster."

+ +
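That background config switch is easy to picture in Kubernetes terms. Below is a minimal, purely illustrative sketch, assuming the booking engine reads its database endpoint from a hypothetical `MONGO_URL` key in a ConfigMap; the names, hosts, and credentials are invented and are not GolfNow's actual setup:

```yaml
# Hypothetical sketch of the config flip described above. Only this value
# changes; the application code and its deployment stay the same.
apiVersion: v1
kind: ConfigMap
metadata:
  name: booking-engine-config
data:
  # Before the cutover, the app pointed at the third-party Compose database:
  # MONGO_URL: "mongodb://appuser:secret@db.compose.example:10123/bookings"
  # After the cutover, the same key points at MongoDB running in the cluster
  # (in a real setup the credentials would live in a Secret, not here):
  MONGO_URL: "mongodb://mongo.default.svc.cluster.local:27017/bookings"
```

Swapping the value and restarting the app's pods is the whole "switch configs in the background" step, which is why the migration could proceed piecemeal and without downtime.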

Using this procedure, they were able to migrate piecemeal, without any downtime. The first migration was done during off hours, but to test the limits, the team migrated the second database in the middle of the day, when lots of users were running the application. "We did it," Sheriff says, "and again it was successful. Nobody noticed."

+ +

After three weeks of monitoring to make sure everything was running stable, the team migrated the rest of the application into their Kubernetes cluster. And the impact was immediate: On top of cutting monthly costs by a large percentage, says Sheriff, "Running at the same capacity and during our peak time, we were able to horizontally grow. Since we were using our VMs more efficiently with containers, we didn't have to pay extra money at all."

+ +

Not only were they saving money, but they were also saving time. "I had a meeting this morning about migrating some applications from one cluster to another," says Josh. "I spent about 2 hours explaining the process. The time I spent actually moving the applications was under 30 seconds! We can move data centers in just incredible amounts of time. If you haven't come from the Kubernetes world you wouldn't believe me." Sheriff puts it in these terms: "Before Kubernetes I wasn't sleeping at night, literally. I was woken up all the time, because things were down. After Kubernetes, I've been sleeping at night."

+ +

A small percentage of the applications on GolfNow have been migrated over to the Kubernetes environment. "Our Core Team is rewriting a lot of the .NET applications into .NET Core [which is compatible with Linux and Docker] so that we can run them within containers," says Sheriff.

+ +

Looking ahead, Sheriff and his team want to spend 2017 continuing to build a whole platform around Kubernetes with Drone, an open-source continuous delivery platform, to make it more developer-centric. "Now they're able to manage configuration, they're able to manage their deployments and things like that, making all these subteams that are now creating all these microservices, be self sufficient," he says. "So it can pull us away from applications and allow us to just make sure the cluster is running and healthy, and then actually migrate that over to our Ops team."

+ +{{< case-studies/quote >}} +"Having gone from complete newbies to production-ready in three months, the GolfNow team is eager to encourage other companies to follow their lead. 'This is The Six Million Dollar Man of the cloud right now,' adds Josh. 'Just try it out, watch it happen. I feel like the proof is in the pudding when you look at these kinds of application stacks. They're faster, they're more resilient.'" +{{< /case-studies/quote >}} + +

And long-term, Sheriff has an even bigger goal for getting more people into the Kubernetes fold. "We're actually trying to make this platform generic enough so that any of our sister companies can use it if they wish," he says. "Most definitely I think it can be used as a model. I think the way we migrated into it, the way we built it out, are all ways that I think other companies can learn from, and should not be afraid of."

+ +

The GolfNow team is also giving back to the Kubernetes community by open-sourcing a bot framework that Josh built. "We noticed that the dashboard user interface is actually moving a lot faster than when we started," says Sheriff. "However we realized what we needed was something that's more of a bot that really helps us administer Kubernetes as a whole through Slack." Josh explains: "With the Kubernetes-Slack integration, you can essentially hook into a cluster, issue commands, and edit configurations. We've tried to simplify the security configuration as much as possible. We hope this will be our major thank you to Kubernetes, for everything you've given us."

+ +

Having gone from complete newbies to production-ready in three months, the GolfNow team is eager to encourage other companies to follow their lead. The lessons they've learned: "You've got to have buy-in from your boss," says Sheriff. "Another big deal is having two to three people dedicated to this type of endeavor. You can't have people who are half in, half out." And if you don't have buy-in from the get-go, proving it out will get you there.

+ +

"This is The Six Million Dollar Man of the cloud right now," adds Josh. "Just try it out, watch it happen. I feel like the proof is in the pudding when you look at these kinds of application stacks. They're faster, they're more resilient."

\ No newline at end of file diff --git a/content/bn/case-studies/haufegroup/haufegroup_featured.png b/content/bn/case-studies/haufegroup/haufegroup_featured.png new file mode 100644 index 0000000000000..08b09ec9db8b7 Binary files /dev/null and b/content/bn/case-studies/haufegroup/haufegroup_featured.png differ diff --git a/content/bn/case-studies/haufegroup/haufegroup_featured.svg b/content/bn/case-studies/haufegroup/haufegroup_featured.svg new file mode 100644 index 0000000000000..a61b577ab884e --- /dev/null +++ b/content/bn/case-studies/haufegroup/haufegroup_featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/haufegroup/haufegroup_logo.png b/content/bn/case-studies/haufegroup/haufegroup_logo.png new file mode 100644 index 0000000000000..5d8245b0f6d18 Binary files /dev/null and b/content/bn/case-studies/haufegroup/haufegroup_logo.png differ diff --git a/content/bn/case-studies/haufegroup/index.html b/content/bn/case-studies/haufegroup/index.html new file mode 100644 index 0000000000000..3867b727a31b3 --- /dev/null +++ b/content/bn/case-studies/haufegroup/index.html @@ -0,0 +1,85 @@ +--- +title: Haufe Group Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/haufegroup/banner1.jpg +heading_title_logo: /images/haufegroup_logo.png +subheading: > + Paving the Way for Cloud Native for Midsize Companies +case_study_details: + - Company: Haufe Group + - Location: Freiburg, Germany + - Industry: Media and Software +--- + +

Challenge

+ +

Founded in 1930 as a traditional publisher, Haufe Group has grown into a media and software company with 95 percent of its sales from digital products. Over the years, the company has gone from having "hardware in the basement" to outsourcing its infrastructure operations and IT. More recently, the development of new products, from Internet portals for tax experts to personnel training software, has created demands for increased speed, reliability and scalability. "We need to be able to move faster," says Solution Architect Martin Danielsson. "Adapting workloads is something that we really want to be able to do."

+ +

Solution

+

Haufe Group began its cloud-native journey when Microsoft Azure became available in Europe; the company needed cloud deployments for its desktop apps with bandwidth-heavy download services. "After that, it has been different projects trying out different things," says Danielsson. Two years ago, Holger Reinhardt joined Haufe Group as CTO and rapidly re-oriented the traditional host provider-based approach toward a cloud and API-first strategy.

+ +

A core part of this strategy was a strong mandate to embrace infrastructure-as-code across the entire software deployment lifecycle via Docker. The company is now getting ready to go live with two services in production using Kubernetes orchestration on Microsoft Azure and Amazon Web Services. The team is also working on breaking up one of their core Java Enterprise desktop products into microservices to allow for better evolvability and dynamic scaling in the cloud.

+ +

Impact

+

With the ability to adapt workloads, Danielsson says, teams "will be able to scale down to around half the capacity at night, saving 30 percent of the hardware cost." Plus, shorter release times have had a major impact. "Before, we had to announce at least a week in advance when we wanted to do a release because there was a huge checklist of things that you had to do," he says. "By going cloud native, we have the infrastructure in place to be able to automate all of these things. Now we can get a new release done in half an hour instead of days."

+ +{{< case-studies/quote author="Martin Danielsson, Solution Architect, Haufe Group" >}} +"Over the next couple of years, people won't even think that much about it when they want to run containers. Kubernetes is going to be the go-to solution." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +More than 80 years ago, Haufe Group was founded as a traditional publishing company, printing books and commentary on paper. +{{< /case-studies/lead >}} + +

By the 1990s, though, the company's leaders recognized that the future was digital, and to their credit, were able to transform Haufe Group into a media and software business that now gets 95 percent of its sales from digital products. "Among the German companies doing this, we were one of the early adopters," says Martin Danielsson, Solution Architect for Haufe Group.

+ +

And now they're leading the way for midsize companies embracing cloud-native technology like Kubernetes. "The really big companies like Ticketmaster and Google get it right, and the startups get it right because they're faster," says Danielsson. "We're in this big lump of companies in the middle with a lot of legacy, a lot of structure, a lot of culture that does not easily fit the cloud technologies. We're just 1,500 people, but we have hundreds of customer-facing applications. So we're doing things that will be relevant for many companies of our size or even smaller."

+ +

Many of those legacy challenges stemmed from simply following the technology trends of the times. "We used to do full DevOps," he says. In the 1990s and 2000s, "that meant that you had your hardware in the basement. And then 10 years ago, the hype of the moment was to outsource application operations, outsource everything, and strip down your IT department to take away the distraction of all these hardware things. That's not our area of expertise. We didn't want to be an infrastructure provider. And now comes the backlash of that."

+ +

Haufe Group began feeling the pain as they were developing more new products, from Internet portals for tax experts to personnel training software, that have created demands for increased speed, reliability and scalability. "Right now, we have this break in workflows, where we go from writing concepts to developing, handing it over to production and then handing that over to your host provider," he says. "And then when things go bad we have no clue what went wrong. We definitely want to take back control, and we want to move a lot faster. Adapting workloads is something that we really want to be able to do."

+ +

Those needs led them to explore cloud-native technology. Their first foray into the cloud was doing deployments in Microsoft Azure, once it became available in Europe, for desktop products that had built-in download services. Hosting expenses for such bandwidth-heavy services were too high, so the company turned to the cloud. "After that, it has been different projects trying out different things," says Danielsson.

+ +{{< case-studies/quote image="/images/case-studies/haufegroup/banner3.jpg" >}} +"We have been doing containers for the last two years, and we really got the hang of how they work," says Danielsson. "But it was always for development and test, never in production, because we didn't fully understand how that would work. And to me, Kubernetes was definitely the technology that solved that." +{{< /case-studies/quote >}} + +

Two years ago, Holger Reinhardt joined Haufe Group as CTO and rapidly re-oriented the traditional host provider-based approach toward a cloud and API-first strategy. A core part of this strategy was a strong mandate to embrace infrastructure-as-code across the entire software deployment lifecycle via Docker. Some experiments went further than others; German regulations about sensitive data proved to be a roadblock in moving some workloads to Azure and Amazon Web Services. "Due to our history, Germany is really strict with things like personally identifiable data," Danielsson says.

+ +

These experiments took on new life with the arrival of the Azure Sovereign Cloud for Germany (an Azure clone run by the German T-Systems provider). With the availability of Azure.de—which conforms to Germany's privacy regulations—teams started to seriously consider deploying production loads in Docker into the cloud. "We have been doing containers for the last two years, and we really got the hang of how they work," says Danielsson. "But it was always for development and test, never in production, because we didn't fully understand how that would work. And to me, Kubernetes was definitely the technology that solved that."

+ +

In parallel, Danielsson had built an API management system with the aim of supporting CI/CD scenarios, aspects of which were missing in off-the-shelf API management products. With a foundation based on Mashape's Kong gateway, it is open-sourced as wicked.haufe.io. He put wicked.haufe.io to use with his product team.

Otherwise, Danielsson says his philosophy was "don't try to reinvent the wheel all the time. Go for what's there and 99 percent of the time it will be enough. And if you think you really need something custom or additional, think perhaps once or twice again. One of the things that I find so amazing with this cloud-native framework is that everything ties in."

+ +

Currently, Haufe Group is working on two projects using Kubernetes in production. One is a new mobile application for researching legislation and tax laws. "We needed a way to take out functionality from a legacy core and put an application on top of that with an API gateway—a lot of moving parts that screams containers," says Danielsson. So the team moved the build pipeline away from "deploying to some old, huge machine that you could deploy anything to" and onto a Kubernetes cluster where there would be automatic CI/CD "with feature branches and all these things that were a bit tedious in the past."

+ +{{< case-studies/quote image="/images/case-studies/haufegroup/banner4.jpg" >}} +"Before, we had to announce at least a week in advance when we wanted to do a release because there was a huge checklist of things that you had to do," says Danielsson. "By going cloud native, we have the infrastructure in place to be able to automate all of these things. Now we can get a new release done in half an hour instead of days." +{{< /case-studies/quote >}} + +

It was a proof of concept effort, and the proof was in the pudding. "Everyone was really impressed at what we accomplished in a week," says Danielsson. "We did these kinds of integrations just to make sure that we got a handle on how Kubernetes works. If you can create optimism and buzz around something, it's half won. And if the developers and project managers know this is working, you're more or less done." Adds Reinhardt: "You need to create some very visible, quick wins in order to overcome the status quo."

+ +

The impact on the speed of deployment was clear: "Before, we had to announce at least a week in advance when we wanted to do a release because there was a huge checklist of things that you had to do," says Danielsson. "By going cloud native, we have the infrastructure in place to be able to automate all of these things. Now we can get a new release done in half an hour instead of days."

+ +

The potential impact on cost was another bonus. "Hosting applications is quite expensive, so moving to the cloud is something that we really want to be able to do," says Danielsson. With the ability to adapt workloads, teams "will be able to scale down to around half the capacity at night, saving 30 percent of the hardware cost."

+ +

Just as importantly, Danielsson says, there's added flexibility: "When we try to move or rework applications that are really crucial, it's often tricky to validate whether the path we want to take is going to work out well. In order to validate that, we would need to reproduce the environment and really do testing, and that's prohibitively expensive and simply not doable with traditional host providers. Cloud native gives us the ability to do risky changes and validate them in a cost-effective way."

+ +

As word of the two successful test projects spread throughout the company, interest in Kubernetes has grown. "We want to be able to support our developers in running Kubernetes clusters but we're not there yet, so we allow them to do it as long as they're aware that they are on their own," says Danielsson. "So that's why we are also looking at things like [the managed Kubernetes platform] CoreOS Tectonic, Azure Container Service, ECS, etc. These kinds of services will be a lot more relevant to midsize companies that want to leverage cloud native but don't have the IT departments or the structure around that."

+ +

In the next year and a half, Danielsson says the company will be working on moving one of their legacy desktop products, a web app for researching legislation and tax laws originally built in Java Enterprise, onto cloud-native technology. "We're doing a microservice split out right now so that we can independently deploy the different parts," he says. The main website, which provides free content for customers, is also moving to cloud native.

+ +{{< case-studies/quote >}} +"the execution of a strategy requires alignment of culture, structure and technology. Only if those three dimensions are aligned can you successfully execute a transformation into microservices and cloud-native architectures. And it is only then that the Cloud will pay the dividends in much faster speeds in product innovation and much lower operational costs." +{{< /case-studies/quote >}} + +

But with these goals, Danielsson believes there are bigger cultural challenges that need to be constantly addressed. The move to new technology, not to mention a shift toward DevOps, means a lot of change for employees. "The roles were rather fixed in the past," he says. "You had developers, you had project leads, you had testers. And now you get into these really, really important things like test automation. Testers aren't actually doing click testing anymore, and they have to write automated testing. And if you really want to go full-blown CI/CD, all these little pieces have to work together so that you get the confidence to do a check in, and know this check in is going to land in production, because if I messed up, some test is going to break. This is a really powerful thing because whatever you do, whenever you merge something into the trunk or to the master, this is going live. And that's where you either get the people or they run away screaming." Danielsson understands that it may take some people much longer to get used to the new ways.

+ +

"Culture is nothing that you can force on people," he says. "You have to live it for yourself. You have to evangelize. You have to show the advantages time and time again: This is how you can do it, this is what you get from it." To that end, his team has scheduled daylong workshops for the staff, bringing in outside experts to talk about everything from API to Devops to cloud.

+ +

For every person who runs away screaming, many others get drawn in. "Get that foot in the door and make them really interested in this stuff," says Danielsson. "Usually it catches on. We have people you never would have expected chanting, 'Docker Docker Docker' now. It's cool to see them realize that there is a world outside of their Python libraries. It's awesome to see them really work with Kubernetes."

+ +

Ultimately, Reinhardt says, "the execution of a strategy requires alignment of culture, structure and technology. Only if those three dimensions are aligned can you successfully execute a transformation into microservices and cloud-native architectures. And it is only then that the Cloud will pay the dividends in much faster speeds in product innovation and much lower operational costs."

\ No newline at end of file diff --git a/content/bn/case-studies/huawei/huawei_featured.png b/content/bn/case-studies/huawei/huawei_featured.png new file mode 100644 index 0000000000000..22071b4691e38 Binary files /dev/null and b/content/bn/case-studies/huawei/huawei_featured.png differ diff --git a/content/bn/case-studies/huawei/huawei_featured.svg b/content/bn/case-studies/huawei/huawei_featured.svg new file mode 100644 index 0000000000000..a8a8f22c8f9a1 --- /dev/null +++ b/content/bn/case-studies/huawei/huawei_featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/huawei/huawei_logo.png b/content/bn/case-studies/huawei/huawei_logo.png new file mode 100644 index 0000000000000..94361a27eb5af Binary files /dev/null and b/content/bn/case-studies/huawei/huawei_logo.png differ diff --git a/content/bn/case-studies/huawei/index.html b/content/bn/case-studies/huawei/index.html new file mode 100644 index 0000000000000..a6caa3eee9351 --- /dev/null +++ b/content/bn/case-studies/huawei/index.html @@ -0,0 +1,73 @@ +--- +title: Huawei Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/huawei/banner1.jpg +heading_title_logo: /images/huawei_logo.png +subheading: > + Embracing Cloud Native as a User – and a Vendor +case_study_details: + - Company: Huawei + - Location: Shenzhen, China + - Industry: Telecommunications Equipment +--- + +

Challenge

+ +

A multinational company that's the largest telecommunications equipment manufacturer in the world, Huawei has more than 180,000 employees. In order to support its fast business development around the globe, Huawei has eight data centers for its internal I.T. department, which have been running 800+ applications in 100K+ VMs to serve these 180,000 users. With the rapid increase of new applications, the cost and efficiency of management and deployment of VM-based apps all became critical challenges for business agility. "It's very much a distributed system, so we found that managing all of the tasks in a more consistent way is always a challenge," says Peixin Hou, the company's Chief Software Architect and Community Director for Open Source. "We wanted to move into a more agile and decent practice."

+ +

Solution

+ +

After deciding to use container technology, Huawei began moving the internal I.T. department's applications to run on Kubernetes. So far, about 30 percent of these applications have been transferred to cloud native.

+ +

Impact

+ +

"By the end of 2016, Huawei's internal I.T. department managed more than 4,000 nodes with tens of thousands containers using a Kubernetes-based Platform as a Service (PaaS) solution," says Hou. "The global deployment cycles decreased from a week to minutes, and the efficiency of application delivery has been improved 10 fold." For the bottom line, he says, "We also see significant operating expense spending cut, in some circumstances 20-30 percent, which we think is very helpful for our business." Given the results Huawei has had internally – and the demand it is seeing externally – the company has also built the technologies into FusionStage™, the PaaS solution it offers its customers.

+ +{{< case-studies/quote author="Peixin Hou, chief software architect and community director for open source" >}} +"If you're a vendor, in order to convince your customer, you should use it yourself. Luckily because Huawei has a lot of employees, we can demonstrate the scale of cloud we can build using this technology." +{{< /case-studies/quote >}} + +

Huawei's Kubernetes journey began with one developer. Over two years ago, one of the engineers employed by the networking and telecommunications giant became interested in Kubernetes, the technology for managing application containers across clusters of hosts, and started contributing to its open source community. As the technology developed and the community grew, he kept telling his managers about it.

+ +

And as fate would have it, at the same time, Huawei was looking for a better orchestration system for its internal enterprise I.T. department, which supports every business process flow. "We have more than 180,000 employees worldwide, and a complicated internal procedure, so probably every week this department needs to develop some new applications," says Peixin Hou, Huawei's Chief Software Architect and Community Director for Open Source. "Very often our I.T. departments need to launch tens of thousands of containers, with tasks running across thousands of nodes across the world. It's very much a distributed system, so we found that managing all of the tasks in a more consistent way is always a challenge."

+ +

In the past, Huawei had used virtual machines to encapsulate applications, but "every time when we start a VM," Hou says, "whether because it's a new service or because it was a service that was shut down because of some abnormal node functioning, it takes a lot of time." Huawei turned to containerization, so the timing was right to try Kubernetes. It took a year to adopt that engineer's suggestion – the process "is not overnight," says Hou – but once in use, he says, "Kubernetes basically solved most of our problems. Before, the time of deployment took about a week, now it only takes minutes. The developers are happy. That department is also quite happy."

+ +

Hou sees great benefits to the company that come with using this technology: "Kubernetes brings agility, scale-out capability, and DevOps practice to the cloud-based applications," he says. "It provides us with the ability to customize the scheduling architecture, which makes possible the affinity between container tasks that gives greater efficiency. It supports multiple container formats. It has extensive support for various container networking solutions and container storage."

+ +{{< case-studies/quote image="/images/case-studies/huawei/banner3.jpg" >}} +"Kubernetes basically solved most of our problems. Before, the time of deployment took about a week, now it only takes minutes. The developers are happy. That department is also quite happy." +{{< /case-studies/quote >}} + +

And not least of all, there's an impact on the bottom line. Says Hou: "We also see significant operating expense spending cut, in some circumstances 20-30 percent, which is very helpful for our business."

+ +

Pleased with those initial results, and seeing a demand for cloud native technologies from its customers, Huawei doubled down on Kubernetes. In the spring of 2016, the company became not only a user but also a vendor.

+ +

"We built the Kubernetes technologies into our solutions," says Hou, referring to Huawei's FusionStage™ PaaS offering. "Our customers, from very big telecommunications operators to banks, love the idea of cloud native. They like Kubernetes technology. But they need to spend a lot of time to decompose their applications to turn them into microservice architecture, and as a solution provider, we help them. We've started to work with some Chinese banks, and we see a lot of interest from our customers like China Mobile and Deutsche Telekom."

+ +

"If you're just a user, you're just a user," adds Hou. "But if you're a vendor, in order to even convince your customers, you should use it yourself. Luckily because Huawei has a lot of employees, we can demonstrate the scale of cloud we can build using this technology. We provide customer wisdom." While Huawei has its own private cloud, many of its customers run cross-cloud applications using Huawei's solutions. It's a big selling point that most of the public cloud providers now support Kubernetes. "This makes the cross-cloud transition much easier than with other solutions," says Hou.

+ +{{< case-studies/quote image="/images/case-studies/huawei/banner4.jpg" >}} +"Our customers, from very big telecommunications operators to banks, love the idea of cloud native. They like Kubernetes technology. But they need to spend a lot of time to decompose their applications to turn them into microservice architecture, and as a solution provider, we help them." +{{< /case-studies/quote >}} + +

Within Huawei itself, once his team completes the transition of the internal business procedure department to Kubernetes, Hou is looking to convince more departments to move over to the cloud native development cycle and practice. "We have a lot of software developers, so we will provide them with our platform as a service solution, our own product," he says. "We would like to see significant cuts in their iteration cycle."

+ +

Having overseen the initial move to Kubernetes at Huawei, Hou has advice for other companies considering the technology: "When you start to design the architecture of your application, think about cloud native, think about microservice architecture from the beginning," he says. "I think you will benefit from that."

+ +

But if you already have legacy applications, "start from some microservice-friendly part of those applications first, parts that are relatively easy to be decomposed into simpler pieces and are relatively lightweight," Hou says. "Don't think from day one that within how many days I want to move the whole architecture, or move everything into microservices. Don't put that as a kind of target. You should do it in a gradual manner. And I would say for legacy applications, not every piece would be suitable for microservice architecture. No need to force it."

+ +

After all, as enthusiastic as Hou is about Kubernetes at Huawei, he estimates that "in the next 10 years, maybe 80 percent of the workload can be distributed, can be run on the cloud native environments. There's still 20 percent that's not, but it's fine. If we can make 80 percent of our workload really be cloud native, to have agility, it's a much better world at the end of the day."

+ +{{< case-studies/quote >}} +"In the next 10 years, maybe 80 percent of the workload can be distributed, can be run on the cloud native environments. There's still 20 percent that's not, but it's fine. If we can make 80 percent of our workload really be cloud native, to have agility, it's a much better world at the end of the day." +{{< /case-studies/quote >}} + +

In the nearer future, Hou is looking forward to new features that are being developed around Kubernetes, not least of all the ones that Huawei is contributing to. Huawei engineers have worked on the federation feature (which puts multiple Kubernetes clusters in a single framework to be managed seamlessly), scheduling, container networking and storage, and a just-announced technology called Container Ops, which is a DevOps pipeline engine. "This will put every DevOps job into a container," he explains. "And then this container mechanism is running using Kubernetes, but is also used to test Kubernetes. With that mechanism, we can make the containerized DevOps jobs be created, shared and managed much more easily than before."

+ +
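Hou doesn't spell out the Container Ops job format, but the underlying idea (each DevOps task packaged as a container and run to completion by Kubernetes) can be sketched with a stock Kubernetes Job. The image name and command below are hypothetical:

```yaml
# Illustrative only: a DevOps task (here, an integration-test run) packaged
# as a container image and executed to completion by Kubernetes as a Job.
apiVersion: batch/v1
kind: Job
metadata:
  name: run-integration-tests
spec:
  backoffLimit: 2               # retry a failed run up to two times
  template:
    spec:
      restartPolicy: Never      # Jobs require Never or OnFailure
      containers:
      - name: tests
        image: registry.example.com/ci/integration-tests:v1  # hypothetical image
        command: ["make", "test"]
```

Because the job is just a container image, it can be created, shared, and versioned like any other image, which is the property Hou highlights.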

Still, Hou sees this technology as only halfway to its full potential. First and foremost, he'd like to expand the scale it can orchestrate, which is important for supersized companies like Huawei – as well as some of its customers.

+ +

Hou proudly notes that two years after that first Huawei engineer became a contributor to and evangelist for Kubernetes, Huawei is now a top contributor to the community. "We've learned that the more you contribute to the community," he says, "the more you get back."

\ No newline at end of file diff --git a/content/bn/case-studies/ibm/ibm_featured_logo.png b/content/bn/case-studies/ibm/ibm_featured_logo.png new file mode 100644 index 0000000000000..adb07a8cdf588 Binary files /dev/null and b/content/bn/case-studies/ibm/ibm_featured_logo.png differ diff --git a/content/bn/case-studies/ibm/ibm_featured_logo.svg b/content/bn/case-studies/ibm/ibm_featured_logo.svg new file mode 100644 index 0000000000000..f79fd7847bf7c --- /dev/null +++ b/content/bn/case-studies/ibm/ibm_featured_logo.svg @@ -0,0 +1 @@ +ibm_featured_logo \ No newline at end of file diff --git a/content/bn/case-studies/ibm/index.html b/content/bn/case-studies/ibm/index.html new file mode 100644 index 0000000000000..e93536d159164 --- /dev/null +++ b/content/bn/case-studies/ibm/index.html @@ -0,0 +1,80 @@ +--- +title: IBM Case Study +linkTitle: IBM +case_study_styles: true +cid: caseStudies +logo: ibm_featured_logo.svg +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/ibm/banner1.jpg +heading_title_logo: /images/ibm_logo.png +subheading: > + Building an Image Trust Service on Kubernetes with Notary and TUF +case_study_details: + - Company: IBM + - Location: Armonk, New York + - Industry: Cloud Computing +--- + +

Challenge

+ +

IBM Cloud offers public, private, and hybrid cloud functionality across a diverse set of runtimes from its OpenWhisk-based function as a service (FaaS) offering, managed Kubernetes and containers, to Cloud Foundry platform as a service (PaaS). These runtimes are combined with the power of the company's enterprise technologies, such as MQ and DB2, its modern artificial intelligence (AI) Watson, and data analytics services. Users of IBM Cloud can exploit capabilities from more than 170 different cloud native services in its catalog, including capabilities such as IBM's Weather Company API and data services. In the later part of 2017, the IBM Cloud Container Registry team wanted to build out an image trust service.

+ +

Solution

+ +

The work on this new service culminated with its public availability in the IBM Cloud in February 2018. The image trust service, called Portieris, is fully based on the Cloud Native Computing Foundation (CNCF) open source project Notary, according to Michael Hough, a software developer with the IBM Cloud Container Registry team. Portieris is a Kubernetes admission controller for enforcing content trust. Users can create image security policies for each Kubernetes namespace, or at the cluster level, and enforce different levels of trust for different images. Portieris is a key part of IBM's trust story, since it makes it possible for users to consume the company's Notary offering from within their IKS clusters. In this offering, the Notary server runs in IBM's cloud, and Portieris runs inside the IKS cluster. This lets an IKS cluster verify that the images it loads containers from contain exactly what users expect them to, with Portieris applying that verification.

+ +
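As a rough illustration, here is a minimal namespace-scoped Portieris policy that requires Notary-signed images from one repository. The repository pattern is invented, and the API group and fields follow the early `securityenforcement.admission.cloud.ibm.com/v1beta1` form, which has changed across Portieris releases; treat this as a sketch, not a reference:

```yaml
# Hedged sketch of a namespace-level Portieris image security policy.
# Images matching the repository pattern must pass Notary content trust;
# images not matched by any policy in this namespace are denied at admission.
apiVersion: securityenforcement.admission.cloud.ibm.com/v1beta1
kind: ImagePolicy
metadata:
  name: signed-images-only          # hypothetical name
spec:
  repositories:
    - name: "icr.io/my-team/*"      # hypothetical repository pattern
      policy:
        trust:
          enabled: true             # enforce Notary signature verification
```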

Impact

+ +

IBM's intention in offering a managed Kubernetes container service and image registry is to provide a fully secure end-to-end platform for its enterprise customers. "Image signing is one key part of that offering, and our container registry team saw Notary as the de facto way to implement that capability in the current Docker and container ecosystem," Hough says. The company had not been offering image signing before, and Notary is the tool it used to implement that capability. "We had a multi-tenant Docker Registry with private image hosting," Hough says. "The Docker Registry uses hashes to ensure that image content is correct, and data is encrypted both in flight and at rest. But it does not provide any guarantees of who pushed an image. We used Notary to enable users to sign images in their private registry namespaces if they so choose."

+ +{{< case-studies/quote author="Michael Hough, a software developer with the IBM Container Registry team" >}} +"We see CNCF as a safe haven for cloud native open source, providing stability, longevity, and expected maintenance for member projects—no matter the originating vendor or project." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Docker had already created the Notary project as an implementation of The Update Framework (TUF), and this implementation of TUF provided the capabilities for Docker Content Trust. +{{< /case-studies/lead >}} + +

"After contribution to CNCF of both TUF and Notary, we perceived that it was becoming the de facto standard for image signing in the container ecosystem", says Michael Hough, a software developer with the IBM Cloud Container Registry team.

+ +

The key reason for selecting Notary was that it was already compatible with the existing authentication stack IBM's container registry was using. So was the design of TUF, which does not require the registry team to have to enter the business of key management. Both of these were "attractive design decisions that confirmed our choice of Notary," he says.

+ +

The introduction of Notary to implement image signing capability in IBM Cloud encourages increased security across IBM's cloud platform, "where we expect it will include both the signing of official IBM images as well as expected use by security-conscious enterprise customers," Hough says. "When combined with security policy implementations, we expect an increased use of deployment policies in CI/CD pipelines that allow for fine-grained control of service deployment based on image signers."

+ +

The availability of image signing "is a huge benefit to security-conscious customers who require this level of image provenance and security," Hough says. "With our IBM Cloud Kubernetes as-a-service offering and the admission controller we have made available, it allows both IBM services as well as customers of the IBM public cloud to use security policies to control service deployment."

+ +{{< case-studies/quote + image="/images/case-studies/ibm/banner3.jpg" + author="Michael Hough, a software developer with the IBM Cloud Container Registry team" +>}} +"Image signing is one key part of our Kubernetes container service offering, and our container registry team saw Notary as the de facto way to implement that capability in the current Docker and container ecosystem" +{{< /case-studies/quote >}} + +

Now that the Notary-implemented service is generally available in IBM's public cloud as a component of its existing IBM Cloud Container Registry, it is deployed as a highly available service across five IBM Cloud regions. This high-availability deployment has three instances across two zones in each of the five regions, load balanced with failover support. "We have also deployed it with end-to-end TLS support through to our back-end IBM Cloudant persistence storage service," Hough says.

+ +
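The case study doesn't include the manifests behind that topology, but the per-region shape of three instances spread across two zones maps naturally onto standard Kubernetes scheduling. Here is a minimal sketch using the current `topologySpreadConstraints` API (the names and image are hypothetical, and this API postdates the deployment described):

```yaml
# Illustrative per-region layout: three replicas that the scheduler must
# spread across availability zones with at most one replica of imbalance.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: notary-server               # hypothetical name
spec:
  replicas: 3
  selector:
    matchLabels:
      app: notary-server
  template:
    metadata:
      labels:
        app: notary-server
    spec:
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: DoNotSchedule
        labelSelector:
          matchLabels:
            app: notary-server
      containers:
      - name: notary
        image: registry.example.com/notary-server:latest  # hypothetical image
```

With two zones and three replicas, the scheduler places two pods in one zone and one in the other, so losing an entire zone still leaves the service running.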

The IBM team has created and open sourced a Kubernetes admission controller called Portieris, which uses Notary signing information combined with customer-defined security policies to control image deployment into their cluster. "We are hoping to drive adoption of Portieris through its use of our Notary offering," Hough says.

+ +

IBM has been a key player in the creation and support of open source foundations, including CNCF. Todd Moore, IBM's vice president of Open Technology, is the current CNCF governing board chair and a number of IBMers are active across many of the CNCF member projects.

+ +{{< case-studies/quote + image="/images/case-studies/ibm/banner4.jpg" + author="Michael Hough, a software developer with the IBM Cloud Container Registry team" +>}} +"With our IBM Cloud Kubernetes as-a-service offering and the admission controller we have made available, it allows both IBM services as well as customers of the IBM public cloud to use security policies to control service deployment." +{{< /case-studies/quote >}} + +

"Given that, we see CNCF as a safe haven for cloud native open source, providing stability, longevity, and expected maintenance for member projects—no matter the originating vendor or project," Hough says. Because the entire cloud native world is a fast-moving area with many competing vendors and solutions, "we see the CNCF model as an arbiter of openness and fair play across the ecosystem," he says.

+ +

With both TUF and Notary as part of CNCF, IBM expects there to be standardization around these capabilities beyond just de facto standards for signing and provenance. IBM is determined not simply to consume Notary but also to contribute to the open source project where applicable. "IBMers have contributed a CouchDB backend to support our use of IBM Cloudant as the persistent store; and are working on generalization of the pkcs11 provider, allowing support of other security hardware devices beyond YubiKey," Hough says.

+ +{{< case-studies/quote author="Michael Hough, a software developer with the IBM Cloud Container Registry team" >}} +"There are new projects addressing these challenges, including within CNCF. We will definitely be following these advancements with interest. We found the Notary community to be an active and friendly community open to changes, such as our addition of a CouchDB backend for persistent storage." +{{< /case-studies/quote >}} + +

The company has used other CNCF projects, including containerd, Envoy, Prometheus, gRPC, and CNI, and is looking into SPIFFE and SPIRE as well for potential future use.

+ +

What advice does Hough have for other companies that are looking to deploy Notary or a cloud native infrastructure?

+ +

"While this is true for many areas of cloud native infrastructure software, we found that a high-availability, multi-region deployment of Notary requires a solid implementation to handle certificate management and rotation," he says. "There are new projects addressing these challenges, including within CNCF. We will definitely be following these advancements with interest. We found the Notary community to be an active and friendly community open to changes, such as our addition of a CouchDB backend for persistent storage."

\ No newline at end of file diff --git a/content/bn/case-studies/ing/index.html b/content/bn/case-studies/ing/index.html new file mode 100644 index 0000000000000..037ba9775d90d --- /dev/null +++ b/content/bn/case-studies/ing/index.html @@ -0,0 +1,78 @@ +--- +title: ING Case Study +linkTitle: ING +case_study_styles: true +cid: caseStudies +weight: 50 +featured: true +quote: > + The big cloud native promise to our business is the ability to go from idea to production within 48 hours. We are some years away from this, but that's quite feasible to us. + +new_case_study_styles: true +heading_background: /images/case-studies/ing/banner1.jpg +heading_title_logo: /images/ing_logo.png +subheading: > + Driving Banking Innovation with Cloud Native +case_study_details: + - Company: ING + - Location: Amsterdam, Netherlands + - Industry: Finance +--- + +

Challenge

+ +

After undergoing an agile transformation, ING realized it needed a standardized platform to support the work their developers were doing. "Our DevOps teams got empowered to be autonomous," says Infrastructure Architect Thijs Ebbers. "It has benefits; you get all kinds of ideas. But a lot of teams are going to devise the same wheel. Teams started tinkering with Docker, Docker Swarm, Kubernetes, Mesos. Well, it's not really useful for a company to have one hundred wheels, instead of one good wheel."

+ +

Solution

+ +

Using Kubernetes for container orchestration and Docker for containerization, the ING team began building an internal public cloud for its CI/CD pipeline and greenfield applications. The pipeline, which was built on Mesos Marathon, will be migrated onto Kubernetes. The bank-account management app Yolt, serving the U.K. market (and soon France and Italy), is already live, hosted on a Kubernetes framework. At least two greenfield projects currently on the Kubernetes framework will go into production later this year. By the end of 2018, the company plans to have converted a number of APIs used in the banking customer experience to cloud native APIs and host these on the Kubernetes-based platform.

+ +

Impact

+ +

"Cloud native technologies are helping our speed, from getting an application to test to acceptance to production," says Infrastructure Architect Onno Van der Voort. "If you walk around ING now, you see all these DevOps teams, doing stand-ups, demoing. They try to get new functionality out there really fast. We held a hackathon for one of our existing components and basically converted it to cloud native within 2.5 days, though of course the tail takes more time before code is fully production ready."

+ +{{< case-studies/quote author="Thijs Ebbers, Infrastructure Architect, ING">}} +"The big cloud native promise to our business is the ability to go from idea to production within 48 hours. We are some years away from this, but that's quite feasible to us." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +ING has long embraced innovation in banking, launching the internet-based ING Direct in 1997. +{{< /case-studies/lead >}} + +

In that same spirit, the company underwent an agile transformation a few years ago. "Our DevOps teams got empowered to be autonomous," says Infrastructure Architect Thijs Ebbers. "It has benefits; you get all kinds of ideas. But a lot of teams are going to devise the same wheel. Teams started tinkering with Docker, Docker Swarm, Kubernetes, Mesos. Well, it's not really useful for a company to have one hundred wheels, instead of one good wheel."

+ +

Looking to standardize the deployment process within the company's strict security guidelines, the team looked at several solutions and found that in the past year, "Kubernetes won the container management framework wars," says Ebbers. "We decided to standardize ING on a Kubernetes framework." Everything is run on premise due to banking regulations, he adds, but "we will be building an internal public cloud. We are trying to get on par with what public clouds are doing. That's one of the reasons we got Kubernetes."

+ +

They also embraced Docker to address a major pain point in ING's CI/CD pipeline. Before containerization, "Every development team had to order a VM, and it was quite a heavy delivery model for them," says Infrastructure Architect Onno Van der Voort. "Another use case for containerization is when the application travels through the pipeline, they fire up Docker containers to do test work against the applications and after they've done the work, the containers get killed again."

+ +{{< case-studies/quote + image="/images/case-studies/ing/banner3.jpg" + author="Thijs Ebbers, Infrastructure Architect, ING" +>}} +"We decided to standardize ING on a Kubernetes framework." Everything is run on premise due to banking regulations, he adds, but "we will be building an internal public cloud. We are trying to get on par with what public clouds are doing. That's one of the reasons we got Kubernetes." +{{< /case-studies/quote >}} + +

Because of industry regulations, applications are only allowed to go through the pipeline, where compliance is enforced, rather than be deployed directly into a container. "We have to run the complete platform of services we need, many routing from different places," says Van der Voort. "We need this Kubernetes framework for deploying the containers, with all those components, monitoring, logging. It's complex." For that reason, ING has chosen to start on the OpenShift Origin Kubernetes distribution.

+ +

Already, "cloud native technologies are helping our speed, from getting an application to test to acceptance to production," says Van der Voort. "If you walk around ING now, you see all these DevOps teams, doing stand-ups, demoing. They try to get new functionality out there really fast. We held a hackathon for one of our existing components and basically converted it to cloud native within 2.5 days, though of course the tail takes more time before code is fully production ready."

+ +

The pipeline, which has been built on Mesos Marathon, will be migrated onto Kubernetes. Some legacy applications are also being rewritten as cloud native in order to run on the framework. At least two smaller greenfield projects built on Kubernetes will go into production this year. By the end of 2018, the company plans to have converted a number of APIs used in the banking customer experience to cloud native APIs and host these on the Kubernetes-based platform.

+ +{{< case-studies/quote + image="/images/case-studies/ing/banner4.jpg" + author="Onno Van der Voort, Infrastructure Architect, ING" +>}} +"We have to run the complete platform of services we need, many routing from different places. We need this Kubernetes framework for deploying the containers, with all those components, monitoring, logging. It's complex." +{{< /case-studies/quote >}} + +

The team, however, doesn't see the bank's back-end systems going onto the Kubernetes platform. "Our philosophy is it only makes sense to move things to cloud if they are cloud native," says Van der Voort. "If you have traditional architecture, build traditional patterns, it doesn't hold any value to go to the cloud." Adds Cloud Platform Architect Alfonso Fernandez-Barandiaran: "ING has a strategy about where we will go, in order to improve our agility. So it's not about how cool this technology is, it's about finding the right technology and the right approach."

+ +

The Kubernetes framework will be hosting some greenfield projects that are high priority for ING: applications the company is developing in response to PSD2, the European Commission directive requiring more innovative online and mobile payments that went into effect at the beginning of 2018. For example, a bank-account management app called Yolt, serving the U.K. market (and soon France and Italy), was built on a Kubernetes platform and has gone into production. ING is also developing blockchain-enabled applications that will live on the Kubernetes platform. "We've been contacted by a lot of development teams that have ideas with what they want to do with containers," says Ebbers.

+ +{{< case-studies/quote author="Alfonso Fernandez-Barandiaran, Cloud Platform Architect, ING" >}} +Even with the particular requirements that come in banking, ING has managed to take a lead in technology and innovation. "Every time we have constraints, we look for maybe a better way that we can use this technology." +{{< /case-studies/quote >}} + +

Even with the particular requirements that come in banking, ING has managed to take a lead in technology and innovation. "Every time we have constraints, we look for maybe a better way that we can use this technology," says Fernandez-Barandiaran.

+ +

The results, after all, are worth the effort. "The big cloud native promise to our business is the ability to go from idea to production within 48 hours," says Ebbers. "That would require all these projects to be mature. We are some years away from this, but that's quite feasible to us."

diff --git a/content/bn/case-studies/ing/ing_featured_logo.png b/content/bn/case-studies/ing/ing_featured_logo.png new file mode 100644 index 0000000000000..f6d4489715aa4 Binary files /dev/null and b/content/bn/case-studies/ing/ing_featured_logo.png differ diff --git a/content/bn/case-studies/ing/ing_featured_logo.svg b/content/bn/case-studies/ing/ing_featured_logo.svg new file mode 100644 index 0000000000000..5a2df497c7bb0 --- /dev/null +++ b/content/bn/case-studies/ing/ing_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/jd-com/index.html b/content/bn/case-studies/jd-com/index.html new file mode 100644 index 0000000000000..447beb06538fb --- /dev/null +++ b/content/bn/case-studies/jd-com/index.html @@ -0,0 +1,79 @@ +--- +title: JD.com Case Study +linkTitle: jd-com +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/jdcom/banner1.jpg +heading_title_logo: /images/jdcom_logo.png +subheading: > + JD.com: How JD.com Pioneered Kubernetes for E-Commerce at Hyperscale +case_study_details: + - Company: JD.com + - Location: Beijing, China + - Industry: eCommerce +--- + +

Challenge

+ +

With more than 300 million active users and total 2017 revenue of more than $55 billion, JD.com is China's largest retailer, and its operations are the epitome of hyperscale. For example, there are more than a trillion images in JD.com's product databases—with 100 million being added daily—and this enormous amount of data needs to be instantly accessible. In 2014, JD.com moved its applications to containers running on bare metal machines using OpenStack and Docker to "speed up the delivery of our computing resources and make the operations much simpler," says Haifeng Liu, JD.com's Chief Architect. But by the end of 2015, with tens of thousands of nodes running in multiple data centers, "we encountered a lot of problems because our platform was not strong enough, and we suffered from bottlenecks and scalability issues," says Liu. "We needed infrastructure for the next five years of development, now."

+ +

Solution

+ +

JD.com turned to Kubernetes to accommodate its ever-growing clusters. At the beginning of 2016, the company began to transition from OpenStack to Kubernetes, and today, JD.com runs the world's largest Kubernetes cluster. "Kubernetes has provided a strong foundation on top of which we have customized the solution to suit our needs as China's largest retailer," says Liu.

+ +

Impact

+ +

"We have greater data center efficiency, better managed resources, and smarter deployment with the Kubernetes platform," says Liu. Deployment time went from several hours to tens of seconds. Efficiency has improved by 20-30%, measured in IT costs. With the further optimizations the team is working on, Liu believes there is the potential to save hundreds of millions of dollars a year. But perhaps the best indication of success was the annual Singles Day shopping event, which ran on the Kubernetes platform for the first time in 2018. Over 11 days, transaction volume on JD.com was $23 billion, and "our e-commerce platforms did great," says Liu. "Infrastructure led the way to prep for 11.11. We took the approach of predicting volume, emulating the behavior of customers to prepare beforehand, and drilled for malfunctions. Because of Kubernetes's scalability, we were able to handle an extremely high level of demand."

+ +{{< case-studies/quote author="HAIFENG LIU, CHIEF ARCHITECT, JD.com" >}} +"Kubernetes helped us reduce the complexity of operations to make distributed systems stable and scalable. Most importantly, we can leverage Kubernetes for scheduling resources to reduce hardware costs. That's the big win." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +With more than 300 million active users and $55.7 billion in annual revenues last year, JD.com is China's largest retailer, and its operations are the epitome of hyperscale. +{{< /case-studies/lead >}} + +

For example, there are more than a trillion images in JD.com's product databases for customers, with 100 million being added daily. And this enormous amount of data needs to be instantly accessible to enable a smooth online customer experience.

+ +

In 2014, JD.com moved its applications to containers running on bare metal machines using OpenStack and Docker to "speed up the delivery of our computing resources and make the operations much simpler," says Haifeng Liu, JD.com's Chief Architect. But by the end of 2015, with tens of thousands of nodes running in multiple data centers, "we encountered a lot of problems because our platform was not strong enough, and we suffered from bottlenecks and scalability issues," Liu adds. "We needed infrastructure for the next five years of development, now."

+ +

After considering a number of orchestration technologies, JD.com decided to adopt Kubernetes to accommodate its ever-growing clusters. "The main reason is because Kubernetes can give us more efficient, scalable and much simpler application deployments, plus we can leverage it to do flexible platform scheduling," says Liu.

+ +{{< case-studies/quote + image="/images/case-studies/jdcom/banner3.jpg" + author="HAIFENG LIU, CHIEF ARCHITECT, JD.com" +>}} +"We customized Kubernetes and built a modern system on top of it. This entire ecosystem of Kubernetes plus our own optimizations have helped us save costs and time." +{{< /case-studies/quote >}} + +

The fact that Kubernetes is based on Google's Borg also gave the company confidence. The team liked that Kubernetes has a clear and simple architecture, and that it's developed mostly in Go, which is a popular language within JD.com. Though he felt that at the time Kubernetes "was not mature enough," Liu says, "we adopted it anyway."

+ +

The team spent a year developing the new container engine platform based on Kubernetes, and at the end of 2016, began promoting it within the company. "We wanted the cluster to be the default way for creating services, so scalability is easier," says Liu. "We talked to developers, interest grew, and we solved problems together." Some of these problems included networking performance and etcd scalability. "But during the past two years, Kubernetes has become more mature and very stable," he adds.

+ +

Today, the company runs the world's largest Kubernetes cluster. "We customized Kubernetes and built a modern system on top of it," says Liu. "This entire ecosystem of Kubernetes plus our own optimizations have helped us save costs and time. We have greater data center efficiency, better managed resources, and smarter deployment with the Kubernetes platform."

+ +{{< case-studies/quote + image="/images/case-studies/jdcom/banner4.jpg" + author="HAIFENG LIU, CHIEF ARCHITECT, JD.com" +>}} +"My advice is first you need to combine this technology with your own businesses, and the second is you need clear goals. You cannot just use the technology because others are using it. You need to consider your own objectives." +{{< /case-studies/quote >}} + +

The results are clear: Deployment time went from several hours to tens of seconds. Efficiency has improved by 20-30%, measured in IT costs. But perhaps the best indication of success was the annual Singles Day shopping event, which ran on the Kubernetes platform for the first time in 2018. Over 11 days, transaction volume on JD.com was $23 billion, and "our e-commerce platforms did great," says Liu. "Infrastructure led the way to prep for 11.11. We took the approach of predicting volume, emulating the behavior of customers to prepare beforehand, and drilled for malfunctions. Because of Kubernetes's scalability, we were able to handle an extremely high level of demand."

+ +

JD.com is now in its second stage with Kubernetes: The platform is already stable, scalable, and flexible, so the focus is on how to run things much more efficiently to further reduce costs. With the optimizations the team is working on with resource management, Liu believes there is the potential to save hundreds of millions of dollars a year.

+ +

"We run Kubernetes and container clusters on roughly tens of thousands of physical bare metal nodes," he says. "Using Kubernetes and leveraging our own machine learning pipeline to predict how many resources we need for each application we use, and our own intelligent scaling algorithm, we can improve our resource usage. If we boost the resource usage, for example, by several percent, that means we can reduce huge hardware costs. Then we don't need that many servers to get that same amount of workload. That can save us a lot of resources."

+ +{{< case-studies/quote author="HAIFENG LIU, CHIEF ARCHITECT, JD.com" >}} +"We can share our successful experience with the community, and we also receive good feedback from others. So it's mutually beneficial." +{{< /case-studies/quote >}} + +

JD.com, which won CNCF's 2018 End User Award, is also using Helm, CNI, Harbor, and Vitess on its platform. JD.com developers have made considerable contributions to Vitess, the CNCF project for scalable MySQL cluster management, and the company hopes to donate its own project to CNCF in the near future. Community participation is a priority for JD.com. "We have a good partnership with this community," says Liu. "We can share our successful experience with the community, and we also receive good feedback from others. So it's mutually beneficial."

+ +

To that end, Liu offers this advice for other companies considering adopting cloud native technology. "First you need to combine this technology with your own businesses, and the second is you need clear goals," he says. "You cannot just use the technology because others are using it. You need to consider your own objectives."

+ +

For JD.com's objectives, these cloud native technologies have been an ideal fit with the company's own homegrown innovation. "Kubernetes helped us reduce the complexity of operations to make distributed systems stable and scalable," says Liu. "Most importantly, we can leverage Kubernetes for scheduling resources to reduce hardware costs. That's the big win."

\ No newline at end of file diff --git a/content/bn/case-studies/jd-com/jd-com_featured_logo.png b/content/bn/case-studies/jd-com/jd-com_featured_logo.png new file mode 100644 index 0000000000000..e897998429386 Binary files /dev/null and b/content/bn/case-studies/jd-com/jd-com_featured_logo.png differ diff --git a/content/bn/case-studies/jd-com/jd-com_featured_logo.svg b/content/bn/case-studies/jd-com/jd-com_featured_logo.svg new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/content/en/case-studies/jd-com/jd.com_featured_logo.svg b/content/bn/case-studies/jd-com/jd.com_featured_logo.svg similarity index 100% rename from content/en/case-studies/jd-com/jd.com_featured_logo.svg rename to content/bn/case-studies/jd-com/jd.com_featured_logo.svg diff --git a/content/bn/case-studies/naic/index.html b/content/bn/case-studies/naic/index.html new file mode 100644 index 0000000000000..23955bdef8242 --- /dev/null +++ b/content/bn/case-studies/naic/index.html @@ -0,0 +1,87 @@ +--- +title: NAIC Case Study +linkTitle: NAIC +case_study_styles: true +cid: caseStudies +logo: naic_featured_logo.png +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/naic/banner1.jpg +heading_title_logo: /images/naic_logo.png +subheading: > + A Culture and Technology Transition Enabled by Kubernetes +case_study_details: + - Company: NAIC + - Location: Washington, DC + - Industry: Regulatory +--- + +

Challenge

+ +

The National Association of Insurance Commissioners (NAIC), the U.S. standard-setting and regulatory support organization, was looking for a way to deliver new services faster to provide more value for members and staff. It also needed greater agility to improve productivity internally.

+ +

Solution

+ +

In 2016, the organization began using Cloud Native Computing Foundation (CNCF) tools such as Prometheus. NAIC began hosting internal systems and development systems on Kubernetes at the beginning of 2018, as part of a broad move toward the public cloud. "Our culture and technology transition is a strategy embraced by our top leaders," says Dan Barker, Chief Enterprise Architect. "It has already proven successful by allowing us to accelerate our value pipeline by more than double while decreasing our costs by more than half. We are also seeing customer satisfaction increase as we add more and more applications to these new technologies."

+ +

Impact

+ +

Leveraging Kubernetes, "our development teams can create rapid prototypes far faster than they used to," Barker said. Applications running on Kubernetes are more resilient than those running in other environments. The deployment of open source solutions is helping influence company culture, as NAIC becomes a more open and transparent organization.

+ +

"We completed a small prototype in two days that would have previously taken at least a month," Barker says. Resiliency is currently measured in how much downtime systems have. "They've basically had none, and the occasional issue is remedied in minutes," he says.

+ +{{< case-studies/quote author="Dan Barker, Chief Enterprise Architect, NAIC" >}} +"Our culture and technology transition is a strategy embraced by our top leaders. It has already proven successful by allowing us to accelerate our value pipeline by more than double while decreasing our costs by more than half. We are also seeing customer satisfaction increase as we add more and more applications to these new technologies." +{{< /case-studies/quote >}} + +

NAIC—which was created and is overseen by the chief insurance regulators from the 50 states, the District of Columbia, and five U.S. territories—provides a means through which state insurance regulators establish standards and best practices, conduct peer reviews, and coordinate their regulatory oversight. Their staff supports these efforts and represents the collective views of regulators in the United States and internationally. NAIC members, together with the organization's central resources, form the national system of state-based insurance regulation in the United States.

+ +

The organization has been using the cloud for years, and wanted to find more ways to quickly deliver new services that provide more value for members and staff. They looked to Kubernetes for a solution. Within NAIC, several groups are leveraging Kubernetes, one being the Platform Engineering Team. "The team building out these tools are not only deploying and operating Kubernetes, but they're also using them," Barker says. "In fact, we're using GitLab to deploy Kubernetes with a pipeline using kops. This team was created from developers, operators, and quality engineers from across the company, so their jobs have changed quite a bit."
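For readers unfamiliar with kops, the sketch below shows the general shape of a cluster build like the one Barker describes. It is illustrative only; the state-store bucket, cluster name, and sizing are hypothetical, not NAIC's actual configuration.

```bash
# Hypothetical pipeline step: stand up a cluster with kops.
# The S3 bucket and cluster name are placeholders.
export KOPS_STATE_STORE=s3://example-kops-state

kops create cluster \
  --name=dev.k8s.example.com \
  --zones=us-east-1a \
  --node-count=3

# Apply the cluster configuration to AWS
kops update cluster --name=dev.k8s.example.com --yes
```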

+ +

In addition, NAIC is onboarding teams to the new platform, and those teams have seen a lot of change in how they work and what they can do. "They now have more power in creating their own infrastructure and deploying their own applications," Barker says. They also use pipelines to facilitate their currently manual processes. NAIC has consumers who are using GitLab heavily, and they're starting to use Kubernetes to deploy simple applications that help their internal processes.

+ +{{< case-studies/quote + image="/images/case-studies/naic/banner3.jpg" + author="Dan Barker, Chief Enterprise Architect, NAIC" +>}} +"In our experience, vendor lock-in and tooling that is highly specific results in less resilient technology with fewer minds working to solve problems and grow the community." +{{< /case-studies/quote >}} + +

"We needed greater agility to enable our own productivity internally," he says. "We decided it was right for us to move everything to the public cloud [Amazon Web Services] to help with that process and be able to access many of the native tools that allows us to move faster by not needing to build everything." +The NAIC also wanted to be cloud-agnostic, "and Kubernetes helps with this for our compute layer," Barker says. "Compute is pretty standard across the clouds, and now we can take advantage of any of them while getting all of the other features Kubernetes offers."

+ +

The NAIC currently hosts internal systems and development systems on Kubernetes, and has already seen how impactful it can be. "Our development teams can create rapid prototypes in minutes instead of weeks," Barker says. "This recently happened with an internal tool that had no measurable wait time on the infrastructure. It was solely development bound. There is now a central shared resource that lives in AWS, which means it can grow as needed."

+ +

The native integrations into Kubernetes at NAIC have made it easy to write code and have it running in minutes instead of weeks. Applications running on Kubernetes have also proven to be more resilient than those running in other environments. "We even have teams using this to create more internal tools to help with communication or automating some of their current tasks," Barker says.

+ +

"We knew that Kubernetes had become the de facto standard for container orchestration," he says. "Two major factors for selecting this were the three major cloud vendors hosting their own versions and having it hosted in a neutral party as fully open source."

+ +

As for other CNCF projects, NAIC is using Prometheus on a small scale and hopes to continue using it moving forward because of the seamless integration with Kubernetes. The Association also is considering gRPC as its internal communications standard, Envoy in conjunction with Istio for service mesh, OpenTracing and Jaeger for tracing aggregation, and Fluentd with its Elasticsearch cluster.

+ +{{< case-studies/quote + image="/images/case-studies/naic/banner4.jpg" + author="Dan Barker, Chief Enterprise Architect, NAIC" +>}} +"We knew that Kubernetes had become the de facto standard for container orchestration. Two major factors for selecting this were the three major cloud vendors hosting their own versions and having it hosted in a neutral party as fully open source." +{{< /case-studies/quote >}} + +

The open governance and broad industry participation in CNCF provided a comfort level with the technology, Barker says. "We also see it as helping to influence our own company culture," he says. "We're moving to be a more open and transparent company, and we are encouraging our staff to get involved with the different working groups and codebases. We recently became CNCF members to help further our commitment to community contribution and transparency."

+ +

Factors such as vendor-neutrality and cross-industry investment were important in the selection. "In our experience, vendor lock-in and tooling that is highly specific results in less resilient technology with fewer minds working to solve problems and grow the community," Barker says.

+ +

NAIC is a largely Oracle shop, Barker says, and has been running mostly Java on JBoss. "However, we have years of history with other applications," he says. "Some of these have been migrated by completely rewriting the application, while others are just being modified slightly to fit into this new paradigm."

+ +

Running on AWS cloud, the Association has not specifically taken a microservices approach. "We are moving to microservices where practical, but we haven't found that it's a necessity to operate them within Kubernetes," Barker says.

+ +

All of its databases are currently running within public cloud services, but they have explored eventually running those in Kubernetes, as it makes sense. "We're doing this to get more reuse from common components and to limit our failure domains to something more manageable and observable," Barker says.

+ +{{< case-studies/quote author="Dan Barker, Chief Enterprise Architect, NAIC" >}} +"We have been able to move much faster at lower cost than we were able to in the past," Barker says. "We were able to complete one of our projects in a year, when the previous version took over two years. And the new project cost $500,000 while the original required $3 million, and with fewer defects. We are also able to push out new features much faster." +{{< /case-studies/quote >}} + +

NAIC has seen a significant business impact from its efforts. "We have been able to move much faster at lower cost than we were able to in the past," Barker says. "We were able to complete one of our projects in a year, when the previous version took over two years. And the new project cost $500,000 while the original required $3 million, and with fewer defects. We are also able to push out new features much faster."

+ +

He says the organization is moving toward continuous deployment "because the business case makes sense. The research is becoming very hard to argue with. We want to reduce our batch sizes and optimize on delivering value to customers and not feature count. This is requiring a larger cultural shift than just a technology shift."

+ +

NAIC is "becoming more open and transparent, as well as more resilient to failure," Barker says. "Even our customers are wanting more and more of this and trying to figure out how they can work with us to accomplish our mutual goals faster. Members of the insurance industry have reached out so that we can better learn together and grow as an industry."

\ No newline at end of file diff --git a/content/bn/case-studies/naic/naic_featured_logo.png b/content/bn/case-studies/naic/naic_featured_logo.png new file mode 100644 index 0000000000000..f2497114bf40a Binary files /dev/null and b/content/bn/case-studies/naic/naic_featured_logo.png differ diff --git a/content/bn/case-studies/naic/naic_featured_logo.svg b/content/bn/case-studies/naic/naic_featured_logo.svg new file mode 100644 index 0000000000000..b4af63931dbb4 --- /dev/null +++ b/content/bn/case-studies/naic/naic_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/nav/index.html b/content/bn/case-studies/nav/index.html new file mode 100644 index 0000000000000..14a4405ce0d8e --- /dev/null +++ b/content/bn/case-studies/nav/index.html @@ -0,0 +1,81 @@ +--- +title: Nav Case Study +linkTitle: Nav +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/nav/banner1.jpg +heading_title_logo: /images/nav_logo.png +subheading: > + How A Startup Reduced Its Infrastructure Costs by 50% With Kubernetes +case_study_details: + - Company: Nav + - Location: Salt Lake City, Utah, and San Mateo, California + - Industry: Financial services for businesses +--- + +

Challenge

+ +

Founded in 2012, Nav provides small business owners with access to their business credit scores from all three major commercial credit bureaus—Equifax, Experian and Dun & Bradstreet—and financing options that best fit their needs. Five years in, the startup was growing rapidly, and "our cloud environments were getting very large, and our usage of those environments was extremely low, like under 1%," says Director of Engineering Travis Jeppson. "We wanted our usage of cloud environments to be more tightly coupled with what we actually needed, so we started looking at containerization and orchestration to help us be able to run workloads that were distinct from one another but could share a similar resource pool."

+ +

Solution

+ +

After evaluating a number of orchestration solutions, the Nav team decided to adopt Kubernetes running on AWS. The strength of the community around Kubernetes was a strong draw, as was its Google provenance. Plus, "the other solutions tended to be fairly heavy-handed, really complex, really large, and really hard to manage just off the bat," says Jeppson. "Kubernetes gave us a very simple way to be able to step into an orchestration solution that fit our needs at the time, but also the extensibility of it allowed us to be able to grow with it and be able to build in more features and functionality later on."

+ +

Impact

+ +

The four-person team got Kubernetes up and running in six months, and the full migration of Nav's 25 microservices was completed in another six months. The results have been impressive: Resource utilization, which led the company on this path in the first place, has increased from 1% to 40%. Launching a new service used to take two developers two weeks; now it takes only one developer less than 10 minutes. Deployments have increased 5x. And the company is saving 50% in infrastructure costs.

+ +{{< case-studies/quote author="Travis Jeppson, Director of Engineering, Nav" >}} + +
+"Kubernetes gave us a very simple way to be able to step into an orchestration solution that fit our needs at the time, but also the extensibility of it allowed us to be able to grow with it and be able to build in more features and functionality later on." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Founded in 2012, Nav provides small business owners with access to their business credit scores from all three major commercial credit bureaus—Equifax, Experian and Dun & Bradstreet—as well as details on their businesses' financial health and financing options that best fit their needs. Its mission boils down to this, says Director of Engineering Travis Jeppson: "to increase the success rate of small businesses." +{{< /case-studies/lead >}} + +

A couple of years ago, Nav recognized an obstacle in its own path to success. The business was growing rapidly, and "our cloud environments were getting very large, and our usage of those environments was extremely low, like under 1%," says Jeppson. "Most of the problem was around the ability to scale. We were just throwing money at it. 'Let's just spin up more servers. Let's just do more things in order to handle an increased load.' And with us being a startup, that could lead to our demise. We don't have the money to burn on that kind of stuff."

+ +

Plus, every new service had to go through 10 different people, taking an unacceptably long two weeks to launch. "All of the patch management and the server management was done very manually, and so we all had to watch it and maintain it really well," adds Jeppson. "It was just a very troublesome system."

+ +{{< case-studies/quote + image="/images/case-studies/nav/banner3.jpg" + author="Travis Jeppson, Director of Engineering, Nav" +>}} +"The community is absolutely vital: being able to pass ideas around, talk about a lot of the similar challenges that we're all facing, and just get help. I like that we're able to tackle the same problems for different reasons but help each other along the way." +{{< /case-studies/quote >}} + +

Jeppson had worked with containers at his previous job, and pitched that technology to Nav's management as a solution to these problems. He got the green light in early 2017. "We wanted our usage of cloud environments to be more tightly coupled with what we actually needed, so we started looking at containerization and orchestration to help us be able to run workloads that were distinct from one another but could share a similar resource pool," he says.

+ +

After evaluating a number of orchestration solutions, the company decided to adopt Kubernetes running on AWS. The strength of the community around Kubernetes was a strong draw, as were its Google origins. Additionally, "the other solutions tended to be fairly heavy-handed, really complex, really large, and really hard to manage just off the bat," says Jeppson. "Kubernetes gave us a very simple way to be able to step into an orchestration solution that fit our needs at the time, but the extensibility of it would also allow us to grow with it and build in more features and functionality later on."

+ +

Jeppson's four-person Engineering Services team got Kubernetes up and running in six months (they decided to use Kubespray to spin up clusters), and the full migration of Nav's 25 microservices and one primary monolith was completed in another six months. "We couldn't rewrite everything; we couldn't stop," he says. "We had to stay up, we had to stay available, and we had to have minimal amount of downtime. So we got really comfortable around our building pipeline, our metrics and logging, and then around Kubernetes itself: how to launch it, how to upgrade it, how to service it. And we moved little by little."
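As a rough illustration of what "using Kubespray to spin up clusters" involves, here is a minimal sketch along the lines of Kubespray's documented quick start. The repository URL reflects the project's current home, and the host IPs are placeholders, not Nav's environment.

```bash
# Clone Kubespray and install its Ansible dependencies
git clone https://github.com/kubernetes-sigs/kubespray.git
cd kubespray
pip install -r requirements.txt

# Build an inventory from the sample, pointed at placeholder hosts
cp -rfp inventory/sample inventory/mycluster
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}

# Run the playbook to provision the cluster on those hosts
ansible-playbook -i inventory/mycluster/hosts.yaml --become --become-user=root cluster.yml
```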

+ +{{< case-studies/quote + image="/images/case-studies/nav/banner4.jpg" + author="Travis Jeppson, Director of Engineering, Nav" +>}} +"Kubernetes has brought so much value to Nav by allowing all of these new freedoms that we had just never had before." +{{< /case-studies/quote >}} + +

A crucial part of the process involved educating Nav's 50 engineers and being transparent regarding the new workflow as well as the roadmap for the migration. Jeppson did regular presentations along the way, and a week of four-hours-a-day labs for the entire staff of engineers. He then created a repository in GitLab to house all of the information. "We showed all the frontend and backend developers how to go in, create their own namespace using kubectl, all themselves," he says. "Now, a lot of times, they just come to us and say, 'This is ready.' We click a little button in GitLab to allow it to release into production, and they're off to the races."
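The self-service flow Jeppson describes boils down to a few kubectl commands; a minimal sketch (the namespace and manifest names here are hypothetical) looks like this:

```bash
# A developer carves out their own namespace...
kubectl create namespace my-team

# ...deploys into it, and checks the result before handing the
# release off to the GitLab pipeline
kubectl --namespace my-team apply -f deployment.yaml
kubectl --namespace my-team get pods
```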

+ +

Since the migration was completed in early 2018, the results have been impressive: Resource utilization, which led the company on this path in the first place, has increased from 1% to 40%. Launching a new service used to take two developers two weeks; now it takes only one developer less than 10 minutes. Deployments have increased 5x, from 10 a day to 50 a day. And the company is saving 50% in infrastructure costs on the computational side. "Next we want to go in to address the database side, and once we do that, then we're going to continue to drop that cost quite a bit more," says Jeppson.

+ +

Kubernetes has also helped Nav with its compliance needs. Before, "we had to map one application to one server, mostly due to different compliance regulations around data," Jeppson says. "With the Kubernetes API, we could add in network policies and segregate that data and restrict it if needed." The company segregates its cluster into an unrestricted zone and a restricted zone, which has its own set of nodes where data protection happens. The company also uses the Twistlock tool to ensure security, "and that makes it a lot easier to sleep at night," he adds.
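To make the network-policy idea concrete, here is a minimal sketch of a policy in that spirit, assuming a hypothetical `restricted` namespace and a `zone: restricted` pod label; it is illustrative, not Nav's actual policy.

```bash
kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: restricted-zone-ingress
  namespace: restricted
spec:
  podSelector: {}          # applies to every pod in the restricted namespace
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              zone: restricted   # only traffic from restricted-zone pods is allowed
EOF
```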

+ +{{< case-studies/quote author="Travis Jeppson, Director of Engineering, Nav" >}} +"We're talking four to 10 times the amount of traffic that we handle now, and it's just like, 'Oh, yeah. We're good. Kubernetes handles this for us.'" +{{< /case-studies/quote >}} + +

With Kubernetes in place, the Nav team also started improving the system's metrics and logging by adopting Prometheus. "Prometheus created a standard around metrics that was really easy for a developer to adopt," says Jeppson. "They have the freedom to display what they want, to do what they need, and keep their codebase clean, and that to us was absolutely a must."

+ +

Next up for Nav in the coming year: looking at tracing, storage, and service mesh. They're currently evaluating Envoy, OpenTracing, and Jaeger after spending much of KubeCon talking to other companies. "The community is absolutely vital: being able to pass ideas around, talk about a lot of the similar challenges that we're all facing, and just get help. I like that we're able to tackle the same problems for different reasons but help each other along the way," says Jeppson. "There's still so, so much to do around scalability, around being able to really fully adopt a cloud native solution."

+ +

Of course, it all starts with Kubernetes. With that technology, Jeppson's team has built a platform that allows Nav to scale, and that "has brought so much value to Nav by allowing all of these new freedoms that we had just never had before," he says.

+ +

Conversations about new products used to be bogged down by the fact they'd have to wait six months to get an environment set up with isolation and then figure out how to handle spikes of traffic. "But now it's just nothing to us," says Jeppson. "We're talking four to 10 times the amount of traffic that we handle now, and it's just like, 'Oh, yeah. We're good. Kubernetes handles this for us.'"

\ No newline at end of file diff --git a/content/bn/case-studies/nav/nav_featured_logo.png b/content/bn/case-studies/nav/nav_featured_logo.png new file mode 100644 index 0000000000000..22d96017c432a Binary files /dev/null and b/content/bn/case-studies/nav/nav_featured_logo.png differ diff --git a/content/bn/case-studies/nav/nav_featured_logo.svg b/content/bn/case-studies/nav/nav_featured_logo.svg new file mode 100644 index 0000000000000..42b4ffa9674d7 --- /dev/null +++ b/content/bn/case-studies/nav/nav_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/nerdalize/index.html b/content/bn/case-studies/nerdalize/index.html new file mode 100644 index 0000000000000..ad2fdb5c82d74 --- /dev/null +++ b/content/bn/case-studies/nerdalize/index.html @@ -0,0 +1,81 @@
+---
+title: Nerdalize Case Study
+linkTitle: Nerdalize
+case_study_styles: true
+cid: caseStudies
+featured: false
+
+new_case_study_styles: true
+heading_background: /images/case-studies/nerdalize/banner1.jpg
+heading_title_logo: /images/nerdalize_logo.png
+subheading: >
+  Nerdalize: Providing Affordable and Sustainable Cloud Hosting with Kubernetes
+case_study_details:
+  - Company: Nerdalize
+  - Location: Delft, Netherlands
+  - Industry: Cloud Provider
+---
+
+

Challenge

+ +

Nerdalize offers affordable cloud hosting for customers—and free heat and hot water for people who sign up to house the heating devices that contain the company's servers. The savings Nerdalize realizes by not running data centers are passed on to its customers. When the team began using Docker to make its software more portable, it realized it also needed a container orchestration solution. "As a cloud provider, we have internal services for hosting our backends and billing our customers, but we also need to offer our compute to our end users," says Digital Product Engineer Ad van der Veer. "Since we have these heating devices spread across the Netherlands, we need some way of tying that all together."

+ +

Solution

+ +

After briefly using a basic scheduling setup with another open source tool, Nerdalize switched to Kubernetes. "On top of our heating devices throughout the Netherlands, we have a virtual machine layer, and on top of that we run Kubernetes clusters for our customers," says van der Veer. "As a small company, we have to provide a rock solid story in terms of the technology. Kubernetes allows us to offer a hybrid solution: 'You can run this on our cloud, but you can run it on other clouds as well. It runs in your internal hardware if you like.' And together with the Docker image standard and our multi-cloud dashboard, that allows them peace of mind."

+ +

Impact

+ +

Nerdalize prides itself on being a Kubernetes-native cloud provider that charges its customers prices 40% below those of other cloud providers. "Every euro that we have to invest for licensing of software that's not open source comes from that 40%," says van der Veer. If they had used a non-open source orchestration platform instead of Kubernetes, "that would reduce this proposition that we have of 40% less cost to like 30%. Kubernetes directly allows us to have this business model and this strategic advantage." Nerdalize customers also benefit from time savings: one went from spending a day setting up VMs, networking, and software to spinning up a Kubernetes cluster in minutes. Households using the heating devices save an average of 200 euros a year on their heating bills. The environmental impact? The annual reduction in CO2 emissions comes out to 2 tons per Nerdalize household, which is equivalent to a car driving 8,000 km.

+ +{{< case-studies/quote author="AD VAN DER VEER, PRODUCT ENGINEER, NERDALIZE" >}} +"We can walk into a boardroom and put a Kubernetes logo up, and people accept it as an established technology. It becomes this centerpiece where other cloud native projects can tie in, so there's a network effect that each project empowers each other. This is something that has a lot of value when we have to talk to customers and convince them that our cloud fits their needs." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Nerdalize is a cloud hosting provider that has no data centers. Instead, the four-year-old startup places its servers in homes across the Netherlands, inside heating devices it developed to turn the heat produced by the servers into heating and hot water for the residents. +{{< /case-studies/lead >}} + +

"Households save on their gas bills, and cloud users have a much more sustainable cloud solution," says Maaike Stoops, Customer Experience Queen at Nerdalize. "And we don't have the overhead of building a data center, so our cloud is up to 40% more affordable."

+ +

That business model has been enabled by the company's adoption of containerization and Kubernetes. "When we just got started, Docker was just introduced," says Digital Product Engineer Ad van der Veer. "We began with a very basic bare metal setup, but once we developed the business, we saw that containerization technology was super useful to help our customers. As a cloud provider, we have internal services for hosting our backends and billing our customers, but we also need to offer our compute to our end users. Since we have these heating devices spread across the Netherlands, we need some way of tying that all together."

+ +

After trying to develop its own scheduling system using another open source tool, Nerdalize found Kubernetes. "Kubernetes provided us with more functionality out of the gate," says van der Veer.

+ +{{< case-studies/quote + image="/images/case-studies/nerdalize/banner3.jpg" + author="AD VAN DER VEER, PRODUCT ENGINEER, NERDALIZE" +>}} +"We always try to get a working version online first, like minimal viable products, and then move to stabilize that," says van der Veer. "And I think that these kinds of day-two problems are now immediately solved. The rapid prototyping we saw internally is a very valuable aspect of Kubernetes." +{{< /case-studies/quote >}} + +

The team first experimented with a basic use case to run customers' workloads on Kubernetes. "Getting the data working was kind of difficult, and at the time the installation wasn't that simple," says van der Veer. "Then CNCF started, we saw the community grow, these problems got solved, and from there it became a very easy decision."

+ +

The first Nerdalize product that was launched in 2017 was "100% containerized and Kubernetes native," says van der Veer. "On top of our heating devices throughout the Netherlands, we have a virtual machine layer, and on top of that we run Kubernetes clusters for our customers. As a small company, we have to provide a rock solid story in terms of the technology. Kubernetes allows us to offer a hybrid solution: 'You can run this on our cloud, but you can run it on other clouds as well. It runs in your internal hardware if you like.' And together with the Docker image standard and our multi-cloud dashboard, that gives them peace of mind."

+ +

Not to mention the 40% cost savings. "Every euro that we have to invest for licensing of software that's not open source comes from that 40%," says van der Veer. If Nerdalize had used a non-open source orchestration platform instead of Kubernetes, "that would reduce our cost savings proposition to like 30%. Kubernetes directly allows us to have this business model and this strategic advantage."

+ +{{< case-studies/quote + image="/images/case-studies/nerdalize/banner4.jpg" + author="MAAIKE STOOPS, CUSTOMER EXPERIENCE QUEEN, NERDALIZE" +>}} +"One of our customers used to spend up to a day setting up the virtual machines, network and software every time they wanted to run a project in the cloud. On our platform, with Docker and Kubernetes, customers can have their projects running in a couple of minutes." +{{< /case-studies/quote >}} + +

Nerdalize now has customers, from individual engineers to data-intensive startups and established companies, all around the world. (For the time being, though, the heating devices are exclusive to the Netherlands.) One of the most common use cases is batch workloads used by data scientists and researchers, and the time savings for these end users is profound. "One of our customers used to spend up to a day setting up the virtual machines, network and software every time they wanted to run a project in the cloud," says Stoops. "On our platform, with Docker and Kubernetes, customers can have their projects running in a couple of minutes."

+ +

As for households using the heating devices, they save an average of 200 euro a year on their heating bill. The environmental impact? The annual reduction in CO2 emissions comes out to 2 tons per Nerdalize household, which is equivalent to a car driving 8,000 km.

+ +

For the Nerdalize team, feature development—such as the accessible command line interface called Nerd, which recently went live—has also been sped up by Kubernetes. "We always try to get a working version online first, like minimal viable products, and then move to stabilize that," says van der Veer. "And I think that these kinds of day-two problems are now immediately solved. The rapid prototyping we saw internally is a very valuable aspect of Kubernetes."

+ +

Another unexpected benefit has been the growing influence and reputation of Kubernetes. "We can walk into a boardroom and put a Kubernetes logo up, and people accept it as an established technology," says van der Veer. "It becomes this centerpiece where other cloud native projects can tie in, so there's a network effect that each project empowers each other. This is something that has a lot of value when we have to convince customers that our cloud fits their needs."

+ +{{< case-studies/quote author="MAAIKE STOOPS, CUSTOMER EXPERIENCE QUEEN, NERDALIZE" >}} +"It shouldn't be too big of a hassle and too large of a commitment. It should be fun and easy for end users. So we really love Kubernetes in that way." +{{< /case-studies/quote >}} + +

In fact, Nerdalize is currently looking into implementing other CNCF projects, such as Prometheus for monitoring and Rook, "which should help us with some of the data problems that we want to solve for our customers," says van der Veer.

+ +

In the coming year, Nerdalize will scale up the number of households running its hardware to 50, or the equivalent of a small-scale data center. Geographic redundancy and greater server availability for customers are two main goals. Spreading the word about Kubernetes is also in the game plan. "We offer a free namespace on our sandbox, multi-tenant Kubernetes cluster for anyone to try," says van der Veer. "What's more cool than trying your first Kubernetes project on houses, to warm a shower?"

+ +

Ultimately, this ties into Nerdalize's mission of supporting affordable and sustainable cloud hosting. "We want to be the disrupter of the cloud space, showing organizations that running in the cloud is easy and affordable," says Stoops. "It shouldn't be too big of a hassle and too large of a commitment. It should be fun and easy for end users. So we really love Kubernetes in that way."

\ No newline at end of file diff --git a/content/bn/case-studies/nerdalize/nerdalize_featured_logo.png b/content/bn/case-studies/nerdalize/nerdalize_featured_logo.png new file mode 100644 index 0000000000000..eb959b8ecfa1f Binary files /dev/null and b/content/bn/case-studies/nerdalize/nerdalize_featured_logo.png differ diff --git a/content/bn/case-studies/nerdalize/nerdalize_featured_logo.svg b/content/bn/case-studies/nerdalize/nerdalize_featured_logo.svg new file mode 100644 index 0000000000000..aa2661e503bae --- /dev/null +++ b/content/bn/case-studies/nerdalize/nerdalize_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/netease/index.html b/content/bn/case-studies/netease/index.html new file mode 100644 index 0000000000000..a23afaaf56e8f --- /dev/null +++ b/content/bn/case-studies/netease/index.html @@ -0,0 +1,76 @@ +--- +title: NetEase Case Study +linkTitle: NetEase +case_study_styles: true +cid: caseStudies +logo: netease_featured_logo.png +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/netease/banner1.jpg +heading_title_logo: /images/netease_logo.png +subheading: > + How NetEase Leverages Kubernetes to Support Internet Business Worldwide +case_study_details: + - Company: NetEase + - Location: Hangzhou, China + - Industry: Internet technology +--- + +

Challenge

+ +

Its gaming business is one of the largest in the world, but that's not all that NetEase provides to Chinese consumers. The company also operates e-commerce, advertising, music streaming, online education, and email platforms; the last of which serves almost a billion users with free email services through sites like 163.com. In 2015, the NetEase Cloud team providing the infrastructure for all of these systems realized that their R&D process was slowing down developers. "Our users needed to prepare all of the infrastructure by themselves," says Feng Changjian, Architect for NetEase Cloud and Container Service. "We were eager to provide the infrastructure and tools for our users automatically via serverless container service."

+ +

Solution

+ +

After considering building its own orchestration solution, NetEase decided to base its private cloud platform on Kubernetes. The fact that the technology came out of Google gave the team confidence that it could keep up with NetEase's scale. "After our 2-to-3-month evaluation, we believed it could satisfy our needs," says Feng. The team started working with Kubernetes in 2015, before it was even 1.0. Today, the NetEase internal cloud platform—which also leverages the CNCF projects Prometheus, Envoy, Harbor, gRPC, and Helm—runs 10,000 nodes in a production cluster and can support up to 30,000 nodes in a cluster. Based on its learnings from its internal platform, the company introduced a Kubernetes-based cloud and microservices-oriented PaaS product, NetEase Qingzhou Microservice, to outside customers.

+ +

Impact

+ +

The NetEase team reports that Kubernetes has increased R&D efficiency by more than 100%. Deployment efficiency has improved by 280%. "In the past, if we wanted to do upgrades, we needed to work with other teams, even in other departments," says Feng. "We needed special staff to prepare everything, so it took about half an hour. Now we can do it in only 5 minutes." The new platform also allows for mixed deployments using GPU and CPU resources. "Before, if we put all the resources toward the GPU, we won't have spare resources for the CPU. But now we have improvements thanks to the mixed deployments," he says. Those improvements have also brought an increase in resource utilization.

+ +{{< case-studies/quote author="Zeng Yuxing, Architect, NetEase" >}} +"The system can support 30,000 nodes in a single cluster. In production, we have gotten the data of 10,000 nodes in a single cluster. The whole internal system is using this system for development, test, and production." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Its gaming business is the fifth-largest in the world, but that's not all that NetEase provides consumers. +{{< /case-studies/lead >}} + +

The company also operates e-commerce, advertising, music streaming, online education, and email platforms in China; the last of which serves almost a billion users with free email services through popular sites like 163.com and 126.com. With that kind of scale, the NetEase Cloud team providing the infrastructure for all of these systems realized in 2015 that their R&D process was making it hard for developers to keep up with demand. "Our users needed to prepare all of the infrastructure by themselves," says Feng Changjian, Architect for NetEase Cloud and Container Service. "We were eager to provide the infrastructure and tools for our users automatically via serverless container service."

+ +

After considering building its own orchestration solution, NetEase decided to base its private cloud platform on Kubernetes. The fact that the technology came out of Google gave the team confidence that it could keep up with NetEase's scale. "After our 2-to-3-month evaluation, we believed it could satisfy our needs," says Feng.

+ +{{< case-studies/quote + image="/images/case-studies/netease/banner3.jpg" + author="Feng Changjian, Architect for NetEase Cloud and Container Service, NetEase" +>}} +"We leveraged the programmability of Kubernetes so that we can build a platform to satisfy the needs of our internal customers for upgrades and deployment." +{{< /case-studies/quote >}} + +

The team started adopting Kubernetes in 2015, before it was even 1.0, because it was relatively easy to use and enabled DevOps at the company. "We abandoned some of the concepts of Kubernetes; we only wanted to use the standardized framework," says Feng. "We leveraged the programmability of Kubernetes so that we can build a platform to satisfy the needs of our internal customers for upgrades and deployment."


The team first focused on building the container platform to manage resources better, and then turned their attention to improving its support of microservices by adding internal systems such as monitoring. That has meant integrating the CNCF projects Prometheus, Envoy, Harbor, gRPC, and Helm. "We are trying to provide a simplified and standardized process, so our users and customers can leverage our best practices," says Feng.


And the team is continuing to make improvements. For example, the e-commerce part of the business needs to leverage mixed deployments, which in the past required using two separate platforms: the infrastructure-as-a-service platform and the Kubernetes platform. More recently, NetEase has created a cross-platform application that enables using both with one-command deployment.

{{< case-studies/quote
  image="/images/case-studies/netease/banner4.jpg"
  author="Li Lanqing, Kubernetes Developer, NetEase"
>}}
"As long as a company has a mature team and enough developers, I think Kubernetes is a very good technology that can help them."
{{< /case-studies/quote >}}

Today, the NetEase internal cloud platform "can support 30,000 nodes in a single cluster," says Architect Zeng Yuxing. "In production, we have gotten the data of 10,000 nodes in a single cluster. The whole internal system is using this system for development, test, and production."


The NetEase team reports that Kubernetes has increased R&D efficiency by more than 100%. Deployment efficiency has improved by 280%. "In the past, if we wanted to do upgrades, we needed to work with other teams, even in other departments," says Feng. "We needed special staff to prepare everything, so it took about half an hour. Now we can do it in only 5 minutes." The new platform also allows for mixed deployments using GPU and CPU resources. "Before, if we put all the resources toward the GPU, we won't have spare resources for the CPU. But now we have improvements thanks to the mixed deployments." Those improvements have also brought an increase in resource utilization.
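
Mixed GPU/CPU scheduling of the kind Feng describes is normally expressed through resource requests in a pod spec. As an illustrative sketch only (not NetEase's actual manifest; the name and image are hypothetical, and it assumes a device plugin such as NVIDIA's exposes the `nvidia.com/gpu` resource):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: training-worker              # hypothetical workload name
spec:
  containers:
  - name: trainer
    image: example.com/trainer:1.0   # placeholder image
    resources:
      limits:
        nvidia.com/gpu: 1            # lands on a GPU node via the device plugin
      requests:
        cpu: "2"                     # CPU work shares the same cluster and scheduler
        memory: 4Gi
```

Because GPU and CPU workloads declare their needs through the same scheduler, both can be packed onto one cluster, which is what makes mixed deployments like these possible.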

{{< case-studies/quote author="Li Lanqing, Kubernetes Developer, NetEase" >}}
"By engaging with this community, we can gain some experience from it and we can also benefit from it. We can see what are the concerns and the challenges faced by the community, so we can get involved."
{{< /case-studies/quote >}}

Based on the results and learnings from using its internal platform, the company introduced a Kubernetes-based cloud and microservices-oriented PaaS product, NetEase Qingzhou Microservice, to outside customers. "The idea is that we can find the problems encountered by our game and e-commerce and cloud music providers, so we can integrate their experiences and provide a platform to satisfy the needs of our users," says Zeng.


With or without the use of the NetEase product, the team encourages other companies to try Kubernetes. "As long as a company has a mature team and enough developers, I think Kubernetes is a very good technology that can help them," says Kubernetes developer Li Lanqing.


As an end user as well as a vendor, NetEase has become more involved in the community, learning from other companies and sharing what they've done. The team has been contributing to the Harbor and Envoy projects, providing feedback as the technologies are being tested at NetEase scale. "We are a team focusing on addressing the challenges of microservices architecture," says Feng. "By engaging with this community, we can gain some experience from it and we can also benefit from it. We can see what are the concerns and the challenges faced by the community, so we can get involved."

\ No newline at end of file
diff --git a/content/bn/case-studies/netease/netease_featured_logo.png b/content/bn/case-studies/netease/netease_featured_logo.png
new file mode 100644
index 0000000000000..5700b940f34af
Binary files /dev/null and b/content/bn/case-studies/netease/netease_featured_logo.png differ
diff --git a/content/bn/case-studies/netease/netease_featured_logo.svg b/content/bn/case-studies/netease/netease_featured_logo.svg
new file mode 100644
index 0000000000000..0ea176812dd65
--- /dev/null
+++ b/content/bn/case-studies/netease/netease_featured_logo.svg
@@ -0,0 +1 @@
kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/newyorktimes/index.html b/content/bn/case-studies/newyorktimes/index.html
new file mode 100644
index 0000000000000..291c06a6c0c23
--- /dev/null
+++ b/content/bn/case-studies/newyorktimes/index.html
@@ -0,0 +1,73 @@
---
title: New York Times Case Study
case_study_styles: true
cid: caseStudies

new_case_study_styles: true
heading_background: /images/case-studies/newyorktimes/banner1.jpg
heading_title_logo: /images/newyorktimes_logo.png
subheading: >
  The New York Times: From Print to the Web to Cloud Native
case_study_details:
  - Company: New York Times
  - Location: New York, N.Y.
  - Industry: News Media
---

Challenge


When the company decided a few years ago to move out of its data centers, its first deployments on the public cloud were smaller, less critical applications managed on virtual machines. "We started building more and more tools, and at some point we realized that we were doing a disservice by treating Amazon as another data center," says Deep Kapadia, Executive Director, Engineering at The New York Times. Kapadia was tapped to lead a Delivery Engineering Team that would "design for the abstractions that cloud providers offer us."


Solution


The team decided to use Google Cloud Platform and its Kubernetes-as-a-service offering, GKE.


Impact


Speed of delivery increased. Some of the legacy VM-based deployments took 45 minutes; with Kubernetes, that time was "just a few seconds to a couple of minutes," says Engineering Manager Brian Balser. Adds Li: "Teams that used to deploy on weekly schedules or had to coordinate schedules with the infrastructure team now deploy their updates independently, and can do it daily when necessary." Adopting Cloud Native Computing Foundation technologies allows for a more unified approach to deployment across the engineering staff, and portability for the company.

{{< case-studies/quote author="Deep Kapadia, Executive Director, Engineering at The New York Times" >}}
{{< youtube DqS_IPw-c6o youtube-quote-sm >}}
{{< youtube Tm4VfJtOHt8 youtube-quote-sm >}}
"I think once you get over the initial hump, things get a lot easier and actually a lot faster."
{{< /case-studies/quote >}}

Founded in 1851 and known as the newspaper of record, The New York Times is a digital pioneer: Its first website launched in 1996, before Google even existed. After deciding a few years ago to move out of its private data centers, including one located in the pricy real estate of Manhattan, the company recently took another step into the future by going cloud native.


At first, the infrastructure team "managed the virtual machines in the Amazon cloud, and they deployed more critical applications in our data centers and the less critical ones on AWS as an experiment," says Deep Kapadia, Executive Director, Engineering at The New York Times. "We started building more and more tools, and at some point we realized that we were doing a disservice by treating Amazon as another data center."


To get the most out of the cloud, Kapadia was tapped to lead a new Delivery Engineering Team that would "design for the abstractions that cloud providers offer us." In mid-2016, they began looking at the Google Cloud Platform and its Kubernetes-as-a-service offering, GKE.


At the time, says team member Tony Li, a Site Reliability Engineer, "We had some internal tooling that attempted to do what Kubernetes does for containers, but for VMs. We asked why are we building and maintaining these tools ourselves?"


In early 2017, the first production application—the nytimes.com mobile homepage—began running on Kubernetes, serving just 1% of the traffic. Today, almost 100% of the nytimes.com site's end-user facing applications run on GCP, with the majority on Kubernetes.

{{< case-studies/quote image="/images/case-studies/newyorktimes/banner3.jpg" >}}
"We had some internal tooling that attempted to do what Kubernetes does for containers, but for VMs. We asked why are we building and maintaining these tools ourselves?"
{{< /case-studies/quote >}}

The team found that the speed of delivery was immediately impacted. "Deploying Docker images versus spinning up VMs was quite a lot faster," says Engineering Manager Brian Balser. Some of the legacy VM-based deployments took 45 minutes; with Kubernetes, that time was "just a few seconds to a couple of minutes."


The plan is to get as much as possible, not just the website, running on Kubernetes, and beyond that, moving toward serverless deployments. For instance, The New York Times crossword app was built on Google App Engine, which has been the main platform for the company's experimentation with serverless. "The hardest part was getting the engineers over the hurdle of how little they had to do," Chief Technology Officer Nick Rockwell recently told The CTO Advisor. "Our experience has been very, very good. We have invested a lot of work into deploying apps on container services, and I'm really excited about experimenting with deploying those on App Engine Flex and AWS Fargate and seeing how that feels, because that's a great migration path."


There are some exceptions to the move to cloud native, of course. "We have the print publishing business as well," says Kapadia. "A lot of that is definitely not going down the cloud-native path because they're using vendor software and even special machinery that prints the physical paper. But even those teams are looking at things like App Engine and Kubernetes if they can."


Kapadia acknowledges that there was a steep learning curve for some engineers, but "I think once you get over the initial hump, things get a lot easier and actually a lot faster."

{{< case-studies/quote image="/images/case-studies/newyorktimes/banner4.jpg" >}}
"Right now, every team is running a small Kubernetes cluster, but it would be nice if we could all live in a larger ecosystem," says Kapadia. "Then we can harness the power of things like service mesh proxies that can actually do a lot of instrumentation between microservices, or service-to-service orchestration. Those are the new things that we want to experiment with as we go forward."
{{< /case-studies/quote >}}

At The New York Times, they did. As teams started sharing their own best practices with each other, "We're no longer the bottleneck for figuring out certain things," Kapadia says. "Most of the infrastructure and systems were managed by a centralized function. We've sort of blown that up, partly because Google and Amazon have tools that allow us to do that. We provide teams with complete ownership of their Google Cloud Platform projects, and give them a set of sensible defaults or standards. We let them know, 'If this works for you as is, great! If not, come talk to us and we'll figure out how to make it work for you.'"


As a result, "It's really allowed teams to move at a much more rapid pace than they were able to in the past," says Kapadia. Adds Li: "The use of GKE means each team can get their own compute cluster, reducing the number of individual instances they have to care about since developers can treat the cluster as a whole. Because the ticket-based workflow was removed from requesting resources and connections, developers can just call an API to get what they want. Teams that used to deploy on weekly schedules or had to coordinate schedules with the infrastructure team now deploy their updates independently, and can do it daily when necessary."


Another benefit to adopting Kubernetes: allowing for a more unified approach to deployment across the engineering staff. "Before, many teams were building their own tools for deployment," says Balser. With Kubernetes—as well as the other CNCF projects The New York Times uses, including Fluentd to collect logs for all of its AWS servers, gRPC for its Publishing Pipeline, Prometheus, and Envoy—"we can benefit from the advances that each of these technologies make, instead of trying to catch up."

{{< case-studies/quote >}}
Li calls the Cloud Native Computing Foundation's projects "a northern star that we can all look at and follow."
{{< /case-studies/quote >}}

These open-source technologies have given the company more portability. "CNCF has enabled us to follow an industry standard," says Kapadia. "It allows us to think about whether we want to move away from our current service providers. Most of our applications are connected to Fluentd. If we wish to switch our logging provider from provider A to provider B we can do that. We're running Kubernetes in GCP today, but if we want to run it in Amazon or Azure, we could potentially look into that as well."


Li calls the Cloud Native Computing Foundation's projects "a northern star that we can all look at and follow." Led by that star, the team is looking ahead to a year of onboarding the remaining half of the 40 or so product engineering teams to extract even more value out of the technology. "Right now, every team is running a small Kubernetes cluster, but it would be nice if we could all live in a larger ecosystem," says Kapadia. "Then we can harness the power of things like service mesh proxies that can actually do a lot of instrumentation between microservices, or service-to-service orchestration. Those are the new things that we want to experiment with as we go forward."

\ No newline at end of file
diff --git a/content/bn/case-studies/newyorktimes/newyorktimes_featured.png b/content/bn/case-studies/newyorktimes/newyorktimes_featured.png
new file mode 100644
index 0000000000000..fad0927883a93
Binary files /dev/null and b/content/bn/case-studies/newyorktimes/newyorktimes_featured.png differ
diff --git a/content/bn/case-studies/newyorktimes/newyorktimes_featured.svg b/content/bn/case-studies/newyorktimes/newyorktimes_featured.svg
new file mode 100644
index 0000000000000..e386c15806625
--- /dev/null
+++ b/content/bn/case-studies/newyorktimes/newyorktimes_featured.svg
@@ -0,0 +1 @@
kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/newyorktimes/newyorktimes_logo.png b/content/bn/case-studies/newyorktimes/newyorktimes_logo.png
new file mode 100644
index 0000000000000..693a742c3ebcb
Binary files /dev/null and b/content/bn/case-studies/newyorktimes/newyorktimes_logo.png differ
diff --git a/content/bn/case-studies/nokia/index.html b/content/bn/case-studies/nokia/index.html
new file mode 100644
index 0000000000000..b22465034a794
--- /dev/null
+++ b/content/bn/case-studies/nokia/index.html
@@ -0,0 +1,77 @@
---
title: Nokia Case Study
linkTitle: Nokia
case_study_styles: true
cid: caseStudies
logo: nokia_featured_logo.png

new_case_study_styles: true
heading_background: /images/case-studies/nokia/banner1.jpg
heading_title_logo: /images/nokia_logo.png
subheading: >
  Nokia: Enabling 5G and DevOps at a Telecom Company with Kubernetes
case_study_details:
  - Company: Nokia
  - Location: Espoo, Finland
  - Industry: Telecommunications
---

Challenge


Nokia's core business is building telecom networks end-to-end; its main products are related to the infrastructure, such as antennas, switching equipment, and routing equipment. "As telecom vendors, we have to deliver our software to several telecom operators and put the software into their infrastructure, and each of the operators have a bit different infrastructure," says Gergely Csatari, Senior Open Source Engineer. "There are operators who are running on bare metal. There are operators who are running on virtual machines. There are operators who are running on VMware Cloud and OpenStack Cloud. We want to run the same product on all of these different infrastructures without changing the product itself."


Solution


The company decided that moving to cloud native technologies would allow teams to have infrastructure-agnostic behavior in their products. Teams at Nokia began experimenting with Kubernetes in pre-1.0 versions. "The simplicity of the label-based scheduling of Kubernetes was a sign that showed us this architecture will scale, will be stable, and will be good for our purposes," says Csatari. The first Kubernetes-based product, the Nokia Telephony Application Server, went live in early 2018. "Now, all the products are doing some kind of re-architecture work, and they're moving to Kubernetes."


Impact


Kubernetes has enabled Nokia's foray into 5G. "When you develop something that is part of the operator's infrastructure, you have to develop it for the future, and Kubernetes and containers are the forward-looking technologies," says Csatari. The teams using Kubernetes are already seeing clear benefits. "By separating the infrastructure and the application layer, we have less dependencies in the system, which means that it's easier to implement features in the application layer," says Csatari. And because teams can test the exact same binary artifact independently of the target execution environment, "we find more errors in early phases of the testing, and we do not need to run the same tests on different target environments, like VMware, OpenStack, or bare metal," he adds. As a result, "we save several hundred hours in every release."

{{< case-studies/quote author="Gergely Csatari, Senior Open Source Engineer, Nokia" >}}
"When people are picking up their phones and making a call on Nokia networks, they are creating containers in the background with Kubernetes."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
Nokia was the first name in mobile phones when they were becoming ubiquitous in the late 1990s and early 2000s. But by 2014, the company had sold off its mobile device division and was focusing its core business not on the handhelds used for calls, but on the networks.
{{< /case-studies/lead >}}

Today, Nokia is building telecom networks end-to-end—from antennas to switching and routing equipment—serving operators in more than 120 countries. "As telecom vendors, we have to deliver our software to several telecom operators and put the software into their infrastructure, and each of the operators have a bit different infrastructure," says Gergely Csatari, Senior Open Source Engineer at Nokia. "There are operators who are running on bare metal. There are operators who are running on virtual machines. There are operators who are running on VMware Cloud and OpenStack Cloud. We want to run the same product on all of these different infrastructures without changing the product itself."


Looking for a way to allow its teams to build products with infrastructure-agnostic behavior, the company decided to embrace containerization, Kubernetes, and other cloud native technologies, a move that is being made across the telecom industry. Since early 2018, "when people are picking up their phones and making a call on Nokia networks, they are creating containers in the background with Kubernetes," says Csatari. "Now, all the products are doing some kind of re-architecture work, and they're moving to Kubernetes."

{{< case-studies/quote
  image="/images/case-studies/nokia/banner3.jpg"
  author="Gergely Csatari, Senior Open Source Engineer, Nokia"
>}}
"Having the community and CNCF around Kubernetes is not only important for having a connection to other companies who are using Kubernetes and a forum where you can ask or discuss features of Kubernetes. But as a company who would like to contribute to Kubernetes, it was very important to have a CLA (Contributors License Agreement) which is connected to the CNCF and not to a particular company. That was a critical step for us to start contributing to Kubernetes and Helm."
{{< /case-studies/quote >}}

Nokia's cloud native journey began about two years ago, when Csatari's team was building the company's Telephony Application Server (TAS). "We wanted to have a service execution engine in the product, which was a totally separate function from all other parts," he says. "There, we had the possibility to think about new architectures and new tools that we could use. We created this particular product based on Kubernetes, and we liked the work, so we started to talk about cloud native and containers and all of these things. We did a very extensive research of different container orchestration tools. We knew that we have some, let's say, strange or different requirements because of the special environment that our software is running on."


For one thing, Nokia's software serves millions of people, and is required to have the carrier-grade "five nines" availability: to be up 99.999% of the time. "If you turn it to minutes, this means we're allowed to have only 10 minutes of downtime in a whole year," says Csatari. "Downtime here means that you are not able to serve the person to full capacity, which means that we cannot fail. This includes software upgrades, everything, because when you call 911, you're using our software, and you expect that it will work."


That meant that they needed to be able to set affinity and anti-affinity rules in their orchestration tools. "You cannot put all of the functions to the same physical host because physical hosts are failing," Csatari explains. "If you fail with one physical host, then you lose all of the core processing processes. Then there are no calls going through. So we have to divide them among the different physical hosts. At that time, only Kubernetes was able to provide these features. The simplicity of the label-based scheduling of Kubernetes was a sign that showed us this architecture will scale, will be stable, and will be good for our purposes."
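
The label-based spreading Csatari describes maps onto the Kubernetes pod anti-affinity API. A minimal sketch (the workload name and image are hypothetical, not Nokia's manifest) that forbids two replicas from sharing a physical host:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: call-processor             # hypothetical workload name
spec:
  replicas: 3
  selector:
    matchLabels:
      app: call-processor
  template:
    metadata:
      labels:
        app: call-processor
    spec:
      affinity:
        podAntiAffinity:
          # Hard rule: never schedule two pods with this label onto the same node
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: call-processor
            topologyKey: kubernetes.io/hostname
      containers:
      - name: call-processor
        image: example.com/call-processor:1.0   # placeholder image
```

If one physical host fails, the remaining replicas are by construction elsewhere, which is the property the five-nines requirement depends on.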

{{< case-studies/quote
  image="/images/case-studies/nokia/banner4.jpg"
  author="Gergely Csatari, Senior Open Source Engineer, Nokia"
>}}
"Kubernetes opened the window to all of these open source projects instead of implementing everything in house. Our engineers can focus more on the application level, which is actually the thing what we are selling, and not on the infrastructure level. For us, the most important thing about Kubernetes is it allows us to focus on value creation of our business."
{{< /case-studies/quote >}}

The TAS went live in early 2018, and now Kubernetes is also enabling Nokia's foray into 5G. The company is introducing microservices architecture and Kubernetes while adding 5G features to existing products. And all new 5G product development will be on top of Kubernetes. "When you develop something that is part of the operator's infrastructure, you have to develop it for the future, and Kubernetes and containers are the forward-looking technologies," says Csatari.


There have been real time savings thanks to Kubernetes. "By separating the infrastructure and the application layer, we have less dependencies in the system, which means that it's easier to implement features in the application layer," says Csatari. Because teams can test the exact same binary artifact independently of the target execution environment, "we find more errors in early phases of the testing, and we do not need to run the same tests on different target environments, like VMware, OpenStack or bare metal," he adds. As a result, "we save several hundred hours in every release."


Moving from Nokia's legacy cluster management system, which had been built in-house more than thirty years ago, to a Kubernetes platform also meant that "we started using Linux as a base operating system, so we just opened the window to all of these open source projects instead of implementing everything in house," says Csatari. (From CNCF's ecosystem, the team is already using Helm, gRPC, CNI, Prometheus, and Envoy, and plans to implement CoreDNS.) "Our engineers can focus more on the application level, which is actually the thing what we are selling, and not on the infrastructure level. For us, the most important thing about Kubernetes is it allows us to focus on value creation of our business."

{{< case-studies/quote author="Gergely Csatari, Senior Open Source Engineer, Nokia" >}}
"I had some discussions at KubeCon with people from the networking SIG and the resource management working group, to work together on our requirements, and that's very exciting for me and my colleagues."
{{< /case-studies/quote >}}

The company has a long-term goal of moving the entire product portfolio into the Kubernetes platform. To that end, Nokia teams are working together with other companies to add the features needed to use Kubernetes with the real-time, nanosecond-sensitive applications close to the edge of the radio network.


And the CNCF community is proving to be a great forum for that collaboration. "I had some discussions at KubeCon with people from the networking SIG and the resource management working group, to work together on our requirements, and that's very exciting for me and my colleagues," says Csatari. "Previously, everybody had the same problem, but everybody just did it in his own, and now we are trying to solve the same problem together."


Perhaps the biggest impact that Kubernetes is having on Nokia, Csatari believes, is that people are starting to think about how a telecom company can do DevOps. "We are building a DevOps pipeline, which reaches from the actual developer to the customers, and thinking about new ways how can we digitally deliver our software to our customers and get feedback from the customers right to the engineers," he says. "This is something that will fundamentally change how telecom companies are delivering software, and how quickly can we develop new features. This is because of the usage of containers and, of course, the usage of Kubernetes."

\ No newline at end of file
diff --git a/content/bn/case-studies/nokia/nokia_featured_logo.png b/content/bn/case-studies/nokia/nokia_featured_logo.png
new file mode 100644
index 0000000000000..8e046f021f447
Binary files /dev/null and b/content/bn/case-studies/nokia/nokia_featured_logo.png differ
diff --git a/content/bn/case-studies/nokia/nokia_featured_logo.svg b/content/bn/case-studies/nokia/nokia_featured_logo.svg
new file mode 100644
index 0000000000000..1e3cce49565d3
--- /dev/null
+++ b/content/bn/case-studies/nokia/nokia_featured_logo.svg
@@ -0,0 +1 @@
nokia
\ No newline at end of file
diff --git a/content/bn/case-studies/nordstrom/index.html b/content/bn/case-studies/nordstrom/index.html
new file mode 100644
index 0000000000000..c3f2a436b55f7
--- /dev/null
+++ b/content/bn/case-studies/nordstrom/index.html
@@ -0,0 +1,75 @@
---
title: Nordstrom Case Study
case_study_styles: true
cid: caseStudies

new_case_study_styles: true
heading_background: /images/case-studies/nordstrom/banner1.jpg
heading_title_logo: /images/nordstrom_logo.png
subheading: >
  Finding Millions in Potential Savings in a Tough Retail Climate
case_study_details:
  - Company: Nordstrom
  - Location: Seattle, Washington
  - Industry: Retail
---

Challenge


Nordstrom wanted to increase the efficiency and speed of its technology operations, which includes the Nordstrom.com e-commerce site. At the same time, Nordstrom Technology was looking for ways to tighten its technology operational costs.


Solution


After embracing a DevOps transformation and launching a continuous integration/continuous deployment (CI/CD) project four years ago, the company reduced its deployment time from three months to 30 minutes. But they wanted to go even faster across environments, so they began their cloud native journey, adopting Docker containers orchestrated with Kubernetes.


Impact


Nordstrom Technology developers using Kubernetes now deploy faster and can "just focus on writing applications," says Dhawal Patel, a senior engineer on the team building a Kubernetes enterprise platform for Nordstrom. Furthermore, the team has increased Ops efficiency, improving CPU utilization from 5x to 12x depending on the workload. "We run thousands of virtual machines (VMs), but aren't effectively using all those resources," says Patel. "With Kubernetes, without even trying to make our cluster efficient, we are currently at a 10x increase."

{{< case-studies/quote author="Dhawal Patel, senior engineer at Nordstrom" >}}
"We are always looking for ways to optimize and provide more value through technology. With Kubernetes we are showcasing two types of efficiency that we can bring: Dev efficiency and Ops efficiency. It's a win-win."
{{< /case-studies/quote >}}

When Dhawal Patel joined Nordstrom five years ago as an application developer for the retailer's website, he realized there was an opportunity to help speed up development cycles.


In those early DevOps days, Nordstrom Technology still followed a traditional model of silo teams and functions. "As a developer, I was spending more time fixing environments than writing code and adding value to business," Patel says. "I was passionate about that—so I was given the opportunity to help fix it."


The company was eager to move faster, too, and in 2013 launched the first continuous integration/continuous deployment (CI/CD) project. That project was the first step in Nordstrom's cloud native journey.


Dev and Ops team members built a CI/CD pipeline, working with the company's servers on premise. The team chose Chef, and wrote cookbooks that automated virtual IP creation, servers, and load balancing. "After we completed the project, deployment went from three months to 30 minutes," says Patel. "We still had multiple environments—dev, test, staging, then production—so with each environment running the Chef cookbooks, it took 30 minutes. It was a huge achievement at that point."


But new environments still took too long to turn up, so the next step was working in the cloud. Today, Nordstrom Technology has built an enterprise platform that allows the company's 1,500 developers to deploy applications running as Docker containers in the cloud, orchestrated with Kubernetes.

{{< case-studies/quote image="/images/case-studies/nordstrom/banner3.jpg" >}}
"We made a bet that Kubernetes was going to take off, informed by early indicators of community support and project velocity, so we rebuilt our system with Kubernetes at the core."
{{< /case-studies/quote >}}

"The cloud provided faster access to resources, because it took weeks for us to get a virtual machine (VM) on premises," says Patel. "But now we can do the same thing in only five minutes."


Nordstrom's first foray into scheduling containers on a cluster was a homegrown system based on CoreOS fleet. They ran a few proof-of-concept projects on that system until Kubernetes 1.0 was released, at which point they made the switch. "We made a bet that Kubernetes was going to take off, informed by early indicators of community support and project velocity, so we rebuilt our system with Kubernetes at the core," says Marius Grigoriu, Sr. Manager of the Kubernetes team at Nordstrom.


While Kubernetes is often thought of as a platform for microservices, the first application to launch on Kubernetes in a critical production role at Nordstrom was Jira. "It was not the ideal microservice we were hoping to get as our first application," Patel admits, "but the team that was working on it was really passionate about Docker and Kubernetes, and they wanted to try it out. They had their application running on premises, and wanted to move it to Kubernetes."


The benefits were immediate for the teams that came on board. "Teams running on our Kubernetes cluster loved the fact that they had fewer issues to worry about. They didn't need to manage infrastructure or operating systems," says Grigoriu. "Early adopters loved the declarative nature of Kubernetes. They loved the reduced surface area they had to deal with."

{{< case-studies/quote image="/images/case-studies/nordstrom/banner4.jpg" >}}
"Teams running on our Kubernetes cluster loved the fact that they had fewer issues to worry about. They didn't need to manage infrastructure or operating systems," says Grigoriu. "Early adopters loved the declarative nature of Kubernetes. They loved the reduced surface area they had to deal with."
{{< /case-studies/quote >}}

To support these early adopters, Patel's team began growing the cluster and building production-grade services. "We integrated with Prometheus for monitoring, with a Grafana front end; we used Fluentd to push logs to Elasticsearch, so that gives us log aggregation," says Patel. The team also added dozens of open-source components, including CNCF projects, and has made contributions to Kubernetes, Terraform, and kube2iam.


There are now more than 60 development teams running Kubernetes in Nordstrom Technology, and as success stories have popped up, more teams have gotten on board. "Our initial customer base, the ones who were willing to try this out, are now going and evangelizing to the next set of users," says Patel. "One early adopter had Docker containers and he was not sure how to run it in production. We sat with him and within 15 minutes we deployed it in production. He thought it was amazing, and more people in his org started coming in."


For Nordstrom Technology, going cloud native has vastly improved development and operational efficiency. The developers using Kubernetes now deploy faster and can focus on building value in their applications. One such team started with a 25-minute merge-to-deploy cycle that relied on launching virtual machines in the cloud. Switching to Kubernetes sped that process up 5x, bringing their merge-to-deploy time down to 5 minutes.

{{< case-studies/quote >}}
"With Kubernetes, without even trying to make our cluster efficient, we are currently at 40 percent CPU utilization—a 10x increase. We are running 2600+ customer pods that would have been 2600+ VMs if they had gone directly to the cloud. We are running them on 40 VMs now, so that's a huge reduction in operational overhead."
{{< /case-studies/quote >}}

Speed is great, and easily demonstrated, but perhaps the bigger impact lies in the operational efficiency. "We run thousands of VMs on AWS, and their overall average CPU utilization is about four percent," says Patel. "With Kubernetes, without even trying to make our cluster efficient, we are currently at 40 percent CPU utilization—a 10x increase. We are running 2600+ customer pods that would have been 2600+ VMs if they had gone directly to the cloud. We are running them on 40 VMs now, so that's a huge reduction in operational overhead."


Nordstrom Technology is also exploring running Kubernetes on bare metal on premises. "If we can build an on-premises Kubernetes cluster," says Patel, "we could bring the power of cloud to provision resources fast on-premises. Then for the developer, their interface is Kubernetes; they might not even realize or care that their services are now deployed on premises because they're only working with Kubernetes."


For that reason, Patel is eagerly following Kubernetes' development of multi-cluster capabilities. "With cluster federation, we can have our on-premise as the primary cluster and the cloud as a secondary burstable cluster," he says. "So, when there is an anniversary sale or Black Friday sale and we need more containers, we can go to the cloud."


That kind of possibility—as well as the impact that Grigoriu and Patel's team has already delivered using Kubernetes—is what led Nordstrom on its cloud native journey in the first place. "The way the retail environment is today, we are trying to build responsiveness and flexibility where we can," says Grigoriu. "Kubernetes makes it easy to bring efficiency to both the Dev and Ops side of the equation. It's a win-win."

\ No newline at end of file
diff --git a/content/bn/case-studies/nordstrom/nordstrom_featured_logo.png b/content/bn/case-studies/nordstrom/nordstrom_featured_logo.png
new file mode 100644
index 0000000000000..a557ffa82f12e
Binary files /dev/null and b/content/bn/case-studies/nordstrom/nordstrom_featured_logo.png differ
diff --git a/content/bn/case-studies/nordstrom/nordstrom_featured_logo.svg b/content/bn/case-studies/nordstrom/nordstrom_featured_logo.svg
new file mode 100644
index 0000000000000..a162e93f03b1e
--- /dev/null
+++ b/content/bn/case-studies/nordstrom/nordstrom_featured_logo.svg
@@ -0,0 +1 @@
kubernetes.io-logos2
\ No newline at end of file
diff --git a/content/bn/case-studies/northwestern-mutual/index.html b/content/bn/case-studies/northwestern-mutual/index.html
new file mode 100644
index 0000000000000..b10ca384f570c
--- /dev/null
+++ b/content/bn/case-studies/northwestern-mutual/index.html
@@ -0,0 +1,69 @@
---
title: Northwestern Mutual Case Study
case_study_styles: true
cid: caseStudies

new_case_study_styles: true
heading_background: /images/case-studies/northwestern/banner1.jpg
heading_title_logo: /images/northwestern_logo.png
subheading: >
  Cloud Native at Northwestern Mutual
case_study_details:
  - Company: Northwestern Mutual
  - Location: Milwaukee, WI
  - Industry: Insurance and Financial Services
---

Challenge


In the spring of 2015, Northwestern Mutual acquired a fintech startup, LearnVest, and decided to take "Northwestern Mutual's leading products and services and meld it with LearnVest's digital experience and innovative financial planning platform," says Brad Williams, Director of Engineering for Client Experience, Northwestern Mutual. The company's existing infrastructure had been optimized for batch workflows hosted on on-prem networks; deployments were very traditional, focused on following a process instead of providing deployment agility. "We had to build a platform that was elastically scalable, but also much more responsive, so we could quickly get data to the client website so our end-customers have the experience they expect," says Williams.


Solution


The platform team came up with a plan for using the public cloud (AWS), Docker containers, and Kubernetes for orchestration. "Kubernetes gave us that base framework so teams can be very autonomous in what they're building and deliver very quickly and frequently," says Northwestern Mutual Cloud Native Engineer Frank Greco Jr. The team also built and open-sourced Kanali, a Kubernetes-native API management tool that uses OpenTracing, Jaeger, and gRPC.


Impact


Before, infrastructure deployments could take weeks; now, it is done in a matter of minutes. The number of deployments has increased dramatically, from about 24 a year to over 500 in just the first 10 months of 2017. Availability has also increased: There used to be a six-hour control window for commits every Sunday morning, as well as other periods of general maintenance, during which outages could happen. "Now we have eliminated the planned outage windows," says Bryan Pfremmer, App Platform Teams Manager, Northwestern Mutual. Kanali has had an impact on the bottom line. The vendor API management product that the company previously used required 23 servers, "dedicated, to only API management," says Pfremmer. "Now it's all integrated in the existing stack and running as another deployment on Kubernetes. And that's just one environment. Between the three that we had plus the test, that's hard dollar savings."

{{< case-studies/quote author="Frank Greco Jr., Cloud Native Engineer at Northwestern Mutual" >}}
"In a large enterprise, you're going to have people using Kubernetes, but then you're also going to have people using WAS and .NET. You may not be at a point where your whole stack can be cloud native. What if you can take your API management tool and make it cloud native, but still proxy to legacy systems? Using different pieces that are cloud native, open source and Kubernetes native, you can do pretty innovative stuff."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
For more than 160 years, Northwestern Mutual has maintained its industry leadership in part by keeping a strong focus on risk management.
{{< /case-studies/lead >}}

For many years, the company took a similar approach to managing its technology, and it has recently undergone a digital transformation to advance its digital strategy, including making a lot of noise in the cloud native world.


In the spring of 2015, this insurance and financial services company acquired a fintech startup, LearnVest, and decided to take "Northwestern Mutual's leading products and services and meld it with LearnVest's digital experience and innovative financial planning platform," says Brad Williams, Director of Engineering for Client Experience, Northwestern Mutual. The company's existing infrastructure had been optimized for batch workflows hosted in an on-premise data center; deployments were very traditional and had too many error-prone manual steps.


In order to give the company's 4.5 million clients the digital experience they'd come to expect, says Williams, "We had to build a platform that was elastically scalable, but also much more responsive, so we could quickly get data to the client website. We essentially said, 'You build the system that you think is necessary to support a new, modern-facing one.' That's why we departed from anything legacy."

{{< case-studies/quote image="/images/case-studies/northwestern/banner3.jpg" >}}
"Kubernetes has definitely been the right choice for us. It gave us that base framework so teams can be autonomous in what they're building and deliver very quickly and frequently."
{{< /case-studies/quote >}}

Williams and the rest of the platform team decided that the first step would be to start moving from private data centers to AWS. With a new microservice architecture in mind—and the freedom to implement what was best for the organization—they began using Docker containers. After looking into the various container orchestration options, they went with Kubernetes, even though it was still in beta at the time. "There was some debate whether we should build something ourselves, or just leverage that product and evolve with it," says Northwestern Mutual Cloud Native Engineer Frank Greco Jr. "Kubernetes has definitely been the right choice for us. It gave us that base framework so teams can be autonomous in what they're building and deliver very quickly and frequently."


As early adopters, the team had to do a lot of work with Ansible scripts to stand up the cluster. "We had a lot of hard security requirements given the nature of our business," explains Bryan Pfremmer, App Platform Teams Manager, Northwestern Mutual. "We found ourselves running a configuration that very few other people ever tried." The client experience group was the first to use the new platform; today, a few hundred of the company's 1,500 engineers are using it and more are eager to get on board.


The results have been dramatic. Before, infrastructure deployments could take two weeks; now, they are done in a matter of minutes. With the new focus on infrastructure automation and self-service, "You can take an app to production in that same day if you want to," says Pfremmer.

{{< case-studies/quote image="/images/case-studies/northwestern/banner4.jpg" >}}
"Now, developers have autonomy, they can use this whenever they want, however they want. It becomes more valuable the more instrumentation downstream that happens, as we mature in it."
{{< /case-studies/quote >}}

The process used to be so cumbersome that minor bug releases would be bundled with feature releases. With the new streamlined system enabled by Kubernetes, the number of deployments has increased from about 24 a year to more than 500 in just the first 10 months of 2017. Availability has also been improved: There used to be a six-hour control window for commits every early Sunday morning, as well as other periods of general maintenance, during which outages could happen. "Now there's no planned outage window," notes Pfremmer.


Northwestern Mutual built its own API management tool—called Kanali—and open sourced it in the summer of 2017. The team took on the project because it was a key capability for what they were building, and the prior solution worked in an "anti-cloud native way that was different than everything else we were doing," says Greco. Now API management is just another container deployed to Kubernetes along with a separate Jaeger deployment.


Now the engineers using the Kubernetes deployment platform have the added benefit of visibility in production—and autonomy. Before, a centralized team would have to run a trace. "Now, developers have autonomy, they can use this whenever they want, however they want. It becomes more valuable the more instrumentation downstream that happens, as we mature in it," says Greco.

{{< case-studies/quote >}}
"We're trying to make what we're doing known so that we can find people who are like, 'Yeah, that's interesting. I want to come do it!'"
{{< /case-studies/quote >}}

But the team didn't stop there. "In a large enterprise, you're going to have people using Kubernetes, but then you're also going to have people using WAS and .NET," says Greco. "You may not be at a point where your whole stack can be cloud native. What if you can take your API management tool and make it cloud native, but still proxy to legacy systems? Using different pieces that are cloud native, open source and Kubernetes native, you can do pretty innovative stuff."


As the team continues to improve its stack and share its Kubernetes best practices, it feels that Northwestern Mutual's reputation as a technology-first company is evolving too. "No one would think a company that's 160-plus years old is foraying this deep into the cloud and infrastructure stack," says Pfremmer. And they're hoping that means they'll be able to attract new talent. "We're trying to make what we're doing known so that we can find people who are like, 'Yeah, that's interesting. I want to come do it!'"

\ No newline at end of file
diff --git a/content/bn/case-studies/northwestern-mutual/northwestern_featured_logo.png b/content/bn/case-studies/northwestern-mutual/northwestern_featured_logo.png
new file mode 100644
index 0000000000000..7c1422f32b86d
Binary files /dev/null and b/content/bn/case-studies/northwestern-mutual/northwestern_featured_logo.png differ
diff --git a/content/bn/case-studies/northwestern-mutual/northwestern_featured_logo.svg b/content/bn/case-studies/northwestern-mutual/northwestern_featured_logo.svg
new file mode 100644
index 0000000000000..7a2f09de54716
--- /dev/null
+++ b/content/bn/case-studies/northwestern-mutual/northwestern_featured_logo.svg
@@ -0,0 +1 @@
kubernetes.io-logos2
\ No newline at end of file
diff --git a/content/bn/case-studies/ocado/index.html b/content/bn/case-studies/ocado/index.html
new file mode 100644
index 0000000000000..33674e0598bfd
--- /dev/null
+++ b/content/bn/case-studies/ocado/index.html
@@ -0,0 +1,83 @@
---
title: Ocado Case Study
linkTitle: Ocado
case_study_styles: true
cid: caseStudies
logo: ocado_featured_logo.png
featured: true
weight: 4
quote: >
  People at Ocado Technology have been quite amazed. They ask, 'Can we do this on a Dev cluster?' and 10 minutes later we have rolled out something that is deployed across the cluster. The speed from idea to implementation to deployment is amazing.

new_case_study_styles: true
heading_background: /images/case-studies/ocado/banner1.jpg
heading_title_logo: /images/ocado_logo.png
subheading: >
  Ocado: Running Grocery Warehouses with a Cloud Native Platform
case_study_details:
  - Company: Ocado Technology
  - Location: Hatfield, England
  - Industry: Grocery retail technology and platforms
---

Challenge


The world's largest online-only grocery retailer, Ocado developed the Ocado Smart Platform to manage its own operations, from websites to warehouses, and is now licensing the technology to other retailers such as Kroger. To set up the first warehouses for the platform, Ocado shifted from virtual machines and Puppet infrastructure to Docker containers, using CoreOS's fleet scheduler to provision all the services on its OpenStack-based private cloud on bare metal. As the Smart Platform grew and "fleet was going end-of-life," says Platform Engineer Mike Bryant, "we started looking for a more complete platform, with all of these disparate infrastructure services being brought together in one unified API."


Solution


The team decided to migrate from fleet to Kubernetes on Ocado's private cloud. The Kubernetes stack currently uses kubeadm for bootstrapping, CNI with Weave Net for networking, Prometheus Operator for monitoring, Fluentd for logging, and OpenTracing for distributed tracing. The first app on Kubernetes, a business-critical service in the warehouses, went into production in the summer of 2017, with a mass migration continuing into 2018. Hundreds of Ocado engineers working on the Smart Platform are now deploying on Kubernetes.


Impact


With Kubernetes, "the speed from idea to implementation to deployment is amazing," says Bryant. "I've seen features go from development to production inside of a week now. In the old world, a new application deployment could easily take over a month." And because there are no longer restrictive deployment windows in the warehouses, the rate of deployments has gone from as few as two per week to dozens per week. Ocado has also achieved cost savings because Kubernetes gives the team the ability to have more fine-grained resource allocation. Says DevOps Team Leader Kevin McCormack: "We have more confidence in the resource allocation/separation features of Kubernetes, so we have been able to migrate from around 10 fleet clusters to one Kubernetes cluster." The team also uses Prometheus and Grafana to visualize resource allocation, and makes the data available to developers. "The increased visibility offered by Prometheus means developers are more aware of what they are using and how their use impacts others, especially since we now have one shared cluster," says McCormack. "I'd estimate that we use about 15-25% less hardware resources to host the same applications in Kubernetes in our test environments."

{{< case-studies/quote author="Mike Bryant, Platform Engineer, Ocado" >}}
"People at Ocado Technology have been quite amazed. They ask, 'Can we do this on a Dev cluster?' and 10 minutes later we have rolled out something that is deployed across the cluster. The speed from idea to implementation to deployment is amazing."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
When it was founded in 2000, Ocado was an online-only grocery retailer in the U.K. In the years since, it has expanded from delivering produce to families to providing technology to other grocery retailers.
{{< /case-studies/lead >}}

The company began developing its Ocado Smart Platform to manage its own operations, from websites to warehouses, and is now licensing the technology to other grocery chains around the world, such as Kroger. To set up the first warehouses on the platform, Ocado shifted from virtual machines and Puppet infrastructure to Docker containers, using CoreOS's fleet scheduler to provision all the services on its OpenStack-based private cloud on bare metal. As the Smart Platform grew, and "fleet was going end-of-life," says Platform Engineer Mike Bryant, "we started looking for a more complete platform, with all of these disparate infrastructure services being brought together in one unified API."


Bryant had already been using Kubernetes with Code for Life, a children's education project that's part of Ocado's charity arm. "We really liked it, so we started looking at it seriously for our production workloads," says Bryant. The team that managed fleet had researched orchestration solutions and landed on Kubernetes as well. "We were looking for a platform with wide adoption, and that was where the momentum was," says DevOps Team Leader Kevin McCormack. The two paths converged, and "We didn't even go through any proof-of-concept stage. The Code for Life work served that purpose," says Bryant.

{{< case-studies/quote
  image="/images/case-studies/ocado/banner3.jpg"
  author="Kevin McCormack, DevOps Team Leader, Ocado"
>}}
"We were looking for a platform with wide adoption, and that was where the momentum was. The two paths converged, and we didn't even go through any proof-of-concept stage. The Code for Life work served that purpose."
{{< /case-studies/quote >}}

In the summer of 2016, the team began migrating from fleet to Kubernetes on Ocado's private cloud. The Kubernetes stack currently uses kubeadm for bootstrapping, CNI with Weave Net for networking, Prometheus Operator for monitoring, Fluentd for logging, and OpenTracing for distributed tracing.
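
For context on the bootstrapping step, kubeadm is typically driven by a small configuration file passed to `kubeadm init`. A minimal, hypothetical sketch (not Ocado's configuration; the version string is a placeholder):

```yaml
# kubeadm-config.yaml, used as: kubeadm init --config kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: "v1.28.0"   # placeholder; pin to the release you actually run
networking:
  podSubnet: "10.32.0.0/12"    # Weave Net's default pod CIDR
```

Once the control plane is up, the CNI plugin (Weave Net in this stack) is applied as an ordinary manifest, and worker nodes are added with `kubeadm join`.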


The first app on Kubernetes, a business-critical service in the warehouses, went into production a year later. Once that app was running smoothly, a mass migration continued into 2018. Hundreds of Ocado engineers working on the Smart Platform are now deploying on Kubernetes, and the platform is live in Ocado's warehouses, managing tens of thousands of orders a week. At full capacity, Ocado's latest warehouse in Erith, southeast London, will deliver more than 200,000 orders per week, making it the world's largest facility for online grocery.


There are about 150 microservices now running on Kubernetes, with multiple instances of many of them. "We're not just deploying all these microservices at once. We're deploying them all for one warehouse, and then they're all being deployed again for the next warehouse, and again and again," says Bryant.


The move to Kubernetes was eye-opening for many people at Ocado Technology. "In the early days of putting the platform into our test infrastructure, the technical architect asked what network performance was like on Weave Net with encryption turned on," recalls Bryant. "So we found a Docker container for iPerf, wrote a daemon set, deployed it. A few moments later, we've deployed the entire thing across this cluster. He was pretty blown away by that."
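
A DaemonSet is what makes that one-step, cluster-wide rollout work: it runs one copy of a pod on every node. A sketch of an iPerf server deployed that way (illustrative only; the name and image are assumptions, not the manifest Ocado wrote):

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: iperf-server               # hypothetical name
spec:
  selector:
    matchLabels:
      app: iperf-server
  template:
    metadata:
      labels:
        app: iperf-server
    spec:
      containers:
      - name: iperf3
        image: networkstatic/iperf3   # a commonly used public iperf3 image
        args: ["-s"]                  # server mode; clients measure node-to-node throughput
        ports:
        - containerPort: 5201         # iperf3's default port
```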

{{< case-studies/quote
  image="/images/case-studies/ocado/banner4.jpg"
  author="Mike Bryant, Platform Engineer, Ocado"
>}}
"The unified API of Kubernetes means this is all in one place, and it's one flow for approval and rollout. I've seen features go from development to production inside of a week now. In the old world, a new application deployment could easily take over a month."
{{< /case-studies/quote >}}

Indeed, the impact has been profound. "Prior to containerization, we had quite restrictive deployment windows in our warehouses," says Bryant. "Moving to microservices, we've been able to deploy much more frequently. We've been able to move towards continuous delivery in a number of areas. In our older warehouse, new application deployments involve talking to a bunch of different teams for different levels of the stack: from VM provisioning, to storage, to load balancers, and so on. The unified API of Kubernetes means this is all in one place, and it's one flow for approval and rollout. I've seen features go from development to production inside of a week now. In the old world, a new application deployment could easily take over a month."


The rate of deployment has gone from as few as two per week to dozens per week. "With Kubernetes, some of our development teams have been able to deploy their application to production on the new platform without us noticing," says Bryant, "which means they're faster at doing what they need to do and we have less work."


Ocado has also achieved cost savings because Kubernetes gives the team the ability to have more fine-grained resource allocation. "That lets us shrink quite a lot of our deployments from being per-core VM deployments to having fractions of the core," says Bryant. Adds McCormack: "We have more confidence in the resource allocation/separation features of Kubernetes, so we have been able to migrate from around 10 fleet clusters to one Kubernetes cluster. This means we use our hardware better since if we have to always have two nodes of excess capacity available in case of node failures then we only need two extra instead of 20."
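
Fractional cores are expressed directly in a container's resource requests, which Kubernetes measures in millicores. An illustrative snippet (values hypothetical) of how a service that once claimed a whole per-core VM can ask for a quarter of a core instead:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: small-service              # hypothetical service
spec:
  containers:
  - name: app
    image: example.com/small-service:1.0   # placeholder image
    resources:
      requests:
        cpu: 250m                  # a quarter of a core; four such pods fit where one per-core VM sat
        memory: 256Mi
      limits:
        cpu: 500m
        memory: 512Mi
```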

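The "fractions of the core" Bryant mentions are expressed as millicores in a container's resource requests and limits. A minimal sketch (all names and numbers are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: warehouse-service   # illustrative
spec:
  containers:
  - name: app
    image: example.com/warehouse-service:1.0   # illustrative
    resources:
      requests:
        cpu: 250m       # a quarter of a core, instead of a whole per-core VM
        memory: 256Mi
      limits:
        cpu: 500m
        memory: 512Mi
```

The scheduler packs such pods onto shared nodes, which is what let Ocado consolidate failover headroom from 20 spare nodes down to two.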
{{< case-studies/quote author="Mike Bryant, Platform Engineer, Ocado" >}}
"CNCF have provided us with support of different technologies. We've been able to adopt those in a very easy fashion. We do like that CNCF is vendor agnostic. We're not being asked to commit to this one way of doing things. The vast diversity of viewpoints in CNCF leads to better technology."
{{< /case-studies/quote >}}

The team also uses Prometheus and Grafana to visualize resource allocation, and makes the data available to developers. "The increased visibility offered by Prometheus means developers are more aware of what they are using and how their use impacts others, especially since we now have one shared cluster," says McCormack. "I'd estimate that we use about 15-25% less hardware resource to host the same applications in Kubernetes in our test environments."

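The per-team visibility McCormack describes can be built from the cAdvisor metrics Kubernetes already exposes. A sketch of Prometheus recording rules that pre-compute per-namespace usage for a Grafana dashboard (the rule names are illustrative):

```yaml
groups:
- name: namespace-usage.rules   # illustrative
  rules:
  - record: namespace:container_cpu_usage_seconds:rate5m
    expr: sum(rate(container_cpu_usage_seconds_total[5m])) by (namespace)
  - record: namespace:container_memory_usage_bytes:sum
    expr: sum(container_memory_usage_bytes) by (namespace)
```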

One of the broader benefits of cloud native, says Bryant, is the unified API. "We have one method of doing our deployments that covers the wide range of things we need to do, and we can extend the API," he says. In addition to using Prometheus Operator, the Ocado team has started writing its own operators, some of which have been open sourced. Plus, "CNCF has provided us with support of these different technologies. We've been able to adopt those in a very easy fashion. We do like that CNCF is vendor agnostic. We're not being asked to commit to this one way of doing things. The vast diversity of viewpoints in the CNCF leads to better technology."


Ocado's own technology, in the form of its Smart Platform, will soon be used around the world. And cloud native plays a crucial role in this global expansion. "I wouldn't have wanted to try it without Kubernetes," says Bryant. "Kubernetes has made it so much nicer, especially to have that consistent way of deploying all of the applications, then taking the same thing and being able to replicate it. It's very valuable."

\ No newline at end of file diff --git a/content/bn/case-studies/ocado/ocado_featured_logo.png b/content/bn/case-studies/ocado/ocado_featured_logo.png new file mode 100644 index 0000000000000..0c2ef19ec3b03 Binary files /dev/null and b/content/bn/case-studies/ocado/ocado_featured_logo.png differ diff --git a/content/bn/case-studies/ocado/ocado_featured_logo.svg b/content/bn/case-studies/ocado/ocado_featured_logo.svg new file mode 100644 index 0000000000000..d9e2886e36fda --- /dev/null +++ b/content/bn/case-studies/ocado/ocado_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/openAI/index.html b/content/bn/case-studies/openAI/index.html new file mode 100644 index 0000000000000..6fb2089528343 --- /dev/null +++ b/content/bn/case-studies/openAI/index.html @@ -0,0 +1,69 @@ +--- +title: OpenAI Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/openAI/banner1.jpg +heading_title_logo: /images/openAI_logo.png +subheading: > + Launching and Scaling Up Experiments, Made Simple +case_study_details: + - Company: OpenAI + - Location: San Francisco, California + - Industry: Artificial Intelligence Research +--- + +

Challenge


An artificial intelligence research lab, OpenAI needed infrastructure for deep learning that would allow experiments to be run either in the cloud or in its own data center, and to easily scale. Portability, speed, and cost were the main drivers.


Solution


OpenAI began running Kubernetes on top of AWS in 2016, and in early 2017 migrated to Azure. OpenAI runs key experiments in fields including robotics and gaming both in Azure and in its own data centers, depending on which cluster has free capacity. "We use Kubernetes mainly as a batch scheduling system and rely on our autoscaler to dynamically scale up and down our cluster," says Christopher Berner, Head of Infrastructure. "This lets us significantly reduce costs for idle nodes, while still providing low latency and rapid iteration."


Impact


The company has benefited from greater portability: "Because Kubernetes provides a consistent API, we can move our research experiments very easily between clusters," says Berner. Being able to use its own data centers when appropriate is "lowering costs and providing us access to hardware that we wouldn't necessarily have access to in the cloud," he adds. "As long as the utilization is high, the costs are much lower there." Launching experiments also takes far less time: "One of our researchers who is working on a new distributed training system has been able to get his experiment running in two or three days. In a week or two he scaled it out to hundreds of GPUs. Previously, that would have easily been a couple of months of work."

{{< case-studies/quote >}}
Check out "Building the Infrastructure that Powers the Future of AI" presented by Vicki Cheung, Member of Technical Staff & Jonas Schneider, Member of Technical Staff at OpenAI from KubeCon/CloudNativeCon Europe 2017.
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
From experiments in robotics to old-school video game play research, OpenAI's work in artificial intelligence technology is meant to be shared.
{{< /case-studies/lead >}}

With a mission to ensure powerful AI systems are safe, OpenAI cares deeply about open source—both benefiting from it and contributing safety technology into it. "The research that we do, we want to spread it as widely as possible so everyone can benefit," says OpenAI's Head of Infrastructure Christopher Berner. The lab's philosophy—as well as its particular needs—lent itself to embracing an open source, cloud native strategy for its deep learning infrastructure.


OpenAI started running Kubernetes on top of AWS in 2016, and a year later, migrated the Kubernetes clusters to Azure. "We probably use Kubernetes differently from a lot of people," says Berner. "We use it for batch scheduling and as a workload manager for the cluster. It's a way of coordinating a large number of containers that are all connected together. We rely on our autoscaler to dynamically scale up and down our cluster. This lets us significantly reduce costs for idle nodes, while still providing low latency and rapid iteration."

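Used as a batch scheduler this way, each experiment maps naturally onto a Kubernetes Job; Jobs whose resource requests cannot be satisfied leave pods pending, which is the signal the cluster autoscaler uses to add nodes, and idle nodes are removed when the queue drains. A hedged sketch of such a Job (the image, name, and GPU count are illustrative, not OpenAI's actual specs):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: train-experiment   # illustrative
spec:
  parallelism: 8           # run 8 workers at once
  completions: 8
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: trainer
        image: example.com/trainer:latest   # illustrative
        resources:
          limits:
            nvidia.com/gpu: 1   # one GPU per worker; pending pods trigger scale-up
```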

In the past year, Berner has overseen the launch of several Kubernetes clusters in OpenAI's own data centers. "We run them in a hybrid model where the control planes—the Kubernetes API servers, etcd and everything—are all in Azure, and then all of the Kubernetes nodes are in our own data center," says Berner. "The cloud is really convenient for managing etcd and all of the masters, and having backups and spinning up new nodes if anything breaks. This model allows us to take advantage of lower costs and have the availability of more specialized hardware in our own data center."

{{< case-studies/quote image="/images/case-studies/openAI/banner3.jpg" >}}
OpenAI's experiments take advantage of Kubernetes' benefits, including portability. "Because Kubernetes provides a consistent API, we can move our research experiments very easily between clusters..."
{{< /case-studies/quote >}}

Different teams at OpenAI currently run a couple dozen projects. While the largest-scale workloads manage bare cloud VMs directly, most of OpenAI's experiments take advantage of Kubernetes' benefits, including portability. "Because Kubernetes provides a consistent API, we can move our research experiments very easily between clusters," says Berner. The on-prem clusters are generally "used for workloads where you need lots of GPUs, something like training an ImageNet model. Anything that's CPU heavy, that's run in the cloud. But we also have a number of teams that run their experiments both in Azure and in our own data centers, just depending on which cluster has free capacity, and that's hugely valuable."


Berner has made the Kubernetes clusters available to all OpenAI teams to use if it's a good fit. "I've worked a lot with our games team, which at the moment is doing research on classic console games," he says. "They had been running a bunch of their experiments on our dev servers, and they had been trying out Google cloud, managing their own VMs. We got them to try out our first on-prem Kubernetes cluster, and that was really successful. They've now moved over completely to it, and it has allowed them to scale up their experiments by 10x, and do that without needing to invest significant engineering time to figure out how to manage more machines. A lot of people are now following the same path."

{{< case-studies/quote image="/images/case-studies/openAI/banner4.jpg" >}}
"One of our researchers who is working on a new distributed training system has been able to get his experiment running in two or three days," says Berner. "In a week or two he scaled it out to hundreds of GPUs. Previously, that would have easily been a couple of months of work."
{{< /case-studies/quote >}}

That path has been simplified by frameworks and tools that two of OpenAI's teams have developed to handle interaction with Kubernetes. "You can just write some Python code, fill out a bit of configuration with exactly how many machines you need and which types, and then it will prepare all of those specifications and send it to the Kube cluster so that it gets launched there," says Berner. "And it also provides a bit of extra monitoring and better tooling that's designed specifically for these machine learning projects."


The impact that Kubernetes has had at OpenAI is impressive. With Kubernetes, the frameworks and tooling, including the autoscaler, in place, launching experiments takes far less time. "One of our researchers who is working on a new distributed training system has been able to get his experiment running in two or three days," says Berner. "In a week or two he scaled it out to hundreds of GPUs. Previously, that would have easily been a couple of months of work."


Plus, the flexibility they now have to use their on-prem Kubernetes cluster when appropriate is "lowering costs and providing us access to hardware that we wouldn't necessarily have access to in the cloud," he says. "As long as the utilization is high, the costs are much lower in our data center. To an extent, you can also customize your hardware to exactly what you need."

{{< case-studies/quote author="CHRISTOPHER BERNER, HEAD OF INFRASTRUCTURE FOR OPENAI" >}}
"Research teams can now take advantage of the frameworks we've built on top of Kubernetes, which make it easy to launch experiments, scale them by 10x or 50x, and take little effort to manage."
{{< /case-studies/quote >}}

OpenAI is also benefiting from other technologies in the CNCF cloud-native ecosystem. gRPC is used by many of its systems for communications between different services, and Prometheus is in place "as a debugging tool if things go wrong," says Berner. "We actually haven't had any real problems in our Kubernetes clusters recently, so I don't think anyone has looked at our Prometheus monitoring in a while. If something breaks, it will be there."


One of the things Berner continues to focus on is Kubernetes' ability to scale, which is essential to deep learning experiments. OpenAI has been able to push one of its Kubernetes clusters on Azure up to more than 2,500 nodes. "I think we'll probably hit the 5,000-machine number that Kubernetes has been tested at before too long," says Berner, adding, "We're definitely hiring if you're excited about working on these things!"

\ No newline at end of file diff --git a/content/bn/case-studies/openAI/openai_featured.png b/content/bn/case-studies/openAI/openai_featured.png new file mode 100644 index 0000000000000..b2b667c0bb13d Binary files /dev/null and b/content/bn/case-studies/openAI/openai_featured.png differ diff --git a/content/bn/case-studies/openAI/openai_featured.svg b/content/bn/case-studies/openAI/openai_featured.svg new file mode 100644 index 0000000000000..cf9b79721e8dd --- /dev/null +++ b/content/bn/case-studies/openAI/openai_featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/openAI/openai_logo.png b/content/bn/case-studies/openAI/openai_logo.png new file mode 100644 index 0000000000000..a85a81ea063d0 Binary files /dev/null and b/content/bn/case-studies/openAI/openai_logo.png differ diff --git a/content/bn/case-studies/peardeck/index.html b/content/bn/case-studies/peardeck/index.html new file mode 100644 index 0000000000000..a4a71916ca07c --- /dev/null +++ b/content/bn/case-studies/peardeck/index.html @@ -0,0 +1,87 @@ +--- +title: Pear Deck Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/peardeck/banner3.jpg +heading_title_logo: /images/peardeck_logo.png +subheading: > + Infrastructure for a Growing EdTech Startup +case_study_details: + - Company: Pear Deck + - Location: Iowa City, Iowa + - Industry: Educational Software +--- + +

Challenge


The three-year-old startup provides a web app for teachers to interact with their students in the classroom. The JavaScript app was built on Google's web app development platform Firebase and deployed on Heroku. As the user base steadily grew, so did the development team. "We outgrew Heroku when we started wanting to have multiple services, and the deploying story got pretty horrendous. We were frustrated that we couldn't have the developers quickly stage a version," says CEO Riley Eynon-Lynch. "Tracing and monitoring became basically impossible." On top of that, many of Pear Deck's customers are behind government firewalls and connect through Firebase, not Pear Deck's servers, making troubleshooting even more difficult.


Solution


In 2016, the company began moving their code from Heroku to containers running on Google Kubernetes Engine, orchestrated by Kubernetes and monitored with Prometheus.


Impact


The new cloud native stack immediately improved the development workflow, speeding up deployments. Prometheus gave Pear Deck "a lot of confidence, knowing that people are still logging into the app and using it all the time," says Eynon-Lynch. "The biggest impact is being able to work as a team on the configuration in git in a pull request, and the biggest confidence comes from the solidity of the abstractions and the trust that we have in Kubernetes actually making our yaml files a reality."

{{< case-studies/quote author="RILEY EYNON-LYNCH, CEO OF PEAR DECK" >}}
"We didn't even realize how stressed out we were about our lack of insight into what was happening with the app. I'm really excited and have more and more confidence in the actual state of our application for our actual users, and not just what the CPU graphs are saying, because of Prometheus and Kubernetes."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
With the speed befitting a startup, Pear Deck delivered its first prototype to customers within three months of incorporating.
{{< /case-studies/lead >}}

As a former high school math teacher, CEO Riley Eynon-Lynch felt an urgency to provide a tech solution to classes where instructors struggle to interact with every student in a short amount of time. "Pear Deck is an app that students can use to interact with the teacher all at once," he says. "When the teacher asks a question, instead of just the kid at the front of the room answering again, everybody can answer every single question. It's a huge fundamental shift in the messaging to the students about how much we care about them and how much they are a part of the classroom."


Eynon-Lynch and his partners quickly built a JavaScript web app on Google's web app development platform Firebase, and launched the minimum viable product [MVP] on Heroku "because it was fast and easy," he says. "We made everything as easy as we could."


But once it launched, the user base began growing steadily at a rate of 30 percent a month. "Our Heroku bill was getting totally insane," Eynon-Lynch says. But even more crucially, as the company hired more developers to keep pace, "we outgrew Heroku. We wanted to have multiple services and the deploying story got pretty horrendous. We were frustrated that we couldn't have the developers quickly stage a version. Tracing and monitoring became basically impossible."


On top of that, many of Pear Deck's customers are behind government firewalls and connect through Firebase, not Pear Deck's servers, making troubleshooting even more difficult.


The team began looking around for another solution, and finally decided in early 2016 to start moving the app from Heroku to containers running on Google Kubernetes Engine, orchestrated by Kubernetes and monitored with Prometheus.

{{< case-studies/quote image="/images/case-studies/peardeck/banner1.jpg" >}}
"When it became clear that Google Kubernetes Engine was going to have a lot of support from Google and be a fully-managed Kubernetes platform, it seemed very obvious to us that was the way to go," says Eynon-Lynch.
{{< /case-studies/quote >}}

They had considered other options like Google's App Engine (which they were already using for one service) and Amazon's Elastic Compute Cloud (EC2), while experimenting with running one small service that wasn't accessible to the Internet in Kubernetes. "When it became clear that Google Kubernetes Engine was going to have a lot of support from Google and be a fully-managed Kubernetes platform, it seemed very obvious to us that was the way to go," says Eynon-Lynch. "We didn't really consider Terraform and the other competitors because the abstractions offered by Kubernetes just jumped off the page to us."


Once the team started porting its Heroku apps into Kubernetes, which was "super easy," he says, the impact was immediate. "Before, to make a new version of the app meant going to Heroku and reconfiguring 10 new services, so basically no one was willing to do it, and we never staged things," he says. "Now we can deploy our exact same configuration in lots of different clusters in 30 seconds. We have a full set up that's always running, and then any of our developers or designers can stage new versions with one command, including their recent changes. We stage all the time now, and everyone stopped talking about how cool it is because it's become invisible how great it is."


Along with Kubernetes came Prometheus. "Until pretty recently we didn't have any kind of visibility into aggregate server metrics or performance," says Eynon-Lynch. The team had tried to use Google Kubernetes Engine's Stackdriver monitoring, but had problems making it work, and considered New Relic. When they started looking at Prometheus in the fall of 2016, "the fit between the abstractions in Prometheus and the way we think about how our system works, was so clear and obvious," he says.


The integration with Kubernetes made set-up easy. Once Helm installed Prometheus, "We started getting a graph of the health of all our Kubernetes nodes and pods immediately. I think we were pretty hooked at that point," Eynon-Lynch says. "Then we got our own custom instrumentation working in 15 minutes, and had an actively updated count of requests that we could do, rate on and get a sense of how many users are connected at a given point. And then it was another hour before we had alarms automatically showing up in our Slack channel. All that was in one afternoon. And it was an afternoon of gasping with delight, basically!"

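The Slack alarms Eynon-Lynch describes are typically wired up through Alertmanager, which the Prometheus Helm chart can install alongside Prometheus. A fragment of what that configuration might look like (the channel name and webhook URL are placeholders, not Pear Deck's):

```yaml
route:
  receiver: slack-notifications
receivers:
- name: slack-notifications
  slack_configs:
  - channel: '#alerts'                                      # illustrative
    api_url: 'https://hooks.slack.com/services/REPLACE_ME'  # hypothetical webhook
    send_resolved: true
```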
{{< case-studies/quote image="/images/case-studies/peardeck/banner2.jpg" >}}
"We started getting a graph of the health of all our Kubernetes nodes and pods immediately. I think we were pretty hooked at that point," Eynon-Lynch says. "Then we got our own custom instrumentation working in 15 minutes, and had an actively updated count of requests that we could do, rate on and get a sense of how many users are connected at a given point. And then it was another hour before we had alarms automatically showing up in our Slack channel. All that was in one afternoon. And it was an afternoon of gasping with delight, basically!"
{{< /case-studies/quote >}}

With Pear Deck's specific challenges—traffic through Firebase as well as government firewalls—Prometheus was a game-changer. "We didn't even realize how stressed out we were about our lack of insight into what was happening with the app," Eynon-Lynch says. Before, when a customer would report that the app wasn't working, the team had to manually investigate the problem without knowing whether customers were affected all over the world, or whether Firebase was down, and where.


To help solve that problem, the team wrote a script that pings Firebase from several different geographical locations, and then reports the responses to Prometheus in a histogram. "A huge impact that Prometheus had on us was just an amazing sigh of relief, of feeling like we knew what was happening," he says. "It took 45 minutes to implement [the Firebase alarm] because we knew that we had this trustworthy metrics platform in Prometheus. We weren't going to have to figure out, 'Where do we send these metrics? How do we aggregate the metrics? How do we understand them?'"


Plus, Prometheus has allowed Pear Deck to build alarms for business goals. One measures the rate of successful app loads and goes off if the day's loads are less than 90 percent of the loads from seven days before. "We run a JavaScript app behind ridiculous firewalls and all kinds of crazy browser extensions messing with it—Chrome will push a feature that breaks some CSS that we're using," Eynon-Lynch says. "So that gives us a lot of confidence, and we at least know that people are still logging into the app and using it all the time."

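In Prometheus, that week-over-week comparison is a single alerting rule built on the `offset` modifier. A sketch, assuming a hypothetical `app_load_success_total` counter (Pear Deck's actual metric names aren't public):

```yaml
groups:
- name: app-loads.rules
  rules:
  - alert: AppLoadRateDrop
    # fire when successful loads drop below 90% of the same window one week ago
    expr: |
      sum(rate(app_load_success_total[1h]))
        < 0.9 * sum(rate(app_load_success_total[1h] offset 7d))
    for: 30m
    labels:
      severity: warning
```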

Now, when a customer complains, and none of the alarms have gone off, the team can feel confident that it's not a widespread problem. "Just to be sure, we can go and double check the graphs and say, 'Yep, there's currently 10,000 people connected to that Firebase node. It's definitely working. Let's investigate your network settings, customer,'" he says. "And we can pass that back off to our support reps instead of the whole development team freaking out that Firebase is down."


Pear Deck is also giving back to the community, building and open-sourcing a metrics aggregator that enables end-user monitoring in Prometheus. "We can measure, for example, the time to interactive-dom on the web clients," he says. "The users all report that to our aggregator, then the aggregator reports to Prometheus. So we can set an alarm for some client side errors."


Most of Pear Deck's services have now been moved onto Kubernetes. And all of the team's new code is going on Kubernetes. "Kubernetes lets us experiment with service configurations and stage them on a staging cluster all at once, and test different scenarios and talk about them as a development team looking at code, not just talking about the steps we would eventually take as humans," says Eynon-Lynch.

{{< case-studies/quote >}}
"A huge impact that Prometheus had on us was just an amazing sigh of relief, of feeling like we knew what was happening. It took 45 minutes to implement [the Firebase alarm] because we knew that we had this trustworthy metrics platform in Prometheus...in terms of the cloud, Kubernetes and Prometheus have so much to offer," he says.
{{< /case-studies/quote >}}

Looking ahead, the team is planning to explore autoscaling on Kubernetes. With users all over the world but mostly in the United States, there are peaks and valleys in the traffic. One service that's still on App Engine can get as many as 10,000 requests a second during the day but far less at night. "We pay for the same servers at night, so I understand there's autoscaling that we can be taking advantage of," he says. "Implementing it is a big worry, exposing the rest of our Kubernetes cluster to us and maybe messing that up. But it's definitely our intention to move everything over, because now none of the developers want to work on that app anymore because it's such a pain to deploy it."

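For traffic with daily peaks and valleys like this, the usual starting point is a HorizontalPodAutoscaler that grows and shrinks a Deployment between a floor and a ceiling. A minimal sketch against a hypothetical Deployment named `web`:

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: web   # illustrative
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: web               # hypothetical Deployment to scale
  minReplicas: 2            # floor for quiet overnight traffic
  maxReplicas: 20           # ceiling for daytime peaks
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
```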

They're also eager to explore the work that Kubernetes is doing with stateful sets. "Right now all of the services we run in Kubernetes are stateless, and Google basically runs our databases for us and manages backups," Eynon-Lynch says. "But we're interested in building our own web-socket solution that doesn't have to be super stateful but will have maybe an hour's worth of state on it."


That project will also involve Prometheus, for a dark launch of web socket connections. "We don't know how reliable web socket connections behind all these horrible firewalls will be to our servers," he says. "We don't know what work Firebase has done to make them more reliable. So I'm really looking forward to trying to get persistent connections with web sockets to our clients and have optional tools to understand if it's working. That's our next new adventure, into stateful servers."


As for Prometheus, Eynon-Lynch thinks the company has only gotten started. "We haven't instrumented all our important features, especially those that depend on third parties," he says. "We have to wait for those third parties to tell us they're down, which sometimes they don't do for a long time. So I'm really excited and have more and more confidence in the actual state of our application for our actual users, and not just what the CPU graphs are saying, because of Prometheus and Kubernetes."


For a spry startup that's continuing to grow rapidly—and yes, they're hiring!—Pear Deck is notably satisfied with how its infrastructure has evolved in the cloud native ecosystem. "Usually I have some angsty thing where I want to get to the new, better technology," says Eynon-Lynch, "but in terms of the cloud, Kubernetes and Prometheus have so much to offer."

\ No newline at end of file diff --git a/content/bn/case-studies/peardeck/peardeck_featured.png b/content/bn/case-studies/peardeck/peardeck_featured.png new file mode 100644 index 0000000000000..ce87ee2d47f7b Binary files /dev/null and b/content/bn/case-studies/peardeck/peardeck_featured.png differ diff --git a/content/bn/case-studies/peardeck/peardeck_featured.svg b/content/bn/case-studies/peardeck/peardeck_featured.svg new file mode 100644 index 0000000000000..1c42e719207f0 --- /dev/null +++ b/content/bn/case-studies/peardeck/peardeck_featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/peardeck/peardeck_logo.png b/content/bn/case-studies/peardeck/peardeck_logo.png new file mode 100644 index 0000000000000..c1b9772ec45a0 Binary files /dev/null and b/content/bn/case-studies/peardeck/peardeck_logo.png differ diff --git a/content/bn/case-studies/pearson/index.html b/content/bn/case-studies/pearson/index.html new file mode 100644 index 0000000000000..563a3f9331043 --- /dev/null +++ b/content/bn/case-studies/pearson/index.html @@ -0,0 +1,83 @@ +--- +title: Pearson Case Study +linkTitle: Pearson +case_study_styles: true +cid: caseStudies +featured: false +quote: > + We're already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure. But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online. + +new_case_study_styles: true +heading_background: /images/case-studies/pearson/banner1.jpg +heading_title_logo: /images/pearson_logo.png +subheading: > + Reinventing the World's Largest Education Company With Kubernetes +case_study_details: + - Company: Pearson + - Location: Global + - Industry: Education +--- + +

Challenge


A global education company serving 75 million learners, Pearson set a goal to more than double that number, to 200 million, by 2025. A key part of this growth is in digital learning experiences, and Pearson was having difficulty in scaling and adapting to its growing online audience. They needed an infrastructure platform that would be able to scale quickly and deliver products to market faster.


Solution


"To transform our infrastructure, we had to think beyond simply enabling automated provisioning," says Chris Jackson, Director for Cloud Platforms & SRE at Pearson. "We realized we had to build a platform that would allow Pearson developers to build, manage and deploy applications in a completely different way." The team chose Docker container technology and Kubernetes orchestration "because of its flexibility, ease of management and the way it would improve our engineers' productivity."


Impact


With the platform, there have been substantial improvements in productivity and speed of delivery. "In some cases, we've gone from nine months to provision physical assets in a data center to just a few minutes to provision and get a new idea in front of a customer," says John Shirley, Lead Site Reliability Engineer for the Cloud Platform Team. Jackson estimates they've achieved 15-20% developer productivity savings. Before, outages were an issue during their busiest time of year, the back-to-school period. Now, there's high confidence in their ability to meet aggressive customer SLAs.

{{< case-studies/quote author="Chris Jackson, Director for Cloud Platforms & SRE at Pearson" >}}
"We're already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure. But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online."
{{< /case-studies/quote >}}

In 2015, Pearson was already serving 75 million learners as the world's largest education company, offering curriculum and assessment tools for Pre-K through college and beyond. Understanding that innovating the digital education experience was the key to the future of all forms of education, the company set out to increase its reach to 200 million people by 2025.


That goal would require a transformation of its existing infrastructure, which was in data centers. In some cases, it took nine months to provision physical assets. In order to adapt to the demands of its growing online audience, Pearson needed an infrastructure platform that would be able to scale quickly and deliver business-critical products to market faster. "We had to think beyond simply enabling automated provisioning," says Chris Jackson, Director for Cloud Platforms & SRE at Pearson. "We realized we had to build a platform that would allow Pearson developers to build, manage and deploy applications in a completely different way."


With 400 development groups and diverse brands with varying business and technical needs, Pearson embraced Docker container technology so that each brand could experiment with building new types of content using their preferred technologies, and then deliver it using containers. Jackson chose Kubernetes orchestration "because of its flexibility, ease of management and the way it would improve our engineers' productivity," he says.


The team adopted Kubernetes when it was still version 1.2 and are still going strong now on 1.7; they use Terraform and Ansible to deploy it on to basic AWS primitives. "We were trying to understand how we can create value for Pearson from this technology," says Ben Somogyi, Principal Architect for the Cloud Platforms. "It turned out that Kubernetes' benefits are huge. We're trying to help our applications development teams that use our platform go faster, so we filled that gap with a CI/CD pipeline that builds their images for them, standardizes them, patches everything up, allows them to deploy their different environments onto the cluster, and obfuscating the details of how difficult the work underneath the covers is."

{{< case-studies/quote
    image="/images/case-studies/pearson/banner3.jpg"
    author="Chris Jackson, Director for Cloud Platforms & SRE at Pearson"
>}}
"Your internal customers need to feel like they are choosing the very best option for them. We are experiencing this first hand in the growth of adoption. We are seeing triple-digit, year-on-year growth of the service."
{{< /case-studies/quote >}}

That work resulted in two tools for building and deploying applications in the cluster that Pearson has open sourced. "We're an education company, so we want to share what we can," says Somogyi.


Now that development teams no longer have to worry about infrastructure, there have been substantial improvements in productivity and speed of delivery. "In some cases, we've gone from nine months to provision physical assets in a data center to just a few minutes to provision and to get a new idea in front of a customer," says John Shirley, Lead Site Reliability Engineer for the Cloud Platform Team.


According to Jackson, the Cloud Platforms team can "provision a new proof-of-concept environment for a development team in minutes, and then they can take that to production as quickly as they are able to. This is the value proposition of all major technology services, and we had to compete like one to become our developers' preferred choice. Just because you work for the same company, you do not have the right to force people into a mediocre service. Your internal customers need to feel like they are choosing the very best option for them. We are experiencing this first hand in the growth of adoption. We are seeing triple-digit, year-on-year growth of the service."


Jackson estimates they've achieved a 15-20% boost in productivity for developer teams who adopt the platform. They also see a reduction in the number of customer-impacting incidents. Plus, says Jackson, "Teams who were previously limited to 1-2 releases per academic year can now ship code multiple times per day!"

{{< case-studies/quote
    image="/images/case-studies/pearson/banner4.jpg"
    author="Chris Jackson, Director for Cloud Platforms & SRE at Pearson"
>}}
"Teams who were previously limited to 1-2 releases per academic year can now ship code multiple times per day!"
{{< /case-studies/quote >}}

Availability has also been positively impacted. The back-to-school period is the company's busiest time of year, and "you have to keep applications up," says Somogyi. Before, this was a pain point for the legacy infrastructure. Now, for the applications that have been migrated to the Kubernetes platform, "We have 100% uptime. We're not worried about 9s. There aren't any. It's 100%, which is pretty astonishing for us, compared to some of the existing platforms that have legacy challenges," says Shirley.


"You can't even begin to put a price on how much that saves the company," Jackson explains. "A reduction in the number of support cases takes load out of our operations. The customer sentiment of having a reliable product drives customer retention and growth. It frees us to think about investing more into our digital transformation and taking a better quality of education to a global scale."


The platform itself is also being broken down, "so we can quickly release smaller pieces of the platform, like upgrading our Kubernetes or all the different modules that make up our platform," says Somogyi. "One of the big focuses in 2018 is this scheme of delivery to update the platform itself."


Guided by Pearson's overarching goal of getting to 200 million users, the team has run internal tests of the platform's scalability. "We had a challenge: 28 million requests within a 10-minute period," says Shirley. "And we demonstrated that we can hit that, with an acceptable latency. We saw that we could actually get that pretty readily, and we scaled up in just a few seconds, using open source tools entirely. Shout out to Locust for that one. So that's amazing."

{{< case-studies/quote author="Benjamin Somogyi, Principal Systems Architect at Pearson" >}}
"We have 100% uptime. We're not worried about 9s. There aren't any. It's 100%, which is pretty astonishing for us, compared to some of the existing platforms that have legacy challenges. You can't even begin to put a price on how much that saves the company."
{{< /case-studies/quote >}}

In just two years, "We're already seeing tremendous benefits with Kubernetes—improved engineering productivity, faster delivery of applications and a simplified infrastructure," says Jackson. "But this is just the beginning. Kubernetes will help transform the way that educational content is delivered online."


So far, about 15 production products are running on the new platform, including Pearson's new flagship digital education service, the Global Learning Platform. The Cloud Platform team continues to prepare, onboard and support customers that are a good fit for the platform. Some existing products will be refactored into 12-factor apps, while others are being developed so that they can live on the platform from the get-go. "There are challenges with bringing in new customers of course, because we have to help them to see a different way of developing, a different way of building," says Shirley.


But, he adds, "It is our corporate motto: Always Learning. We encourage those teams that haven't started a cloud native journey, to see the future of technology, to learn, to explore. It will pique your interest. Keep learning."

\ No newline at end of file diff --git a/content/bn/case-studies/pearson/pearson_featured.png b/content/bn/case-studies/pearson/pearson_featured.png new file mode 100644 index 0000000000000..6f8ffec49e6ef Binary files /dev/null and b/content/bn/case-studies/pearson/pearson_featured.png differ diff --git a/content/bn/case-studies/pearson/pearson_featured.svg b/content/bn/case-studies/pearson/pearson_featured.svg new file mode 100644 index 0000000000000..ed1602ac1a2de --- /dev/null +++ b/content/bn/case-studies/pearson/pearson_featured.svg @@ -0,0 +1,52 @@ + + + diff --git a/content/bn/case-studies/pearson/pearson_logo.png b/content/bn/case-studies/pearson/pearson_logo.png new file mode 100644 index 0000000000000..57e586f3ebd32 Binary files /dev/null and b/content/bn/case-studies/pearson/pearson_logo.png differ diff --git a/content/bn/case-studies/pingcap/index.html b/content/bn/case-studies/pingcap/index.html new file mode 100644 index 0000000000000..ea8890736bef1 --- /dev/null +++ b/content/bn/case-studies/pingcap/index.html @@ -0,0 +1,79 @@ +--- +title: pingcap Case Study +linkTitle: pingcap +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/pingcap/banner1.jpg +heading_title_logo: /images/pingcap_logo.png +subheading: > + PingCAP Bets on Cloud Native for Its TiDB Database Platform +case_study_details: + - Company: PingCAP + - Location: Beijing, China, and San Mateo, CA + - Industry: Software +--- + +

Challenge


PingCAP is the company leading the development of the popular open source NewSQL database TiDB, which is MySQL-compatible, can handle hybrid transactional and analytical processing (HTAP) workloads, and has a cloud native architectural design. "Having a hybrid multi-cloud product is an important part of our global go-to-market strategy," says Kevin Xu, General Manager of Global Strategy and Operations. In order to achieve that, the team had to address two challenges: "how to deploy, run, and manage a distributed stateful application, such as a distributed database like TiDB, in a containerized world," Xu says, and "how to deliver an easy-to-use, consistent, and reliable experience for our customers when they use TiDB in the cloud, any cloud, whether that's one cloud provider or a combination of different cloud environments." Knowing that using a distributed system isn't easy, they began looking for the right orchestration layer to help reduce some of that complexity for end users.


Solution


The team started looking at Kubernetes for orchestration early on. "We knew Kubernetes had the promise of helping us solve our problems," says Xu. "We were just waiting for it to mature." In early 2018, PingCAP began integrating Kubernetes into its internal development as well as in its TiDB product. By that point, the team already had experience using other cloud native technologies, having integrated both Prometheus and gRPC into the TiDB platform earlier on.


Impact


Xu says that PingCAP customers have had a "very positive" response so far to Kubernetes being the tool to deploy and manage TiDB. Prometheus, with Grafana as the dashboard, is installed by default when customers deploy TiDB, so that they can monitor performance and make any adjustments needed to reach their target before and while deploying TiDB in production. That monitoring layer "makes the evaluation process and communication much smoother," says Xu.


With the company's Kubernetes-based Operator implementation, which is open sourced, customers are now able to deploy, run, manage, upgrade, and maintain their TiDB clusters in the cloud with no downtime, and reduced workload, burden and overhead. And internally, says Xu, "we've completely switched to Kubernetes for our own development and testing, including our data center infrastructure and Schrodinger, an automated testing platform for TiDB. With Kubernetes, our resource usage is greatly improved. Our developers can allocate and deploy clusters themselves, and the deploying process has gone from hours to minutes, so we can devote fewer people to manage IDC resources. The productivity improvement is about 15%, and as we gain more Kubernetes knowledge on the debugging and diagnosis front, the productivity should improve to more than 20%."

{{< case-studies/quote author="KEVIN XU, GENERAL MANAGER OF GLOBAL STRATEGY AND OPERATIONS, PINGCAP" >}}
"We knew Kubernetes had the promise of helping us solve our problems. We were just waiting for it to mature, so we can fold it into our own development and product roadmap."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
Since it was introduced in 2015, the open source NewSQL database TiDB has gained a following for its compatibility with MySQL, its ability to handle hybrid transactional and analytical processing (HTAP) workloads—and its cloud native architectural design.
{{< /case-studies/lead >}}

PingCAP, the company behind TiDB, designed the platform with cloud in mind from day one, says Kevin Xu, General Manager of Global Strategy and Operations, and "having a hybrid multi-cloud product is an important part of our global go-to-market strategy."


In order to achieve that, the team had to address two challenges: "how to deploy, run, and manage a distributed stateful application, such as a distributed database like TiDB, in a containerized world," Xu says, and "how to deliver an easy-to-use, consistent, and reliable experience for our customers when they use TiDB in the cloud, any cloud, whether that's one cloud provider or a combination of different cloud environments."


Knowing that using a distributed system isn't easy, the PingCAP team began looking for the right orchestration layer to help reduce some of that complexity for end users. Kubernetes had been on their radar for quite some time. "We knew Kubernetes had the promise of helping us solve our problems," says Xu. "We were just waiting for it to mature."

{{< case-studies/quote
    image="/images/case-studies/pingcap/banner3.jpg"
    author="KEVIN XU, GENERAL MANAGER OF GLOBAL STRATEGY AND OPERATIONS, PINGCAP"
>}}
"With the governance process being so open, it's not hard to find out what's the latest development in the technology and community, or figure out who to reach out to if we have problems or issues."
{{< /case-studies/quote >}}

That time came in early 2018, when PingCAP began integrating Kubernetes into its internal development as well as in its TiDB product. "Having Kubernetes be part of the CNCF, as opposed to having only the backing of one individual company, was valuable in having confidence in the longevity of the technology," says Xu. Plus, "with the governance process being so open, it's not hard to find out what's the latest development in the technology and community, or figure out who to reach out to if we have problems or issues."


TiDB's cloud native architecture consists of a stateless SQL layer (also called TiDB) and a persistent key-value storage layer that supports distributed transactions (TiKV, which is now in the CNCF Sandbox), which are loosely coupled. "You can scale both out or in depending on your computation and storage needs, and the two scaling processes can happen independent of each other," says Xu. The PingCAP team also built the TiDB Operator based on Kubernetes, which helps bootstrap a TiDB cluster on any cloud environment and simplifies and automates deployment, scaling, scheduling, upgrades, and maintenance. The company also recently previewed its fully-managed TiDB Cloud offering.

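With the open sourced TiDB Operator, a whole cluster is declared as a single custom resource and the Operator reconciles the rest. A simplified sketch of a `TidbCluster` object (the field values are illustrative, and details vary across Operator versions):

```yaml
apiVersion: pingcap.com/v1alpha1
kind: TidbCluster
metadata:
  name: demo        # illustrative
spec:
  version: v3.0.0   # illustrative TiDB version
  pd:
    replicas: 3     # placement driver, the cluster's coordinator
  tikv:
    replicas: 3     # distributed key-value storage layer
  tidb:
    replicas: 2     # stateless SQL layer
```

Because the SQL and storage layers are separate fields, each can be scaled out or in independently, mirroring the loosely coupled design described above.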
{{< case-studies/quote
    image="/images/case-studies/pingcap/banner4.jpg"
    author="KEVIN XU, GENERAL MANAGER OF GLOBAL STRATEGY AND OPERATIONS, PINGCAP"
>}}
"A cloud native infrastructure will not only save you money and allow you to be more in control of the infrastructure resources you consume, but also empower new product innovation, new experience for your users, and new business possibilities. It's both a cost reducer and a money maker."
{{< /case-studies/quote >}}

The entire TiDB platform leverages Kubernetes and other cloud native technologies, including Prometheus for monitoring and gRPC for interservice communication.


So far, the customer response to the Kubernetes-enabled platform has been "very positive." Prometheus, with Grafana as the dashboard, is installed by default when customers deploy TiDB, so that they can monitor and make any adjustments needed to reach their performance requirements before deploying TiDB in production. That monitoring layer "makes the evaluation process and communication much smoother," says Xu. With the company's Kubernetes-based Operator implementation, customers are now able to deploy, run, manage, upgrade, and maintain their TiDB clusters in the cloud with no downtime, and reduced workload, burden and overhead.


These technologies have also had an impact internally. "We've completely switched to Kubernetes for our own development and testing, including our data center infrastructure and Schrodinger, an automated testing platform for TiDB," says Xu. "With Kubernetes, our resource usage is greatly improved. Our developers can allocate and deploy clusters themselves, and the deploying process takes less time, so we can devote fewer people to manage IDC resources.

{{< case-studies/quote author="KEVIN XU, GENERAL MANAGER OF GLOBAL STRATEGY AND OPERATIONS, PINGCAP" >}}
"The entire cloud native community, whether it's Kubernetes, CNCF in general, or cloud native vendors like us, have all gained enough experience—and have the battle scars to prove it—and are ready to help you succeed."
{{< /case-studies/quote >}}

The productivity improvement is about 15%, and as we gain more Kubernetes knowledge on the debugging and diagnosis front, the productivity should improve to more than 20%."


Kubernetes is now a crucial part of PingCAP's product roadmap. For anyone else considering going cloud native, Xu has this advice: "There's no better time to get started," he says. "The entire cloud native community, whether it's Kubernetes, CNCF in general, or cloud native vendors like us, have all gained enough experience—and have the battle scars to prove it—and are ready to help you succeed."


In fact, the PingCAP team has seen more and more customers moving toward a cloud native approach, and for good reason. "IT infrastructure is quickly evolving from a cost-center and afterthought, to the core competency and competitiveness of any company," says Xu. "A cloud native infrastructure will not only save you money and allow you to be more in control of the infrastructure resources you consume, but also empower new product innovation, new experience for your users, and new business possibilities. It's both a cost reducer and a money maker."

\ No newline at end of file diff --git a/content/bn/case-studies/pingcap/pingcap_featured_logo.png b/content/bn/case-studies/pingcap/pingcap_featured_logo.png new file mode 100644 index 0000000000000..8b57f417ae813 Binary files /dev/null and b/content/bn/case-studies/pingcap/pingcap_featured_logo.png differ diff --git a/content/bn/case-studies/pingcap/pingcap_featured_logo.svg b/content/bn/case-studies/pingcap/pingcap_featured_logo.svg new file mode 100644 index 0000000000000..46d2d2543d784 --- /dev/null +++ b/content/bn/case-studies/pingcap/pingcap_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/pinterest/index.html b/content/bn/case-studies/pinterest/index.html new file mode 100644 index 0000000000000..b64ed26485dde --- /dev/null +++ b/content/bn/case-studies/pinterest/index.html @@ -0,0 +1,84 @@ +--- +title: Pinterest Case Study +linkTitle: Pinterest +case_study_styles: true +cid: caseStudies +featured: false +weight: 30 +quote: > + We are in the position to run things at scale, in a public cloud environment, and test things out in way that a lot of people might not be able to do. + +new_case_study_styles: true +heading_background: /images/case-studies/pinterest/banner1.jpg +heading_title_logo: /images/pinterest_logo.png +subheading: > + Pinning Its Past, Present, and Future on Cloud Native +case_study_details: + - Company: Pinterest + - Location: San Francisco, California + - Industry: Web and Mobile App +--- + +

Challenge


After eight years in existence, Pinterest had grown to 1,000 microservices, multiple layers of infrastructure, and a diverse set of setup tools and platforms. In 2016, the company launched a roadmap towards a new compute platform, led by the vision of creating the fastest path from an idea to production, without making engineers worry about the underlying infrastructure.


Solution


The first phase involved moving services to Docker containers. Once these services went into production in early 2017, the team began looking at orchestration to help create efficiencies and manage them in a decentralized way. After an evaluation of various solutions, Pinterest went with Kubernetes.


Impact


"By moving to Kubernetes the team was able to build on-demand scaling and new failover policies, in addition to simplifying the overall deployment and management of a complicated piece of infrastructure such as Jenkins," says Micheal Benedict, Product Manager for the Cloud and the Data Infrastructure Group at Pinterest. "We not only saw reduced build times but also huge efficiency wins. For instance, the team reclaimed over 80 percent of capacity during non-peak hours. As a result, the Jenkins Kubernetes cluster now uses 30 percent less instance-hours per-day when compared to the previous static cluster."

{{< case-studies/quote author="Micheal Benedict, Product Manager for the Cloud and the Data Infrastructure Group at Pinterest" >}}
"So far it's been good, especially the elasticity around how we can configure our Jenkins workloads on that Kubernetes shared cluster. That is the win we were pushing for."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
Pinterest was born on the cloud—running on AWS since day one in 2010—but even cloud native companies can experience some growing pains.
{{< /case-studies/lead >}}

Since its launch, Pinterest has become a household name, with more than 200 million active monthly users and 100 billion objects saved. Underneath the hood, there are 1,000 microservices running and hundreds of thousands of data jobs.


With such growth came layers of infrastructure and diverse set-up tools and platforms for the different workloads, resulting in an inconsistent and complex end-to-end developer experience, and ultimately less velocity to get to production. So in 2016, the company launched a roadmap toward a new compute platform, led by the vision of having the fastest path from an idea to production, without making engineers worry about the underlying infrastructure.


The first phase involved moving to Docker. "Pinterest has been heavily running on virtual machines, on EC2 instances directly, for the longest time," says Micheal Benedict, Product Manager for the Cloud and the Data Infrastructure Group. "To solve the problem around packaging software and not make engineers own portions of the fleet and those kinds of challenges, we standardized the packaging mechanism and then moved that to the container on top of the VM. Not many drastic changes. We didn't want to boil the ocean at that point."

{{< case-studies/quote
    image="/images/case-studies/pinterest/banner3.jpg"
    author="MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST"
>}}
"Though Kubernetes lacked certain things we wanted, we realized that by the time we get to productionizing many of those things, we'll be able to leverage what the community is doing."
{{< /case-studies/quote >}}

The first service that was migrated was the monolith API fleet that powers most of Pinterest. At the same time, Benedict's infrastructure governance team built chargeback and capacity planning systems to analyze how the company uses its virtual machines on AWS. "It became clear that running on VMs is just not sustainable with what we're doing," says Benedict. "A lot of resources were underutilized. There were efficiency efforts, which worked fine at a certain scale, but now you have to move to a more decentralized way of managing that. So orchestration was something we thought could help solve that piece."


That led to the second phase of the roadmap. In July 2017, after an eight-week evaluation period, the team chose Kubernetes over other orchestration platforms. "Kubernetes lacked certain things at the time—for example, we wanted Spark on Kubernetes," says Benedict. "But we realized that the dev cycles we would put in to even try building that is well worth the outcome, both for Pinterest as well as the community. We've been in those conversations in the Big Data SIG. We realized that by the time we get to productionizing many of those things, we'll be able to leverage what the community is doing."


At the beginning of 2018, the team began onboarding its first use case into the Kubernetes system: Jenkins workloads. "Although we have builds happening during a certain period of the day, we always need to allocate peak capacity," says Benedict. "They don't have any auto-scaling capabilities, so that capacity stays constant. It is difficult to speed up builds because ramping up takes more time. So given those kind of concerns, we thought that would be a perfect use case for us to work on."

{{< case-studies/quote
    image="/images/case-studies/pinterest/banner4.jpg"
    author="MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST"
>}}
"So far it's been good, especially the elasticity around how we can configure our Jenkins workloads on Kubernetes shared cluster. That is the win we were pushing for."
{{< /case-studies/quote >}}

They ramped up the cluster, and working with a team of four people, got the Jenkins Kubernetes cluster ready for production. "We still have our static Jenkins cluster," says Benedict, "but on Kubernetes, we are doing similar builds, testing the entire pipeline, getting the artifact ready and just doing the comparison to see, how much time did it take to build over here. Is the SLA okay, is the artifact generated correct, are there issues there?"


"So far it's been good," he adds, "especially the elasticity around how we can configure our Jenkins workloads on Kubernetes shared cluster. That is the win we were pushing for."


By the end of Q1 2018, the team successfully migrated Jenkins Master to run natively on Kubernetes and also collaborated on the Jenkins Kubernetes Plugin to manage the lifecycle of workers. "We're currently building the entire Pinterest JVM stack (one of the larger monorepos at Pinterest which was recently bazelized) on this new cluster," says Benedict. "At peak, we run thousands of pods on a few hundred nodes. Overall, by moving to Kubernetes the team was able to build on-demand scaling and new failover policies, in addition to simplifying the overall deployment and management of a complicated piece of infrastructure such as Jenkins. We not only saw reduced build times but also huge efficiency wins. For instance, the team reclaimed over 80 percent of capacity during non-peak hours. As a result, the Jenkins Kubernetes cluster now uses 30 percent less instance-hours per-day when compared to the previous static cluster."

{{< case-studies/quote author="MICHEAL BENEDICT, PRODUCT MANAGER FOR THE CLOUD AND THE DATA INFRASTRUCTURE GROUP AT PINTEREST" >}}
"We are in the position to run things at scale, in a public cloud environment, and test things out in a way that a lot of people might not be able to do."
{{< /case-studies/quote >}}

Benedict points to a "pretty robust roadmap" going forward. In addition to the Pinterest big data team's experiments with Spark on Kubernetes, the company collaborated with Amazon's EKS team on an ENI/CNI plug-in.


Once the Jenkins cluster is up and running out of dark mode, Benedict hopes to establish best practices, including having governance primitives established—such as integration with the chargeback system—before moving on to migrating the next service. "We have a healthy pipeline of use-cases to be on-boarded. After Jenkins, we want to enable support for Tensorflow and Apache Spark. At some point, we aim to move the company's monolithic API service. If we move that and understand the complexity around that, it builds our confidence," says Benedict. "It sets us up for migration of all our other services."


After years of being a cloud native pioneer, Pinterest is eager to share its ongoing journey. "We are in the position to run things at scale, in a public cloud environment, and test things out in a way that a lot of people might not be able to do," says Benedict. "We're in a great position to contribute back some of those learnings."

\ No newline at end of file
diff --git a/content/bn/case-studies/pinterest/pinterest_feature.png b/content/bn/case-studies/pinterest/pinterest_feature.png
new file mode 100644
index 0000000000000..ea5d625789468
Binary files /dev/null and b/content/bn/case-studies/pinterest/pinterest_feature.png differ
diff --git a/content/bn/case-studies/pinterest/pinterest_feature.svg b/content/bn/case-studies/pinterest/pinterest_feature.svg
new file mode 100644
index 0000000000000..96cd6ded97560
--- /dev/null
+++ b/content/bn/case-studies/pinterest/pinterest_feature.svg
@@ -0,0 +1 @@
kubernetes.io-logos
\ No newline at end of file
diff --git a/content/bn/case-studies/pinterest/pinterest_logo.png b/content/bn/case-studies/pinterest/pinterest_logo.png
new file mode 100644
index 0000000000000..0f744e7828cc2
Binary files /dev/null and b/content/bn/case-studies/pinterest/pinterest_logo.png differ
diff --git a/content/bn/case-studies/prowise/index.html b/content/bn/case-studies/prowise/index.html
new file mode 100644
index 0000000000000..bd9da21b01670
--- /dev/null
+++ b/content/bn/case-studies/prowise/index.html
@@ -0,0 +1,83 @@
---
title: Prowise Case Study
linkTitle: prowise
case_study_styles: true
cid: caseStudies
featured: false

new_case_study_styles: true
heading_background: /images/case-studies/prowise/banner1.jpg
heading_title_logo: /images/prowise_logo.png
subheading: >
  Prowise: How Kubernetes is Enabling the Edtech Solution's Global Expansion
case_study_details:
  - Company: Prowise
  - Location: Budel, The Netherlands
  - Industry: Edtech
---

Challenge


A Dutch company that produces educational devices and software used around the world, Prowise had an infrastructure based on Linux services with multiple availability zones in Europe, Australia, and the U.S. "We've grown a lot in the past couple of years, and we started to encounter problems with versioning and flexible scaling," says Senior DevOps Engineer Victor van den Bosch, "not only scaling in demands, but also in being able to deploy multiple products which all have their own versions, their own development teams, and their own problems that they're trying to solve. To be able to put that all on the same platform without much resistance is what we were looking for. We wanted to future proof our infrastructure, and also solve some of the problems that are associated with just running a normal Linux service."


Solution


The Prowise team adopted containerization, spent time improving its CI/CD pipelines, and chose Microsoft Azure's managed Kubernetes service, AKS, for orchestration. "Kubernetes solves things like networking really well, in a way that fits our business model," says van den Bosch. "We want to focus on our core products, and that's the software that runs on it and not necessarily the infrastructure itself."


Impact


With its first web-based applications now running in beta on Prowise's Kubernetes platform, the team is seeing the benefits of rapid and smooth deployments. "The old way of deploying took half an hour of preparations and half an hour deploying it. With Kubernetes, it's a couple of seconds," says Senior Developer Bart Haalstra. As a result, adds van den Bosch, "We've gone from quarterly releases to a release every month in production. We're pretty much deploying every hour or just when we find that a feature is ready for production; before, our releases were mostly done on off-hours, where it couldn't impact our customers, as our confidence in the process was relatively low. Kubernetes has also enabled us to follow up quickly on bugs and implement tweaks to our users with zero downtime between versions. For some bugs we've pushed code fixes to production minutes after detection." Recently, the team launched a new single sign-on solution for use in an internal application. "Due to the resource based architecture of the Kubernetes platform, we were able to bring that application into an entirely new production environment in less than a day, most of that time used for testing after applying the already well-known resource definitions from staging to the new environment," says van den Bosch. "On a traditional VM this would have likely cost a day or two, and then probably a few weeks to iron out the kinks in our provisioning scripts as we apply updates."

{{< case-studies/quote author="VICTOR VAN DEN BOSCH, SENIOR DEVOPS ENGINEER, PROWISE" >}}
"Because of Kubernetes, things have been much easier, our individual applications are better, and we can spend more time on functional implementation. We do not want to go back."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
If you haven't set foot in a school in a while, you might be surprised by what you'd see in a digitally connected classroom these days: touchscreen monitors, laptops, tablets, touch tables, and more.
{{< /case-studies/lead >}}

One of the leaders in the space, the Dutch company Prowise, offers an integrated solution of hardware and software to help educators create a more engaging learning environment.


As the company expanded its offerings beyond the Netherlands in recent years—creating multiple availability zones in Europe, Australia, and the U.S., with as many as nine servers per zone—its Linux service-based infrastructure struggled to keep up. "We've grown a lot in the past couple of years, and we started to encounter problems with versioning and flexible scaling," says Senior DevOps Engineer Victor van den Bosch, who was hired by the company in late 2017 to build a new platform.


Prowise's products support ten languages, so the problem wasn't just scaling in demands, he adds, "but also in being able to deploy multiple products which all have their own versions, their own development teams, and their own problems that they're trying to solve. To be able to put that all on the same platform without much resistance is what we were looking for. We wanted to future proof our infrastructure, and also solve some of the problems that are associated with just running a normal Linux service."


The company's existing infrastructure on Microsoft Azure Cloud was all on virtual machines, "a pretty traditional setup," van den Bosch says. "We decided that we want some features in our software that requires being able to scale quickly, being able to deploy new applications and versions on different versions of different programming languages quickly. And we didn't really want the hassle of trying to keep those servers in a particular state."

{{< case-studies/quote
  image="/images/case-studies/prowise/banner3.jpg"
  author="VICTOR VAN DEN BOSCH, SENIOR DEVOPS ENGINEER, PROWISE"
>}}
"You don't have to go all-in immediately. You can just take a few projects, a service, run it alongside your more traditional stack, and build it up from there. Kubernetes scales, so as you add applications and services to it, it will scale with you. You don't have to do it all at once, and that's really a secret to everything, but especially true to Kubernetes."
{{< /case-studies/quote >}}

After researching possible solutions, he opted for containerization and Kubernetes orchestration. "Containerization is the future," van den Bosch says. "Kubernetes solves things like networking really well, in a way that fits our business model. We want to focus on our core products, and that's the software that runs on it and not necessarily the infrastructure itself." Plus, the Prowise team liked that there was no vendor lock-in. "We don't want to be limited to one platform," he says. "We try not to touch products that are very proprietary and can't be ported easily to another vendor."


The time to market with Kubernetes was very short: The first web-based applications on the platform went into beta within a few months. That was largely made possible by van den Bosch's decision to use Azure's managed Kubernetes service, AKS. The team then had to figure out which components to keep and which to replace. Monitoring tools like New Relic were taken out "because they tend to become very expensive when you scale it to different availability zones, and it's just not very maintainable," he says.


A lot of work also went into improving Prowise's CI/CD pipelines. "We wanted to make sure that the pipelines are automated and easy to use," he says. "We have a lot of settings and configurations figured out for the pipelines, and it's just applying those scripts and those configurations to new projects from here on out."


With its first web-based applications now running in beta on Prowise's Kubernetes platform, the team is seeing the benefits of rapid and smooth deployments. "The old way of deploying took half an hour of preparations and half an hour deploying it. With Kubernetes, it's a couple of seconds," says Senior Developer Bart Haalstra. As a result, adds van den Bosch, "We've gone from quarterly releases to a release every month in production. We're pretty much deploying every hour or just when we find that a feature is ready for production. Before, our releases were mostly done on off-hours, where it couldn't impact our customers, as our confidence in the process itself was relatively low. With Kubernetes, we dare to deploy in the middle of a busy day with high confidence the deployment will succeed."
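The confidence to deploy mid-day rests on Kubernetes rolling updates, which replace pods gradually while the old version keeps serving traffic. A minimal sketch of such a Deployment; the name and image below are placeholders, not Prowise's actual configuration:

```bash
# Hypothetical Deployment with zero-downtime rollout settings.
kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-frontend
spec:
  replicas: 4
  selector:
    matchLabels:
      app: web-frontend
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 0   # never drop below full serving capacity
      maxSurge: 1         # bring up one new pod at a time
  template:
    metadata:
      labels:
        app: web-frontend
    spec:
      containers:
      - name: web
        image: registry.example.com/web-frontend:1.2.3
EOF
```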

{{< case-studies/quote
  image="/images/case-studies/prowise/banner4.jpg"
  author="VICTOR VAN DEN BOSCH, SENIOR DEVOPS ENGINEER, PROWISE"
>}}
"Kubernetes allows us to really consider the best tools for a problem. Want to have a full-fledged analytics application developed by a third party that is just right for your use case? Run it. Dabbling in machine learning and AI algorithms but getting tired of waiting days for training to complete? It takes only seconds to scale it. Got a stubborn developer that wants to use a programming language no one has heard of? Let him, if it runs in a container, of course. And all of that while your operations team/DevOps get to sleep at night."
{{< /case-studies/quote >}}

Plus, van den Bosch says, "Kubernetes has enabled us to follow up quickly on bugs and implement tweaks to our users with zero downtime between versions. For some bugs we've pushed code fixes to production minutes after detection."


Recently, the team launched a new single sign-on solution for use in an internal application. "Due to the resource based architecture of the Kubernetes platform, we were able to bring that application into an entirely new production environment in less than a day, most of that time used for testing after applying the already well-known resource definitions from staging to the new environment," says van den Bosch. "On a traditional VM this would have likely cost a day or two, and then probably a few weeks to iron out the kinks in our provisioning scripts as we apply updates."
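Reusing "already well-known resource definitions" in a fresh environment boils down to pointing kubectl at the same manifests with a different context. A sketch, with the repository and context names invented for illustration:

```bash
# Hypothetical setup: the manifests proven in staging live in version control.
git clone https://git.example.com/prowise/sso-manifests.git
cd sso-manifests

# Create the target namespace and apply the same definitions to the new cluster:
kubectl --context new-production create namespace sso
kubectl --context new-production apply --namespace sso -f manifests/
```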


Legacy applications are also being moved to Kubernetes. Not long ago, the team needed to set up a Java-based application for compiling and running a frontend. "On a traditional VM, it would have taken quite a bit of time to set it up and keep it up to date, not to mention maintenance for that setup down the line," says van den Bosch. Instead, it took less than half a day to containerize it and get it running on Kubernetes. "It was much easier, and we were able to save costs too because we didn't have to spin up new VMs specially for it."

{{< case-studies/quote author="VICTOR VAN DEN BOSCH, SENIOR DEVOPS ENGINEER, PROWISE" >}}
"We're really trying to deliver integrated solutions with our hardware and software and making it as easy as possible for users to use and collaborate from different places," says van den Bosch. And, says Haalstra, "We cannot do it without Kubernetes."
{{< /case-studies/quote >}}

Perhaps most importantly, van den Bosch says, "Kubernetes allows us to really consider the best tools for a problem and take full advantage of microservices architecture. Got a library in Node.js that excels at solving a certain problem? Use it. Want to have a full-fledged analytics application developed by a third party that is just right for your use case? Run it. Dabbling in machine learning and AI algorithms but getting tired of waiting days for training to complete? It takes only seconds to scale it. Got a stubborn developer that wants to use a programming language no one has heard of? Let him, if it runs in a container, of course. And all of that while your operations team/DevOps get to sleep at night."


Looking ahead, all new web development, platforms, and APIs at Prowise will be on Kubernetes. One of the big greenfield projects is a platform for teachers and students that is launching for back-to-school season in September. Users will be able to log in and access a wide variety of educational applications. With the recent acquisition of the software company Oefenweb, Prowise plans to provide adaptive software that allows teachers to get an accurate view of their students' progress and weak points, and automatically adjusts the difficulty level of assignments to suit individual students. "We will be leveraging Kubernetes' power to integrate, supplement, and support our combined application portfolio and bring our solutions to more classrooms," says van den Bosch.


Collaborative software is also a priority. With the single sign-on software, users' settings and credentials are saved in the cloud and can be used on any screen in the world. "We're really trying to deliver integrated solutions with our hardware and software and making it as easy as possible for users to use and collaborate from different places," says van den Bosch. And, says Haalstra, "We cannot do it without Kubernetes."

\ No newline at end of file
diff --git a/content/bn/case-studies/prowise/prowise_featured_logo.png b/content/bn/case-studies/prowise/prowise_featured_logo.png
new file mode 100644
index 0000000000000..e6dc1a35ec238
Binary files /dev/null and b/content/bn/case-studies/prowise/prowise_featured_logo.png differ
diff --git a/content/bn/case-studies/prowise/prowise_featured_logo.svg b/content/bn/case-studies/prowise/prowise_featured_logo.svg
new file mode 100644
index 0000000000000..1f2d5ce41a918
--- /dev/null
+++ b/content/bn/case-studies/prowise/prowise_featured_logo.svg
@@ -0,0 +1 @@
kubernetes.io-logos2
\ No newline at end of file
diff --git a/content/bn/case-studies/ricardo-ch/index.html b/content/bn/case-studies/ricardo-ch/index.html
new file mode 100644
index 0000000000000..205e984e7f5c6
--- /dev/null
+++ b/content/bn/case-studies/ricardo-ch/index.html
@@ -0,0 +1,79 @@
---
title: ricardo.ch Case Study
linkTitle: ricardo-ch
case_study_styles: true
cid: caseStudies
featured: false

new_case_study_styles: true
heading_background: /images/case-studies/ricardoch/banner1.png
heading_title_logo: /images/ricardoch_logo.png
subheading: >
  ricardo.ch: How Kubernetes Improved Velocity and DevOps Harmony
case_study_details:
  - Company: ricardo.ch
  - Location: Zurich, Switzerland
  - Industry: E-commerce
---

Challenge


A Swiss online marketplace, ricardo.ch was experiencing problems with velocity, as well as a "classic gap" between Development and Operations, with the two sides unable to work well together. "They wanted to, but they didn't have common ground," says Cedric Meury, Head of Platform Engineering. "This was one of the root causes that slowed us down." The company began breaking down the legacy monolith into microservices, and needed orchestration to support the new architecture in its own data centers—as well as bring together Dev and Ops.


Solution


The company adopted Kubernetes for cluster management, Prometheus for monitoring, and Fluentd for logging. The first cluster was deployed on premise in December 2016, with the first service in production three months later. The migration is about half done, and the company plans to move completely to Google Cloud Platform by the end of 2018.


Impact


Splitting up the monolith into microservices "allowed higher velocity, and Kubernetes was crucial to support that," says Meury. The number of deployments to production has gone from fewer than 10 a week to 30-60 per day. Before, "when there was a problem with something in production, tickets or complaints would be thrown over the wall to operations, the classical problem. Now, people have the chance to look into operations and troubleshoot for themselves first because everything is deployed in a standardized way," says Meury. He sees the impact in everyday interactions: "A couple of weeks ago, I saw a product manager doing a pull request for a JSON file that contains some variables, and someone else accepted it. And it was deployed after a couple of minutes or seconds even, which was unthinkable before. There used to be quite a chain of things that needed to happen, the whole monolith was difficult to understand, even for engineers. So, previously requests would go into large, inefficient Kanban boards and hopefully someone will have done the change after weeks and months." Before, infrastructure- and platform-related projects took months or years to complete; now developers and operators can work together to deploy infrastructure parts via Kubernetes in a matter of weeks and sometimes days. In the long run, the company also expects to notch 50% cost savings going from its custom data center and virtual machines to containerized infrastructure and cloud services.

{{< case-studies/quote author="CEDRIC MEURY, HEAD OF PLATFORM ENGINEERING, RICARDO.CH" >}}
"Splitting up the monolith allowed higher velocity, and Kubernetes was crucial to support that. Containerization and orchestration by Kubernetes helped us to drastically reduce the conflict between Dev and Ops and also allowed us to speak the same language on both sides of the aisle."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
When Cedric Meury joined ricardo.ch in 2016, he saw a clear divide between Operations and Development. In fact, there was literal distance between them: The engineering team worked in France, while the rest of the organization was based in Switzerland.
{{< /case-studies/lead >}}

"It was a classic gap between those departments and even some anger and frustration here and there," says Meury. "They wanted to work together, but they didn't have common ground. This was one of the root causes that slowed us down."


That gap was hurting velocity at ricardo.ch, a Swiss online marketplace. The website processes up to 2.6 million searches on a peak day from both web and mobile apps, serving 3.2 million members with its live auctions. The technology team's main challenge was to make sure that "the bids for items come in the right order, and before the auction is finished, and that this works in a fair way," says Meury. "We have a real-time requirement. We also provide an automated system to bid, and it needs to be accurate and correct. With a distributed system, you have the challenge of making sure that the ordering is right. And that's one of the things we're currently dealing with."


To address the velocity issue, ricardo.ch CTO Jeremy Seitz established a new software factory called EPD, which consists of 65 engineers, 7 product managers and 2 designers. "We brought these three departments together so that they can kind of streamline this and talk to each other much more closely," says Meury.

{{< case-studies/quote
  image="/images/case-studies/ricardoch/banner3.png"
  author="CEDRIC MEURY, HEAD OF PLATFORM ENGINEERING, RICARDO.CH"
>}}
"Being in the End User Community demonstrates that we stand behind these technologies. In Switzerland, if all the companies see that ricardo.ch's using it, I think that will help adoption. I also like that we're connected to the other end users, so if there is a really heavy problem, I could go to the Slack channel, and say, 'Hey, you guys…' Like Reddit, GitHub and New York Times or whoever can give a recommendation on what to use here or how to solve that. So that's kind of a superpower."
{{< /case-studies/quote >}}

The company also began breaking down the legacy monolith into more than 100 microservices, and needed orchestration to support the new architecture in its own data centers. "Splitting up the monolith allowed higher velocity, and Kubernetes was crucial to support that," says Meury. "Containerization and orchestration by Kubernetes helped us to drastically reduce the conflict between Dev and Ops and also allowed us to speak the same language on both sides of the aisle."


Meury put together a platform engineering team to choose the tools—including Fluentd for logging and Prometheus for monitoring, with Grafana visualization—and lay the groundwork for the first Kubernetes cluster, which was installed on premise in December 2016. Within a few weeks, the new platform was available to teams, who were given training sessions and documentation. The platform engineering team then embedded with engineers to help them deploy their applications on the new platform. The first service in production was the ricardo.ch jobs page. "It was an exercise in front-end development, so the developers could experiment with a new stack," says Meury.


Meury estimates that half of the application has been migrated to Kubernetes. And the plan is to move everything to the Google Cloud Platform by the end of 2018. "We are still running some servers in our own data centers, but all of the containerization efforts and describing our services as Kubernetes manifests will allow us to quite easily make that shift," says Meury.

{{< case-studies/quote
  image="/images/case-studies/ricardoch/banner4.png"
  author="CEDRIC MEURY, HEAD OF PLATFORM ENGINEERING, RICARDO.CH"
>}}
"One of the core moments was when a front-end developer asked me how to do a port forward from his laptop to a front-end application to debug, and I told him the command. And he was like, 'Wow, that's all I need to do?' He was super excited and happy about it. That showed me that this power in the right hands can just accelerate development."
{{< /case-studies/quote >}}
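The command in question is presumably `kubectl port-forward`; a minimal example, with a placeholder Deployment name:

```bash
# Forward local port 8080 to port 80 of a pod from the "frontend" Deployment;
# the app is then reachable at http://localhost:8080 for local debugging.
kubectl port-forward deployment/frontend 8080:80
```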

The impact has been great. Moving from its custom data center and virtual machines to containerized infrastructure and cloud services is expected to result in 50% cost savings for the company. The number of deployments to production has gone from fewer than 10 a week to 30-60 per day. Before, "when there was a problem with something in production, tickets or complaints would be thrown over the wall to operations, the classical problem," says Meury. "Now, people have the chance to look into operations and troubleshoot for themselves first because everything is deployed in a standardized way. That reduces time and uncertainty."


Meury also sees the impact in everyday interactions: "A couple of weeks ago, I saw a product manager doing a pull request for a JSON file that contains some variables, and someone else accepted it. And it was deployed after a couple of minutes or seconds even, which was unthinkable before. There used to be quite a chain of things that needed to happen, the whole monolith was difficult to understand, even for engineers. So, previously requests would go into large, inefficient Kanban boards and hopefully someone will have done the change after weeks and months."
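The case study does not spell out the mechanics, but a merged change to a variables file typically reaches the cluster through a CI step along these lines; every name below is hypothetical:

```bash
# Render the JSON variables file into a ConfigMap and apply it idempotently:
kubectl create configmap app-variables --from-file=variables.json \
  --dry-run=client -o yaml | kubectl apply -f -

# Restart the consuming Deployment so pods pick up the new values:
kubectl rollout restart deployment/marketplace-web
kubectl rollout status deployment/marketplace-web
```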


The divide between Dev and Ops has also diminished. "After a couple of months, I got requests by people saying, 'Hey, could you help me install the Kubernetes client? I want to actually look at what's going on,'" says Meury. "People were directly looking at the state of the system, bringing them much, much closer to the operations." Before, infrastructure- and platform-related projects took months or years to complete; now developers and operators can work together to deploy infrastructure parts via Kubernetes in a matter of weeks and sometimes days.

{{< case-studies/quote author="CEDRIC MEURY, HEAD OF PLATFORM ENGINEERING, RICARDO.CH" >}}
"One of my colleagues was listening to all the talks at KubeCon, and he was overwhelmed by all the tools, technologies, frameworks out there that are currently lacking on our platform, but at the same time, he's very happy to know that in the future there is so much that we can still explore and we can improve and we can work on."
{{< /case-studies/quote >}}

The ability to have insight into the system has extended to other parts of the company, too. "I found out that one of our customer support representatives looks at Grafana metrics to find out whether the system is running fine, which is fantastic," says Meury. "Prometheus is directly hooked into customer care."


The ricardo.ch cloud native journey has perhaps had the most impact on the Ops team. "We have an operations team that comes from a hardware-based background, and right now they are relearning how to operate in a more virtualized and cloud native world, with great success so far," says Meury. "So besides still operating on-site data center firewalls, they learn to code in Go or do some Python scripting at the same time. Former network administrators are writing Go code. It's just really cool."


For Meury, the journey boils down to this. "One of my colleagues was listening to all the talks at KubeCon, and he was overwhelmed by all the tools, technologies, frameworks out there that are currently lacking on our platform," says Meury. "But at the same time, he's very happy to know that in the future there is so much that we can still explore and we can improve and we can work on. We're transitioning from seeing problems everywhere—like, 'This is broken' or 'This is down, and we have to fix it'—more to, 'How can we actually improve and automate more, and make it nicer for developers and ultimately for the end users?'"

\ No newline at end of file
diff --git a/content/bn/case-studies/ricardo-ch/ricardo-ch_featured_logo.png b/content/bn/case-studies/ricardo-ch/ricardo-ch_featured_logo.png
new file mode 100644
index 0000000000000..c462c7ba565c2
Binary files /dev/null and b/content/bn/case-studies/ricardo-ch/ricardo-ch_featured_logo.png differ
diff --git a/content/bn/case-studies/ricardo-ch/ricardo-ch_featured_logo.svg b/content/bn/case-studies/ricardo-ch/ricardo-ch_featured_logo.svg
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/content/en/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg b/content/bn/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg
similarity index 100%
rename from content/en/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg
rename to content/bn/case-studies/ricardo-ch/ricardo.ch_featured_logo.svg
diff --git a/content/bn/case-studies/slamtec/index.html b/content/bn/case-studies/slamtec/index.html
new file mode 100644
index 0000000000000..c8e45c457de41
--- /dev/null
+++ b/content/bn/case-studies/slamtec/index.html
@@ -0,0 +1,71 @@
---
title: Slamtec Case Study
linkTitle: slamtec
case_study_styles: true
cid: caseStudies
featured: false

new_case_study_styles: true
heading_background: /images/case-studies/slamtec/banner1.jpg
heading_title_logo: /images/slamtec_logo.png
case_study_details:
  - Company: Slamtec
  - Location: Shanghai, China
  - Industry: Robotics
---

Challenge


Founded in 2013, Slamtec provides service robot autonomous localization and navigation solutions. The company's strength lies in its R&D team's ability to quickly introduce, and continually iterate on, its core products. In the past few years, the company, which had a legacy infrastructure based on Alibaba Cloud and VMware vSphere, began looking to build its own stable and reliable container cloud platform to host its Internet of Things applications. "Our needs for the cloud platform included high availability, scalability and security; multi-granularity monitoring alarm capability; friendliness to containers and microservices; and perfect CI/CD support," says Benniu Ji, Director of Cloud Computing Business Division.


Solution


Ji's team chose Kubernetes for orchestration. "CNCF brings quality assurance and a complete ecosystem for Kubernetes, which is very important for the wide application of Kubernetes," says Ji. Thus Slamtec decided to adopt other CNCF projects as well: Prometheus monitoring, Fluentd logging, Harbor registry, and Helm package manager.


Impact


With the new platform, Ji reports that Slamtec has experienced "18+ months of 100% stability!" For users, there is now zero service downtime and seamless upgrades. "Kubernetes with third-party service mesh integration (Istio, along with Jaeger and Envoy) significantly reduced the microservice configuration and maintenance efforts by 50%," he adds. With centralized metrics monitoring and log aggregation provided by Prometheus and Fluentd, teams are saving 50% of time spent on troubleshooting and debugging. Harbor replication has allowed production/staging/testing environments to cross public cloud and the private Kubernetes cluster to share the same container registry, resulting in 30% savings of CI/CD efforts. Plus, Ji says, "Helm has accelerated prototype development and environment setup with its rich sharing charts."

{{< case-studies/quote author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION" >}}
"Cloud native technology helps us ensure high availability of our business, while improving development and testing efficiency, shortening the research and development cycle and enabling rapid product delivery."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
Founded in 2013, Slamtec provides service robot autonomous localization and navigation solutions. In this fast-moving space, the company built its success on the ability of its R&D team to quickly introduce, and continually iterate on, its core products.
{{< /case-studies/lead >}}

To sustain that development velocity, the company over the past few years began looking to build its own stable and reliable container cloud platform to host its Internet of Things applications. With a legacy infrastructure based on Alibaba Cloud and VMware vSphere, Slamtec teams had already adopted microservice architecture and continuous delivery, for "fine granularity on-demand scaling, fault isolation, ease of development, testing, and deployment, and for facilitating high-speed iteration," says Benniu Ji, Director of Cloud Computing Business Division. So "our needs for the cloud platform included high availability, scalability and security; multi-granularity monitoring alarm capability; friendliness to containers and microservices; and perfect CI/CD support."


After an evaluation of existing technologies, Ji's team chose Kubernetes for orchestration. "CNCF brings quality assurance and a complete ecosystem for Kubernetes, which is very important for the wide application of Kubernetes," says Ji. Plus, "avoiding binding to an infrastructure technology or provider can help us ensure that our business is deployed and migrated in cross-regional environments, and can serve users all over the world."

{{< case-studies/quote
  image="/images/case-studies/slamtec/banner3.jpg"
  author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION"
>}}
"CNCF brings quality assurance and a complete ecosystem for Kubernetes, which is very important for the wide application of Kubernetes."
{{< /case-studies/quote >}}

Thus Slamtec decided to adopt other CNCF projects as well. "We built a monitoring and logging system based on Prometheus and Fluentd," says Ji. "The integration between Prometheus/Fluentd and Kubernetes is convenient, with multiple dimensions of data monitoring and log collection capabilities."
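The convenient integration Ji mentions typically relies on Prometheus's built-in Kubernetes service discovery. A minimal scrape configuration using the common scrape-annotation convention (an assumption here, not Slamtec's published setup):

```bash
# Write a minimal Prometheus config that discovers pods via the Kubernetes API
# and keeps only those annotated prometheus.io/scrape: "true".
cat > prometheus.yml <<EOF
scrape_configs:
  - job_name: kubernetes-pods
    kubernetes_sd_configs:
      - role: pod
    relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: "true"
EOF
```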


The company uses Harbor as a container image repository. "Harbor's replication function helps us implement CI/CD on both private and public clouds," says Ji. "In addition, multi-project support, certification and policy configuration, and integration with Kubernetes are also excellent functions." Helm is also being used as a package manager, and the team is evaluating the Istio framework. "We're very pleased that Kubernetes and these frameworks can be seamlessly integrated," Ji adds.
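On the Helm side, the workflow behind those "rich sharing charts" looks roughly like this; the repository and chart are generic public examples, not necessarily what Slamtec uses:

```bash
# Add a public chart repository and stand up a prototype dependency
# (Redis here) with a single command.
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm install cache bitnami/redis --namespace prototype --create-namespace
```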

{{< case-studies/quote
  image="/images/case-studies/slamtec/banner4.jpg"
  author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION"
>}}
"Cloud native is suitable for microservice architecture, it's suitable for fast iteration and agile development, and it has a relatively perfect ecosystem and active community."
{{< /case-studies/quote >}}

With the new platform, Ji reports that Slamtec has experienced "18+ months of 100% stability!" For users, there is now zero service downtime and seamless upgrades. "We benefit from the abstraction of Kubernetes from network and storage," says Ji. "The dependence on external services can be decoupled from the service and placed under unified management in the cluster."


Using Kubernetes and Istio "significantly reduced the microservice configuration and maintenance efforts by 50%," he adds. With centralized metrics monitoring and log aggregation provided by Prometheus and Fluentd, teams are saving 50% of time spent on troubleshooting and debugging. Harbor replication has allowed production/staging/testing environments to cross public cloud and the private Kubernetes cluster to share the same container registry, resulting in 30% savings of CI/CD efforts. Plus, Ji adds, "Helm has accelerated prototype development and environment setup with its rich sharing charts."


In short, Ji says, Slamtec's new platform is helping it achieve one of its primary goals: the quick and easy release of products. With multiple release models and a centralized control interface, the platform is changing developers' lives for the better. Slamtec also offers a unified API for the development of automated deployment tools according to users' specific needs.

{{< case-studies/quote author="BENNIU JI, DIRECTOR OF CLOUD COMPUTING BUSINESS DIVISION" >}}
"We benefit from the abstraction of Kubernetes from network and storage, the dependence on external services can be decoupled from the service and placed under unified management in the cluster."
{{< /case-studies/quote >}}

Given its own success with cloud native, Slamtec has just one piece of advice for organizations considering making the leap. "For already containerized services, you should migrate them to the cloud native architecture as soon as possible and enjoy the advantages brought by the cloud native ecosystem," Ji says. "To migrate traditional, non-containerized services, in addition to the architecture changes of the service itself, you need to fully consider the operation and maintenance workload required to build the cloud native architecture."


That said, the cost-benefit analysis has been simple for Slamtec. "Cloud native technology is suitable for microservice architecture, it's suitable for fast iteration and agile development, and it has a relatively perfect ecosystem and active community," says Ji. "It helps us ensure high availability of our business, while improving development and testing efficiency, shortening the research and development cycle and enabling rapid product delivery."

\ No newline at end of file
diff --git a/content/bn/case-studies/slamtec/slamtec_featured_logo.png b/content/bn/case-studies/slamtec/slamtec_featured_logo.png
new file mode 100644
index 0000000000000..598db9fe43a94
Binary files /dev/null and b/content/bn/case-studies/slamtec/slamtec_featured_logo.png differ
diff --git a/content/bn/case-studies/slamtec/slamtec_featured_logo.svg b/content/bn/case-studies/slamtec/slamtec_featured_logo.svg
new file mode 100644
index 0000000000000..7b4f6d6af1683
--- /dev/null
+++ b/content/bn/case-studies/slamtec/slamtec_featured_logo.svg
@@ -0,0 +1 @@
kubernetes.io-logos2
\ No newline at end of file
diff --git a/content/bn/case-studies/slingtv/index.html b/content/bn/case-studies/slingtv/index.html
new file mode 100644
index 0000000000000..c48e008d82ea3
--- /dev/null
+++ b/content/bn/case-studies/slingtv/index.html
@@ -0,0 +1,79 @@
---
title: SlingTV Case Study
linkTitle: Sling TV
case_study_styles: true
cid: caseStudies
featured: true
weight: 49
quote: >
  I would almost be so bold as to say that most of these applications that we are building now would not have been possible without the cloud native patterns and the flexibility that Kubernetes enables.

new_case_study_styles: true
heading_background: /images/case-studies/slingtv/banner1.jpg
heading_title_logo: /images/slingtv_logo.png
subheading: >
  Sling TV: Marrying Kubernetes and AI to Enable Proper Web Scale
case_study_details:
  - Company: Sling TV
  - Location: Englewood, Colorado
  - Industry: Streaming television
---

Challenge


Launched by DISH Network in 2015, Sling TV experienced great customer growth from the beginning. After just a year, "we were going through some growing pains of some of the legacy systems and trying to find the right architecture to enable our future," says Brad Linder, Sling TV's Cloud Native & Big Data Evangelist. The company has particular challenges: "We take live TV and distribute it over the internet out to a user's device that we do not control," says Linder. "In a lot of ways, we are working in the Wild West: The internet is what it is going to be, and if a customer's service does not work for whatever reason, they do not care why. They just want things to work. Those are the variables of the equation that we have to try to solve. We really have to try to enable optionality and good customer experience at web scale."


Solution


Led by the belief that "the cloud native architectures and patterns really give us a lot of flexibility in meeting the needs of that sort of customer base," Linder partnered with Rancher Labs to build Sling TV's next-generation platform around Kubernetes. "We are going to need to enable a hybrid cloud strategy including multiple public clouds and an on-premise VMware multi data center environment to meet the needs of the business at some point, so getting that sort of abstraction was a real goal," he says. "That is one of the biggest reasons why we picked Kubernetes." The team launched its first applications on Kubernetes in Sling TV's two internal data centers. The push to enable AWS as a data center option is underway and should be available by the end of 2018. The team has added Prometheus for monitoring and Jaeger for tracing, to work alongside the company's existing tool sets: Zenoss, New Relic and ELK.


Impact


"We are getting to the place where we can one-click deploy an entire data center – the compute, network, Kubernetes, logging, monitoring and all the apps," says Linder. "We have really enabled a platform thinking based approach to allowing applications to consume common tools. A new application can be onboarded in about an hour using common tooling and CI/CD processes. The gains on that side have been huge. Before, it took at least a few days to get things sorted for a new application to deploy. That does not consider the training of our operations staff to manage this new application. It is two or three orders of magnitude of savings in time and cost, and operationally it has given us the opportunity to let a core team of talented operations engineers manage common infrastructure and tooling to make our applications available at web scale."

{{< case-studies/quote author="Brad Linder, Cloud Native & Big Data Evangelist for Sling TV" >}}
"I would almost be so bold as to say that most of these applications that we are building now would not have been possible without the cloud native patterns and the flexibility that Kubernetes enables."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
The beauty of streaming television, like the service offered by Sling TV, is that you can watch it from any device you want, wherever you want.
{{< /case-studies/lead >}}

Of course, from the provider side of things, that creates a particular set of challenges. "We take live TV and distribute it over the internet out to a user's device that we do not control," says Brad Linder, Sling TV's Cloud Native & Big Data Evangelist. "In a lot of ways, we are working in the Wild West: The internet is what it is going to be, and if a customer's service does not work for whatever reason, they do not care why. They just want things to work. Those are the variables of the equation that we have to try to solve. We really have to try to enable optionality and we have to do it at web scale."


Indeed, Sling TV experienced great customer growth from the moment of its launch by DISH Network in 2015. After just a year, "we were going through some growing pains of some of the legacy systems and trying to find the right architecture to enable our future," says Linder. Tasked with building a next-generation web scale platform for the "personalized customer experience," Linder has spent the past year bringing Kubernetes to Sling TV.


Led by the belief that "the cloud native architectures and patterns really give us a lot of flexibility in meeting the needs of our customers," Linder partnered with Rancher Labs to build the platform around Kubernetes. "They have really helped us get our head around how to use Kubernetes," he says. "We needed the flexibility to enable our use case versus just a simple orchestrator. Enabling our future in a way that did not give us vendor lock-in was also a key part of our strategy. I think that is part of the Rancher value proposition."

{{< case-studies/quote
  image="/images/case-studies/slingtv/banner3.jpg"
  author="Brad Linder, Cloud Native & Big Data Evangelist for Sling TV"
>}}
"We needed the flexibility to enable our use case versus just a simple orchestrator. Enabling our future in a way that did not give us vendor lock-in was also a key part of our strategy. I think that is part of the Rancher value proposition."
{{< /case-studies/quote >}}

One big reason he chose Kubernetes was getting a level of abstraction that would enable the company to "enable a hybrid cloud strategy including multiple public clouds and an on-premise VMware multi data center environment to meet the needs of the business," he says. Another factor was how much the Kubernetes ecosystem has matured over the past couple of years. "We have spent a lot of time and energy around making logging, monitoring and alerting production ready to give us insights into applications' well-being," says Linder. The team has added Prometheus for monitoring and Jaeger for tracing, to work alongside the company's existing tool sets: Zenoss, New Relic and ELK.


With the emphasis on common tooling, "We are getting to the place where we can one-click deploy an entire data center – the compute, network, Kubernetes, logging, monitoring and all the apps," says Linder. "We have really enabled a platform thinking based approach to allowing applications to consume common tools and services. A new application can be onboarded in about an hour using common tooling and CI/CD processes. The gains on that side have been huge. Before, it took at least a few days to get things sorted for a new application to deploy. That does not consider the training of our operations staff to manage this new application. It is two or three orders of magnitude of savings in time and cost, and operationally it has given us the opportunity to let a core team of talented operations engineers manage common infrastructure and tooling to make our applications available at web scale."

{{< case-studies/quote
  image="/images/case-studies/slingtv/banner4.jpg"
  author="Brad Linder, Cloud Native & Big Data Evangelist for Sling TV"
>}}
"We have to be able to react to changes and hiccups in the matrix. It is the foundation for our ability to deliver a high-quality service for our customers."
{{< /case-studies/quote >}}

The team launched its first applications on Kubernetes in Sling TV's two internal data centers in the early part of Q1 2018 and began to enable AWS as a data center option. The company plans to expand into other public clouds in the future.


The first application that went into production is a web socket-based back-end notification service. "It allows back-end changes to trigger messages to our clients in the field without the polling," says Linder. "We are talking about very high volumes of messages with this application. Without something like Kubernetes to be able to scale up and down, as well as just support that overall workload, that is pretty hard to do. I would almost be so bold as to say that most of these applications that we are building now would not have been possible without the cloud native patterns and the flexibility that Kubernetes enables."


Linder oversees three teams working together on building the next-generation platform: a platform engineering team; an enterprise middleware services team; and a big data and analytics team. "We have really tried to bring everything together to be able to have a client application interact with a cloud native middleware layer. That middleware layer must run on a platform, consume platform services and then have logs and events monitored by an artificial agent to keep things running smoothly," says Linder.

{{< case-studies/quote author="BRAD LINDER, CLOUD NATIVE & BIG DATA EVANGELIST FOR SLING TV" >}}
This undertaking is about "trying to marry Kubernetes with AI to enable web scale that just works."
{{< /case-studies/quote >}}

Ultimately, this undertaking is about "trying to marry Kubernetes with AI to enable web scale that just works," he adds. "We want the artificial agents and the big data platform using the actual logs and events coming out of the applications, Kubernetes, the infrastructure, backing services and changes to the environment to make decisions like, 'Hey we need more capacity for this service so please add more nodes.' From a platform perspective, if you are truly doing web scale stuff and you are not using AI and big data, in my opinion, you are going to implode under your own weight. It is not a question of if, it is when. If you are in a 'millions of users' sort of environment, that implosion is going to be catastrophic. We are on our way to this goal and have learned a lot along the way."


For Sling TV, moving to cloud native has been exactly what they needed. "We have to be able to react to changes and hiccups in the matrix," says Linder. "It is the foundation for our ability to deliver a high-quality service for our customers. Building intelligent platforms, tools and clients in the field consuming those services has got to be part of all of this. In my eyes that is a big part of what cloud native is all about. It is taking these distributed, potentially unreliable entities and enabling a robust customer experience they expect."

\ No newline at end of file
diff --git a/content/bn/case-studies/slingtv/slingtv_featured_logo.png b/content/bn/case-studies/slingtv/slingtv_featured_logo.png
new file mode 100644
index 0000000000000..b52143ee8b6c6
Binary files /dev/null and b/content/bn/case-studies/slingtv/slingtv_featured_logo.png differ
diff --git a/content/bn/case-studies/slingtv/slingtv_featured_logo.svg b/content/bn/case-studies/slingtv/slingtv_featured_logo.svg
new file mode 100644
index 0000000000000..764f8ddd884a8
--- /dev/null
+++ b/content/bn/case-studies/slingtv/slingtv_featured_logo.svg
@@ -0,0 +1 @@
kubernetes.io-logos2
\ No newline at end of file
diff --git a/content/bn/case-studies/sos/index.html b/content/bn/case-studies/sos/index.html
new file mode 100644
index 0000000000000..da729565c5eb7
--- /dev/null
+++ b/content/bn/case-studies/sos/index.html
@@ -0,0 +1,83 @@
---
title: SOS International Case Study
linkTitle: SOS International
case_study_styles: true
cid: caseStudies
logo: sos_featured_logo.png

new_case_study_styles: true
heading_background: /images/case-studies/sos/banner1.jpg
heading_title_logo: /images/sos_logo.png
subheading: >
  SOS International: Using Kubernetes to Provide Emergency Assistance in a Connected World
case_study_details:
  - Company: SOS International
  - Location: Frederiksberg, Denmark
  - Industry: Medical and Travel Assistance
---

Challenge


For the past six decades, SOS International has been providing reliable medical and travel assistance in the Nordic region. In recent years, the company's business strategy has required increasingly intense development in the digital space, but when it came to its IT systems, "SOS has a very fragmented legacy," with three traditional monoliths (Java, .NET, and IBM's AS/400) and a waterfall approach, says Martin Ahrentsen, Head of Enterprise Architecture. "We have been forced to institute both new technology and new ways of working, so we could be more efficient with a shorter time to market. It was a much more agile approach, and we needed to have a platform that can help us deliver that to the business."


Solution


After an unsuccessful search for a standard system, the company decided to take a platform approach and look for a solution that rolls up Kubernetes and the container technology. Red Hat OpenShift proved to be a perfect fit for SOS's fragmented systems. "We have a lot of different technologies that we use, both code languages and others, and all of them could use the resources on the new platform," says Ahrentsen. Of the company's three monoliths, "we can provide this new bleeding edge technology to two of them (.NET and Java)." The platform went live in the spring of 2018; there are now six greenfield projects based on microservices architecture underway, plus all of the company's Java applications are currently going through a "lift and shift" migration.


Impact


Kubernetes has delivered "improved time to market, agility, and the ability to adapt to changes and new technologies," says Ahrentsen. "Just the time between when the software is ready for release and when it can be released has dramatically been improved." The way of thinking at SOS International has also changed for the better: "Since we have Kubernetes and easy access to scripts that can help us automate, creating CI/CD pipelines easily, that has spawned a lot of internal interest in how to do this fully automated, all the way. It creates a very good climate in order to start the journey," he says. Moreover, being part of the cloud native community has helped the company attract talent. "They want to work with the cool, new technologies," says Ahrentsen. "During our onboarding, we could see that we were chosen by IT professionals because we provided the new technologies."

{{< case-studies/quote author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International" >}}
"The speed of the changes that cloud native software and technologies drive right now is amazing, and following and adopting it is very crucial for us. The amazing technology provided by Kubernetes and cloud native has started the change for SOS towards a digital future."
{{< /case-studies/quote >}}

{{< case-studies/lead >}}
For six decades, SOS International has provided reliable emergency medical and travel assistance for customers in the Nordic countries.
{{< /case-studies/lead >}}

SOS operators handle a million cases and over a million phone calls a year. But in the past four years, the company's business strategy has required increasingly intense development in the digital space.


When it comes to its IT systems, "SOS has a very fragmented legacy," with three traditional monoliths running in the company's own data centers and a waterfall approach, says Martin Ahrentsen, Head of Enterprise Architecture. "We had to institute both new technology and new ways of working so we could be more efficient, with a shorter time to market. It was a much more agile approach, and we needed to have a platform that can help us deliver that to the business."


For a long time, Ahrentsen and his team searched for a standard solution that could work at SOS. "There aren't that many assistance companies like us, so you cannot get a standard system that fits for that; there is no perfect match," he says. "We would have to take a standard system and twist it too much so it is not standard anymore. Based on that, we decided to find a technology platform instead, with some common components that we could use to build the new digital systems and core systems."

{{< case-studies/quote
  image="/images/case-studies/sos/banner3.jpg"
  author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International"
>}}
"We have to deliver new digital services, but we also have to migrate the old stuff, and we have to transform our core systems into new systems built on top of this platform. One of the reasons why we chose this technology is that we could build new digital services while changing the old one."
{{< /case-studies/quote >}}

Sold on what Kubernetes could do, Ahrentsen zeroed in on platforms that could meet the business's needs right away. The company opted to use Red Hat's OpenShift container platform, which incorporates Docker containers and Kubernetes, as well as a whole stack of technologies, including Red Hat Hyperconverged Infrastructure and some middleware components, all from the open source community.


Based on the company's criteria—technology fit, agility fit, legal requirements, and competencies—the OpenShift solution seemed like a perfect fit for SOS's fragmented systems. "We have a lot of different technologies that we use, both code languages and others, and all of them could use the resources on the new platform," says Ahrentsen. Of the company's three monoliths, "we can provide this new bleeding edge technology to two of them (.NET and Java)."


The platform went live in the spring of 2018; six greenfield projects based on microservices architecture were initially launched, plus all of the company's Java applications are currently going through a "lift and shift" migration. One of the first Kubernetes-based projects to go live is Remote Medical Treatment, a solution in which customers can contact the SOS alarm center via voice, chat, or video. "We managed to develop it in quite a short timeframe with focus on full CI/CD pipelining and a modern microservice architecture all running in a dual OpenShift cluster setup," says Ahrentsen. Onsite, which is used for dispatching rescue trucks around the Nordic countries, and Follow Your Truck, which allows customers to track tow trucks, are also being rolled out.

{{< case-studies/quote
  image="/images/case-studies/sos/banner4.jpg"
  author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International"
>}}
"During our onboarding, we could see that we were chosen by IT professionals because we provided the new technologies."
{{< /case-studies/quote >}}

The platform is still running on premise, because some of SOS's customers in the insurance industry, for whom the company handles data, don't yet have a cloud strategy. Kubernetes is allowing SOS to start in the data center and move to the cloud when the business is ready. "Over the next three to five years, all of them will have a strategy, and we could probably take the data and go to the cloud," says Ahrentsen. There's also the possibility of moving to a hybrid cloud setup for sensitive and non-sensitive data.

+ +

SOS's technology is certainly in a state of transition. "We have to deliver new digital services, but we also have to migrate the old stuff, and we have to transform our core systems into new systems built on top of this platform," says Ahrentsen. "One of the reasons why we chose this technology is that we could build new digital services while changing the old one."

+ +

But already, Kubernetes has delivered improved time to market, as evidenced by how quickly the greenfield projects were developed and released. "Just the time between when the software is ready for release and when it can be released has dramatically been improved," says Ahrentsen.

+ +

Moreover, being part of the cloud native community has helped the company attract talent as it pursues a goal of growing the ranks of engineers, operators, and architects from 60 to 100 this year. "They want to work with the cool, new technologies," says Ahrentsen. "During our onboarding, we could see that we were chosen by IT professionals because we provided the new technologies."

+ +{{< case-studies/quote author="Martin Ahrentsen, Head of Enterprise Architecture, SOS International" >}} +"The future world where everything is connected and sends data will create a big potential for us in terms of new market opportunities. But it will also set a big demand on the IT platform and what we need to deliver." +{{< /case-studies/quote >}} + +

The way of thinking at SOS International has also changed dramatically: "Since we have Kubernetes and easy access to scripts that can help us automate, creating CI/CD pipelines easily, that has spawned a lot of internal interest in how to do this fully automated, all the way. It creates a very good climate in order to start the journey."

+ +

For this journey at SOS, digitalization and optimization are the key words. "For IT to deliver this, we need to improve, and that is not just on the way of using Kubernetes and the platform," says Ahrentsen. "It's also a way of building the systems to be ready for automation, and afterwards, machine learning and other interesting technologies that are on the way."

+ +

Case in point: the introduction of the internet of things into automobiles. The European Commission now mandates that all new cars be equipped with eCall, which transmits location and other data in case of a serious traffic accident. SOS provides this service as smart auto assistance. "We receive the call and find out if an emergency response team needs to be sent, or if it's not heavy impact," says Ahrentsen. "The future world where everything is connected and sends data will create a big potential for us in terms of new market opportunities. But it will also set a big demand on the IT platform and what we need to deliver."

+ +

Ahrentsen feels that SOS is well equipped for the challenge, given the technology choices the company has made. "The speed of the changes that cloud native software and technologies drive right now is amazing, and following it and adopting it is very crucial for us," he says. "The amazing technology provided by Kubernetes and cloud native has started the change for SOS towards a digital future."

\ No newline at end of file diff --git a/content/bn/case-studies/sos/sos_featured_logo.png b/content/bn/case-studies/sos/sos_featured_logo.png new file mode 100644 index 0000000000000..a97671af6d8f5 Binary files /dev/null and b/content/bn/case-studies/sos/sos_featured_logo.png differ diff --git a/content/bn/case-studies/sos/sos_featured_logo.svg b/content/bn/case-studies/sos/sos_featured_logo.svg new file mode 100644 index 0000000000000..4e611798ac356 --- /dev/null +++ b/content/bn/case-studies/sos/sos_featured_logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/content/bn/case-studies/spotify/index.html b/content/bn/case-studies/spotify/index.html new file mode 100644 index 0000000000000..7448b4093933d --- /dev/null +++ b/content/bn/case-studies/spotify/index.html @@ -0,0 +1,79 @@ +--- +title: Spotify Case Study +linkTitle: Spotify +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/spotify/banner1.jpg +heading_title_text: Spotify +subheading: > + Spotify: An Early Adopter of Containers, Spotify Is Migrating from Homegrown Orchestration to Kubernetes +case_study_details: + - Company: Spotify + - Location: Global + - Industry: Entertainment +--- + +

Challenge

+ +

Launched in 2008, the audio-streaming platform has grown to over 200 million monthly active users across the world. "Our goal is to empower creators and enable a really immersive listening experience for all of the consumers that we have today—and hopefully the consumers we'll have in the future," says Jai Chakrabarti, Director of Engineering, Infrastructure and Operations. An early adopter of microservices and Docker, Spotify had containerized microservices running across its fleet of VMs with a homegrown container orchestration system called Helios. By late 2017, it became clear that "having a small team working on the features was just not as efficient as adopting something that was supported by a much bigger community," he says.

+ +

Solution

+ +

"We saw the amazing community that had grown up around Kubernetes, and we wanted to be part of that," says Chakrabarti. Kubernetes was more feature-rich than Helios. Plus, "we wanted to benefit from added velocity and reduced cost, and also align with the rest of the industry on best practices and tools." At the same time, the team wanted to contribute its expertise and influence in the flourishing Kubernetes community. The migration, which would happen in parallel with Helios running, could go smoothly because "Kubernetes fit very nicely as a complement and now as a replacement to Helios," says Chakrabarti.

+ +

Impact

+ +

The team spent much of 2018 addressing the core technology issues required for a migration, which started late that year and is a big focus for 2019. "A small percentage of our fleet has been migrated to Kubernetes, and some of the things that we've heard from our internal teams are that they have less of a need to focus on manual capacity provisioning and more time to focus on delivering features for Spotify," says Chakrabarti. The biggest service currently running on Kubernetes takes about 10 million requests per second as an aggregate service and benefits greatly from autoscaling, says Site Reliability Engineer James Wen. Plus, he adds, "Before, teams would have to wait for an hour to create a new service and get an operational host to run it in production, but with Kubernetes, they can do that on the order of seconds and minutes." In addition, with Kubernetes's bin-packing and multi-tenancy capabilities, CPU utilization has improved on average two- to threefold.

+ +{{< case-studies/quote author="Jai Chakrabarti, Director of Engineering, Infrastructure and Operations, Spotify" >}} +"We saw the amazing community that's grown up around Kubernetes, and we wanted to be part of that. We wanted to benefit from added velocity and reduced cost, and also align with the rest of the industry on best practices and tools." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +"Our goal is to empower creators and enable a really immersive listening experience for all of the consumers that we have today—and hopefully the consumers we'll have in the future," says Jai Chakrabarti, Director of Engineering, Infrastructure and Operations at Spotify. Since the audio-streaming platform launched in 2008, it has already grown to over 200 million monthly active users around the world, and for Chakrabarti's team, the goal is solidifying Spotify's infrastructure to support all those future consumers too. +{{< /case-studies/lead >}} + +

An early adopter of microservices and Docker, Spotify had containerized microservices running across its fleet of VMs since 2014. The company used an open source, homegrown container orchestration system called Helios, and in 2016-17 completed a migration from on-premises data centers to Google Cloud. Underpinning these decisions, "We have a culture around autonomous teams, over 200 autonomous engineering squads who are working on different pieces of the pie, and they need to be able to iterate quickly," Chakrabarti says. "So for us to have developer velocity tools that allow squads to move quickly is really important."

+ +

But by late 2017, it became clear that "having a small team working on the Helios features was just not as efficient as adopting something that was supported by a much bigger community," says Chakrabarti. "We saw the amazing community that had grown up around Kubernetes, and we wanted to be part of that. We wanted to benefit from added velocity and reduced cost, and also align with the rest of the industry on best practices and tools." At the same time, the team wanted to contribute its expertise and influence in the flourishing Kubernetes community.

+ +{{< case-studies/quote + image="/images/case-studies/spotify/banner3.jpg" + author="Dave Zolotusky, Software Engineer, Infrastructure and Operations, Spotify" +>}} +"The community has been extremely helpful in getting us to work through all the technology much faster and much easier. And it's helped us validate all the things we're doing." +{{< /case-studies/quote >}} + +

Another plus: "Kubernetes fit very nicely as a complement and now as a replacement to Helios, so we could have it running alongside Helios to mitigate the risks," says Chakrabarti. "During the migration, the services run on both, so we're not having to put all of our eggs in one basket until we can validate Kubernetes under a variety of load circumstances and stress circumstances."

+ +

The team spent much of 2018 addressing the core technology issues required for the migration. "We were able to use a lot of the Kubernetes APIs and extensibility features of Kubernetes to support and interface with our legacy infrastructure, so the integration was straightforward and easy," says Site Reliability Engineer James Wen.

+ +

Migration started late that year and has accelerated in 2019. "Our focus is really on stateless services, and once we address our last remaining technology blocker, that's where we hope that the uptick will come from," says Chakrabarti. "For stateful services there's more work that we need to do."

+ +

A small percentage of Spotify's fleet, containing over 150 services, has been migrated to Kubernetes so far. "We've heard from our customers that they have less of a need to focus on manual capacity provisioning and more time to focus on delivering features for Spotify," says Chakrabarti. The biggest service currently running on Kubernetes takes over 10 million requests per second as an aggregate service and benefits greatly from autoscaling, says Wen. Plus, Wen adds, "Before, teams would have to wait for an hour to create a new service and get an operational host to run it in production, but with Kubernetes, they can do that on the order of seconds and minutes." In addition, with Kubernetes's bin-packing and multi-tenancy capabilities, CPU utilization has improved on average two- to threefold.
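As a rough way to see that kind of bin-packing in practice (a generic kubectl sketch, not Spotify's internal tooling; `kubectl top` assumes metrics-server is installed, and `<node-name>` is a placeholder):

```bash
# Generic sketch, not Spotify's tooling: compare actual node usage against
# what the scheduler has reserved. Requires metrics-server in the cluster.
kubectl top nodes

# Inspect one node's reserved CPU/memory versus capacity
# (substitute a real name from "kubectl get nodes"):
kubectl describe node <node-name> | grep -A 8 "Allocated resources"
```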

+ +{{< case-studies/quote + image="/images/case-studies/spotify/banner4.jpg" + author="James Wen, Site Reliability Engineer, Spotify" +>}} +"We were able to use a lot of the Kubernetes APIs and extensibility features to support and interface with our legacy infrastructure, so the integration was straightforward and easy." +{{< /case-studies/quote >}} + +

Chakrabarti points out that for all four of the top-level metrics that Spotify looks at—lead time, deployment frequency, time to resolution, and operational load—"there is impact that Kubernetes is having."

+ +

One success story that's come out of the early days of Kubernetes is a tool called Slingshot that a Spotify team built on Kubernetes. "With a pull request, it creates a temporary staging environment that self destructs after 24 hours," says Chakrabarti. "It's all facilitated by Kubernetes, so that's kind of an exciting example of how, once the technology is out there and ready to use, people start to build on top of it and craft their own solutions, even beyond what we might have envisioned as the initial purpose of it."

+ +
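The underlying pattern can be sketched with plain Kubernetes primitives. The following is a hypothetical outline only, not Slingshot's actual implementation; the namespace naming scheme, registry URL, and ports are all assumptions:

```bash
# Hypothetical per-pull-request staging environment, NOT Slingshot's code.
PR_NUMBER=123
NS="pr-${PR_NUMBER}-staging"

# Stand up an isolated namespace and deploy the PR build into it
kubectl create namespace "$NS"
kubectl -n "$NS" create deployment myapp \
  --image="registry.example.com/myapp:pr-${PR_NUMBER}"
kubectl -n "$NS" expose deployment myapp --port=80 --target-port=8080

# A scheduled cleanup can later tear the whole environment down in one call
kubectl delete namespace "$NS"
```

Because deleting a namespace removes every object inside it, the "self destructs after 24 hours" behavior reduces to running that last command on a timer.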

Spotify has also started to use gRPC and Envoy, replacing existing homegrown solutions, just as it had with Kubernetes. "We created things because of the scale we were at, and there was no other solution existing," says Dave Zolotusky, Software Engineer, Infrastructure and Operations. "But then the community kind of caught up and surpassed us, even for tools that work at that scale."

+ +{{< case-studies/quote author="James Wen, Site Reliability Engineer, Spotify" >}} +"It's been surprisingly easy to get in touch with anybody we wanted to, to get expertise on any of the things we're working with. And it's helped us validate all the things we're doing." +{{< /case-studies/quote >}} + +

Both of those technologies are in early stages of adoption, but already "we have reason to believe that gRPC will have a more drastic impact during early development by helping with a lot of issues like schema management, API design, weird backward compatibility issues, things like that," says Zolotusky. "So we're leaning heavily on gRPC to help us in that space."

+ +

As the team continues to fill out Spotify's cloud native stack—tracing is up next—it is using the CNCF landscape as a helpful guide. "We look at things we need to solve, and if there are a bunch of projects, we evaluate them equivalently, but there is definitely value to the project being a CNCF project," says Zolotusky.

+ +

Spotify's experience so far with Kubernetes bears this out. "The community has been extremely helpful in getting us to work through all the technology much faster and much easier," Zolotusky says. "It's been surprisingly easy to get in touch with anybody we wanted to, to get expertise on any of the things we're working with. And it's helped us validate all the things we're doing."

\ No newline at end of file diff --git a/content/bn/case-studies/spotify/spotify-featured.svg b/content/bn/case-studies/spotify/spotify-featured.svg new file mode 100644 index 0000000000000..d1cc3418dec97 --- /dev/null +++ b/content/bn/case-studies/spotify/spotify-featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos \ No newline at end of file diff --git a/content/bn/case-studies/spotify/spotify_featured.svg b/content/bn/case-studies/spotify/spotify_featured.svg new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/content/bn/case-studies/spotify/spotify_featured_logo.png b/content/bn/case-studies/spotify/spotify_featured_logo.png new file mode 100644 index 0000000000000..def15c51bfc14 Binary files /dev/null and b/content/bn/case-studies/spotify/spotify_featured_logo.png differ diff --git a/content/bn/case-studies/squarespace/index.html b/content/bn/case-studies/squarespace/index.html new file mode 100644 index 0000000000000..649c678003139 --- /dev/null +++ b/content/bn/case-studies/squarespace/index.html @@ -0,0 +1,71 @@ +--- +title: Squarespace Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/squarespace/banner1.jpg +heading_title_logo: /images/squarespace_logo.png +subheading: > + Squarespace: Gaining Productivity and Resilience with Kubernetes +case_study_details: + - Company: Squarespace + - Location: New York, N.Y. + - Industry: Software as a Service, Website-Building Platform +--- + +

Challenge

+ +

Moving from a monolith to microservices in 2014 "solved a problem on the development side, but it pushed that problem to the infrastructure team," says Kevin Lynch, Staff Engineer on the Site Reliability team at Squarespace. "The infrastructure deployment process on our 5,000 VM hosts was slowing everyone down."

+ +

Solution

+ +

The team experimented with container orchestration platforms, and found that Kubernetes "answered all the questions that we had," says Lynch. The company began running Kubernetes in its data centers in 2016.

+ +

Impact

+ +

Since Squarespace moved to Kubernetes, in conjunction with modernizing its networking stack, deployment time has been reduced by almost 85%. Before, their VM deployment would take half an hour; now, says Lynch, "someone can generate a templated application, deploy it within five minutes, and have actual instances containerized, running in our staging environment at that point." Because of that, "productivity time is the big cost saver," he adds. "When we started the Kubernetes project, we had probably a dozen microservices. Today there are twice that in the pipeline being actively worked on." Resilience has also been improved with Kubernetes: "If a node goes down, it's rescheduled immediately and there's no performance impact."

+ +{{< case-studies/quote author="Kevin Lynch, Staff Engineer on the Site Reliability team at Squarespace" >}} + +
+"Once you prove that Kubernetes solves one problem, everyone immediately starts solving other problems without you even having to evangelize it." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Since it was started in a dorm room in 2003, Squarespace has made it simple for millions of people to create their own websites. +{{< /case-studies/lead >}} + +

Behind the scenes, though, the company's monolithic Java application was making things not so simple for its developers to keep improving the platform. So in 2014, the company decided to "go down the microservices path," says Kevin Lynch, staff engineer on Squarespace's Site Reliability team. "But we were always deploying our applications in vCenter VMware VMs [in our own data centers]. Microservices solved a problem on the development side, but it pushed that problem to the Infrastructure team. The infrastructure deployment process on our 5,000 VM hosts was slowing everyone down."

+ +

After experimenting with another container orchestration platform and "breaking it in very painful ways," Lynch says, the team began experimenting with Kubernetes in mid-2016 and found that it "answered all the questions that we had." Deploying it in the data center rather than the public cloud was their biggest challenge, and at the time, not a lot of other companies were doing that. "We had to figure out how to deploy this in our infrastructure for ourselves, and we had to integrate it with our other applications," says Lynch.

+ +

At the same time, Squarespace's Network Engineering team was modernizing its networking stack, switching from a traditional layer-two network to a layer-three spine-and-leaf network. "It mapped beautifully with what we wanted to do with Kubernetes," says Lynch. "It gives us the ability to have our servers communicate directly with the top-of-rack switches. We use Calico for CNI networking for Kubernetes, so we can announce all these individual Kubernetes pod IP addresses and have them integrate seamlessly with our other services that are still provisioned in the VMs."

+ +{{< case-studies/quote image="/images/case-studies/squarespace/banner3.jpg" >}} +After experimenting with another container orchestration platform and "breaking it in very painful ways," Lynch says, the team began experimenting with Kubernetes in mid-2016 and found that it "answered all the questions that we had." +{{< /case-studies/quote >}} + +

Within a couple months, they had a stable cluster for their internal use, and began rolling out Kubernetes for production. They also added Zipkin and CNCF projects Prometheus and fluentd to their cloud native stack. "We switched to Kubernetes, a new world, and we revamped all our other tooling as well," says Lynch. "It allowed us to streamline our process, so we can now easily create an entire microservice project from templates, generate the code and deployment pipeline for that, generate the Dockerfile, and then immediately just ship a workable, deployable project to Kubernetes." Deployments across Dev/QA/Stage/Prod were also "simplified drastically," Lynch adds. "Now there is little configuration variation."

+ +

And the whole process takes only five minutes, an almost 85% reduction in time compared to their VM deployment. "From end to end that probably took half an hour, and that's not accounting for the fact that an infrastructure engineer would be responsible for doing that, so there's some business delay in there as well."

+ +
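For a sense of what a five-minute, end-to-end path can look like in generic terms (a sketch under assumptions — Squarespace's actual templates and pipeline are internal, and the image and manifest names here are hypothetical):

```bash
# Generic containerized-deploy sketch, not Squarespace's internal tooling.
docker build -t registry.example.com/new-service:v1 .
docker push registry.example.com/new-service:v1

# Apply the generated manifests and wait for the rollout to complete
kubectl apply -f k8s/
kubectl rollout status deployment/new-service
```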

With faster deployments, "productivity time is the big cost saver," says Lynch. "We had a team that was implementing a new file storage service, and they just started integrating that with our storage back end without our involvement"—which wouldn't have been possible before Kubernetes. He adds: "When we started the Kubernetes project, we had probably a dozen microservices. Today there are twice that in the pipeline being actively worked on."

+ +{{< case-studies/quote image="/images/case-studies/squarespace/banner4.jpg" >}} +"We switched to Kubernetes, a new world....It allowed us to streamline our process, so we can now easily create an entire microservice project from templates," Lynch says. And the whole process takes only five minutes, an almost 85% reduction in time compared to their VM deployment. +{{< /case-studies/quote >}} + +

There's also been a positive impact on the application's resilience. "When we're deploying VMs, we have to build tooling to ensure that a service is spread across racks appropriately and can withstand failure," he says. "Kubernetes just does it. If a node goes down, it's rescheduled immediately and there's no performance impact."

+ +
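That rescheduling behavior can be observed on any cluster; a generic sketch, with `<node-name>` as a placeholder:

```bash
# Take a node out of service and watch Kubernetes reschedule its pods
kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data
kubectl get pods -o wide --watch   # pods reappear on the remaining nodes

# Return the node to service afterwards
kubectl uncordon <node-name>
```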

Another big benefit is autoscaling. "It wasn't really possible with the way we've been using VMware," says Lynch, "but now we can just add the appropriate autoscaling features via Kubernetes directly, and boom, it's scaling up as demand increases. And it worked out of the box."

+ +
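In its simplest form, that out-of-the-box autoscaling is a single HorizontalPodAutoscaler. A minimal sketch, assuming a hypothetical Deployment named `web` that declares CPU resource requests:

```bash
# Keep average CPU near 70%, scaling between 2 and 10 replicas
kubectl autoscale deployment web --cpu-percent=70 --min=2 --max=10

# Watch the replica count track demand
kubectl get hpa web --watch
```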

For others starting out with Kubernetes, Lynch says his best advice is to "fail fast": "Once you've planned things out, just execute. Kubernetes has been really great for trying something out quickly and seeing if it works or not."

+ +{{< case-studies/quote >}} +"When we're deploying VMs, we have to build tooling to ensure that a service is spread across racks appropriately and can withstand failure," he says. "Kubernetes just does it. If a node goes down, it's rescheduled immediately and there's no performance impact." +{{< /case-studies/quote >}} + +

Lynch and his team are planning to open source some of the tools they've developed to extend Kubernetes and use it as an API itself. The first tool injects dependent applications as containers in a pod. "When you ship an application, usually it comes along with a whole bunch of dependent applications that need to be shipped with that, for example, fluentd for logging," he explains. With this tool, the developer doesn't need to worry about the configurations.

+ +

Going forward, all new services at Squarespace are going into Kubernetes, and the end goal is to convert everything it can. About a quarter of existing services have been migrated. "Our monolithic application is going to be the last one, just because it's so big and complex," says Lynch. "But now I'm seeing other services get moved over, like the file storage service. Someone just did it and it worked—painlessly. So I believe if we tackle it, it's probably going to be a lot easier than we fear. Maybe I should just take my own advice and fail fast!"

\ No newline at end of file diff --git a/content/bn/case-studies/squarespace/squarespace_featured_logo.png b/content/bn/case-studies/squarespace/squarespace_featured_logo.png new file mode 100644 index 0000000000000..551b6da32119d Binary files /dev/null and b/content/bn/case-studies/squarespace/squarespace_featured_logo.png differ diff --git a/content/bn/case-studies/squarespace/squarespace_featured_logo.svg b/content/bn/case-studies/squarespace/squarespace_featured_logo.svg new file mode 100644 index 0000000000000..a69d7ea5c8ebf --- /dev/null +++ b/content/bn/case-studies/squarespace/squarespace_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/thredup/index.html b/content/bn/case-studies/thredup/index.html new file mode 100644 index 0000000000000..c077184bc7963 --- /dev/null +++ b/content/bn/case-studies/thredup/index.html @@ -0,0 +1,77 @@ +--- +title: ThredUp Case Study +linkTitle: thredup +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/thredup/banner1.jpg +heading_title_logo: /images/thredup_logo.png +case_study_details: + - Company: ThredUp + - Location: San Francisco, CA + - Industry: eCommerce +--- + +

Challenge

+ +

The largest online consignment store for women's and children's clothes, ThredUP launched in 2009 with a monolithic application running on Amazon Web Services. Though the company began breaking up the monolith into microservices a few years ago, the infrastructure team was still dealing with handcrafted servers, which hampered productivity. "We've configured them just to get them out as fast as we could, but there was no standardization, and as we kept growing, that became a bigger and bigger chore to manage," says Cofounder/CTO Chris Homer. The infrastructure, they realized, needed to be modernized to enable the velocity the company needed. "It's really important to a company like us who's disrupting the retail industry to make sure that as we're building software and getting it out in front of our users, we can do it on a fast cycle and learn a ton as we experiment," adds Homer. "We wanted to make sure that our engineers could embrace the DevOps mindset as they built software. It was really important to us that they could own the life cycle from end to end, from conception at design, through shipping it and running it in production, from marketing to ecommerce, the user experience and our internal distribution center operations."

+ +

Solution

+ +

In early 2017, the company adopted Kubernetes for container orchestration, and in the course of a year, the entire infrastructure was moved to Kubernetes.

+ +

Impact

+ +

Before, "even considering that we already have all the infrastructure in the cloud, databases and services, and all these good things," says Infrastructure Engineer Oleksandr Snagovskyi, setting up a new service meant waiting 2-4 weeks just to get the environment. With Kubernetes, new application roll-out time has decreased from several days or weeks to minutes or hours. Now, says Infrastructure Engineer Oleksii Asiutin, "our developers can experiment with existing applications and create new services, and do it all blazingly fast." In fact, deployment time has decreased about 50% on average for key services. "Lead time" for all applications is under 20 minutes, enabling engineers to deploy multiple times a day. Plus, 3200+ ansible scripts have been deprecated in favor of helm charts. And impressively, hardware cost has decreased 56% while the number of services ThredUP runs has doubled.

+ +{{< case-studies/quote author="CHRIS HOMER, COFOUNDER/CTO, THREDUP" >}} + +
+"Moving towards cloud native technologies like Kubernetes really unlocks our ability to experiment quickly and learn from customers along the way." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +The largest online consignment store for women's and children's clothes, ThredUP is focused on getting consumers to think second-hand first. "We're disrupting the retail industry, and it's really important to us to make sure that as we're building software and getting it out in front of our users, we can do it on a fast cycle and learn a ton as we experiment," says Cofounder/CTO Chris Homer. +{{< /case-studies/lead >}} + +

But over the past few years, ThredUP, which was launched in 2009 with a monolithic application running on Amazon Web Services, was feeling growing pains as its user base passed the 20 million mark. Though the company had begun breaking up the monolith into microservices, the infrastructure team was still dealing with handcrafted servers, which hampered productivity. "We've configured them just to get them out as fast as we could, but there was no standardization, and as we kept growing, that became a bigger and bigger chore to manage," says Homer. The infrastructure, Homer realized, needed to be modernized to enable the velocity—and the culture—the company wanted.

+ +

"We wanted to make sure that our engineers could embrace the DevOps mindset as they built software," Homer says. "It was really important to us that they could own the life cycle from end to end, from conception at design, through shipping it and running it in production, from marketing to ecommerce, the user experience and our internal distribution center operations."

+ +{{< case-studies/quote + image="/images/case-studies/thredup/banner3.jpg" + author="CHRIS HOMER, COFOUNDER/CTO, THREDUP" +>}} +"Kubernetes enabled auto scaling in a seamless and easily manageable way on days like Black Friday. We no longer have to sit there adding instances, monitoring the traffic, doing a lot of manual work." +{{< /case-studies/quote >}} + +

In early 2017, Homer found the solution with Kubernetes container orchestration. In the course of a year, the company migrated its entire infrastructure to Kubernetes, starting with its website applications and concluding with its operations backend. Teams are now also using Fluentd and Helm. "Initially there were skeptics about the value that this move to cloud native technologies would bring, but as we went through the process, people very quickly started to realize the benefit of having seamless upgrades and easy rollbacks without having to worry about what was happening," says Homer. "It unlocks the developers' confidence in being able to deploy quickly, learn, and if you make a mistake, you can roll it back without any issue."

+ +

According to the infrastructure team, the key improvement was the consistent experience Kubernetes enabled for developers. "It lets developers work in the same environment that their application will be running in production," says Infrastructure Engineer Oleksandr Snagovskyi. Plus, "It became easier to test, easier to refine, and easier to deploy, because everything's done automatically," says Infrastructure Engineer Oleksii Asiutin. "One of the main goals of our team is to make developers' lives more comfortable, and we are achieving this with Kubernetes. They can experiment with existing applications and create new services, and do it all blazingly fast."

+ +{{< case-studies/quote + image="/images/case-studies/thredup/banner4.jpg" + author="OLEKSII ASIUTIN, INFRASTRUCTURE ENGINEER, THREDUP" +>}} +"One of the main goals of our team is to make developers' lives more comfortable, and we are achieving this with Kubernetes. They can experiment with existing applications and create new services, and do it all blazingly fast." +{{< /case-studies/quote >}} + +

Before, "even considering that we already have all the infrastructure in the cloud, databases and services, and all these good things," says Snagovskyi, setting up a new service meant waiting 2-4 weeks just to get the environment. With Kubernetes, because of simple configuration and minimal dependency on the infrastructure team, the roll-out time for new applications has decreased from several days or weeks to minutes or hours.

+ +

In fact, deployment time has decreased about 50% on average for key services. "Fast deployment and parallel test execution in Kubernetes keep a 'lead time' for all applications under 20 minutes," allowing engineers to do multiple releases a day, says Director of Infrastructure Roman Chepurnyi. The infrastructure team's jobs, he adds, have become less burdensome, too: "We can execute seamless upgrades frequently and keep cluster performance and security up-to-date because OS-level hardening and upgrades of a Kubernetes cluster is a non-blocking activity for production operations and does not involve coordination with multiple engineering teams."

+ +

More than 3,200 Ansible scripts have been deprecated in favor of Helm charts. And impressively, hardware cost has decreased 56% while the number of services ThredUP runs has doubled.
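The shift is from imperative scripts to declarative, versioned releases. A minimal Helm sketch with hypothetical chart path, release name, and values:

```bash
# One declarative release replaces a pile of one-off scripts
helm upgrade --install myapp ./charts/myapp \
  --namespace myapp --create-namespace \
  --set image.tag=v1.2.3

# Every release is tracked, so rolling back is a single command
helm rollback myapp
```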

+ +{{< case-studies/quote author="CHRIS HOMER, COFOUNDER/CTO, THREDUP">}} +"Our future's all about automation, and behind that, cloud native technologies are going to unlock our ability to embrace that and go full force towards the future." +{{< /case-studies/quote >}} + +

Perhaps the impact is most evident on the busiest days in retail. "Kubernetes enabled auto scaling in a seamless and easily manageable way on days like Black Friday," says Homer. "We no longer have to sit there adding instances, monitoring the traffic, doing a lot of manual work. That's handled for us, and instead we can actually have some turkey, drink some wine and enjoy our families."

+ +

For ThredUP, Kubernetes fits perfectly with the company's vision for how it's changing retail. Some of what ThredUP does is still very manual: "As our customers send bags of items to our distribution centers, they're photographed, inspected, tagged, and put online today," says Homer.

+ +

But in every other aspect, "we use different forms of technology to drive everything we do," Homer says. "We have machine learning algorithms to help predict the likelihood of sale for items, which drives our pricing algorithm. We have personalization algorithms that look at the images and try to determine style and match users' preferences across our systems."

+ +

Count Kubernetes as one of those drivers. "Our future's all about automation," says Homer, "and behind that, cloud native technologies are going to unlock our ability to embrace that and go full force towards the future."

\ No newline at end of file diff --git a/content/bn/case-studies/thredup/thredup_featured_logo.png b/content/bn/case-studies/thredup/thredup_featured_logo.png new file mode 100644 index 0000000000000..3961f761b1f4c Binary files /dev/null and b/content/bn/case-studies/thredup/thredup_featured_logo.png differ diff --git a/content/bn/case-studies/thredup/thredup_featured_logo.svg b/content/bn/case-studies/thredup/thredup_featured_logo.svg new file mode 100644 index 0000000000000..987e1a55c1203 --- /dev/null +++ b/content/bn/case-studies/thredup/thredup_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/vsco/index.html b/content/bn/case-studies/vsco/index.html new file mode 100644 index 0000000000000..beedf2b728ec4 --- /dev/null +++ b/content/bn/case-studies/vsco/index.html @@ -0,0 +1,79 @@ +--- +title: vsco Case Study +linkTitle: vsco +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/vsco/banner1.jpg +heading_title_logo: /images/vsco_logo.png +subheading: > + VSCO: How a Mobile App Saved 70% on Its EC2 Bill with Cloud Native +case_study_details: + - Company: VSCO + - Location: Oakland, CA + - Industry: Photo Mobile App +--- + +

Challenge

+ +

After moving from Rackspace to AWS in 2015, VSCO began building Node.js and Go microservices in addition to running its PHP monolith. The team containerized the microservices using Docker, but "they were all in separate groups of EC2 instances that were dedicated per service," says Melinda Lu, Engineering Manager for the Machine Learning Team. Adds Naveen Gattu, Senior Software Engineer on the Community Team: "That yielded a lot of wasted resources. We started looking for a way to consolidate and be more efficient in the AWS EC2 instances."

+ +

Solution

+ +

The team began exploring the idea of a scheduling system, and looked at several solutions including Mesos and Swarm before deciding to go with Kubernetes. VSCO also uses gRPC and Envoy in their cloud native stack.

+ +

Impact

+ +

Before, deployments required "a lot of manual tweaking, in-house scripting that we wrote, and because of our disparate EC2 instances, Operations had to babysit the whole thing from start to finish," says Senior Software Engineer Brendan Ryan. "We didn't really have a story around testing in a methodical way, and using reusable containers or builds in a standardized way." There's a faster onboarding process now. Before, the time to first deploy was two days' hands-on setup time; now it's two hours. By moving to continuous integration, containerization, and Kubernetes, velocity was increased dramatically. The time from code-complete to deployment in production on real infrastructure went from one to two weeks to two to four hours for a typical service. Adds Gattu: "In man hours, that's one person versus a developer and a DevOps individual at the same time." With an 80% decrease in time for a single deployment to happen in production, the number of deployments has increased as well, from 1200/year to 3200/year. There have been real dollar savings too: With Kubernetes, VSCO is running at 2x to 20x greater EC2 efficiency, depending on the service, adding up to about 70% overall savings on the company's EC2 bill. Ryan points to the company's ability to go from managing one large monolithic application to 50+ microservices with "the same size developer team, more or less. And we've only been able to do that because we have increased trust in our tooling and a lot more flexibility, so we don't need to employ a DevOps engineer to tune every service." With Kubernetes, gRPC, and Envoy in place, VSCO has seen an 88% reduction in total minutes of outage time, mainly due to the elimination of JSON-schema errors and service-specific infrastructure provisioning errors, and an increased speed in fixing outages.

+ +{{< case-studies/quote author="MELINDA LU, ENGINEERING MANAGER FOR VSCO'S MACHINE LEARNING TEAM" >}} +"I've been really impressed seeing how our engineers have come up with creative solutions to things by just combining a lot of Kubernetes primitives. Exposing Kubernetes constructs as a service to our engineers as opposed to exposing higher order constructs has worked well for us. It lets you get familiar with the technology and do more interesting things with it." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +A photography app for mobile, VSCO was born in the cloud in 2011. In the beginning, "we were using Rackspace and had one PHP monolith application talking to MySQL database, with FTP deployments, no containerization, no orchestration," says Software Engineer Brendan Ryan, "which was sufficient at the time." +{{< /case-studies/lead >}} + +

After VSCO moved to AWS in 2015 and its user base passed the 30 million mark, the team quickly realized that set-up wouldn't work anymore. Developers had started building some Node and Go microservices, which the team tried containerizing with Docker. But "they were all in separate groups of EC2 instances that were dedicated per service," says Melinda Lu, Engineering Manager for the Machine Learning Team. Adds Naveen Gattu, Senior Software Engineer on the Community Team: "That yielded a lot of wasted resources. We started looking for a way to consolidate and be more efficient in the EC2 instances."

+ +

With a checklist that included ease of use and implementation, level of support, and whether it was open source, the team evaluated a few scheduling solutions, including Mesos and Swarm, before deciding to go with Kubernetes. "Kubernetes seemed to have the strongest open source community around it," says Lu. Plus, "We had started to standardize on a lot of the Google stack, with Go as a language, and gRPC for almost all communication between our own services inside the data center. So it seemed pretty natural for us to choose Kubernetes."

+ +{{< case-studies/quote + image="/images/case-studies/vsco/banner2.jpg" + author="MELINDA LU, ENGINEERING MANAGER FOR VSCO'S MACHINE LEARNING TEAM" +>}} +"Kubernetes seemed to have the strongest open source community around it, plus, we had started to standardize on a lot of the Google stack, with Go as a language, and gRPC for almost all communication between our own services inside the data center. So it seemed pretty natural for us to choose Kubernetes." +{{< /case-studies/quote >}} + +

At the time, there were few managed Kubernetes offerings and less tooling available in the ecosystem, so the team stood up its own cluster and built some custom components for its specific deployment needs, such as an automatic ingress controller and policy constructs for canary deploys. "We had already begun breaking up the monolith, so we moved things one by one, starting with pretty small, low-risk services," says Lu. "Every single new service was deployed there." The first service was migrated at the end of 2016, and after one year, 80% of the entire stack was on Kubernetes, including the rest of the monolith.

+ +

The impact has been great. Deployments used to require "a lot of manual tweaking, in-house scripting that we wrote, and because of our disparate EC2 instances, Operations had to babysit the whole thing from start to finish," says Ryan. "We didn't really have a story around testing in a methodical way, and using reusable containers or builds in a standardized way." There's a faster onboarding process now. Before, the time to first deploy was two days' hands-on setup time; now it's two hours.

+ +

By moving to continuous integration, containerization, and Kubernetes, velocity was increased dramatically. The time from code-complete to deployment in production on real infrastructure went from one to two weeks to two to four hours for a typical service. Plus, says Gattu, "In man hours, that's one person versus a developer and a DevOps individual at the same time." With an 80% decrease in time for a single deployment to happen in production, the number of deployments has increased as well, from 1200/year to 3200/year.

+ +{{< case-studies/quote + image="/images/case-studies/vsco/banner4.jpg" + author="MELINDA LU, ENGINEERING MANAGER FOR VSCO'S MACHINE LEARNING TEAM" +>}} +"I've been really impressed seeing how our engineers have come up with really creative solutions to things by just combining a lot of Kubernetes primitives, exposing Kubernetes constructs as a service to our engineers as opposed to exposing higher order constructs has worked well for us. It lets you get familiar with the technology and do more interesting things with it." +{{< /case-studies/quote >}} + +

There have been real dollar savings too: With Kubernetes, VSCO is running at 2x to 20x greater EC2 efficiency, depending on the service, adding up to about 70% overall savings on the company's EC2 bill.

+ +

Ryan points to the company's ability to go from managing one large monolithic application to 50+ microservices with "the same size developer team, more or less. And we've only been able to do that because we have increased trust in our tooling and a lot more flexibility when there are stress points in our system. You can increase CPU memory requirements of a service without having to bring up and tear down instances, and read through AWS pages just to be familiar with a lot of jargon, which isn't really tenable for a company at our scale."

+ +

Envoy and gRPC have also had a positive impact at VSCO. "We get many benefits from gRPC out of the box: type safety across multiple languages, ease of defining services with the gRPC IDL, built-in architecture like interceptors, and performance improvements over HTTP/1.1 and JSON," says Lu.

+ +

VSCO was one of the first users of Envoy, getting it in production five days after it was open sourced. "We wanted to serve gRPC and HTTP/2 directly to mobile clients through our edge load balancers, and Envoy was our only reasonable solution," says Lu. "The ability to send consistent and detailed stats by default across all services has made observability and standardization of dashboards much easier." The metrics that come built in with Envoy have also "greatly helped with debugging," says DevOps Engineer Ryan Nguyen.
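Those default stats are exposed through Envoy's admin interface; a quick sketch (the admin port is deployment-specific — 9901 is only a common convention, not necessarily VSCO's configuration):

```bash
# Pull per-upstream request counters from an Envoy sidecar's admin endpoint
curl -s http://localhost:9901/stats | grep upstream_rq

# The same counters in Prometheus exposition format
curl -s http://localhost:9901/stats/prometheus | head -n 20
```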

+ +{{< case-studies/quote author="NAVEEN GATTU, SENIOR SOFTWARE ENGINEER ON VSCO'S COMMUNITY TEAM" >}} +"Because there's now an organization that supports Kubernetes, does that build confidence? The answer is a resounding yes." +{{< /case-studies/quote >}} + +

With Kubernetes, gRPC, and Envoy in place, VSCO has seen an 88% reduction in total minutes of outage time, mainly due to the elimination of JSON-schema errors and service-specific infrastructure provisioning errors, and an increased speed in fixing outages.

+ +

Given its success using CNCF projects, VSCO is starting to experiment with others, including CNI and Prometheus. "To have a large organization backing these technologies, we have a lot more confidence trying this software and deploying to production," says Nguyen.

+ +

The team has made contributions to gRPC and Envoy, and is hoping to be even more active in the CNCF community. "I've been really impressed seeing how our engineers have come up with really creative solutions to things by just combining a lot of Kubernetes primitives," says Lu. "Exposing Kubernetes constructs as a service to our engineers as opposed to exposing higher order constructs has worked well for us. It lets you get familiar with the technology and do more interesting things with it."

\ No newline at end of file diff --git a/content/bn/case-studies/vsco/vsco_featured_logo.png b/content/bn/case-studies/vsco/vsco_featured_logo.png new file mode 100644 index 0000000000000..e01e2e4e8f0ed Binary files /dev/null and b/content/bn/case-studies/vsco/vsco_featured_logo.png differ diff --git a/content/bn/case-studies/vsco/vsco_featured_logo.svg b/content/bn/case-studies/vsco/vsco_featured_logo.svg new file mode 100644 index 0000000000000..e65dad8c52d1c --- /dev/null +++ b/content/bn/case-studies/vsco/vsco_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/wikimedia/index.html b/content/bn/case-studies/wikimedia/index.html new file mode 100644 index 0000000000000..0879a3516276b --- /dev/null +++ b/content/bn/case-studies/wikimedia/index.html @@ -0,0 +1,66 @@ +--- +title: Wikimedia Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_title_text: Wikimedia +use_gradient_overlay: true +subheading: > + Using Kubernetes to Build Tools to Improve the World's Wikis +case_study_details: + - Company: Wikimedia + - Location: San Francisco, CA +--- + +

The non-profit Wikimedia Foundation operates some of the largest collaboratively edited reference projects in the world, including Wikipedia. To help users maintain and use wikis, it runs Wikimedia Tool Labs, a hosting environment for community developers working on tools and bots to help editors and other volunteers do their work, including reducing vandalism. The community around Wikimedia Tool Labs began forming nearly 10 years ago.

+ +{{< case-studies/quote author="Yuvi Panda, operations engineer at Wikimedia Foundation and Wikimedia Tool Labs">}} +Wikimedia +
+
+"Wikimedia Tool Labs is vital for making sure wikis all around the world work as well as they possibly can. Because it's grown organically for almost 10 years, it has become an extremely challenging environment and difficult to maintain. It's like a big ball of mud — you really can't see through it. With Kubernetes, we're simplifying the environment and making it easier for developers to build the tools that make wikis run better." +{{< /case-studies/quote >}} + +

Challenges

+ +
    +
  • Simplify a complex, difficult-to-manage infrastructure
  • Allow developers to continue writing tools and bots using existing techniques
+ +

Why Kubernetes

+ +
    +
  • Wikimedia Tool Labs chose Kubernetes because it can mimic existing workflows, while reducing complexity
+ +

Approach

+ +
    +
  • Migrate old systems and a complex infrastructure to Kubernetes
+ +

Results

+ +
    +
  • 20 percent of web tools that account for more than 60 percent of web traffic now run on Kubernetes
  • A 25-node cluster that keeps up with each new Kubernetes release
  • Thousands of lines of old code have been deleted, thanks to Kubernetes
+ +

Using Kubernetes to provide tools for maintaining wikis

+ +

Wikimedia Tool Labs is run by a staff of four-and-a-half paid employees and two volunteers. The infrastructure didn't make it easy or intuitive for developers to build bots and other tools to make wikis work more easily. Yuvi says, "It's incredibly chaotic. We have lots of Perl and Bash duct tape on top of it. Everything is super fragile."

+ +

To solve the problem, Wikimedia Tool Labs migrated parts of its infrastructure to Kubernetes, in preparation for eventually moving its entire system. Yuvi says Kubernetes greatly simplifies maintenance. The goal is to allow developers creating bots and other tools to use whatever development methods they want, but make it easier for Wikimedia Tool Labs to maintain the required infrastructure for hosting and sharing them.

+ +

"With Kubernetes, I've been able to remove a lot of our custom-made code, which makes everything easier to maintain. Our users' code also runs in a more stable way than previously," says Yuvi.

+ +

Simplifying infrastructure and keeping wikis running better

+ +

Wikimedia Tool Labs has seen great success with the initial Kubernetes deployment. Old code is being simplified and eliminated, contributing developers don't have to change the way they write their tools and bots, and those tools and bots run in a more stable fashion than they have in the past. The paid staff and volunteers are able to better keep up with fixing issues.

+ +

In the future, with a more complete migration to Kubernetes, Wikimedia Tool Labs expects to make it even easier to host and maintain the bots and tools that help run wikis across the world. The tool labs already host approximately 1,300 tools and bots from 800 volunteers, with many more being submitted every day. Twenty percent of the tool labs' web tools that account for more than 60 percent of web traffic now run on Kubernetes. The tool labs has a 25-node cluster that keeps up with each new Kubernetes release. Many existing web tools are migrating to Kubernetes.

+ +

"Our goal is to make sure that people all over the world can share knowledge as easily as possible. Kubernetes helps with that, by making it easier for wikis everywhere to have the tools they need to thrive," says Yuvi.

\ No newline at end of file diff --git a/content/bn/case-studies/wikimedia/wikimedia_featured.png b/content/bn/case-studies/wikimedia/wikimedia_featured.png new file mode 100644 index 0000000000000..7b1f89ac98490 Binary files /dev/null and b/content/bn/case-studies/wikimedia/wikimedia_featured.png differ diff --git a/content/bn/case-studies/wikimedia/wikimedia_featured.svg b/content/bn/case-studies/wikimedia/wikimedia_featured.svg new file mode 100644 index 0000000000000..5fa786aaa52ba --- /dev/null +++ b/content/bn/case-studies/wikimedia/wikimedia_featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/wikimedia/wikimedia_logo.png b/content/bn/case-studies/wikimedia/wikimedia_logo.png new file mode 100644 index 0000000000000..3ad5b63034204 Binary files /dev/null and b/content/bn/case-studies/wikimedia/wikimedia_logo.png differ diff --git a/content/bn/case-studies/wink/index.html b/content/bn/case-studies/wink/index.html new file mode 100644 index 0000000000000..bcb39e1cd8d3d --- /dev/null +++ b/content/bn/case-studies/wink/index.html @@ -0,0 +1,87 @@ +--- +title: Wink Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/wink/banner1.jpg +heading_title_logo: /images/wink_logo.png +subheading: > + Cloud-Native Infrastructure Keeps Your Smart Home Connected +case_study_details: + - Company: Wink + - Location: New York, N.Y. + - Industry: Internet of Things Platform +--- + +

Challenge

+ +

Building a low-latency, highly reliable infrastructure to serve communications between millions of connected smart-home devices and the company's consumer hubs and mobile app, with an emphasis on horizontal scalability, the ability to encrypt everything quickly, and connections that could be easily brought back up if anything went wrong.

+ +

Solution

+ +

Across-the-board use of a Kubernetes-Docker-CoreOS Container Linux stack.

+ +

Impact

+ +

"Two of the biggest American retailers [Home Depot and Walmart] are carrying and promoting the brand and the hardware," Wink Head of Engineering Kit Klein says proudly – though he adds that "it really comes with a lot of pressure. It's not a retail situation where you have a lot of tech enthusiasts. These are everyday people who want something that works and have no tolerance for technical excuses." And that's further testament to how much faith Klein has in the infrastructure that the Wink team has built. With 80 percent of Wink's workload running on a unified stack of Kubernetes-Docker-CoreOS, the company has put itself in a position to continually innovate and improve its products and services. Committing to this technology, says Klein, "makes building on top of the infrastructure relatively easy."

+ +{{< case-studies/quote author="KIT KLEIN, HEAD OF ENGINEERING, WINK" >}} +"It's not proprietary, it's totally open, it's really portable. You can run all the workloads across different cloud providers. You can easily run a hybrid AWS or even bring in your own data center. That's the benefit of having everything unified on one open source Kubernetes-Docker-CoreOS Container Linux stack. There are massive security benefits if you only have one Linux distro/machine image to validate. The benefits are enormous because you save money, and you save time." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +How many people does it take to turn on a light bulb? +{{< /case-studies/lead >}} + +

Kit Klein whips out his phone to demonstrate. With a few swipes, the head of engineering at Wink pulls up the smart-home app created by the New York City-based company and taps the light button. "Honestly when you're holding the phone and you're hitting the light," he says, "by the time you feel the pressure of your finger on the screen, it's on. It takes as long as the signal to travel to your brain."

+ +

Sure, it takes just one finger and less than 200 milliseconds to turn on the light – or lock a door or change a thermostat. But what allows Wink to help consumers manage their connected smart-home products with such speed and ease is a sophisticated, cloud native infrastructure that Klein and his team built and continue to develop using a unified stack of CoreOS, the open-source operating system designed for clustered deployments, and Kubernetes, an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts, providing container-centric infrastructure. "When you have a big, complex network of interdependent microservices that need to be able to discover each other, and need to be horizontally scalable and tolerant to failure, that's what this is really optimized for," says Klein. "A lot of people end up relying on proprietary services [offered by some big cloud providers] to do some of this stuff, but what you get by adopting CoreOS/Kubernetes is portability, to not be locked in to anyone. You can really make your own fate."

+ +

Indeed, Wink did. The company's mission statement is to make the connected home accessible – that is, user-friendly for non-technical owners, affordable and perhaps most importantly, reliable. "If you can't trust that when you hit the switch, you know a light is going to go on, or if you're remote and you're checking on your house and that information isn't accurate, then the convenience of the system is lost," says Klein. "So that's where the infrastructure comes in."

+ +

Wink was incubated within Quirky, a company that developed crowd-sourced inventions. The Wink app was first introduced in 2013, and at the time, it controlled only a few consumer products such as the PivotPower Strip that Quirky produced in collaboration with GE. As smart-home products proliferated, Wink was launched in 2014 in Home Depot stores nationwide. Its first project: a hub that could integrate with smart products from about a dozen brands like Honeywell and Chamberlain. The biggest challenge would be to build the infrastructure to serve all those communications between the hub and the products, with a focus on maximizing reliability and minimizing latency.

+ +

"When we originally started out, we were moving very fast trying to get the first product to market, the minimum viable product," says Klein. "Lots of times you go down a path and end up having to backtrack and try different things. But in this particular case, we did a lot of the work up front, which led to us making a really sound decision to deploy it on CoreOS Container Linux. And that was very early in the life of it."

+ +{{< case-studies/quote image="/images/case-studies/wink/banner3.jpg">}} +"...what you get by adopting CoreOS/Kubernetes is portability, to not be locked in to anyone. You can really make your own fate." +{{< /case-studies/quote >}} + +

Concern number one: Wink's products need to connect to consumer devices in people's homes, behind a firewall. "You don't have an end point like a URL, and you don't even know what ports are open behind that firewall," Klein explains. "So you essentially need to have this thing wake up and talk to your system and then open real-time, bidirectional communication between the cloud and the device. And it's really, really important that it's persistent because you want to decrease as much as possible the overhead of sending a message – you never know when someone is going to turn on the lights."
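The case study describes this pattern only in prose. As a rough sketch of what a device-initiated, persistent connection can look like, here is a minimal Go program; the endpoint, device ID, and wire format are invented for illustration and are not Wink's actual protocol:

```go
package main

import (
	"bufio"
	"crypto/tls"
	"log"
	"time"
)

// cloudAddr is a placeholder endpoint, not Wink's real service.
// The hub always dials out, so no inbound firewall ports are needed.
const cloudAddr = "cloud.example.com:8883"

func main() {
	backoff := time.Second
	for {
		if err := serveSession(); err != nil {
			log.Printf("session ended: %v; reconnecting in %s", err, backoff)
		}
		time.Sleep(backoff)
		if backoff < 30*time.Second {
			backoff *= 2 // back off on failure, but never stop retrying
		}
	}
}

func serveSession() error {
	// Encrypt everything; the device authenticates the cloud via TLS.
	conn, err := tls.Dial("tcp", cloudAddr, &tls.Config{})
	if err != nil {
		return err
	}
	defer conn.Close()

	// "Wake up and talk to your system": identify this hub once, then hold
	// the socket open for real-time, bidirectional traffic.
	if _, err := conn.Write([]byte("HELLO hub-1234\n")); err != nil {
		return err
	}
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		cmd := scanner.Text() // e.g. "LIGHT kitchen ON" (hypothetical wire format)
		log.Printf("executing: %s", cmd)
		// ...drive the local Zigbee/Z-Wave radio here...
	}
	return scanner.Err()
}
```

The key design point is that the connection is opened from inside the home and kept alive with reconnect-and-backoff, which is what lets a command reach the device with minimal overhead whenever someone turns on the lights.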

+ +

With the earliest version of the Wink Hub, when you decided to turn your lights on or off, the request would be sent to the cloud and then executed. Subsequent updates to Wink's software enabled local control, cutting latency down to about 10 milliseconds for many devices. But with the need for cloud-enabled integrations of an ever-growing ecosystem of smart home products, low-latency internet connectivity is still a critical consideration.

+ +{{< case-studies/lead >}} +"You essentially need to have this thing wake up and talk to your system and then open real-time, bidirectional communication between the cloud and the device. And it's really, really important that it's persistent...you never know when someone is going to turn on the lights." +{{< /case-studies/lead >}} + +

In addition, Wink had other requirements: horizontal scalability, the ability to encrypt everything quickly, connections that could be easily brought back up if something went wrong. "Looking at this whole structure we started, we decided to make a secure socket-based service," says Klein. "We've always used, I would say, some sort of clustering technology to deploy our services and so the decision we came to was, this thing is going to be containerized, running on Docker."

+ +

In 2015, Docker wasn't yet widely used, but as Klein points out, "it was certainly understood by the people who were on the frontier of technology. We started looking at potential technologies that existed. One of the limiting factors was that we needed to deploy multi-port non-http/https services. It wasn't really appropriate for some of the early cluster technology. We liked the project a lot and we ended up using it on other stuff for a while, but initially it was too targeted toward http workloads."

+ +

Once Wink's backend engineering team decided on a containerized workload, they had to make decisions about the OS and the container orchestration platform. "Obviously you can't just start the containers and hope everything goes well," Klein says with a laugh. "You need to have a system that is helpful [in order] to manage where the workloads are being distributed out to. And when the container inevitably dies or something like that, to restart it, you have a load balancer. All sorts of housekeeping work is needed to have a robust infrastructure."

+ +{{< case-studies/quote image="/images/case-studies/wink/banner4.jpg" >}} +"Obviously you can't just start the containers and hope everything goes well," Klein says with a laugh. "You need to have a system that is helpful [in order] to manage where the workloads are being distributed out to. And when the container inevitably dies or something like that, to restart it, you have a load balancer. All sorts of housekeeping work is needed to have a robust infrastructure." +{{< /case-studies/quote >}} + +

Wink considered building directly on a general purpose Linux distro like Ubuntu (which would have required installing tools to run a containerized workload) and cluster management systems like Mesos (which was targeted toward enterprises with larger teams/workloads), but ultimately set their sights on CoreOS Container Linux. "A container-optimized Linux distribution system was exactly what we needed," he says. "We didn't have to futz around with trying to take something like a Linux distro and install everything. It's got a built-in container orchestration system, which is Fleet, and an easy-to-use API. It's not as feature-rich as some of the heavier solutions, but we realized that, at that moment, it was exactly what we needed."

+ +

Wink's hub (along with a revamped app) was introduced in July 2014 with a short-term deployment, and within the first month, they had moved the service to the containerized CoreOS deployment. Since then, they've moved almost every other piece of their infrastructure – from third-party cloud-to-cloud integrations to their customer service and payment portals – onto CoreOS Container Linux clusters.

+ +

Using this setup did require some customization. "Fleet is really nice as a basic container orchestration system, but it doesn't take care of routing, sharing configurations, secrets, et cetera, among instances of a service," Klein says. "All of those layers of functionality can be implemented, of course, but if you don't want to spend a lot of time writing unit files manually – which of course nobody does – you need to create a tool to automate some of that, which we did."

+ +

Wink quickly embraced the Kubernetes container cluster manager when it was launched in 2015 and integrated with CoreOS core technology, and as promised, it ended up providing the features Wink wanted and had planned to build. "If not for Kubernetes, we likely would have taken the logic and library we implemented for the automation tool that we created, and would have used it in a higher level abstraction and tool that could be used by non-DevOps engineers from the command line to create and manage clusters," Klein says. "But Kubernetes made that totally unnecessary – and is written and maintained by people with a lot more experience in cluster management than us, so all the better." Now, an estimated 80 percent of Wink's workload is run on Kubernetes on top of CoreOS Container Linux.

+ +{{< case-studies/quote >}} +"Stay close to the development. Understand why decisions are being made. If you understand the intent behind the project, from the technological intent to a certain philosophical intent, then it helps you understand how to build your system in harmony with those systems as opposed to trying to work against it." +{{< /case-studies/quote >}} + +

Wink's reasons for going all in are clear: "It's not proprietary, it's totally open, it's really portable," Klein says. "You can run all the workloads across different cloud providers. You can easily run a hybrid AWS or even bring in your own data center. That's the benefit of having everything unified on one Kubernetes-Docker-CoreOS Container Linux stack. There are massive security benefits if you only have one Linux distro to try to validate. The benefits are enormous because you save money, you save time."

+ +

Klein concedes that there are tradeoffs in every technology decision. "Cutting-edge technology is going to be scary for some people," he says. "In order to take advantage of this, you really have to keep up with the technology. You can't treat it like it's a black box. Stay close to the development. Understand why decisions are being made. If you understand the intent behind the project, from the technological intent to a certain philosophical intent, then it helps you understand how to build your system in harmony with those systems as opposed to trying to work against it."

+ +

Wink, which was acquired by Flex in 2015, now controls 2.3 million connected devices in households all over the country. What's next for the company? A new version of the hub, Wink Hub 2, hit shelves last November and is being offered for the first time at Walmart stores in addition to Home Depot. "Two of the biggest American retailers are carrying and promoting the brand and the hardware," Klein says proudly – though he adds that "it really comes with a lot of pressure. It's not a retail situation where you have a lot of tech enthusiasts. These are everyday people who want something that works and have no tolerance for technical excuses." And that's further testament to how much faith Klein has in the infrastructure that the Wink team has built.

+ +

Wink's engineering team has grown exponentially since its early days, and behind the scenes, Klein is most excited about the machine learning Wink is using. "We built [a system of] containerized small sections of the data pipeline that feed each other and can have multiple outputs," he says. "It's like data pipelines as microservices." Again, Klein points to having a unified stack running on CoreOS Container Linux and Kubernetes as the primary driver for the innovations to come. "You're not reinventing the wheel every time," he says. "You can just get down to work."

\ No newline at end of file diff --git a/content/bn/case-studies/wink/wink_featured.png b/content/bn/case-studies/wink/wink_featured.png new file mode 100644 index 0000000000000..3c01133bef701 Binary files /dev/null and b/content/bn/case-studies/wink/wink_featured.png differ diff --git a/content/bn/case-studies/wink/wink_featured.svg b/content/bn/case-studies/wink/wink_featured.svg new file mode 100644 index 0000000000000..8168ac2b435c2 --- /dev/null +++ b/content/bn/case-studies/wink/wink_featured.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/wink/wink_logo.png b/content/bn/case-studies/wink/wink_logo.png new file mode 100644 index 0000000000000..ef2ee30bf3a46 Binary files /dev/null and b/content/bn/case-studies/wink/wink_logo.png differ diff --git a/content/bn/case-studies/woorank/index.html b/content/bn/case-studies/woorank/index.html new file mode 100644 index 0000000000000..153e73e029774 --- /dev/null +++ b/content/bn/case-studies/woorank/index.html @@ -0,0 +1,79 @@ +--- +title: Woorank Case Study +linkTitle: woorank +case_study_styles: true +cid: caseStudies +featured: false + +new_case_study_styles: true +heading_background: /images/case-studies/woorank/banner1.jpg +heading_title_logo: /images/woorank_logo.png +subheading: > + Woorank: How Kubernetes Helped a Startup Manage 50 Microservices with 12 Engineers—At 30% Less Cost +case_study_details: + - Company: Woorank + - Location: Brussels, Belgium + - Industry: Digital marketing tool +--- + +

Challenge

+ +

Founded in 2011, Woorank embraced microservices and containerization early on, so its core product, a tool that helps digital marketers improve their websites' visibility on the internet, consists of 50 applications developed and maintained by a technical team of 12. For two years, the infrastructure ran smoothly on Mesos, but "there were still lots of our own libraries that we had to roll and applications that we had to bring in, so it was very cumbersome for us as a small team to keep those things alive and to update them," says CTO/Cofounder Nils De Moor. So he began looking for a new solution with more automation and self-healing built in, that would better suit the company's human resources.

+ +

Solution

+ +

De Moor decided to switch to Kubernetes running on AWS, which "allows us to just define applications, how they need to run, how scalable they need to be, and it takes pain away from the developers thinking about that," he says. "When things fail and errors pop up, the system tries to heal itself, and that's really, for us, the key reason to work with Kubernetes." The company now also uses Fluentd, Prometheus, and OpenTracing.

+ +

Impact

+ +

The company's number one concern was immediately erased: Maintaining Kubernetes takes just one person on staff, and it's not a full-time job. Infrastructure updates used to take two active working days; now it's just a matter of "a few hours of passively following the process," says De Moor. Implementing new tools—which once took weeks of planning, installing, and onboarding—now only takes a few days. "We were already pretty flexible in our costs and taking on traffic peaks and higher load in general," adds De Moor, "but with Kubernetes and the other CNCF tools we use, we have achieved about 30% in cost savings." Plus, the rate of deployments per day has more than doubled.

+ +{{< case-studies/quote author="NILS DE MOOR, CTO/COFOUNDER, WOORANK" >}} +"It was definitely important for us to have CNCF as an umbrella above everything. We've always been working with open source libraries and tools and technologies. It works very well for us, but sometimes things can drift, maintainers drop out, and projects go haywire. For us, it was indeed important to know that whatever project gets taken under this umbrella, it's taken very seriously. Our way of contributing back is also by joining this community. It's, for us, a way to show our appreciation for what's going on in this framework." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Woorank's core product is a tool that enables digital marketers to improve their websites' visibility on the internet. +{{< /case-studies/lead >}} + +

"We help them acquire lots of data and then present it to them in meaningful ways so they can work with it," says CTO/Cofounder Nils De Moor. In its seven years as a startup, the company followed a familiar technological path to build that product: starting with a monolithic application, breaking it down into microservices, and then embracing containerization. "That's where our modern infrastructure started out," says De Moor.

+ +

As new features have been added to the product, it has grown to consist of 50 applications under the hood. Though Docker had made things easier to deploy, and the team had been using Mesos as an orchestration framework on AWS since 2015, De Moor realized there was still too much overhead to managing the infrastructure, especially with a technical team of just 12.

+ +

"The pain point was that there were still lots of our own libraries that we had to roll and applications that we had to bring in, so it was very cumbersome for us as a small team to keep those things alive and to update them," says De Moor. "When things went wrong during deployment, someone manually had to come in and figure it out. It wasn't necessarily that the technology or anything was wrong with Mesos; it was just not really fitting our model of being a small company, not having the human resources to make sure it all works and can be updated."

+ +{{< case-studies/quote + image="/images/case-studies/woorank/banner3.jpg" + author="NILS DE MOOR, CTO/COFOUNDER, WOORANK" +>}} +"Cloud native technologies have brought to us a transparency on everything going on in our system, from the code to the server. It has brought huge cost savings and a better way of dealing with those costs and keeping them under control. And performance-wise, it has helped our team understand how we can make our code work better on the cloud native infrastructure." +{{< /case-studies/quote >}} + +

Around the time Woorank was grappling with these issues, Kubernetes was emerging as a technology. De Moor knew that he wanted a platform that would be more automated and self-healing, and when he began experimenting with Kubernetes, he found that it checked all those boxes. "Kubernetes allows us to just define applications, how they need to run, how scalable they need to be, and it takes pain away from the developers thinking about that," he says. "When things fail and errors pop up, the system tries to heal itself, and that's really, for us, the key reason to work with Kubernetes. It allowed us to set up certain testing frameworks to just be alerted when things go wrong, instead of having to look at whether everything went right. It's made people's lives much easier. It's quite a big mindset change."

+ +

Once one small Kubernetes cluster was up and running, the team began moving over a few applications at a time, gradually increasing the load over the course of several months. By early 2017, Woorank was 100% deployed on Kubernetes.

+ +

The company's number one concern was immediately erased: Maintaining Kubernetes is the responsibility of just one person on staff, and it's not his full-time job. Updating the old infrastructure "was always a pain," says De Moor: It used to take two active working days, "and it was always a bit scary when we did that." With Kubernetes, it's just a matter of "a few hours of passively following the process."

+ +{{< case-studies/quote + image="/images/case-studies/woorank/banner4.jpg" + author="NILS DE MOOR, CTO/COFOUNDER, WOORANK" +>}} +"When things fail and errors pop up, the system tries to heal itself, and that's really, for us, the key reason to work with Kubernetes. It allowed us to set up certain testing frameworks to just be alerted when things go wrong, instead of having to look at whether everything went right. It's made people's lives much easier. It's quite a big mindset change." +{{< /case-studies/quote >}} + +

Transparency on all levels, from the code to the servers, has also been a byproduct of the move to Kubernetes. "It's easier for the entire team to get a better understanding of the infrastructure, how it's working, what it looks like, what's going on," says De Moor. "It's not that thing that's running, and no one really knows how it works except this one person. Now it's really a team effort of everyone knowing, 'Okay, when something goes wrong, it's probably in this area or we need to check this.'"

+ +

To that end, Woorank has begun implementing other cloud native tools that help with visibility, such as Fluentd for logging, Prometheus for monitoring, and OpenTracing for distributed tracing. Implementing these new tools—which once took weeks of planning, installing, and onboarding—now only takes a few days. "With all the tools and projects under the CNCF umbrella, it's easier for us to test and play with technology than it used to be," says De Moor. "With Prometheus, we used it fairly early and couldn't get it fairly stable. A couple of months ago, the question reappeared, so we set it up in two days, and now everyone is using it."
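For a sense of why such tooling can now be adopted in days, exposing metrics from a service usually takes only a few lines. Here is a minimal Go sketch using the `prometheus/client_golang` library; the `crawls_total` metric and `/crawl` endpoint are invented for illustration, not taken from Woorank's code:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// crawlsTotal is an invented example metric, not one of Woorank's.
var crawlsTotal = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "crawls_total",
		Help: "Site crawls processed, by outcome.",
	},
	[]string{"outcome"},
)

func main() {
	prometheus.MustRegister(crawlsTotal)

	http.HandleFunc("/crawl", func(w http.ResponseWriter, r *http.Request) {
		// ...do the actual crawl work here...
		crawlsTotal.WithLabelValues("ok").Inc()
		w.WriteHeader(http.StatusNoContent)
	})

	// Prometheus scrapes this endpoint on its own schedule.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```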

+ +

Deployments, too, have been impacted: The rate has more than doubled, which De Moor partly attributes to the transparency of the new process. "With Kubernetes, you see that these three containers didn't start for this reason," he says. Plus, "now we bring deployment messages into Slack. If you see deployments rolling by every day, it does somehow indirectly enforce you, okay, I need to be part of this train, so I also need to deploy."

+ +{{< case-studies/quote author="NILS DE MOOR, CTO/COFOUNDER, WOORANK" >}} +"We can plan those things over a certain timeline, try to fit our resource usage to that, and then bring in spot instances, which will hopefully drive the costs down more." +{{< /case-studies/quote >}} + +

Perhaps the biggest impact, though, has been on the bottom line. "We were already pretty flexible in our costs and taking on traffic peaks and higher load in general, but with Kubernetes and the other CNCF tools we use, we have achieved about 30% in cost savings," says De Moor.

+ +

And there's room for even greater savings. Currently, most of Woorank's infrastructure is running on AWS on demand; the company pays a fixed price and makes some reservations for its planned amount of resources needed. De Moor is planning to experiment more with spot instances with certain resource-heavy workloads such as web crawls: "We can plan those things over a certain timeline, try to fit our resource usage to that, and then bring in spot instances, which will hopefully drive the costs down more."

+ +

Moving to Kubernetes has been so beneficial to Woorank that the company is doubling down on both cloud native technologies and the community. "It was definitely important for us to have CNCF as an umbrella above everything," says De Moor. "We've always been working with open source libraries and tools and technologies. It works very well for us, but sometimes things can drift, maintainers drop out, and projects go haywire. For us, it was indeed important to know that whatever project gets taken under this umbrella, it's taken very seriously. Our way of contributing back is also by joining this community. It's, for us, a way to show our appreciation for what's going on in this framework."

\ No newline at end of file diff --git a/content/bn/case-studies/woorank/woorank_featured_logo.png b/content/bn/case-studies/woorank/woorank_featured_logo.png new file mode 100644 index 0000000000000..f7d6ed300f186 Binary files /dev/null and b/content/bn/case-studies/woorank/woorank_featured_logo.png differ diff --git a/content/bn/case-studies/woorank/woorank_featured_logo.svg b/content/bn/case-studies/woorank/woorank_featured_logo.svg new file mode 100644 index 0000000000000..50b64e9a9c6e8 --- /dev/null +++ b/content/bn/case-studies/woorank/woorank_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/workiva/index.html b/content/bn/case-studies/workiva/index.html new file mode 100644 index 0000000000000..2ca399cdc9945 --- /dev/null +++ b/content/bn/case-studies/workiva/index.html @@ -0,0 +1,93 @@ +--- +title: Workiva Case Study +linkTitle: Workiva +case_study_styles: true +cid: caseStudies +draft: true +featured: true +weight: 20 +quote: > + With OpenTracing, my team was able to look at a trace and make optimization suggestions to another team without ever looking at their code. + +new_case_study_styles: true +heading_background: /images/case-studies/workiva/banner1.jpg +heading_title_logo: /images/workiva_logo.png +subheading: > + Using OpenTracing to Help Pinpoint the Bottlenecks +case_study_details: + - Company: Workiva + - Location: Ames, Iowa + - Industry: Enterprise Software +--- + +

Challenge

+ +

Workiva offers a cloud-based platform for managing and reporting business data. This SaaS product, Wdesk, is used by more than 70 percent of the Fortune 500 companies. As the company made the shift from a monolith to a more distributed, microservice-based system, "We had a number of people working on this, all on different teams, so we needed to identify what the issues were and where the bottlenecks were," says Senior Software Architect MacLeod Broad. With back-end code running on Google App Engine, Google Compute Engine, as well as Amazon Web Services, Workiva needed a tracing system that was agnostic of platform. While preparing one of the company's first products utilizing AWS, which involved a "sync and link" feature that linked data from spreadsheets built in the new application with documents created in the old application on Workiva's existing system, Broad's team found an ideal use case for tracing: There were circular dependencies, and optimizations often turned out to be micro-optimizations that didn't impact overall speed.

+ +

Solution

+ +

Broad's team introduced the platform-agnostic distributed tracing system OpenTracing to help them pinpoint the bottlenecks.

+ +

Impact

+ +

Now used throughout the company, OpenTracing produced immediate results. Software Engineer Michael Davis reports: "Tracing has given us immediate, actionable insight into how to improve our service. Through a combination of seeing where each call spends its time, as well as which calls are most often used, we were able to reduce our average response time by 95 percent (from 600ms to 30ms) in a single fix."

+ +{{< case-studies/quote author="MacLeod Broad, Senior Software Architect at Workiva" >}} +"With OpenTracing, my team was able to look at a trace and make optimization suggestions to another team without ever looking at their code." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +Last fall, MacLeod Broad's platform team at Workiva was prepping one of the company's first products utilizing Amazon Web Services when they ran into a roadblock. +{{< /case-studies/lead >}} + +

Early on, Workiva's backend had run mostly on Google App Engine. But things changed along the way as Workiva's SaaS offering, Wdesk, a cloud-based platform for managing and reporting business data, grew its customer base to more than 70 percent of the Fortune 500 companies. "As customer needs grew and the product offering expanded, we started to leverage a wider offering of services such as Amazon Web Services as well as other Google Cloud Platform services, creating a multi-vendor environment."

+ +

With this new product, there was a "sync and link" feature by which data "went through a whole host of services starting with the new spreadsheet system [Amazon Aurora] into what we called our linking system, and then pushed through http to our existing system, and then a number of calculations would go on, and the results would be transmitted back into the new system," says Broad. "We were trying to optimize that for speed. We thought we had made this great optimization and then it would turn out to be a micro-optimization, which didn't really affect the overall speed of things."

+ +

The challenges faced by Broad's team may sound familiar to other companies that have also made the shift from monoliths to more distributed, microservice-based systems. "We had a number of people working on this, all on different teams, so it was difficult to get our head around what the issues were and where the bottlenecks were," says Broad.

+ +

"Each service team was going through different iterations of their architecture and it was very hard to follow what was actually going on in each teams' system," he adds. "We had circular dependencies where we'd have three or four different service teams unsure of where the issues really were, requiring a lot of back and forth communication. So we wasted a lot of time saying, 'What part of this is slow? Which part of this is sometimes slow depending on the use case? Which part is degrading over time? Which part of this process is asynchronous so it doesn't really matter if it's long-running or not? What are we doing that's redundant, and which part of this is buggy?'"

+ +{{< case-studies/quote + image="/images/case-studies/workiva/banner3.jpg" + author="MACLEOD BROAD, SENIOR SOFTWARE ARCHITECT AT WORKIVA" +>}} +"A tracing system can at a glance explain an architecture, narrow down a performance bottleneck and zero in on it, and generally just help direct an investigation at a high level. Being able to do that at a glance is much faster than at a meeting or with three days of debugging, and it's a lot faster than never figuring out the problem and just moving on." +{{< /case-studies/quote >}} + +

Simply put, it was an ideal use case for tracing. "A tracing system can at a glance explain an architecture, narrow down a performance bottleneck and zero in on it, and generally just help direct an investigation at a high level," says Broad. "Being able to do that at a glance is much faster than at a meeting or with three days of debugging, and it's a lot faster than never figuring out the problem and just moving on."

+ +

With Workiva's back-end code running on Google Compute Engine as well as App Engine and AWS, Broad knew that he needed a tracing system that was platform agnostic. "We were looking at different tracing solutions," he says, "and we decided that because it seemed to be a very evolving market, we didn't want to get stuck with one vendor. So OpenTracing seemed like the cleanest way to avoid vendor lock-in on what backend we actually had to use."

+ +

Once they introduced OpenTracing into this first use case, Broad says, "The trace made it super obvious where the bottlenecks were." Even though everyone had assumed it was Workiva's existing code that was slowing things down, that wasn't exactly the case. "It looked like the existing code was slow only because it was reaching out to our next-generation services, and they were taking a very long time to service all those requests," says Broad. "On the waterfall graph you can see the exact same work being done on every request when it was calling back in. So every service request would look the exact same for every response being paged out. And then it was just a no-brainer of, 'Why is it doing all this work again?'"

+ +

Using the insight OpenTracing gave them, "My team was able to look at a trace and make optimization suggestions to another team without ever looking at their code," says Broad. "The way we named our traces gave us insight whether it's doing a SQL call or it's making an RPC. And so it was really easy to say, 'OK, we know that it's going to page through all these requests. Do the work once and stuff it in cache.' And we were done basically. All those calls became sub-second calls immediately."
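The case study doesn't include Workiva's code, but the naming convention and caching fix Broad describes can be sketched in a few lines of Go with the `opentracing-go` library; the span names, helper functions, and in-memory cache below are illustrative assumptions, not their implementation:

```go
package main

import (
	"context"
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

// cache stands in for whatever store was actually used.
var cache = map[string]string{}

func fetchResults(ctx context.Context, key string) string {
	// Prefix span names by kind of work ("rpc.", "sql.") so a reader of the
	// trace can tell what is happening without opening the team's code.
	span, ctx := opentracing.StartSpanFromContext(ctx, "rpc.fetch-results")
	defer span.Finish()

	if v, ok := cache[key]; ok {
		span.SetTag("cache.hit", true)
		return v
	}
	span.SetTag("cache.hit", false)
	v := backendCall(ctx, key) // the waterfall showed this repeating per request
	cache[key] = v             // "do the work once and stuff it in cache"
	return v
}

func backendCall(ctx context.Context, key string) string {
	span, _ := opentracing.StartSpanFromContext(ctx, "rpc.backend-call")
	defer span.Finish()
	return "results for " + key
}

func main() {
	ctx := context.Background()
	fmt.Println(fetchResults(ctx, "sheet-1")) // first call pays the full cost
	fmt.Println(fetchResults(ctx, "sheet-1")) // repeat calls hit the cache
}
```

Because OpenTracing is vendor-neutral, a sketch like this runs unchanged whether the spans are exported to StackDriver or any other backend; with no tracer configured, the default no-op tracer simply discards them.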

+ +{{< case-studies/quote + image="/images/case-studies/workiva/banner4.jpg" + author="MACLEOD BROAD, SENIOR SOFTWARE ARCHITECT AT WORKIVA" +>}} +"We were looking at different tracing solutions and we decided that because it seemed to be a very evolving market, we didn't want to get stuck with one vendor. So OpenTracing seemed like the cleanest way to avoid vendor lock-in on what backend we actually had to use." +{{< /case-studies/quote >}} + +

After the success of the first use case, everyone involved in the trial went back and fully instrumented their products. Tracing was added to a few more use cases. "We wanted to get through the initial implementation pains early without bringing the whole department along for the ride," says Broad. "Now, a lot of teams add it when they're starting up a new service. We're really pushing adoption now more than we were before."

+ +

Some teams were won over quickly. "Tracing has given us immediate, actionable insight into how to improve our [Workspaces] service," says Software Engineer Michael Davis. "Through a combination of seeing where each call spends its time, as well as which calls are most often used, we were able to reduce our average response time by 95 percent (from 600ms to 30ms) in a single fix."

+ +

Most of Workiva's major products are now traced using OpenTracing, with data pushed into Google StackDriver. Even the products that aren't fully traced have some components and libraries that are.

+ +

Broad points out that because some of the engineers were working on App Engine and already had experience with the platform's Appstats library for profiling performance, it didn't take much to get them used to using OpenTracing. But others were a little more reluctant. "The biggest hindrance to adoption I think has been the concern about how much latency is introducing tracing [and StackDriver] going to cost," he says. "People are also very concerned about adding middleware to whatever they're working on. Questions about passing the context around and how that's done were common. A lot of our Go developers were fine with it, because they were already doing that in one form or another. Our Java developers were not super keen on doing that because they'd used other systems that didn't require that." But the benefits clearly outweighed the concerns, and today, Workiva's official policy is to use tracing.

+ +

In fact, Broad believes that tracing naturally fits in with Workiva's existing logging and metrics systems. "This was the way we presented it internally, and also the way we designed our use," he says. "Our traces are logged in the exact same mechanism as our app metric and logging data, and they get pushed the exact same way. So we treat all that data exactly the same when it's being created and when it's being recorded. We have one internal library that we use for logging, telemetry, analytics and tracing."

+ +{{< case-studies/quote author="Michael Davis, Software Engineer, Workiva" >}} +"Tracing has given us immediate, actionable insight into how to improve our [Workspaces] service. Through a combination of seeing where each call spends its time, as well as which calls are most often used, we were able to reduce our average response time by 95 percent (from 600ms to 30ms) in a single fix." +{{< /case-studies/quote >}} + +

For Workiva, OpenTracing has become an essential tool for zeroing in on optimizations and determining what's actually a micro-optimization by observing usage patterns. "On some projects we often assume what the customer is doing, and we optimize for these crazy scale cases that we hit 1 percent of the time," says Broad. "It's been really helpful to be able to say, 'OK, we're adding 100 milliseconds on every request that does X, and we only need to add that 100 milliseconds if it's the worst of the worst case, which only happens one out of a thousand requests or one out of a million requests.'"

+ +

Unlike many other companies, Workiva also traces the client side. "For us, the user experience is important—it doesn't matter if the RPC takes 100 milliseconds if it still takes 5 seconds to do the rendering to show it in the browser," says Broad. "So for us, those client times are important. We trace it to see what parts of loading take a long time. We're in the middle of working on a definition of what is 'loaded.' Is it when you have it, or when it's rendered, or when you can interact with it? Those are things we're planning to use tracing for to keep an eye on and to better understand."

+ +

That also requires adjusting for differences in external and internal clocks. "Before time correcting, it was horrible; our traces were more misleading than anything," says Broad. "So we decided that we would return a timestamp on the response headers, and then have the client reorient its time based on that—not change its internal clock but just calculate the offset on the response time to when the client got it. And if you end up in an impossible situation where a client RPC spans 210 milliseconds but the time on the response time is outside of that window, then we have to reorient that."
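Broad doesn't spell out the exact mechanism, so the following is only a sketch of the described offset correction in Go; the `X-Server-Time` header name is an assumption, since the real header isn't named. A production version would also apply the sanity check Broad mentions, rejecting offsets that place the response outside the client's observed RPC window:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// serverTimeHeader is an assumed name; the case study only says the server
// "returns a timestamp on the response headers".
const serverTimeHeader = "X-Server-Time"

// clockOffset estimates server time minus client time at the moment the
// response arrived. The client's real clock is never changed.
func clockOffset(resp *http.Response, receivedAt time.Time) (time.Duration, error) {
	serverTime, err := time.Parse(time.RFC3339Nano, resp.Header.Get(serverTimeHeader))
	if err != nil {
		return 0, err
	}
	return serverTime.Sub(receivedAt), nil
}

// reorient shifts a client-side span timestamp into server time.
func reorient(clientStamp time.Time, offset time.Duration) time.Time {
	return clientStamp.Add(offset)
}

func main() {
	// Simulate a response from a server whose clock runs 42ms ahead.
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set(serverTimeHeader, time.Now().Add(42*time.Millisecond).Format(time.RFC3339Nano))

	receivedAt := time.Now()
	offset, err := clockOffset(resp, receivedAt)
	if err != nil {
		panic(err)
	}
	fmt.Println("estimated offset:", offset)
	fmt.Println("span start, reoriented to server time:", reorient(receivedAt, offset))
}
```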

+ +

Broad is excited about the impact OpenTracing has already had on the company, and is also looking ahead to what else the technology can enable. One possibility is using tracing to update documentation in real time. "Keeping documentation up to date with reality is a big challenge," he says. "Say, we just ran a trace simulation or we just ran a smoke test on this new deploy, and the architecture doesn't match the documentation. We can find whose responsibility it is and let them know and have them update it. That's one of the places I'd like to get in the future with tracing."

\ No newline at end of file diff --git a/content/bn/case-studies/workiva/workiva_featured_logo.png b/content/bn/case-studies/workiva/workiva_featured_logo.png new file mode 100644 index 0000000000000..9998b471049e5 Binary files /dev/null and b/content/bn/case-studies/workiva/workiva_featured_logo.png differ diff --git a/content/bn/case-studies/workiva/workiva_featured_logo.svg b/content/bn/case-studies/workiva/workiva_featured_logo.svg new file mode 100644 index 0000000000000..0cde714f2315e --- /dev/null +++ b/content/bn/case-studies/workiva/workiva_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/yahoo-japan/index.html b/content/bn/case-studies/yahoo-japan/index.html new file mode 100644 index 0000000000000..724a41ae01558 --- /dev/null +++ b/content/bn/case-studies/yahoo-japan/index.html @@ -0,0 +1,4 @@ +--- +title: Yahoo! Japan +content_url: https://kubernetes.io/blog/2016/10/kubernetes-and-openstack-at-yahoo-japan +--- \ No newline at end of file diff --git a/content/en/case-studies/yahoo-japan/yahooJapan_logo.png b/content/bn/case-studies/yahoo-japan/yahooJapan_logo.png similarity index 100% rename from content/en/case-studies/yahoo-japan/yahooJapan_logo.png rename to content/bn/case-studies/yahoo-japan/yahooJapan_logo.png diff --git a/content/bn/case-studies/yahoo-japan/yahooJapan_logo.svg b/content/bn/case-studies/yahoo-japan/yahooJapan_logo.svg new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/content/en/case-studies/yahoo-japan/yahoojapan_featured.svg b/content/bn/case-studies/yahoo-japan/yahoojapan_featured.svg similarity index 100% rename from content/en/case-studies/yahoo-japan/yahoojapan_featured.svg rename to content/bn/case-studies/yahoo-japan/yahoojapan_featured.svg diff --git a/content/bn/case-studies/ygrene/index.html b/content/bn/case-studies/ygrene/index.html new file mode 100644 index 0000000000000..0e16b95004e50 --- /dev/null +++ b/content/bn/case-studies/ygrene/index.html @@ -0,0 +1,82 @@ +--- +title: Ygrene Case Study +linkTitle: Ygrene +case_study_styles: true +cid: caseStudies +logo: ygrene_featured_logo.png +featured: true +weight: 48 +quote: > + We had to change some practices and code, and the way things were built, but we were able to get our main systems onto Kubernetes in a month or so, and then into production within two months. That's very fast for a finance company. + +new_case_study_styles: true +heading_background: /images/case-studies/ygrene/banner1.jpg +heading_title_logo: /images/ygrene_logo.png +subheading: > + Ygrene: Using Cloud Native to Bring Security and Scalability to the Finance Industry +case_study_details: + - Company: Ygrene + - Location: Petaluma, Calif. + - Industry: Clean energy financing +--- + +

Challenge

+ +

A PACE (Property Assessed Clean Energy) financing company, Ygrene has funded more than $1 billion in loans since 2010. In order to approve and process those loans, "We have lots of data sources that are being aggregated, and we also have lots of systems that need to churn on that data," says Ygrene Development Manager Austin Adams. The company was utilizing massive servers, and "we just reached the limit of being able to scale them vertically. We had a really unstable system that became overwhelmed with requests just for doing background data processing in real time. The performance the users saw was very poor. We needed a solution that wouldn't require us to make huge refactors to the code base." As a finance company, Ygrene also needed to ensure that they were shipping their applications securely.

+ +

Solution

+ +

Moving from an Engine Yard platform and Amazon Elastic Beanstalk, the Ygrene team embraced cloud native technologies and practices: Kubernetes to help scale out and distribute workloads, Notary to put in build-time controls and get trust on the Docker images being used with third-party dependencies, and Fluentd for "observing every part of our stack," all running on Amazon EC2 Spot.

+ +

Impact

+ +

Before, deployments typically took three to four hours, and two or three months' worth of work would be deployed at low-traffic times every week or two weeks. Now, they take five minutes for Kubernetes, and an hour for the overall deploy with smoke testing. And "we're able to deploy three or four times a week, with just one week's or two days' worth of work," Adams says. "We're deploying during the work week, in the daytime and without any downtime. We had to ask for business approval to take the systems down, even in the middle of the night, because people could be doing loans. Now we can deploy, ship code, and migrate databases, all without taking the system down. The company gets new features without worrying that some business will be lost or delayed." Additionally, by using the kops project, Ygrene can now run its Kubernetes clusters with AWS EC2 Spot, at a tenth of the previous cost. These cloud native technologies have "changed the game for scalability, observability, and security—we're adding new data sources that are very secure," says Adams. "Without Kubernetes, Notary, and Fluentd, we couldn't tell our investors and team members that we knew what was going on."

+ +{{< case-studies/quote author="Austin Adams, Development Manager, Ygrene Energy Fund" >}} +"CNCF projects are helping Ygrene determine the security and observability standards for the entire PACE industry. We're an emerging finance industry, and without these projects, especially Kubernetes, we couldn't be the industry leader that we are today." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +In less than a decade, Ygrene has funded more than $1 billion in loans for renewable energy projects. +{{< /case-studies/lead >}} + +

A PACE (Property Assessed Clean Energy) financing company, "We take the equity in a home or a commercial building, and use it to finance property improvements for anything that saves electricity, produces electricity, saves water, or reduces carbon emissions," says Development Manager Austin Adams.

+ +

In order to approve those loans, the company processes an enormous amount of underwriting data. "We have tons of different points that we have to validate about the property, about the company, or about the person," Adams says. "So we have lots of data sources that are being aggregated, and we also have lots of systems that need to churn on that data in real time."

+ +

By 2017, deployments and scalability had become pain points. The company was utilizing massive servers, and "we just reached the limit of being able to scale them vertically," he says. Migrating to AWS Elastic Beanstalk didn't solve the problem: "The Scala services needed a lot of data from the main Ruby on Rails services and from different vendors, so they were asking for information from our Ruby services at a rate that those services couldn't handle. We had lots of configuration misses with Elastic Beanstalk as well. It just came to a head, and we realized we had a really unstable system."

+ +{{< case-studies/quote + image="/images/case-studies/ygrene/banner3.jpg" + author="Austin Adams, Development Manager, Ygrene Energy Fund" +>}} +"CNCF has been an amazing incubator for so many projects. Now we look at its webpage regularly to find out if there are any new, awesome, high-quality projects we can implement into our stack. It's actually become a hub for us for knowing what software we need to be looking at to make our systems more secure or more scalable." +{{< /case-studies/quote >}} + +

Adams along with the rest of the team set out to find a solution that would be transformational, but "wouldn't require us to make huge refactors to the code base," he says. And as a finance company, Ygrene needed security as much as scalability. They found the answer by embracing cloud native technologies: Kubernetes to help scale out and distribute workloads, Notary to achieve reliable security at every level, and Fluentd for observability. "Kubernetes was where the community was going, and we wanted to be future proof," says Adams.

+ +

With Kubernetes, the team was able to quickly containerize the Ygrene application with Docker. "We had to change some practices and code, and the way things were built," Adams says, "but we were able to get our main systems onto Kubernetes in a month or so, and then into production within two months. That's very fast for a finance company."

+ +

How? Cloud native has "changed the game for scalability, observability, and security—we're adding new data sources that are very secure," says Adams. "Without Kubernetes, Notary, and Fluentd, we couldn't tell our investors and team members that we knew what was going on."

+ +

Notary, in particular, "has been a godsend," says Adams. "We need to know that our attack surface on third-party dependencies is low, or at least managed. We use it as a trust system and we also use it as a separation, so production images are signed by Notary, but some development images we don't sign. That is to ensure that they can't get into the production cluster. We've been using it in the test cluster to feel more secure about our builds."

+ +{{< case-studies/quote image="/images/case-studies/ygrene/banner4.jpg">}} +"We had to change some practices and code, and the way things were built," Adams says, "but we were able to get our main systems onto Kubernetes in a month or so, and then into production within two months. That's very fast for a finance company." +{{< /case-studies/quote >}} + +

By using the kops project, Ygrene was able to move from Elastic Beanstalk to running its Kubernetes clusters on AWS EC2 Spot, at a tenth of the previous cost. "In order to scale before, we would need to up our instance sizes, incurring high cost for low value," says Adams. "Now with Kubernetes and kops, we are able to scale horizontally on Spot with multiple instance groups."

+ +

That also helped them mitigate the risk that comes with running in the public cloud. "We figured out, essentially, that if we're able to select instance classes using EC2 Spot that had an extremely low likelihood of interruption and zero history of interruption, and we're willing to pay a price high enough, that we could virtually get the same guarantee using Kubernetes because we have enough nodes," says Software Engineer Zach Arnold, who led the migration to Kubernetes. "Now that we've re-architected these pieces of the application to not live on the same server, we can push out to many different servers and have a more stable deployment."

+ +

As a result, the team can now ship code any time of day. "That was risky because it could bring down your whole loan management software with it," says Arnold. "But we now can deploy safely and securely during the day."

+ +{{< case-studies/quote >}} +"In order to scale before, we would need to up our instance sizes, incurring high cost for low value," says Adams. "Now with Kubernetes and kops, we are able to scale horizontally on Spot with multiple instance groups." +{{< /case-studies/quote >}} + +

Before, deployments typically took three to four hours, and two or three months' worth of work would be deployed at low-traffic times every week or two weeks. Now, they take five minutes for Kubernetes, and an hour for an overall deploy with smoke testing. And "we're able to deploy three or four times a week, with just one week's or two days' worth of work," Adams says. "We're deploying during the work week, in the daytime and without any downtime. We had to ask for business approval to take the systems down for 30 minutes to an hour, even in the middle of the night, because people could be doing loans. Now we can deploy, ship code, and migrate databases, all without taking the system down. The company gets new features without worrying that some business will be lost or delayed."

+ +

Cloud native also affected how Ygrene's 50+ developers and contractors work. Adams and Arnold spent considerable time "teaching people to think distributed out of the box," says Arnold. "We ended up picking what we call the Four S's of Shipping: safely, securely, stably, and speedily." (For more on the security piece of it, see their article on their "continuous hacking" strategy.) As for the engineers, says Adams, "they have been able to advance as their software has advanced. I think that at the end of the day, the developers feel better about what they're doing, and they also feel more connected to the modern software development community."

+ +

Looking ahead, Adams is excited to explore more CNCF projects, including SPIFFE and SPIRE. "CNCF has been an amazing incubator for so many projects," he says. "Now we look at its webpage regularly to find out if there are any new, awesome, high-quality projects we can implement into our stack. It's actually become a hub for us for knowing what software we need to be looking at to make our systems more secure or more scalable."

\ No newline at end of file diff --git a/content/bn/case-studies/ygrene/ygrene_featured_logo.png b/content/bn/case-studies/ygrene/ygrene_featured_logo.png new file mode 100644 index 0000000000000..d0d69114784c8 Binary files /dev/null and b/content/bn/case-studies/ygrene/ygrene_featured_logo.png differ diff --git a/content/bn/case-studies/ygrene/ygrene_featured_logo.svg b/content/bn/case-studies/ygrene/ygrene_featured_logo.svg new file mode 100644 index 0000000000000..0b0ab458facd4 --- /dev/null +++ b/content/bn/case-studies/ygrene/ygrene_featured_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/case-studies/zalando/index.html b/content/bn/case-studies/zalando/index.html new file mode 100644 index 0000000000000..177c42962aa53 --- /dev/null +++ b/content/bn/case-studies/zalando/index.html @@ -0,0 +1,83 @@ +--- +title: Zalando Case Study +case_study_styles: true +cid: caseStudies + +new_case_study_styles: true +heading_background: /images/case-studies/zalando/banner1.jpg +heading_title_logo: /images/zalando_logo.png +subheading: > + Europe's Leading Online Fashion Platform Gets Radical with Cloud Native +case_study_details: + - Company: Zalando + - Location: Berlin, Germany + - Industry: Online Fashion +--- + +

Challenge

+ +

Zalando, Europe's leading online fashion platform, has experienced exponential growth since it was founded in 2008. In 2015, with plans to further expand its original e-commerce site to include new services and products, Zalando embarked on a radical transformation resulting in autonomous self-organizing teams. This change required an infrastructure that could scale with the growth of the engineering organization. Zalando's technology department began rewriting its applications to be cloud-ready and started moving its infrastructure from on-premise data centers to the cloud. While orchestration wasn't immediately considered, as teams migrated to Amazon Web Services (AWS): "We saw the pain teams were having with infrastructure and CloudFormation on AWS," says Henning Jacobs, Head of Developer Productivity. "There's still too much operational overhead for the teams and compliance." To provide better support, cluster management was brought into play.

+ +

Solution

+ +

The company now runs its Docker containers on AWS using Kubernetes orchestration.

+ +

Impact

+ +

With the old infrastructure "it was difficult to properly embrace new technologies, and DevOps teams were considered to be a bottleneck," says Jacobs. "Now, with this cloud infrastructure, they have this packaging format, which can contain anything that runs on the Linux kernel. This makes a lot of people pretty happy. The engineers love autonomy."

+ +{{< case-studies/quote author="Henning Jacobs, Head of Developer Productivity at Zalando" >}} +"We envision all Zalando delivery teams running their containerized applications on a state-of-the-art, reliable and scalable cluster infrastructure provided by Kubernetes." +{{< /case-studies/quote >}} + +{{< case-studies/lead >}} +When Henning Jacobs arrived at Zalando in 2010, the company was just two years old with 180 employees running an online store for European shoppers to buy fashion items. +{{< /case-studies/lead >}} + +

"It started as a PHP e-commerce site which was easy to get started with, but was not scaling with the business' needs" says Jacobs, Head of Developer Productivity at Zalando.

+ +

At that time, the company began expanding beyond its German origins into other European markets. Fast-forward to today and Zalando now has more than 14,000 employees, 3.6 billion Euro in revenue for 2016 and operates across 15 countries. "With growth in all dimensions, and constant scaling, it has been a once-in-a-lifetime experience," he says.

+ +

Not to mention a unique opportunity for an infrastructure specialist like Jacobs. Just after he joined, the company began rewriting all their applications in-house. "That was generally our strategy," he says. "For example, we started with our own logistics warehouses but at first you don't know how to do logistics software, so you have some vendor software. And then we replaced it with our own because with off-the-shelf software you're not competitive. You need to optimize these processes based on your specific business needs."

+ +

In parallel to rewriting their applications, Zalando had set a goal of expanding beyond basic e-commerce to a platform offering multi-tenancy, a dramatic increase in assortments and styles, same-day delivery and even your own personal online stylist.

+ +

The need to scale ultimately led the company on a cloud-native journey. As did its embrace of a microservices-based software architecture that gives engineering teams more autonomy and ownership of projects. "This move to the cloud was necessary because in the data center you couldn't have autonomous teams. You have the same infrastructure and it was very homogeneous, so you could only run your Java or Python app," Jacobs says.

+ +{{< case-studies/quote image="/images/case-studies/zalando/banner3.jpg" >}} +"This move to the cloud was necessary because in the data center you couldn't have autonomous teams. You have the same infrastructure and it was very homogeneous, so you could only run your Java or Python app." +{{< /case-studies/quote >}} + +

Zalando began moving its infrastructure from two on-premise data centers to the cloud, requiring the migration of older applications for cloud-readiness. "We decided to have a clean break," says Jacobs. "Our Amazon Web Services infrastructure was set up like so: Every team has its own AWS account, which is completely isolated, meaning there's no 'lift and shift.' You basically have to rewrite your application to make it cloud-ready even down to the persistence layer. We bravely went back to the drawing board and redid everything, first choosing Docker as a common containerization, then building the infrastructure from there."

+ +

The company decided to hold off on orchestration at the beginning, but as teams were migrated to AWS, "we saw the pain teams were having with infrastructure and CloudFormation on AWS," says Jacobs.

+ +

Zalando's 200+ autonomous engineering teams decide which technologies to use and operate their own applications using their own AWS accounts. This setup proved to be a compliance challenge. Even with strict rules-of-play and automated compliance checks in place, engineering teams and IT-compliance were overburdened addressing compliance issues. "Violations appear for non-compliant behavior, which we detect when scanning the cloud infrastructure," says Jacobs. "Everything is possible and nothing enforced, so you have to live with violations (and resolve them) instead of preventing the error in the first place. This means overhead for teams—and overhead for compliance and operations. It also takes time to spin up new EC2 instances on AWS, which affects our deployment velocity."

+ +

The team realized they needed to "leverage the value you get from cluster management," says Jacobs. When they first looked at Platform as a Service (PaaS) options in 2015, the market was fragmented; but "now there seems to be a clear winner. It seemed like a good bet to go with Kubernetes."

+ +

The transition to Kubernetes started in 2016 during Zalando's Hack Week where participants deployed their projects to a Kubernetes cluster. From there 60 members of the tech infrastructure department were on-boarded - and then engineering teams were brought on one at a time. "We always start by talking with them and make sure everyone's expectations are clear," says Jacobs. "Then we conduct some Kubernetes training, which is mostly training for our CI/CD setup, because the user interface for our users is primarily through the CI/CD system. But they have to know fundamental Kubernetes concepts and the API. This is followed by a weekly sync with each team to check their progress. Once they have something in production, we want to see if everything is fine on top of what we can improve."

+ +{{< case-studies/quote image="/images/case-studies/zalando/banner4.jpg" >}} +Once Zalando began migrating applications to Kubernetes, the results were immediate. "Kubernetes is a cornerstone for our seamless end-to-end developer experience. We are able to ship ideas to production using a single consistent and declarative API," says Jacobs. +{{< /case-studies/quote >}} + +

At the moment, Zalando is running an initial 40 Kubernetes clusters with plans to scale for the foreseeable future. Once Zalando began migrating applications to Kubernetes, the results were immediate. "Kubernetes is a cornerstone for our seamless end-to-end developer experience. We are able to ship ideas to production using a single consistent and declarative API," says Jacobs. "The self-healing infrastructure provides a frictionless experience with higher-level abstractions built upon low-level best practices. We envision all Zalando delivery teams will run their containerized applications on a state-of-the-art reliable and scalable cluster infrastructure provided by Kubernetes."

+ +

With the old on-premise infrastructure "it was difficult to properly embrace new technologies, and DevOps teams were considered to be a bottleneck," says Jacobs. "Now, with this cloud infrastructure, they have this packaging format, which can contain anything that runs in the Linux kernel. This makes a lot of people pretty happy. The engineers love the autonomy."

+ +

There were a few challenges in Zalando's Kubernetes implementation. "We are a team of seven people providing clusters to different engineering teams, and our goal is to provide a rock-solid experience for all of them," says Jacobs. "We don't want pet clusters. We don't want to have to understand what workload they have; it should just work out of the box. With that in mind, cluster autoscaling is important. There are many different ways of doing cluster management, and this is not part of the core. So we created two components to provision clusters, have a registry for clusters, and to manage the whole cluster life cycle."

+ +

Jacobs's team also worked to improve the Kubernetes-AWS integration. Plus, "there are still a lot of best practices missing," says Jacobs. The team, for example, recently solved a pod security policy issue. "There was already a concept in Kubernetes, but it wasn't documented, so it was kind of tricky," he says. The large Kubernetes community was a big help in resolving the issue. To help other companies start down the same path, Jacobs compiled his team's learnings in a document called Running Kubernetes in Production.

+ +{{< case-studies/quote >}} +"The Kubernetes API allows us to run applications in a cloud provider-agnostic way, which gives us the freedom to revisit IaaS providers in the coming years... We expect the Kubernetes API to be the global standard for PaaS infrastructure and are excited about the continued journey." +{{< /case-studies/quote >}} + +

In the end, Kubernetes made it possible for Zalando to introduce and maintain the new products the company envisioned to grow its platform. "The fashion advice product used Scala, and there were struggles to make this possible with our former infrastructure," says Jacobs. "It was a workaround, and that team needed more and more support from the platform team, just because they used different technologies. Now with Kubernetes, it's autonomous. Whatever the workload is, that team can just go their way, and Kubernetes prevents other bottlenecks."

+ +

Looking ahead, Jacobs sees Zalando's new infrastructure as a great enabler for other things the company has in the works, from its new logistics software, to a platform feature connecting brands, to products dreamed up by data scientists. "One vision is if you watch the next James Bond movie and see the suit he's wearing, you should be able to automatically order it, and have it delivered to you within an hour," says Jacobs. "It's about connecting the full fashion sphere. This is definitely not possible if you have a bottleneck with everyone running in the same data center and thus very restricted. You need infrastructure to scale each autonomous team's idea."

+ +

For other companies considering this technology, Jacobs says he wouldn't necessarily advise doing it exactly the same way Zalando did. "It's okay to do so if you're ready to fail at some things," he says. "You need to set the right expectations. Not everything will work. Rewriting apps and this type of organizational change can be disruptive. The first product we moved was critical. There were a lot of dependencies, and it took longer than expected. Maybe we should have started with something less complicated, less business critical, just to get our toes wet."

+ +

But once they got to the other side "it was clear for everyone that there's no big alternative," Jacobs adds. "The Kubernetes API allows us to run applications in a cloud provider-agnostic way, which gives us the freedom to revisit IaaS providers in the coming years. Zalando Technology benefits from migrating to Kubernetes as we are able to leverage our existing knowledge to create an engineering platform offering flexibility and speed to our engineers while significantly reducing the operational overhead. We expect the Kubernetes API to be the global standard for PaaS infrastructure and are excited about the continued journey."

\ No newline at end of file diff --git a/content/bn/case-studies/zalando/zalando_feature_logo.png b/content/bn/case-studies/zalando/zalando_feature_logo.png new file mode 100644 index 0000000000000..ba6251050d15a Binary files /dev/null and b/content/bn/case-studies/zalando/zalando_feature_logo.png differ diff --git a/content/bn/case-studies/zalando/zalando_feature_logo.svg b/content/bn/case-studies/zalando/zalando_feature_logo.svg new file mode 100644 index 0000000000000..875d10c030218 --- /dev/null +++ b/content/bn/case-studies/zalando/zalando_feature_logo.svg @@ -0,0 +1 @@ +kubernetes.io-logos2 \ No newline at end of file diff --git a/content/bn/community/_index.html b/content/bn/community/_index.html new file mode 100644 index 0000000000000..b9c51592c6578 --- /dev/null +++ b/content/bn/community/_index.html @@ -0,0 +1,187 @@ +--- +title: কমিউনিটি +layout: basic +body_class: community +cid: community +community_styles_migrated: true +menu: + main: + weight: 50 +--- + + +
+

কুবারনেটিস কমিউনিটি — ব্যবহারকারী, অবদানকারী এবং আমরা যে সংস্কৃতি একসাথে তৈরি করেছি — + এই ওপেন সোর্স প্রকল্পের উল্কাগত উত্থানের সবচেয়ে বড় কারণগুলোর মধ্যে + একটি৷ আমাদের সংস্কৃতি এবং মূল্যবোধগুলো ক্রমাগত বৃদ্ধি এবং পরিবর্তিত হতে থাকে + কারণ প্রকল্প নিজেই বৃদ্ধি পায় এবং পরিবর্তিত হয়। আমরা সকলেই প্রকল্পের এবং + এটিতে কাজ করার উপায়গুলোর ক্রমাগত উন্নতির দিকে একসাথে কাজ করি।

+

আমরা এমন লোক যারা ইস্যু এবং পুল রিকোয়েস্ট জমা দেয়, + SIG মিটিংয়ে, কুবারনেটিস মিটআপে এবং KubeCon-এ উপস্থিত থাকে, এটির গ্রহণ এবং + উদ্ভাবনের পক্ষে সমর্থন জানায়, kubectl get pods চালায় এবং হাজার হাজার + অন্যান্য গুরুত্বপূর্ণ উপায়ে অবদান রাখে। আপনি কীভাবে জড়িত হতে পারেন এবং এই আশ্চর্যজনক + কমিউনিটির অংশ হতে পারেন তা শিখতে পড়ুন।

+
+ + + + + +
+

কমিউনিটি মূল্যবোধ

+

কুবারনেটিস কমিউনিটির মূল্যবোধ হলো প্রকল্পের চলমান সাফল্যের মূল ভিত্তি।
+ এই নীতিগুলো কুবারনেটিস প্রকল্পের প্রতিটি দিক নির্দেশ করে।

+ + আরও পড়ুন + +
+ +
+

কোড অফ কন্ডাক্ট

+

কুবারনেটিস কমিউনিটি সম্মান এবং অন্তর্ভুক্তিকে মূল্য দেয়, এবং সমস্ত ইন্টারঅ্যাকশনে একটি আচরণবিধি প্রয়োগ করে।

+

আপনি যদি কোনো ইভেন্টে বা মিটিংয়ে, Slack-এ, বা অন্য কোনো যোগাযোগ মাধ্যমে আচরণবিধি লঙ্ঘন লক্ষ্য করেন, তাহলে conduct@kubernetes.io ঠিকানায় কুবারনেটিস কোড অফ কন্ডাক্ট কমিটির সাথে যোগাযোগ করুন। সমস্ত রিপোর্ট গোপন রাখা হয়। আপনি GitHub-এ কুবারনেটিস কমিউনিটি রিপোজিটরিতে কমিটি সম্পর্কে পড়তে পারেন।

+ + আরও পড়ুন + +
+ +
+

ভিডিও

+ +

কুবারনেটিস ইউটিউবে আছে, অনেক কিছু নিয়ে। বিস্তৃত বিষয়গুলো জানার জন্য সাবস্ক্রাইব করুন৷

+ + +
+ +
+

আলোচনা

+ +

আমরা অনেক কথা বলি। আমাদের খুঁজুন এবং এইসব প্ল্যাটফর্মের যেকোনো একটিতে কথোপকথনে যোগ দিন।

+ +
+
+ + Forum + + Community forums ▶ +

বিষয়-ভিত্তিক প্রযুক্তিগত আলোচনা যা ডক্স, + সমস্যা সমাধান এবং আরও অনেক কিছুর মধ্যে সেতুবন্ধন তৈরি করে৷

+
+ +
+ + 𝕏.org + + 𝕏 ▶ +

#kubernetesio

+

ব্লগ পোস্ট, ইভেন্ট, নিউজ, আইডিয়ার রিয়েল-টাইম ঘোষণা।

+
+ +
+ + GitHub + + GitHub ▶ +

সমস্ত প্রকল্প এবং ইস্যু ট্র্যাকিং, প্লাস অবশ্যই কোড।

+
+ +
+ + Server Fault + + Server Fault ▶ +

কুবারনেটিস-সম্পর্কিত আলোচনা Server Fault এ। প্রশ্ন জিজ্ঞাসা করুন, বা উত্তর দিন।

+
+ +
+ + Slack + + Slack ▶ +

170+ চ্যানেলের সাথে, আপনি আপনার প্রয়োজনের সাথে মানানসই একটি খুঁজে পাবেন।

+
Need an invitation? + Visit https://slack.k8s.io/ + for an invitation.
+
+
+
+ +
+
+

আসন্ন ইভেন্ট

+ {{< upcoming-events >}} +
+
+ +
+

গ্লোবাল কমিউনিটি

+

বিশ্বজুড়ে 150 টিরও বেশি মিটআপ রয়েছে, এবং সংখ্যাটি ক্রমবর্ধমান। আপনার লোকাল kube লোকদের খুঁজুন। কাছাকাছি কেউ না থাকলে দায়িত্ব নিন এবং নিজেই একটি তৈরি করুন। +

+ + একটি মিটআপ খুঁজুন + +
+ +
+

সাম্প্রতিক নিউজ

+ +
diff --git a/content/bn/community/code-of-conduct.md b/content/bn/community/code-of-conduct.md new file mode 100644 index 0000000000000..fb6ddecc17b3e --- /dev/null +++ b/content/bn/community/code-of-conduct.md @@ -0,0 +1,25 @@ +--- +title: Kubernetes Community Code of Conduct +body_class: code-of-conduct +cid: code-of-conduct +--- + +_কুবারনেটিস অনুসরণ করে +[CNCF কোড অফ কন্ডাক্ট](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)। +CNCF CoC এর পাঠ্যটি নীচে প্রতিলিপি করা হয়েছে, +[commit 71412bb02](https://github.com/cncf/foundation/blob/71412bb029090d42ecbeadb39374a337bfb48a9c/code-of-conduct-languages/bn.md) হিসাবে_ + +
+{{< include "static/cncf-code-of-conduct.md" >}} +
+ +--- + +আপনি যদি কোনো ইভেন্টে বা মিটিংয়ে, স্ল্যাকে বা +অন্য যোগাযোগ ব্যবস্থায় আচরণবিধি লঙ্ঘন লক্ষ্য করেন, +তাহলে [কুবারনেটস কোড অফ কন্ডাক্ট কমিটির](https://git.k8s.io/community/committee-code-of-conduct) সাথে যোগাযোগ করুন। + +আপনি [conduct@kubernetes.io](mailto:conduct@kubernetes.io) এ ইমেলের মাধ্যমে তাদের কাছে পৌঁছাতে পারেন। +আপনার পরিচয় গোপন রাখা হবে। + +আপনি যদি লক্ষ্য করেন যে এই পৃষ্ঠাটি পুরানো, অনুগ্রহ করে [একটি ইস্যু তৈরি করুন](https://github.com/kubernetes/website/issues/new/choose)। diff --git a/content/bn/community/static/README.md b/content/bn/community/static/README.md new file mode 100644 index 0000000000000..b47f30fc872a6 --- /dev/null +++ b/content/bn/community/static/README.md @@ -0,0 +1,5 @@ +এই ডিরেক্টরির ফাইলগুলো অন্য উৎস থেকে আনা হয়েছে । +এগুলোকে নতুন সংস্করণ দিয়ে প্রতিস্থাপন ব্যতীত সরাসরি এডিট করবেন না ৷ + +স্থানীয়করণ নোট: আপনাকে এই ডিরেক্টরির কোনো +ফাইলের স্থানীয় সংস্করণ তৈরি করতে হবে না। diff --git a/content/bn/community/static/cncf-code-of-conduct.md b/content/bn/community/static/cncf-code-of-conduct.md new file mode 100644 index 0000000000000..87ccc3f7b4f35 --- /dev/null +++ b/content/bn/community/static/cncf-code-of-conduct.md @@ -0,0 +1,85 @@ + +## CNCF কমিউনিটি কোড অফ কন্ডাক্ট (CNCF Community Code of Conduct) v1.3 + +### কমিউনিটি কোড অফ কন্ডাক্ট (Contributor Code of Conduct) + +CNCF কমিউনিটির অবদানকারী, রক্ষণাবেক্ষণকারী এবং অংশগ্রহণকারী হিসাবে, এবং একটি উন্মুক্ত এবং একটি উন্মুক্ত +এবং স্বাগতম জানানো কমিউনিটিতে গড়ে তোলার স্বার্থে, যে সমস্ত লোক অংশগ্রহণ করে বা অবদান রেখে সমস্যাগুলি রিপোর্ট করা, +ফিচারগুলো অনুরোধ পোস্ট করা, ডকুমেন্টেশন আপডেট করা, পুল অনুরোধ বা প্যাচ জমা দেওয়া, কনফারেন্স বা ইভেন্টে যোগদান করা, +বা অন্যান্য কমিউনিটি বা প্রকল্পের কার্যকলাপে জড়িত থাকার মাধ্যমে তাদের সকলকে আমরা সম্মান করার অঙ্গীকার করি। + +আমরা বয়স, শরীরের আকার, বর্ণ, অক্ষমতা, জাতিগততা, অভিজ্ঞতার স্তর, পারিবারিক অবস্থা, লিঙ্গ, লিঙ্গ পরিচয় এবং অভিব্যক্তি, বৈবাহিক অবস্থা, সামরিক বা প্রবীণ অবস্থা, জাতীয়তা, ব্যক্তিগত চেহারা, জাতি, ধর্ম, যৌন দৃষ্টিভঙ্গি, আর্থ-সামাজিক অবস্থা, উপজাতি বা বৈচিত্র্যের অন্য কোনও মাত্রা নির্বিশেষে প্রত্যেকের জন্য CNCF কমিউনিটিয়ের অংশগ্রহণকে একটি হয়রানি-মুক্ত অভিজ্ঞতা তৈরি করতে প্রতিশ্রুতিবদ্ধ। + +## সুযোগ (Scope) + +এই আচরণবিধি প্রযোজ্য: +* প্রকল্প এবং কমিউনিটির স্থানগুলির মধ্যে, +* অন্যান্য স্থানগুলিতে যখন কোনও পৃথক CNCF কমিউনিটির অংশগ্রহণকারীর কথা বা ক্রিয়াগুলি কোনও CNCF প্রকল্প, CNCF কমিউনিটি বা অন্য কোনও CNCF কমিউনিটির অংশগ্রহণকারীর দিকে পরিচালিত হয় + +### CNCF ইভেন্টস (CNCF Events) + +পেশাদার ইভেন্ট কর্মীদের সাথে লিনাক্স ফাউন্ডেশন দ্বারা উত্পাদিত CNCF ইভেন্টগুলি ইভেন্ট পৃষ্ঠায় উপলব্ধ লিনাক্স ফাউন্ডেশন [ইভেন্টস কোড অফ কন্ডাক্ট] () দ্বারা পরিচালিত হয়। এটি CNCF আচরণবিধির সাথে একত্রে ব্যবহার করার জন্য ডিজাইন করা হয়েছে। + +## আমাদের মান (Our Standards) + +CNCF কমিউনিটি উন্মুক্ত, অন্তর্ভুক্তিমূলক এবং শ্রদ্ধাশীল। আমাদের কমিউনিটির প্রতিটি সদস্যের তাদের পরিচয়কে সম্মান করার অধিকার রয়েছে। + +ইতিবাচক পরিবেশে অবদান রাখে এমন আচরণের উদাহরণগুলির মধ্যে রয়েছে তবে সীমাবদ্ধ নয়: + +* অন্যান্য মানুষের প্রতি সহানুভূতি এবং দয়া প্রদর্শন করা +* ভিন্ন মতামত, দৃষ্টিভঙ্গি এবং অভিজ্ঞতার প্রতি শ্রদ্ধাশীল হওয়া +* গঠনমূলক প্রতিক্রিয়া দেওয়া এবং সদয়ভাবে গ্রহণ করা +* দায় স্বীকার করা এবং আমাদের ভুলের দ্বারা ক্ষতিগ্রস্থদের কাছে ক্ষমা চাওয়া, + এবং অভিজ্ঞতা থেকে শিখা +* যা কেবল ব্যক্তি হিসাবে আমাদের জন্য নয়, সামগ্রিক কমিউনিটির জন্য সর্বোত্তম সেদিকে + মনোনিবেশ করা +* স্বাগত এবং অন্তর্ভুক্তিমূলক ভাষা ব্যবহার করা + +অগ্রহণযোগ্য আচরণের উদাহরণগুলির মধ্যে রয়েছে তবে সীমাবদ্ধ নয়: + +* যৌন ভাষা বা চিত্রাবলীর ব্যবহার +* ট্রলিং, অপমানজনক বা অবমাননাকর মন্তব্য এবং ব্যক্তিগত বা 
রাজনৈতিক আক্রমণ +* যে কোনও রূপে পাবলিক বা প্রাইভেট হয়রানি +* অন্যদের ব্যক্তিগত তথ্য প্রকাশ, যেমন তাদের স্পষ্ট অনুমতি ছাড়া + প্রকৃত বা ইমেল ঠিকানা প্রকাশ করা +* সহিংসতা, সহিংসতার হুমকি দেওয়া বা অন্যকে হিংসাত্মক আচরণে জড়িত হতে উত্সাহিত করা +* কারও সম্মতি ছাড়াই অনুসরণ করা বা অনুসরণ করা +* অবাঞ্ছিত শারীরিক যোগাযোগ +* অবাঞ্ছিত যৌন বা রোমান্টিক মনোযোগ বা অগ্রগতি +* অন্যান্য আচরণ যা পেশাদার পরিবেশে যুক্তিসঙ্গতভাবে অনুপযুক্ত বলে + বিবেচিত হতে পারে + +নিম্নলিখিত আচরণগুলিও নিষিদ্ধ: + +* আচরণবিধির তদন্তের সাথে সম্পর্কিত জেনেশুনে মিথ্যা বা বিভ্রান্তিকর তথ্য সরবরাহ করা বা অন্যথায় ইচ্ছাকৃতভাবে তদন্তে হস্তক্ষেপ করা। +* কোনও ব্যক্তির বিরুদ্ধে প্রতিশোধ নেওয়া কারণ তারা কোনও ঘটনা রিপোর্ট করেছে বা সাক্ষী হিসাবে কোনও ঘটনা সম্পর্কে তথ্য সরবরাহ করেছে। + +প্রকল্প রক্ষণাবেক্ষণকারীদের কমেন্ট, কমিট, কোড, উইকি এডিট, ইস্যু এবং অন্যান্য অবদান যা এই আচরণবিধির সাথে সামঞ্জস্যপূর্ণ নয় তা অপসারণ, এডিট বা প্রত্যাখ্যান করার অধিকার এবং দায়িত্ব রয়েছে। +এই আচরণবিধি গ্রহণ করে, প্রকল্প রক্ষণাবেক্ষণকারীরা একটি CNCF প্রকল্প পরিচালনার প্রতিটি ক্ষেত্রে এই নীতিগুলি ন্যায্য এবং ধারাবাহিকভাবে প্রয়োগ +করার জন্য প্রতিশ্রুতিবদ্ধ। +প্রকল্প রক্ষণাবেক্ষণকারীরা যারা আচরণবিধি অনুসরণ বা প্রয়োগ করে না তাদের প্রকল্প দল থেকে অস্থায়ীভাবে বা স্থায়ীভাবে অপসারণ করা যেতে পারে। + +## প্রতিবেদন (Reporting) + +কুবারনেটিস কমিউনিটির মধ্যে ঘটে যাওয়া ঘটনাগুলির জন্য, মাধ্যমে [কুবারনেটিস কোড অফ কন্ডাক্ট কমিটি](https://git.k8s.io/community/committee-code-of-conduct) এর সাথে যোগাযোগ করুন। আপনি তিন কার্যদিবসের মধ্যে একটি প্রতিক্রিয়া আশা করতে পারেন। + +অন্যান্য প্রকল্পের জন্য, বা প্রকল্প-অজ্ঞেয়বাদী বা একাধিক CNCF প্রকল্পকে প্রভাবিত করে এমন ঘটনাগুলির জন্য, দয়া করে এর মাধ্যমে [CNCF কোড অফ কন্ডাক্ট কমিটি](https://www.cncf.io/conduct/committee/) এর সাথে যোগাযোগ করুন। বিকল্পভাবে, আপনি আপনার প্রতিবেদন জমা দেওয়ার জন্য [CNCF কোড অফ কন্ডাক্ট কমিটি](https://www.cncf.io/conduct/committee/) এর যে কোনও স্বতন্ত্র সদস্যের সাথে যোগাযোগ করতে পারেন। বেনামে কীভাবে রিপোর্ট জমা দিতে হয়, সেই সহ কীভাবে রিপোর্ট জমা দিতে হয়, সেই বিষয়ে আরও বিস্তারিত নির্দেশাবলীর জন্য অনুগ্রহ করে আমাদের [ইনসিডেন্ট রেজোলিউশন প্রসিডিউরস](https://github.com/cncf/foundation/blob/main/code-of-conduct/coc-incident-resolution-procedures.md) দেখুন। আপনি তিন কার্যদিবসের মধ্যে একটি প্রতিক্রিয়া আশা করতে পারেন। + +লিনাক্স ফাউন্ডেশন দ্বারা উত্পাদিত CNCF ইভেন্টে ঘটে যাওয়া ঘটনাগুলির জন্য, দয়া করে সাথে যোগাযোগ করুন। + +## প্রয়োগ (Enforcement) + +একটি রিপোর্ট করা ঘটনার পর্যালোচনা ও তদন্তের পরে, সিওসি প্রতিক্রিয়া দল যার এখতিয়ার রয়েছে তারা এই আচরণবিধি এবং এর সম্পর্কিত ডকুমেন্টেশনের ভিত্তিতে কোন পদক্ষেপটি উপযুক্ত তা নির্ধারণ করবে। + +কোন আচরণবিধির ঘটনাগুলি প্রকল্প নেতৃত্ব দ্বারা পরিচালিত হয়, কোন ঘটনাগুলি CNCF কোড অফ কন্ডাক্ট কমিটি দ্বারা পরিচালিত হয় এবং কোন ঘটনাগুলি লিনাক্স ফাউন্ডেশন (এর ইভেন্ট টিম সহ) দ্বারা পরিচালিত হয় সে সম্পর্কে তথ্যের জন্য আমাদের [এখতিয়ার নীতি](https://github.com/cncf/foundation/blob/main/code-of-conduct/coc-committee-jurisdiction-policy.md) দেখুন। + +## সংশোধনী (Amendments) + +CNCF সনদের সাথে সামঞ্জস্যপূর্ণ, এই আচরণবিধিতে যে কোনও উল্লেখযোগ্য পরিবর্তন অবশ্যই প্রযুক্তিগত তদারকি কমিটি দ্বারা অনুমোদিত হতে হবে। + +## স্বীকারোক্তি (Acknowledgements) + +এই আচরণবিধিটি অবদানকারী চুক্তি থেকে অভিযোজিত +(http://contributor-covenant.org), সংস্করণ 2.0 এখানে উপলব্ধ +http://contributor-covenant.org/version/2/0/code_of_conduct/ diff --git a/content/bn/docs/_index.md b/content/bn/docs/_index.md new file mode 100644 index 0000000000000..55aeef24feca0 --- /dev/null +++ b/content/bn/docs/_index.md @@ -0,0 +1,6 @@ +--- +linktitle: কুবারনেটিস ডকুমেন্টেশন +title: ডকুমেন্টেশন +sitemap: + priority: 
1.0 +--- diff --git a/content/bn/docs/concepts/_index.md b/content/bn/docs/concepts/_index.md new file mode 100644 index 0000000000000..29cff2cc8e43a --- /dev/null +++ b/content/bn/docs/concepts/_index.md @@ -0,0 +1,14 @@ +--- +title: ধারণা +main_menu: true +content_type: concept +weight: 40 +--- + + + +ধারণা বিভাগটি আপনাকে কুবারনেটিস সিস্টেমের অংশগুলো এবং কুবারনেটিস আপনার {{< glossary_tooltip text="ক্লাস্টারের" term_id="cluster" length="all" >}} প্রতিনিধিত্ব করার জন্য যে অ্যাবস্ট্রাকশনগুলো ব্যবহার করে সেগুলো সম্পর্কে শিখতে সাহায্য করে এবং কুবারনেটিস কীভাবে কাজ করে সে সম্পর্কে আপনাকে গভীরভাবে বুঝতে সাহায্য করে । + + + + diff --git a/content/bn/docs/concepts/architecture/_index.md b/content/bn/docs/concepts/architecture/_index.md new file mode 100644 index 0000000000000..221d7aeacf8aa --- /dev/null +++ b/content/bn/docs/concepts/architecture/_index.md @@ -0,0 +1,8 @@ +--- +title: "ক্লাস্টার আর্কিটেকচার" +weight: 30 +description: > + কুবারনেটিসের পিছনে আর্কিটেকচারের ধারণা । +--- + +{{< figure src="/images/docs/kubernetes-cluster-architecture.svg" alt="কুবারনেটিসের উপাদান" caption="কুবারনেটিস ক্লাস্টার আর্কিটেকচার" class="diagram-large" >}} diff --git a/content/bn/docs/concepts/cluster-administration/_index.md b/content/bn/docs/concepts/cluster-administration/_index.md new file mode 100644 index 0000000000000..0864241069721 --- /dev/null +++ b/content/bn/docs/concepts/cluster-administration/_index.md @@ -0,0 +1,96 @@ +--- +title: ক্লাস্টার অ্যাডমিনিস্ট্রেশন +weight: 100 +content_type: concept +description: > + একটি কুবারনেটিস ক্লাস্টার তৈরি বা পরিচালনার জন্য প্রাসঙ্গিক নিম্ন-স্তরের ডিটেইল। +no_list: true +card: + name: setup + weight: 60 + anchors: + - anchor: "#securing-a-cluster" + title: একটি ক্লাস্টার সুরক্ষিতকরণ +--- + + + +ক্লাস্টার অ্যাডমিনিস্ট্রেশন ওভারভিউ(overview) যে কেউ একটি কুবারনেটিস ক্লাস্টার তৈরি বা পরিচালনা করছেন তাঁর জন্য। +এটি মূল কুবারনেটিসের [ধারণাগুলোর](/bn/docs/concepts/) সাথে কিছু পরিচিতি আশা করে ।। + + + +## একটি ক্লাস্টার পরিকল্পনা + +[সেট আপ](/bn/docs/setup/) এ নির্দেশিকাগুলি দেখুন কুবারনেটিস ক্লাস্টারগুলি কীভাবে পরিকল্পনা, সেট আপ এবং কনফিগার +করতে হয় তার উদাহরণগুলির জন্য৷ এই নিবন্ধে তালিকাভুক্ত সমাধানগুলিকে বলা হয় *distros*। + +{{< note >}} +সমস্ত ডিস্ট্রো(distros) সক্রিয়ভাবে রক্ষণাবেক্ষণ করা হয় না। কুবারনেটিসে সাম্প্রতিক সংস্করণের সাথে পরীক্ষা করা +হয়েছে এমন ডিস্ট্রোগুলি বেছে নিন। +{{< /note >}} + +একটি গাইড নির্বাচন করার আগে, এখানে কিছু বিবেচনা আছে: + +- আপনি কি আপনার কম্পিউটারে কুবারনেটিস ব্যবহার করে দেখতে চান, বা আপনি একটি উচ্চ-উপলব্ধতা(availability) তৈরি করতে চান, + মাল্টি-নোড ক্লাস্টার ? আপনার প্রয়োজনের জন্য সবচেয়ে উপযুক্ত ডিস্ট্রো বেছে নিন। +- আপনি কি ব্যবহার করবেন **হোস্ট করা কুবারনেটিস ক্লাস্টার** , যেমন + [গুগল কুবারনেটিস ইঞ্জিন](https://cloud.google.com/kubernetes-engine/), অথবা **আপনার নিজস্ব ক্লাস্টার হোস্ট করছেন**? +- আপনার ক্লাস্টার কি **অন-প্রিমিসেস**, বা **ক্লাউডে (IaaS)** হবে ? কুবারনেটিস হাইব্রিড ক্লাস্টারগুলিকে + সরাসরি সমর্থন করে না। এর পরিবর্তে, আপনি একাধিক ক্লাস্টার সেট আপ করতে পারেন। +- **যদি আপনি কুবারনেটিস অন-প্রিমিসেস কনফিগার করছেন**, তাহলে বিবেচনা করুন + [নেটওয়ার্কিং মডেল](/bn/docs/concepts/cluster-administration/networking/) সবচেয়ে উপযুক্ত। +- আপনি কি **"বেয়ার মেটাল(bare metal)" হার্ডওয়্যার** অথবা **ভার্চুয়াল মেশিনে (VMs)** চালাবেন? +- আপনি কি **একটি ক্লাস্টার চালাতে চান**, অথবা আপনি কি **কুবারনেটিস প্রজেক্ট কোডের সক্রিয় বিকাশ** করার আশা করছেন? 
+ যদি পরেরটি হয়, একটি সক্রিয়ভাবে-বিকশিত ডিস্ট্রো নির্বাচন করুন। কিছু ডিস্ট্রো শুধুমাত্র বাইনারি রিলিজ ব্যবহার করে,কিন্তু, + পছন্দের একটি বৃহত্তর বৈচিত্র অফার করে। +- একটি ক্লাস্টার চালানোর জন্য প্রয়োজনীয় [উপাদান](/bn/docs/concepts/overview/components/) এর সাথে নিজেকে পরিচিত করুন৷ + +## একটি ক্লাস্টার পরিচালনা করা + +* শিখুন কিভাবে [নোড পরিচালনা করবেন](/bn/docs/concepts/architecture/nodes/)। + * এ সম্পর্কে পড়ুন [cluster autoscaling](/docs/concepts/cluster-administration/cluster-autoscaling/). + +* কিভাবে সেট আপ এবং পরিচালনা করতে হয় [রিসোর্স কোটা](/bn/docs/concepts/policy/resource-quotas/) শেয়ার্ড ক্লাস্টারগুলির জন্য তা শিখুন। + +## একটি ক্লাস্টার সুরক্ষিত করা + +* [জেনারেট সার্টিফিকেট](/bn/docs/tasks/administer-cluster/certificates/) বিভিন্ন টুল চেইন ব্যবহার করে সার্টিফিকেট + তৈরি করার ধাপগুলি বর্ণনা করে। + +* [কুবারনেটিস কন্টেইনার এনভায়রনমেন্ট](/bn/docs/concepts/containers/container-environment/) একটি কুবারনেটিস + নোডে Kubelet পরিচালিত কন্টেইনারগুলির পরিবেশ বর্ণনা করে। + +* [Kubernetes API-তে অ্যাক্সেস কন্ট্রোল](/bn/docs/concepts/security/controlling-access) বর্ণনা করে + কিভাবে কুবারনেটিস তার নিজস্ব API এর জন্য অ্যাক্সেস কন্ট্রোল প্রয়োগ করে। + +* [অথেন্টিকেশন](/bn/docs/reference/access-authn-authz/authentication/) বিভিন্ন অথেন্টিকেশন বিকল্প সহ, + কুবারনেটিসে অথেন্টিকেশনের ব্যাখ্যা দেয়। + +* [অথোরাইজেশন](/bn/docs/reference/access-authn-authz/authorization/) অথেন্টিকেশন থেকে আলাদা, + এবং HTTP কলগুলি কীভাবে পরিচালনা করা হয় তা নিয়ন্ত্রণ করে। + +* [অ্যাডমিশন কন্ট্রোলের ব্যবহার](/bn/docs/reference/access-authn-authz/admission-controllers/) + ব্যাখ্যা করে প্লাগ-ইনগুলি অথেন্টিকেশন এবং অথোরাইজেশনের পরে কুবারনেটস API সার্ভারে + অনুরোধগুলিকে বাধা দেয়। + +* [কুবারনেটিস ক্লাস্টারে Sysctls ব্যবহার ](/bn/docs/tasks/administer-cluster/sysctl-cluster/) + একজন অ্যাডমিনিস্ট্রেটর কাছে বর্ণনা করে যে কীভাবে কার্নেল প্যারামিটার সেট করতে `sysctl` কমান্ড-লাইন টুল ব্যবহার করতে হয় +। + +* [অডিটিং](/bn/docs/tasks/debug/debug-cluster/audit/) বর্ণনা করে কিভাবে কুবারনেটিসের অডিট লগের সাথে + যোগাযোগ করতে হয়। + +### Kubelet সুরক্ষিত করা + +* [কন্ট্রোল প্লেন-নোড কমিউনিকেশন](/bn/docs/concepts/architecture/control-plane-node-communication/) +* [TLS বুটস্ট্র্যাপিং](/bn/docs/reference/access-authn-authz/kubelet-tls-bootstrapping/) +* [Kubelet অথেন্টিকেশন /অথোরাইজেশন](/bn/docs/reference/access-authn-authz/kubelet-authn-authz/) + +## অপশনাল ক্লাস্টার সার্ভিস + +* [DNS ইন্টিগ্রেশন](/bn/docs/concepts/services-networking/dns-pod-service/) বর্ণনা করে কিভাবে সরাসরি কুবারনেটিস পরিষেবাতে + একটি DNS নাম সমাধান করা যায়। + +* [লগিং এবং মনিটরিং ক্লাস্টার অ্যাক্টিভিটি](/bn/docs/concepts/cluster-administration/logging/) + ব্যাখ্যা করে কিভাবে কুবারনেটিসে লগিং কাজ করে এবং কিভাবে এটি বাস্তবায়ন করা যায়। diff --git a/content/bn/docs/concepts/configuration/_index.md b/content/bn/docs/concepts/configuration/_index.md new file mode 100644 index 0000000000000..117efc49f0dd8 --- /dev/null +++ b/content/bn/docs/concepts/configuration/_index.md @@ -0,0 +1,6 @@ +--- +title: "কনফিগারেশন" +weight: 80 +description: > + পডস কনফিগার করার জন্য কুবারনেটিস যে রিসোর্সগুলো প্রদান করে । +--- diff --git a/content/bn/docs/concepts/containers/_index.md b/content/bn/docs/concepts/containers/_index.md new file mode 100644 index 0000000000000..7646cf5a4024f --- /dev/null +++ b/content/bn/docs/concepts/containers/_index.md @@ -0,0 +1,53 @@ +--- +title: কন্টেইনার +weight: 40 +description: রানটাইম নির্ভরতা সহ একটি অ্যাপ্লিকেশন প্যাকেজ করার প্রযুক্তি। +content_type: concept +card: + name: concepts + weight: 50 +--- + + + +আপনার চালানো প্রতিটি কন্টেইনার 
পুনরাবৃত্তিযোগ্য; +নির্ভরতা অন্তর্ভুক্ত করা থেকে প্রমিতকরণের (standardization) অর্থ হলো আপনি যেখানেই এটি চালান +সেখানই আপনি একই আচরণ পাবেন। + +কন্টেইনার অন্তর্নিহিত হোস্ট পরিকাঠামো থেকে অ্যাপ্লিকেশনগুলোকে দ্বিগুণ করে৷ +এটি বিভিন্ন ক্লাউড বা ওএস পরিবেশে ডিপ্লয়মেন্টকে সহজ করে তোলে। + +একটি কুবারনেটিস ক্লাস্টারের প্রতিটি {{< glossary_tooltip text="নোড" term_id="node" >}} , +সেই নোডের জন্য নির্ধারিত [পড](/bn/docs/concepts/workloads/pods/) +গঠনকারী কন্টেইনারগুলো চালায়। +একটি পডের কন্টেইনারগুলো একই নোডে চালানোর জন্য সহ-অবস্থিত (co-located) এবং সহ-নির্ধারিত (co-scheduled)। + + + + +## কন্টেইনার ছবি +একটি [কন্টেইনার ছবি](/bn/docs/concepts/containers/images/) হলো একটি রেডি-টু-রান সফ্টওয়্যার প্যাকেজ +যাতে একটি অ্যাপ্লিকেশন চালানোর জন্য প্রয়োজনীয় সমস্ত কিছু থাকে: +কোড এবং যেকোন রানটাইম, অ্যাপ্লিকেশন এবং সিস্টেম লাইব্রেরি +এবং যেকোনো প্রয়োজনীয় সেটিংসের জন্য ডিফল্ট মান। + +কন্টেইনারগুলো স্টেটলেস এবং [অপরিবর্তনীয়](https://glossary.cncf.io/bn/immutable-infrastructure/) +হওয়ার উদ্দেশ্যে করা হয়েছে: +আপনার এমন একটি কন্টেইনারের কোড পরিবর্তন করা উচিত নয় +যা ইতিমধ্যেই চলছে ৷ আপনার যদি একটি কন্টেইনারাইজড অ্যাপ্লিকেশন থাকে +এবং পরিবর্তন করতে চান, সঠিক প্রক্রিয়াটি হলো একটি নতুন ছবি তৈরি করা +যাতে পরিবর্তনটি অন্তর্ভুক্ত থাকে, +তারপর আপডেট করা ছবি থেকে শুরু করতে কন্টেইনারটি পুনরায় তৈরি করুন । + +## কন্টেইনার রানটাইম + +{{< glossary_definition term_id="container-runtime" length="all" >}} + +সাধারণত, আপনি আপনার ক্লাস্টারকে একটি পডের জন্য ডিফল্ট কন্টেইনার রানটাইম বাছাই করার +অনুমতি দিতে পারেন। আপনি যদি আপনার ক্লাস্টারে একাধিক কন্টেইনার রানটাইম ব্যবহার করতে চান, +আপনি একটি পডের জন্য [রানটাইম ক্লাস](/bn/docs/concepts/containers/runtime-class/) +নির্দিষ্ট করতে পারেন যাতে কুবারনেটিস একটি নির্দিষ্ট কন্টেইনার রানটাইম ব্যবহার করে +সেই কন্টেইনারগুলো চালায়। + +আপনি একই কন্টেইনার রানটাইম সহ বিভিন্ন পড চালানোর জন্য রানটাইম ক্লাস ব্যবহার করতে পারেন +কিন্তু ভিন্ন সেটিংসের সাথে। diff --git a/content/bn/docs/concepts/extend-kubernetes/_index.md b/content/bn/docs/concepts/extend-kubernetes/_index.md new file mode 100644 index 0000000000000..358d419679937 --- /dev/null +++ b/content/bn/docs/concepts/extend-kubernetes/_index.md @@ -0,0 +1,332 @@ +--- +title: কুবারনেটিস প্রসারিত করা +weight: 999 # this section should come last +description: আপনার কুবারনেটিস ক্লাস্টারের আচরণ পরিবর্তন করার বিভিন্ন উপায়। +feature: + title: সম্প্রসারণযোগ্যতার জন্য ডিজাইন করা হয়েছে + description: > + আপস্ট্রিম সোর্স কোড পরিবর্তন না করে আপনার কুবারনেটিস ক্লাস্টারে ফিচার যোগ করুন। +content_type: concept +no_list: true +--- + + + +কুবারনেটিস খুবই কনফিগারযোগ্য এবং সম্প্রসারণযোগ্য। ফলস্বরূপ, কুবারনেটিস প্রজেক্ট কোডে ফর্ক(fork) +বা প্যাচ জমা দেওয়ার খুব কমই প্রয়োজন হয়। + +এই নির্দেশিকাটি একটি কুবারনেটিস ক্লাস্টার কাস্টমাইজ করার উপায়গুলো বর্ণনা করে ৷ এই নির্দেশিকাটি +{{< glossary_tooltip text="ক্লাস্টার অপারেটরদের" term_id="cluster-operator" >}} লক্ষ্য করে বানানো যারা তাদের কুবারনেটিস ক্লাস্টারকে +তাদের কাজের পরিবেশের প্রয়োজনের সাথে কীভাবে মানিয়ে নিতে হয় তা বুঝতে চায়। +ডেভেলপাররা যারা সম্ভাব্য {{< glossary_tooltip text="প্ল্যাটফর্ম ডেভেলপকারী" term_id="platform-developer" >}} +বা কুবারনেটিস প্রজেক্ট {{< glossary_tooltip text="কন্ট্রিবিউটরা" term_id="contributor" >}} , +এক্সটেনশন পয়েন্ট (extension points) কি এবং প্যাটার্ন বিদ্যমান এর পরিচিতি হিসাবে এবং +তাদের ট্রেড-অফ আর সীমাবদ্ধতা জানার জন্য এই নির্দেশিকাটিকে দরকারী হিসেবে পাবে। + +কাস্টমাইজেশন পন্থাগুলিকে বিস্তৃতভাবে [কনফিগারেশনে](#কনফিগারেশন) বিভক্ত করা যেতে পারে, +যার মধ্যে শুধুমাত্র কমান্ড লাইন আর্গুমেন্ট, লোকাল কনফিগারেশন ফাইল বা API রিসোর্স পরিবর্তন করা জড়িত; +এবং 
[এক্সটেনশন](#এক্সটেনশন), যার মধ্যে অতিরিক্ত প্রোগ্রাম চালানো, অতিরিক্ত নেটওয়ার্ক সার্ভিস বা উভয়ই জড়িত। +এই ডকুমেন্টটি মূলত _এক্সটেনশন_ সম্পর্কে। + + + +## কনফিগারেশন + +*কনফিগারেশন ফাইল* এবং *কমান্ড আর্গুমেন্ট* অনলাইন ডকুমেন্টেশনের [রেফারেন্স](/bn/docs/reference/) বিভাগে ডকুমেন্টেড করা হয়েছে, +প্রতিটি বাইনারির জন্য একটি পৃষ্ঠা রয়েছে : + +* [`kube-apiserver`](/bn/docs/reference/command-line-tools-reference/kube-apiserver/) +* [`kube-controller-manager`](/bn/docs/reference/command-line-tools-reference/kube-controller-manager/) +* [`kube-scheduler`](/bn/docs/reference/command-line-tools-reference/kube-scheduler/) +* [`kubelet`](/bn/docs/reference/command-line-tools-reference/kubelet/) +* [`kube-proxy`](/bn/docs/reference/command-line-tools-reference/kube-proxy/) + +কমান্ড আর্গুমেন্ট এবং কনফিগারেশন ফাইল সবসময় একটি হোস্ট করা কুবারনেটিস সার্ভিস বা পরিচালিত ইনস্টলেশনের +সাথে একটি ডিস্ট্রিবিউশন এ পরিবর্তনযোগ্য নাও হতে পারে। যখন তারা পরিবর্তনযোগ্য হয়, তারা সাধারণত ক্লাস্টার +অপারেটর দ্বারা পরিবর্তনযোগ্য হয়। এছাড়াও, এগুলো ভবিষ্যতের কুবারনেটিস সংস্করণে পরিবর্তন হতে পারে, এবং সেগুলো +সেট করার জন্য রিস্টারটিং প্রক্রিয়া প্রয়োজন হতে পারে। এই কারণে, সেগুলি শুধুমাত্র তখনই ব্যবহার করা উচিত +যখন অন্য কোন বিকল্প থাকে না। + +বিল্ট-ইন *পলিসি API* গুলো, যেমন [ResourceQuota](/bn/docs/concepts/policy/resource-quotas/), +[NetworkPolicy](/bn/docs/concepts/services-networking/network-policies/) এবং Role-based Access Control +([RBAC](/bn/docs/reference/access-authn-authz/rbac/)), হলো বিল্ট-ইন কুবারনেটিস API যা ঘোষণামূলকভাবে কনফিগার করা পলিসি সেটিংস প্রদান করে। +API গুলো সাধারণত হোস্ট করা কুবারনেটিস সার্ভিস এবং পরিচালিত কুবারনেটিস ইনস্টলেশনগুলোর সাথে ব্যবহারযোগ্য। +বিল্ট-ইন পলিসি API গুলো অন্যান্য কুবারনেটিস রিসোর্স যেমন পডের মতো একই নিয়ম অনুসরণ করে। +আপনি যখন [স্থিতিশীল](/bn/docs/reference/using-api/#api-versioning) একটি পলিসি API ব্যবহার করেন, +তখন আপনি অন্যান্য কুবারনেটিস API-এর মতো একটি [সংজ্ঞায়িত সাপোর্ট পলিসি](/bn/docs/reference/using-api/deprecation-policy/) থেকে উপকৃত হন। +এই কারণে, পলিসি API গুলো *কনফিগারেশন ফাইল* এবং *কমান্ড আর্গুমেন্ট* এর বদলে যেখানে উপযুক্ত সেখানে সুপারিশ করা হয় । + +## এক্সটেনশন + +এক্সটেনশন হলো সফ্টওয়্যার উপাদান যা কুবারনেটিসের সাথে প্রসারিত এবং গভীরভাবে একত্রিত হয়। +তারা এটিকে নতুন টাইপের এবং নতুন ধরণের হার্ডওয়্যার সাপোর্ট করার জন্য মানিয়ে নেয়। + +অনেক ক্লাস্টার অ্যাডমিনিস্ট্রেটর কুবারনেটিসের হোস্টেড বা ডিস্ট্রিবিউশন উদাহরণ ব্যবহার করে। +এই ক্লাস্টারগুলো পূর্বে ইনস্টল করা এক্সটেনশনগুলোর সাথে আসে। ফলস্বরূপ, বেশিরভাগ কুবারনেটিস +ব্যবহারকারীদের এক্সটেনশন ইনস্টল করার প্রয়োজন হবে না এবং এমনকি কম ব্যবহারকারীদের নতুন বানাতে হবে। + +### এক্সটেনশন প্যাটার্ন + +কুবারনেটিস ডিজাইন করা হয়েছে ক্লায়েন্ট প্রোগ্রাম লিখার মাধ্যমে স্বয়ংক্রিয় হতে । +কুবারনেটিস API-তে পড়া এবং/অথবা লেখা যে কোনো প্রোগ্রাম দরকারী অটোমেশন প্রদান করতে পারে। +*অটোমেশন* ক্লাস্টারে চলতে পারে বা এটি বন্ধ করতে পারে। +এই ডকুমেন্টের নির্দেশিকা অনুসরণ করে আপনি অত্যন্ত উপলব্ধ এবং শক্তিশালী অটোমেশন লিখতে পারেন। +অটোমেশন সাধারণত হোস্ট করা ক্লাস্টার এবং পরিচালিত ইনস্টলেশন সহ যেকোন কুবারনেটিস +ক্লাস্টারের সাথে কাজ করে। + +ক্লায়েন্ট প্রোগ্রাম লেখার জন্য একটি নির্দিষ্ট প্যাটার্ন রয়েছে যা কুবারনেটিসের +সাথে ভালভাবে কাজ করে যাকে {{< glossary_tooltip term_id="controller" text="কন্ট্রোলার" >}} +প্যাটার্ন বলা হয়। কন্ট্রোলাররা সাধারণত একটি অবজেক্টের `.spec` পড়ে, সম্ভবত জিনিসগুলি করে এবং +তারপর অবজেক্টের `.status` আপডেট করে ৷ + +একটি কন্ট্রোলার হল কুবারনেটিস API-এর ক্লায়েন্ট। যখন কুবারনেটিস ক্লায়েন্ট হয় এবং +একটি রিমোট সার্ভিসে কল করে, কুবারনেটিস এটিকে একটি *webhook* বলে। রিমোট সার্ভিসকে +*webhook backend* বলা হয়। 
কাস্টম কন্ট্রোলারের মতো, webhook গুলো ব্যর্থতার একটি পয়েন্ট যোগ করে। + +{{< note >}} +কুবারনেটিসের বাইরে, “webhook” শব্দটি সাধারণত অ্যাসিঙ্ক্রোনাস(asynchronous) বিজ্ঞপ্তিগুলির জন্য একটি প্রক্রিয়াকে বোঝায়, +যেখানে webhook কল অন্য সিস্টেম বা উপাদানের জন্য একমুখী বিজ্ঞপ্তি হিসাবে কাজ করে। +কুবারনেটিস ইকোসিস্টেমে, এমনকি সিঙ্ক্রোনাস(synchronous) HTTP কলআউটগুলোকে প্রায়ই +“webhooks” হিসাবে বর্ণনা করা হয়। +{{< /note >}} + +webhook মডেলে, কুবারনেটিস একটি রিমোট সার্ভিসে একটি নেটওয়ার্ক অনুরোধ করে। +বিকল্প *binary Plugin* মডেলের সাথে, কুবারনেটস একটি বাইনারি (প্রোগ্রাম) চালায়। +বাইনারি প্লাগইনগুলো kubelet দ্বারা ব্যবহৃত হয় (উদাহরণস্বরূপ, [CSI storage plugins](https://kubernetes-csi.github.io/docs/) এবং +[CNI network plugins](/bn/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/)), এবং +kubectl দ্বারা ([প্লাগইনগুলোর সাথে প্রসারিত kubectl](/bn/docs/tasks/extend-kubectl/kubectl-plugins/) দেখুন)। + +### এক্সটেনশন পয়েন্ট + +এই ডায়াগ্রামটি একটি কুবারনেটিস ক্লাস্টারের এক্সটেনশন পয়েন্ট এবং এটি +অ্যাক্সেসকারী ক্লায়েন্টদের দেখায়। + + + +{{< figure src="/bn/docs/concepts/extend-kubernetes/extension-points.png" + alt="কুবারনেটিসের জন্য সাতটি সংখ্যাযুক্ত এক্সটেনশন পয়েন্টের প্রতীকী উপস্থাপনা" + class="diagram-large" caption="কুবারনেটিস এক্সটেনশন পয়েন্ট" >}} + +#### চিত্রের চাবিকাঠি + +1. ব্যবহারকারীরা প্রায়ই `kubectl` ব্যবহার করে কুবারনেটিস API এর সাথে যোগাযোগ করে। [প্লাগইন](#ক্লায়েন্ট-এক্সটেনশন) + ক্লায়েন্টদের আচরণ কাস্টমাইজ করে। জেনেরিক এক্সটেনশন রয়েছে যা বিভিন্ন ক্লায়েন্টের জন্য প্রযোজ্য হতে পারে, সেইসাথে`kubectl` + প্রসারিত করার নির্দিষ্ট উপায়ও । + +1. API সার্ভার সমস্ত অনুরোধ পরিচালনা করে। API সার্ভারে বিভিন্ন ধরণের এক্সটেনশন পয়েন্টগুলো তাদের কনটেন্টের উপর + ভিত্তি করে অনুরোধগুলো অথেন্টিকেটিং(authenticating), বা তাদের ব্লক করার অনুমতি দেয়, বিষয়বস্তু পরিবর্তন করে এবং + মুছে ফেলার ব্যবস্থা করে। এগুলো [API অ্যাক্সেস এক্সটেনশন](#API-অ্যাক্সেস-এক্সটেনশন) বিভাগে বর্ণিত হয়েছে। + +1. এপিআই সার্ভার বিভিন্ন ধরণের *রিসোর্স* সরবরাহ করে। *বিল্ট-ইন রিসোর্স ধরনের*, যেমন + `pods`, কুবারনেটিস প্রজেক্ট দ্বারা সংজ্ঞায়িত করা হয় এবং পরিবর্তন করা যায় না। + কুবারনেটিস API প্রসারিত করার বিষয়ে জানতে [API এক্সটেনশন](#API-এক্সটেনশন) পড়ুন। + +1. কুবারনেটিস শিডিউলার কোন নোডগুলোতে পড স্থাপন করবে তা + [নির্ধারণ](/bn/docs/concepts/scheduling-eviction/assign-pod-node/) করে। শিডিউলিং প্রসারিত করার + বিভিন্ন উপায় রয়েছে, যা [শিডিউলিং এক্সটেনশন](#শিডিউলিং-এক্সটেনশন) বিভাগে বর্ণিত করা হয়েছে। + +1. কুবারনেটিসের বেশিরভাগ আচরণ {{< glossary_tooltip term_id="controller" text="কন্ট্রোলার" >}} + নামক প্রোগ্রাম দ্বারা বাস্তবায়িত হয়, যেগুলো API সার্ভারের ক্লায়েন্ট। + কন্ট্রোলারগুলো প্রায়ই কাস্টম রিসোর্সগুলোর সাথে একত্রে ব্যবহৃত হয়। + আরও জানতে [অটোমেশনের সাথে নতুন API-এর সমন্বয়](#অটোমেশনের-সাথে-নতুন-API-এর-সমন্বয়) এবং + [বিল্ট-ইন রিসোর্স পরিবর্তন](#বিল্ট-ইন-রিসোর্স-পরিবর্তন) পড়ুন। + +1. kubelet সার্ভারে (নোড) চলে এবং ক্লাস্টার নেটওয়ার্কে তাদের নিজস্ব আইপি সহ ভার্চুয়াল সার্ভারের মতো পডগুলোকে + দেখাতে সহায়তা করে। [নেটওয়ার্ক প্লাগইনগুলো](#নেটওয়ার্ক-প্লাগইন) পড নেটওয়ার্কিং এর বিভিন্ন বাস্তবায়নের + অনুমতি দেয়। + +1. 
আপনি কাস্টম হার্ডওয়্যার বা অন্যান্য বিশেষ নোড-লোকাল সুবিধাগুলো একীভূত করতে [ডিভাইস প্লাগইনগুলো](#ডিভাইস-প্লাগইন) + ব্যবহার করতে পারেন এবং আপনার ক্লাস্টারে চলমান পডগুলোতে এগুলো উপলব্ধ করতে পারেন৷ + kubelet ডিভাইস প্লাগইনগুলোর সাথে কাজ করার জন্য সাপোর্ট অন্তর্ভুক্ত করে। + + kubelet পড এবং তাদের কন্টেইনারের জন্য + {{< glossary_tooltip text="ভলিউম" term_id="volume" >}} মাউন্ট এবং আনমাউন্ট করে। + আপনি নতুন ধরনের স্টোরেজ এবং অন্যান্য ভলিউম টাইপের জন্য সাপোর্ট যোগ করতে + [স্টোরেজ প্লাগইন](#স্টোরেজ-প্লাগইন) ব্যবহার করতে পারেন। + + +#### এক্সটেনশন পয়েন্ট চয়েস ফ্লোচার্ট {#এক্সটেনশন-ফ্লোচার্ট} + +আপনি কোথা থেকে শুরু করবেন তা নিশ্চিত না হলে, এই ফ্লোচার্টটি সাহায্য করতে পারবে৷ +মনে রাখবেন কিছু সমাধানে বিভিন্ন ধরনের এক্সটেনশন জড়িত থাকতে পারে। + + + +{{< figure src="/bn/docs/concepts/extend-kubernetes/flowchart.svg" + alt="প্রয়োগকারীদের জন্য ব্যবহারের ক্ষেত্র এবং নির্দেশিকা সম্পর্কে প্রশ্ন সহ ফ্লোচার্ট। সবুজ বৃত্ত হ্যাঁ নির্দেশ করে; লাল বৃত্ত না নির্দেশ করে।" + class="diagram-large" caption="একটি এক্সটেনশন পদ্ধতি নির্বাচন করতে ফ্লোচার্ট গাইড" >}} + +--- + +## ক্লায়েন্ট এক্সটেনশন + +kubectl-এর জন্য প্লাগইন হলো পৃথক বাইনারি যা নির্দিষ্ট সাবকমান্ডের আচরণ যোগ বা প্রতিস্থাপন করে। +`kubectl` টুলটি [ক্রেডেনশিয়াল(credential) প্লাগইনগুলোর](/bn/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins) সাথেও একীভূত করতে পারে। +এই এক্সটেনশনগুলো শুধুমাত্র একটি একক ব্যবহারকারীর লোকাল পরিবেশকে প্রভাবিত করে, এবং তাই সাইট-ব্যাপী পলিসিগুলো প্রয়োগ করতে পারে না। + +আপনি যদি `kubectl` টুল প্রসারিত করতে চান, তাহলে [প্লাগইন সহ kubectl প্রসারিত করা](/bn/docs/tasks/extend-kubectl/kubectl-plugins/) পড়ুন। + +## API এক্সটেনশন + +### কাস্টম রিসোর্স সংজ্ঞা + +আপনি যদি নতুন কন্ট্রোলার, অ্যাপ্লিকেশন কনফিগারেশন অবজেক্ট বা অন্যান্য ডিক্লারেটিভ +API সংজ্ঞায়িত করতে চান এবং কুবারনেটিস টুলস যেমন `kubectl` ব্যবহার করে সেগুলো +পরিচালনা করতে চান তাহলে কুবারনেটিসে একটি কাস্টম রিসোর্স যোগ করার কথা বিবেচনা করুন। + +কাস্টম রিসোর্স সম্পর্কে আরও জানতে, +[কাস্টম রিসোর্স](/bn/docs/concepts/extend-kubernetes/api-extension/custom-resources/) কনসেপ্ট গাইড দেখুন। + +### API এগ্রিগেশন লেয়ার(aggregation layer) + +আপনি কুবারনেটিস [API এগ্রিগেশন লেয়ার](/bn/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) ব্যবহার করতে পারেন +কুবারনেটিস API-কে [মেট্রিক্সের](/bn/docs/tasks/debug/debug-cluster/resource-metrics-pipeline/) মতো অতিরিক্ত সার্ভিসের সাথে একীভূত করতে। + +### অটোমেশনের সাথে নতুন API-এর সমন্বয় + +একটি কাস্টম রিসোর্স API এবং একটি কন্ট্রোল লুপের সংমিশ্রণকে +{{< glossary_tooltip term_id="controller" text="কন্ট্রোলার" >}} প্যাটার্ন বলা হয়। যদি আপনার +কন্ট্রোলার একটি কাঙ্ক্ষিত অবস্থার উপর ভিত্তি করে অবকাঠামো স্থাপনকারী মানব অপারেটরের স্থান নেয়, +তাহলে কন্ট্রোলারও {{< glossary_tooltip text="অপারেটর প্যাটার্ন" term_id="operator-pattern" >}} অনুসরণ করতে পারে। +অপারেটর প্যাটার্ন নির্দিষ্ট অ্যাপ্লিকেশন পরিচালনা করতে ব্যবহৃত হয়; সাধারণত, এগুলো হলো এমন অ্যাপ্লিকেশন +যা অবস্থা বজায় রাখে এবং সেগুলোকে কীভাবে পরিচালনা করা হয় তার যত্নের প্রয়োজন হয়৷ + +আপনি আপনার নিজস্ব কাস্টম API এবং কন্ট্রোল লুপগুলোও তৈরি করতে পারেন যা অন্যান্য রিসোর্সগুলো পরিচালনা করতে পারে, +যেমন স্টোরেজ, বা পলিসিগুলো সংজ্ঞায়িত করতে (যেমন একটি অ্যাক্সেস কন্ট্রোল রেস্ট্রিকশন)। + +### বিল্ট-ইন রিসোর্স পরিবর্তন + +আপনি যখন কাস্টম রিসোর্স যোগ করে কুবারনেটিস API প্রসারিত করেন, তখন যোগ করা রিসোর্স সবসময় +একটি নতুন API গ্রুপে পড়ে। আপনি বিদ্যমান API গ্রুপগুলোকে প্রতিস্থাপন বা পরিবর্তন করতে পারবেন না ৷ +একটি API যোগ করলে আপনাকে বিদ্যমান API-এর আচরণকে সরাসরি প্রভাবিত করতে দেওয়া না (যেমন পড), +যেখানে _API অ্যাক্সেস এক্সটেনশানগুলো_ করে। + +## API 
অ্যাক্সেস এক্সটেনশন + +যখন একটি অনুরোধ কুবারনেটিস API সার্ভারে পৌঁছায়, এটি প্রথমে _অথেন্টিকেটেড(authenticated)_ করা হয়, +তারপর _অনুমোদিত(authorized)_ হয় এবং তারপরে আসে বিভিন্ন ধরণের _অ্যাডমিশন কন্ট্রোলের(admission control)_ +বিষয় (কিছু অনুরোধ প্রকৃতপক্ষে অথেন্টিকেটেড(authenticated) নয়, এবং স্পেশাল ট্রিটমেন্ট পান)। +এই প্রবাহ সম্পর্কে আরও জানতে +[কুবারনেটিস API-এ অ্যাক্সেস কন্ট্রোল করা](/bn/docs/concepts/security/controlling-access/) দেখুন। + +কুবারনেটিস অথেন্টিকেশন/অথোরাইজেশন প্রবাহের প্রতিটি ধাপ এক্সটেনশন পয়েন্ট অফার করে। + +### অথেন্টিকেশন(Authentication) + +ক্লায়েন্ট অনুরোধ করার জন্য একটি ব্যবহারকারীর নামের সমস্ত অনুরোধে +[অথেন্টিকেশন](/bn/docs/reference/access-authn-authz/authentication/) শিরোনাম বা সার্টিফিকেট যুক্ত করে। + +কুবারনেটিস এর বেশ কয়েকটি বিল্ট-ইন অথেন্টিকেশন পদ্ধতি রয়েছে যা এটি সাপোর্ট করে। +এটি একটি অথেন্টিকেটিং প্রক্সির পিছনেও বসতে পারে এবং এটি একটি `অথোরাইজেশন(Authorization)` টোকেন পাঠাতে পারে যাচাইয়ের জন্য: +শিরোনাম থেকে একটি রিমোট সার্ভিসে (একটি [অথেন্টিকেশন webhook](/bn/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)) +যদি সেগুলো আপনার প্রয়োজনগুলো পূরণ না করে৷ + +### অথোরাইজেশন(Authorization) + +[অথোরাইজেশন](/bn/docs/reference/access-authn-authz/authorization/) নির্ধারণ করে যে নির্দিষ্ট ব্যবহারকারীরা API রিসোর্সগুলোতে +পড়তে, লিখতে এবং অন্যান্য ক্রিয়াকলাপ করতে পারে কিনা। এটি সম্পূর্ণ রিসোর্সের লেভেলে কাজ করে -- এটি ইচ্ছামত +অবজেক্টের ফিল্ডের উপর ভিত্তি করে বৈষম্য করে না। + +যদি বিল্ট-ইন অথোরাইজেশনের উপায়গুলো আপনার চাহিদা পূরণ না করে, +তাহলে একটি [অথোরাইজেশন webhook](/bn/docs/reference/access-authn-authz/webhook/) +কাস্টম কোডে কল করার অনুমতি দেয় যা একটি অথোরাইজেশনের সিদ্ধান্ত নেয়। + +### ডাইনামিক অ্যাডমিশন কন্ট্রোল + +একটি অনুরোধ অনুমোদিত হওয়ার পরে, যদি এটি একটি লিখিত অপারেশন হয়, তবে এটি +[অ্যাডমিশন কন্ট্রোলের](/bn/docs/reference/access-authn-authz/admission-controllers/) পদক্ষেপগুলোর +মধ্য দিয়ে যায়। বিল্ট-ইন পদক্ষেপগুলো ছাড়াও, বেশ কয়েকটি এক্সটেনশন রয়েছে: + +* [ইমেজ পলিসি webhook](/bn/docs/reference/access-authn-authz/admission-controllers/#imagepolicywebhook) + কন্টেইনারে কোন ইমেজ চালানো যাবে তা সীমাবদ্ধ করে। +* ইচ্ছামত অ্যাডমিশন কন্ট্রোলের সিদ্ধান্ত নিতে, একটি সাধারণ + [অ্যাডমিশন webhook](/bn/docs/reference/access-authn-authz/extensible-admission-controllers/#admission-webhooks) + ব্যবহার করা যেতে পারে। অ্যাডমিশন webhook সৃষ্টি বা আপডেট প্রত্যাখ্যান করতে পারে। + কিছু অ্যাডমিশন webhook ইনকামিং রিকোয়েস্ট ডেটা পরিবর্তন করে কুবারনেটিস দ্বারা আরও পরিচালনা করার আগে। + +## অবকাঠামো এক্সটেনশন + +### ডিভাইস প্লাগইন + +_ডিভাইস প্লাগইনগুলো_ একটি [ডিভাইস প্লাগইনের](/bn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) +মাধ্যমে একটি নোডকে নতুন নোড রিসোর্সগুলো (CPU এবং মেমরির মতো বিল্টইনগুলো ছাড়াও) +আবিষ্কার করতে দেয় । + +### স্টোরেজ প্লাগইন + +{{< glossary_tooltip text="কন্টেইনার স্টোরেজ ইন্টারফেস(Container Storage Interface)" term_id="csi" >}} (CSI) +প্লাগইনগুলো নতুন ধরনের ভলিউমের জন্য সাপোর্ট সহ কুবারনেটিস প্রসারিত করার একটি উপায় প্রদান করে। +ভলিউমগুলো টেকসই এক্সটার্নাল স্টোরেজ দ্বারা সাহায্যপ্রাপ্ত করা যেতে পারে, বা ক্ষণস্থায়ী স্টোরেজ(ephemeral storage) প্রদান করতে পারে, +অথবা তারা একটি ফাইল সিস্টেম প্যারাডাইম ব্যবহার করে তথ্যের জন্য একটি রিড-অনলি ইন্টারফেস দিতে পারে । + +কুবারনেটিস এছাড়াও [FlexVolume](/bn/docs/concepts/storage/volumes/#flexvolume) প্লাগইনগুলোর জন্য সাপোর্ট অন্তর্ভুক্ত করে, +যা কুবারনেটিস v1.23 (CSI-এর পক্ষে) থেকে অবমূল্যায়িত(deprecated) করা হয়েছে । + +FlexVolume প্লাগইনগুলো ব্যবহারকারীদের ভলিউম প্রকারগুলো মাউন্ট করার অনুমতি দেয় যা সাধারণত কুবারনেটিস দ্বারা সাপোর্টেড নয়। +আপনি 
যখন FlexVolume স্টোরেজের উপর নির্ভর করে এমন একটি পড চালান, তখন kubelet ভলিউম মাউন্ট করার জন্য একটি বাইনারি প্লাগইন কল করে। +আর্কাইভ করা [FlexVolume](https://git.k8s.io/design-proposals-archive/storage/flexvolume-deployment.md) +ডিজাইন প্রস্তাবে এই পদ্ধতির আরও বিশদ বিবরণ রয়েছে। + +[The Kubernetes Volume Plugin FAQ for Storage Vendors](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md#kubernetes-volume-plugin-faq-for-storage-vendors) তে +স্টোরেজ প্লাগইনগুলোর সাধারণ তথ্য অন্তর্ভুক্ত রয়েছে । + +### নেটওয়ার্ক প্লাগইন + +আপনার কুবারনেটিস ক্লাস্টারের একটি _নেটওয়ার্ক প্লাগইন_ প্রয়োজন যাতে একটি কার্যকরী পড নেটওয়ার্ক থাকে +এবং কুবারনেটিস নেটওয়ার্ক মডেলের অন্যান্য দিকগুলোকে সাপোর্ট করতে পারে৷ + +[নেটওয়ার্ক প্লাগইনগুলো](/bn/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) +কুবারনেটিসকে বিভিন্ন নেটওয়ার্কিং টপোলজি এবং প্রযুক্তির সাথে কাজ করার অনুমতি দেয়। + +### Kubelet ইমেজ ক্রেডেনশিয়াল প্রোভাইডার প্লাগইন (Kubelet image credential provider plugins) + +{{< feature-state for_k8s_version="v1.26" state="stable" >}} +Kubelet ইমেজ ক্রেডেনশিয়াল প্রোভাইডাররা হলো Kubelet এর জন্য প্লাগইন যা ডাইনামিকভাবে ইমেজ +রেজিস্ট্রি ক্রেডেনশিয়ালগুলো পুনরুদ্ধার করতে পারে। কন্টেইনার ইমেজ রেজিস্ট্রি থেকে ইমেজ তোলার সময় +ক্রেডেনশিয়ালগুলো ব্যবহার করা হয় যা কনফিগারেশনের সাথে মেলে। + +প্লাগইনগুলো বহিরাগত সার্ভিসগুলোর সাথে যোগাযোগ করতে পারে বা ক্রেডেনশিয়ালগুলো পেতে লোকাল ফাইলগুলো ব্যবহার করতে পারে ৷ +এইভাবে, kubelet এর প্রতিটি রেজিস্ট্রির জন্য স্ট্যাটিক ক্রেডেনশিয়ালের প্রয়োজন নেই এবং বিভিন্ন অথেন্টিকেশন পদ্ধতি এবং +প্রোটোকল সাপোর্ট করতে পারে । + +প্লাগইন কনফিগারেশনের বিশদ বিবরণের জন্য, +[একটি Kubelet ইমেজ ক্রেডেনশিয়াল প্রোভাইডার কনফিগার ](/bn/docs/tasks/administer-cluster/kubelet-credential-provider/) দেখুন। + +## শিডিউলিং এক্সটেনশন + +শিডিউলার হলো একটি বিশেষ ধরনের কন্ট্রোলার যা পডগুলো দেখে এবং +নোডগুলোতে পড বরাদ্দ করে। +অন্যান্য কুবারনেটিস উপাদানগুলো ব্যবহার করা চালিয়ে যাওয়ার সময় ডিফল্ট শিডিউলার +সম্পূর্ণরূপে প্রতিস্থাপন করা যেতে পারে, অথবা +[একাধিক শিডিউলার](/bn/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) একই সময়ে চলতে পারে। + +এটি একটি উল্লেখযোগ্য উদ্যোগ, এবং প্রায় সমস্ত কুবারনেটিস ব্যবহারকারীরা দেখতে পান +যে তাদের শিডিউলার পরিবর্তন করার প্রয়োজন নেই। + +আপনি কোন [শিডিউলার প্লাগইনগুলো](/bn/docs/reference/scheduling/config/#scheduling-plugins) সক্রিয় তা নিয়ন্ত্রণ করতে পারেন বা +বিভিন্ন নামযুক্ত [শিডিউলার প্রোফাইলের](/bn/docs/reference/scheduling/config/#multiple-profiles) সাথে প্লাগইনগুলোর সেটগুলোকে সংযুক্ত করতে পারেন ৷ +আপনি আপনার নিজস্ব প্লাগইনও লিখতে পারেন যা এক বা একাধিক kube-scheduler এর +[এক্সটেনশন পয়েন্টের](/bn/docs/concepts/scheduling-eviction/scheduling-framework/#extension-points) সাথে একত্রিত হয় । + +অবশেষে, বিল্ট-ইন `kube-scheduler` উপাদানটি একটি +[webhook](https://git.k8s.io/design-proposals-archive/scheduling/scheduler_extender.md)কে +সাপোর্ট করে যা একটি রিমোট HTTP ব্যাকএন্ড (শিডিউলার এক্সটেনশন) ফিল্টার এবং/অথবা +নোডগুলোকে অগ্রাধিকার দেওয়ার অনুমতি দেয় যা kube-scheduler একটি পডের জন্য বেছে নেয়। + +{{< note >}} +আপনি শুধুমাত্র একটি শিডিউলার এক্সটেন্ডার webhook এর মাধ্যমে নোড ফিল্টারিং +এবং নোড অগ্রাধিকারকে প্রভাবিত করতে পারেন; +webhook ইন্টিগ্রেশনের মাধ্যমে অন্যান্য এক্সটেনশন পয়েন্ট পাওয়া যায় না। +{{< /note >}} + +## {{% heading "whatsnext" %}} + +* অবকাঠামো এক্সটেনশন সম্পর্কে আরও জানুন + * [ডিভাইস প্লাগইন](/bn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) + * [নেটওয়ার্ক প্লাগইন](/bn/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) + * CSI [স্টোরেজ 
প্লাগইন](https://kubernetes-csi.github.io/docs/) +* [kubectl প্লাগইন](/bn/docs/tasks/extend-kubectl/kubectl-plugins/) সম্পর্কে জানুন +* [কাস্টম রিসোর্স](/bn/docs/concepts/extend-kubernetes/api-extension/custom-resources/) সম্পর্কে আরও জানুন +* [এক্সটেনশন API সার্ভার](/bn/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) সম্পর্কে আরও জানুন +* [ডাইনামিক অ্যাডমিশন কন্ট্রোল](/bn/docs/reference/access-authn-authz/extensible-admission-controllers/) সম্পর্কে জানুন +* [অপারেটর প্যাটার্ন](/bn/docs/concepts/extend-kubernetes/operator/) সম্পর্কে জানুন diff --git a/content/bn/docs/concepts/extend-kubernetes/api-extension/_index.md b/content/bn/docs/concepts/extend-kubernetes/api-extension/_index.md new file mode 100644 index 0000000000000..c4d9b34aa0047 --- /dev/null +++ b/content/bn/docs/concepts/extend-kubernetes/api-extension/_index.md @@ -0,0 +1,19 @@ +--- +title: কুবারনেটিস API প্রসারিত করা +weight: 30 +--- + +কাস্টম রিসোর্স হলো কুবারনেটিস API এর এক্সটেনশন। কুবারনেটিস আপনার ক্লাস্টারে কাস্টম রিসোর্স যোগ করার দুটি উপায় প্রদান করে: + +- [CustomResourceDefinition](bn/docs/concepts/extend-kubernetes/api-extension/custom-resources/) (CRD) + মেকানিজম আপনাকে একটি API গ্রুপ, ধরনের, এবং স্কিমা দিয়ে ঘোষণামূলকভাবে একটি নতুন কাস্টম API সংজ্ঞায়িত করতে দেয় + যা আপনি নির্দিষ্ট করেছেন। + কুবারনেটিস কন্ট্রোল প্লেন আপনার কাস্টম রিসোর্সের স্টোরেজ পরিবেশন এবং পরিচালনা করে। CRD গুলো আপনাকে আপনার + ক্লাস্টারের জন্য একটি কাস্টম API সার্ভার না লিখে এবং চালানো ছাড়াই নতুন ধরণের রিসোর্স তৈরি করতে দেয় । +- [এগ্রিগেশন লেয়ারটি](bn/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/) + প্রাইমারি API সার্ভারের পিছনে থাকে, যা একটি প্রক্সি হিসেবে কাজ করে। + এই ব্যবস্থাটিকে API এগ্রিগেশন (API Aggregation)(AA) বলা হয়, যা আপনাকে আপনার + নিজস্ব API সার্ভার লিখে এবং স্থাপন করার মাধ্যমে আপনার কাস্টম রিসোর্সগুলোর জন্য + বিশেষায়িত বাস্তবায়ন প্রদান করতে দেয়। + প্রধান API সার্ভার আপনার API সার্ভারে আপনার নির্দিষ্ট করা কাস্টম API গুলোর জন্য অনুরোধগুলো অর্পণ করে, + সেগুলোকে এর সমস্ত ক্লায়েন্টদের জন্য উপলব্ধ করে৷ diff --git a/content/bn/docs/concepts/extend-kubernetes/compute-storage-net/_index.md b/content/bn/docs/concepts/extend-kubernetes/compute-storage-net/_index.md new file mode 100644 index 0000000000000..bfbbbc0940b11 --- /dev/null +++ b/content/bn/docs/concepts/extend-kubernetes/compute-storage-net/_index.md @@ -0,0 +1,43 @@ +--- +title: কম্পিউট, স্টোরেজ, এবং নেটওয়ার্কিং এক্সটেনশন +weight: 30 +no_list: true +--- + +এই বিভাগটি আপনার ক্লাস্টারের এক্সটেনশনগুলোকে কভার করে যা কুবারনেটিসের অংশ হিসাবে আসে না। +আপনি এই এক্সটেনশনগুলো আপনার ক্লাস্টারে নোডগুলোকে উন্নত করতে বা পডকে একসাথে লিঙ্ক করে এমন +নেটওয়ার্ক ফ্যাব্রিক প্রদান করতে ব্যবহার করতে পারেন। + +* [CSI](/bn/docs/concepts/storage/volumes/#csi) এবং [FlexVolume](/bn/docs/concepts/storage/volumes/#flexvolume) স্টোরেজ প্লাগইন + + {{< glossary_tooltip text="কন্টেইনার স্টোরেজ ইন্টারফেস" term_id="csi" >}} (CSI) প্লাগইনগুলো নতুন ধরনের + ভলিউমের জন্য সাপোর্ট সহ কুবারনেটিসকে প্রসারিত করার একটি উপায় প্রদান করে।ভলিউমগুলি টেকসই এক্সটার্নাল স্টোরেজ + দ্বারা ব্যাক করা যেতে পারে, বা ক্ষণস্থায়ী স্টোরেজ প্রদান করতে পারে, অথবা তারা একটি ফাইল সিস্টেম প্যারাডাইম ব্যবহার করে + তথ্যের জন্য একটি পঠনযোগ্য ইন্টারফেস অফার করতে পারে। + + কুবারনেটিস এছাড়াও [FlexVolume](/bn/docs/concepts/storage/volumes/#flexvolume) প্লাগইনগুলোর জন্য সাপোর্ট অন্তর্ভুক্ত করে, + যা কুবারনেটিস v1.23 (CSI-এর পক্ষে) থেকে অবমূল্যায়িত(deprecated) করা হয়েছে । + + FlexVolume প্লাগইনগুলো ব্যবহারকারীদের ভলিউম প্রকারগুলো মাউন্ট করার অনুমতি দেয় যা সাধারণত কুবারনেটিস + দ্বারা সাপোর্টেড 
নয়। আপনি যখন FlexVolume স্টোরেজের উপর নির্ভর করে এমন একটি পড চালান, তখন kubelet + ভলিউম মাউন্ট করার জন্য একটি বাইনারি প্লাগইন কল করে। আর্কাইভ করা + [FlexVolume](https://git.k8s.io/design-proposals-archive/storage/flexvolume-deployment.md) + ডিজাইন প্রস্তাবে এই পদ্ধতির আরও বিশদ বিবরণ রয়েছে। + + [The Kubernetes Volume Plugin FAQ for Storage Vendors](https://github.com/kubernetes/community/blob/master/sig-storage/volume-plugin-faq.md#kubernetes-volume-plugin-faq-for-storage-vendors) তে + স্টোরেজ প্লাগইনগুলোর সাধারণ তথ্য অন্তর্ভুক্ত রয়েছে । + +* [ডিভাইস প্লাগইন](/bn/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) + + ডিভাইস প্লাগইনগুলো একটি নোডকে নতুন নোড সুবিধাগুলি আবিষ্কার করার অনুমতি দেয় + (বিল্ট-ইন নোড রিসোর্স যেমন `cpu` এবং `মেমরি` ছাড়াও), এবং তাদের অনুরোধকারী + পডগুলোতে এই কাস্টম নোড-লোকাল সুবিধাগুলো সরবরাহ করে। + +* [নেটওয়ার্ক প্লাগইন](/bn/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/) + + একটি নেটওয়ার্ক প্লাগইন কুবারনেটিসকে বিভিন্ন নেটওয়ার্কিং টপোলজি এবং প্রযুক্তির সাথে কাজ করার অনুমতি দেয়। + আপনার কুবারনেটিস ক্লাস্টারের একটি _নেটওয়ার্ক প্লাগইন_ প্রয়োজন যাতে একটি কার্যকরী পড নেটওয়ার্ক থাকে + এবং কুবারনেটিস নেটওয়ার্ক মডেলের অন্যান্য দিকগুলোকে সাপোর্ট করতে পারে ৷ + + কুবারনেটিস {{< skew currentVersion >}} {{< glossary_tooltip text="CNI" term_id="cni" >}} + নেটওয়ার্ক প্লাগইনগুলোর সাথে সামঞ্জস্যপূর্ণ ৷ diff --git a/content/bn/docs/concepts/extend-kubernetes/control-plane.png b/content/bn/docs/concepts/extend-kubernetes/control-plane.png new file mode 100644 index 0000000000000..fa61599e94118 Binary files /dev/null and b/content/bn/docs/concepts/extend-kubernetes/control-plane.png differ diff --git a/content/bn/docs/concepts/extend-kubernetes/extension-points.png b/content/bn/docs/concepts/extend-kubernetes/extension-points.png new file mode 100644 index 0000000000000..8e9359c0cb60b Binary files /dev/null and b/content/bn/docs/concepts/extend-kubernetes/extension-points.png differ diff --git a/content/bn/docs/concepts/extend-kubernetes/flowchart.svg b/content/bn/docs/concepts/extend-kubernetes/flowchart.svg new file mode 100644 index 0000000000000..94bc7c72222cf --- /dev/null +++ b/content/bn/docs/concepts/extend-kubernetes/flowchart.svg @@ -0,0 +1,4 @@ + + + +
হ্যাঁ
হ্যাঁ
"API Extensions" এ যান
"API Extensions" এ যান
আপনি কি কুবারনেটিস API এ সম্পূর্ণ নতুন ধরনের যোগ করতে চান?
আপনি যোগ করতে চান...
না
না
আপনি কি কিছু বা সমস্ত API প্রকারে ফিল্ডগুলোকে রেস্ট্রিক্ট বা স্বয়ংক্রিয়ভাবে এডিট করতে চান?
আপনি কি রেস্ট্রিক্ট করতে চান বা...
হ্যাঁ
হ্যাঁ
"API Access Extensions" এ যান
"API Access Extensions" এ যান
না
না
আপনি কি বিল্ট-ইন API প্রকারের অন্তর্নিহিত বাস্তবায়ন পরিবর্তন করতে চান?
আপনি কি বিল্ট-ইন API প্রকারের...
হ্যাঁ
হ্যাঁ
না
না
না
না
হ্যাঁ
হ্যাঁ
আপনি কি ভলিউম, সার্ভিস, ইনগ্রেস, পারসিস্টেন্ট ভলিউম পরিবর্তন করতে চান?
আপনি কি ভলিউম, সার্ভিস, ইনগ্রেস...
"Infrastructure" এ যান
"Infrastructure" এ যান
"Automation" এ যান
"Automation" এ যান
Text is not SVG - cannot display
diff --git a/content/bn/docs/concepts/overview/_index.md b/content/bn/docs/concepts/overview/_index.md new file mode 100644 index 0000000000000..2aab2102d5d6b --- /dev/null +++ b/content/bn/docs/concepts/overview/_index.md @@ -0,0 +1,179 @@ +--- +title: "ওভারভিউ" +description: > + কুবারনেটিস হল একটি পোর্টেবল, এক্সটেনসিবল, ওপেন সোর্স প্ল্যাটফর্ম যা কন্টেইনারাইজড ওয়ার্কলোড এবং সার্ভিসগুলি পরিচালনা করার জন্য, ঘোষণামূলক কনফিগারেশন এবং অটোমেশন উভয়কেই সহজতর করে। এটির একটি বড়, দ্রুত বর্ধনশীল ইকোসিস্টেম রয়েছে। কুবারনেটিস সার্ভিসগুলি, সাপোর্ট, এবং টুলস ব্যাপকভাবে সহজলভ্য। +content_type: concept +weight: 20 +card: + name: concepts + weight: 10 + anchors: + - anchor: "#why-you-need-kubernetes-and-what-can-it-do" + title: কুবারনেটিস কেন? +no_list: true +--- + + +এই পৃষ্ঠাটি কুবারনেটিসের একটি পরিপূর্ণ ধারণা প্রদান করে । + + + + +কুবারনেটিস হল একটি পোর্টেবল, বর্ধনশীল, ওপেন সোর্স প্ল্যাটফর্ম যা কনটেইনারাইজড +ওয়ার্কলোড এবং পরিষেবাগুলি পরিচালনা করার জন্য, ঘোষণামূলক কনফিগারেশন এবং অটোমেশন উভয়কেই সহজতর করে। +এটির একটি বড়, দ্রুত বর্ধনশীল ইকোসিস্টেম রয়েছে। কুবারনেটিস পরিষেবাগুলি, সমর্থন, এবং সরঞ্জাম ব্যাপকভাবে সহজলভ্য। + +কুবারনেটিস নামটি গ্রীক থেকে এসেছে, যার অর্থ হেলমসম্যান বা পাইলট। +"K" এবং "s" এর মধ্যে আটটি অক্ষর গণনা করার ফলে একটি সংক্ষিপ্ত রূপ K8s। গুগল ২০১৪ সালে +কুবারনেটিস প্রজেক্টটি ওপেন সোর্স করেছে। কুবারনেটিস +[15 বছরেরও বেশি সময় ধরে Google-এর অভিজ্ঞতাকে](/blog/2015/04/borg-predecessor-to-kubernetes/) একত্রিত করেছে +যা কমিউনিটির সেরা আইডিয়া এবং অনুশীলনের সাথে স্কেলে উৎপাদন কাজের চাপ চালানোর। + +## অতিতে যাই + +চলুন অতিতে যেয়ে এক নজরে দেখে নেওয়া যাক কেন কুবারনেটিস এতটা কাজে লাগে। + +![ডিপ্লয়মেন্টের বিবর্তন](/images/docs/Container_Evolution.svg) + +**ঐতিহ্যবাহী ডিপ্লয়মেন্টের যুগ:** +প্রথম দিকে, সংস্থাগুলি ফিজিক্যাল সার্ভারগুলিতে অ্যাপ্লিকেশন চালাত। +একটি ফিজিক্যাল সার্ভারে অ্যাপ্লিকেশনের জন্য রিসোর্স সীমানা নির্ধারণ করার কোন উপায় ছিল না, +এবং এর ফলে রিসোর্স বরাদ্দ সমস্যা হয়েছে। উদাহরণস্বরূপ, যদি একটি ফিজিক্যাল সার্ভারে একাধিক অ্যাপ্লিকেশান চালিত হয়, +এমন উদাহরণ হতে পারে যেখানে একটি অ্যাপ্লিকেশন বেশিরভাগ সংস্থান গ্রহণ করবে, এবং ফলস্বরূপ, অন্যান্য অ্যাপ্লিকেশনগুলি কম পারফর্ম করবে। +এই জন্য একটি সমাধান একটি ভিন্ন ফিজিক্যাল সার্ভারে প্রতিটি অ্যাপ্লিকেশন চালানো হবে। +কিন্তু সম্পদের অব্যবহৃত হওয়ার কারণে এটির মাপকাঠিি ঠিক করা যায়নি এবং +অনেকগুলি ফিজিক্যাল সার্ভার বজায় রাখা সংস্থাগুলির জন্য ব্যয়বহুল ছিল। + +**ভার্চুয়ালাইজড ডিপ্লয়মেন্টর যুগ:** একটি সমাধান হিসাবে, ভার্চুয়ালাইজেশন চালু করা হয়েছিল। এটি আপনাকে +একটি একক ফিজিক্যাল সার্ভারের CPU-তে একাধিক ভার্চুয়াল মেশিন (VMs) চালানো যায়। ভার্চুয়ালাইজেশন +অ্যাপ্লিকেশনগুলিকে VM-এর মধ্যে বিচ্ছিন্ন করার অনুমতি দেয় এবং একটি স্তরের নিরাপত্তা প্রদান করে +কারণ একটি অ্যাপ্লিকেশনের তথ্য অন্য অ্যাপ্লিকেশন দ্বারা অবাধে অ্যাক্সেস করা যায় না। + +ভার্চুয়ালাইজেশন একটি ফিজিক্যাল সার্ভারে রিসোর্সগুলির আরও ভালো ব্যবহারের অনুমতি দেয় এবং +আরও ভাল স্কেলেবিলিটির অনুমতি দেয় কারণ একটি অ্যাপ্লিকেশন সহজে যোগ বা আপডেট করা যায়, হার্ডওয়্যার খরচ কমায় +এবং আরও অনেক কিছু। ভার্চুয়ালাইজেশনের মাধ্যমে আপনি ডিসপোজেবল ভার্চুয়াল মেশিনের +একটি ক্লাস্টার হিসাবে ফিজিক্যাল সম্পদের একটি সেট উপস্থাপন করতে পারেন। + +প্রতিটি VM হল একটি সম্পূর্ণ মেশিন যা ভার্চুয়ালাইজড হার্ডওয়্যারের উপরে নিজস্ব অপারেটিং সিস্টেম +সহ সমস্ত উপাদান চালায়। + +**কন্টেইনার স্থাপনের যুগ:** কনটেইনারগুলি VM-এর মতোই, তবে অ্যাপ্লিকেশনগুলির +মধ্যে অপারেটিং সিস্টেম (OS) ভাগ করার জন্য তাদের শিথিল বিচ্ছিন্নতা বৈশিষ্ট্য রয়েছে৷ +অতএব, কন্টেইনারগুলোকে হালকা বলে মনে করা হয়। একটি VM-এর মতো, একটি কনটেইনারের +নিজস্ব ফাইল সিস্টেম, CPU ভাগ, মেমরি, প্রক্রিয়া স্থান এবং আরও অনেক কিছু রয়েছে। যেহেতু এগুলি 
+অন্তর্নিহিত অবকাঠামো থেকে আলাদা করা হয়েছে, তারা ক্লাউড এবং +OS ডিস্ট্রিবিউশন জুড়ে বহনযোগ্য। + +কন্টেইনারগুলো জনপ্রিয় হয়ে উঠেছে কারণ তারা অতিরিক্ত সুবিধা প্রদান করে, যেমন: + +* এজাইল (Agile) অ্যাপ্লিকেশন তৈরি এবং ডিপ্লয়মেন্টয়: ভিএম ইমেজ (VM Image) ব্যবহারের তুলনায় কন্টেইনার ইমেজ (Container Image) + তৈরির সহজতা এবং দক্ষতা বেশি। +* ক্রমাগত বিকাশ, একীকরণ এবং ডিপ্লয়মেন্ট: নির্ভরযোগ্য এবং ঘন ঘন + কন্টেইনার ইমেজ তৈরি এবং ডিপ্লয়মেন্টের জন্য প্রদান করে দ্রুত এবং + দক্ষ রোলব্যাকের (ইমেজ অপরিবর্তনীয়তার কারণে) সাথে । +* ডেভ (Dev) এবং অপস (Ops) উদ্বেগের বিচ্ছেদ: বিল্ড/রিলিজের সময়ে + অ্যাপ্লিকেশন কন্টেইনার ইমেজ তৈরি করে ডিপ্লয়মেন্টের সময়ের তুলনায়, + ফলস্বরূপ অ্যাপ্লিকেশনগুলি অবকাঠামো থেকে বিচ্ছিন্ন হয়। +* পর্যবেক্ষণযোগ্যতা: শুধুমাত্র OS-স্তরের তথ্য এবং মেট্রিক্সই নয়, + প্রয়োগের স্বাস্থ্য এবং অন্যান্য সংকেতও। +* ডেভেলপমেন্ট, টেস্টিং এবং প্রোডাকশন জুড়ে পরিবেশগত সামঞ্জস্য: একটি + ল্যাপটপে ক্লাউডের মতোই চলে। +* ক্লাউড এবং ওএস ডিস্ট্রিবিউশন পোর্টেবিলিটি: উবুন্টু (Ubuntu), রেল (RHEL), কোরওস (CoreOS), অন-প্রিমিসেস (on-premises), + প্রধান পাবলিক ক্লাউডসর উপর, এবং অন্য কোথাও চলে। +* অ্যাপ্লিকেশন-কেন্দ্রিক ব্যবস্থাপনা: ভার্চুয়াল হার্ডওয়্যারে একটি OS চালানো থেকে + লজিক্যাল রিসোর্স ব্যবহার করে একটি OS-এ একটি অ্যাপ্লিকেশন চালানো পর্যন্ত বিমূর্ততার স্তর বাড়ায়। +* ঢিলেঢালাভাবে সংযুক্ত, বিতরণ করা, স্থিতিস্থাপক, মুক্ত মাইক্রো-পরিষেবা: অ্যাপ্লিকেশনগুলিকে + ছোট, স্বাধীন টুকরোগুলিতে বিভক্ত করা হয় এবং গতিশীলভাবে স্থাপন ও পরিচালনা করা যায় – + একটি বড় একক-উদ্দেশ্য মেশিনে চলমান একটি মনোলিথিক স্ট্যাক নয়।. +* রিসোর্স আইসোলেশন: অনুমানযোগ্য অ্যাপ্লিকেশন কর্মক্ষমতা। +* রিসোর্স ব্যবহার: উচ্চ দক্ষতা এবং ঘনত্ব। + +## আপনার কেন কুবারনেটিস দরকার এবং এটি কী করতে পারে {#why-you-need-kubernetes-and-what-can-it-do} + +কন্টেইনারসমূহ অ্যাপ্লিকেশন একত্রকরণ এবং চালানোর একটি ভালো উপায়৷ একটি উৎপাদন +পরিবেশে, কন্টেইনারসমূহ এমন ভাবে পরিচালনা করতে হবে যা অ্যাপ্লিকেশনগুলি চালানোর সময় +যেন কোনো ডাউনটাইম না থাকে তা নিশ্চিত করবে। উদাহরণস্বরূপ, যদি একটি কন্টেইনার ডাউন হয়, তাহলে অন্য +কন্টেইনার কে সেই মুহূর্তে চালু হতে হবে। আর এই অবস্থাটি একটি সিস্টেম দ্বারা পরিচালিত হলে এটি কি সহজ হবে না? 
+ +এভাবেই কুবারনেটস উদ্ধারে আসে!। কুবারনেটিস একটি কাঠামো প্রদান করে আপনাকে +সিস্টেমগুলিকে স্থিতিস্থাপকভাবে চালানোর জন্য । এটি আপনার অ্যাপ্লিকেশনের জন্য স্কেলিং এবং ফেইলওভারের যত্ন নেয়, +ডিপ্লয়মেন্টের নিদর্শন প্রদান করে এবং আরও অনেক কিছু করে। উদাহরণস্বরূপ, কুবারনেটিস +সহজেই আপনার সিস্টেমের জন্য একটি ক্যানারি ডিপ্লয়মেন্ট পরিচালনা করতে পারে। + +কুবারনেটিস আপনাকে সরবরাহ করে: + +* **পরিষেবা আবিষ্কার এবং লোড ব্যালেন্সিং** + কুবারনেটিস ডিএনএস নাম ব্যবহার করে বা তাদের নিজস্ব আইপি ঠিকানা ব্যবহার করে একটি ধারক প্রকাশ করতে পারে। + একটি কন্টেইনারে ট্রাফিক বেশি হলে, কুবারনেটিস লোড ব্যালেন্স এবং নেটওয়ার্ক ট্র্যাফিক + বিতরণ করতে সক্ষম হয় যাতে ডিপ্লয়মেন্ট স্থিতিশীল থাকে। +* **স্টোরেজ অর্কেস্ট্রেশন** + কুবারনেটিস আপনাকে স্বয়ংক্রিয়ভাবে আপনার পছন্দের একটি স্টোরেজ সিস্টেম মাউন্ট করার অনুমতি দেয়, যেমন + স্থানীয় স্টোরেজ, পাবলিক ক্লাউড প্রদানকারী এবং আরও অনেক কিছু। +* **স্বয়ংক্রিয় রোলআউট এবং রোলব্যাক** + আপনি কুবারনেটিস ব্যবহার করে আপনার স্থাপন করা কন্টেইনার জন্য পছন্দসই অবস্থা বর্ণনা করতে পারেন + এবং এটি একটি নিয়ন্ত্রিত হারে প্রকৃত অবস্থাকে পছন্দসই অবস্থায় পরিবর্তন করতে পারে। + উদাহরণস্বরূপ, আপনি কুবারনেটিস দিয়ে স্বয়ংক্রিয়ভাবে আপনার ডিপ্লয়মেন্টের জন্য নতুন কন্টেইনার তৈরি, + বিদ্যমান কন্টেইনারগুলি সরাতে এবং নতুন কন্টেইনারে তাদের সমস্ত রিসোর্স গ্রহণ করতে পারেন। +* **স্বয়ংক্রিয় বিন প্যাকিং** + আপনি কুবারনেটিসকে নোডের একটি ক্লাস্টার প্রদান করেন যা এটি কন্টেইনারাইজড কাজ চালাতে ব্যবহার করতে পারে। + আপনি কুবারনেটিসকেে বলুন প্রতিটি কন্টেইনারের কত CPU এবং মেমরি (RAM) প্রয়োজন। কুবারনেটিস আপনার + সম্পদের সর্বোত্তম ব্যবহার করতে আপনার নোডগুলিতে কন্টেইনারে মানানসই করতে পারে। +* **স্ব-নিরাময়** + কুবারনেটিস ব্যর্থ কন্টেইনারগুলি পুনরায় চালু করে, কন্টেইনারগুলিকে প্রতিস্থাপন করে, এমন কন্টেইনারগুলিকে বন্ধ করে + যেটি আপনার ব্যবহারকারী-সংজ্ঞায়িত স্বাস্থ্য পরীক্ষায় সাড়া দেয় না এবং ক্লায়েন্টদের কাছে তাদের বিজ্ঞাপন দেবেন না যতক্ষণ না এটি + পরিবেশন করার জন্য প্রস্তুত। +* **গোপন এবং কনফিগারেশন ব্যবস্থাপনা** + কুবারনেটিস আপনাকে সংবেদনশীল তথ্য সংরক্ষণ এবং পরিচালনা করতে দেয়, যেমন পাসওয়ার্ড, ওঅথ (OAuth) টোকেন + এবং এসএসএইচ (SSH) কী। আপনি গোপনীয়তা এবং অ্যাপ্লিকেশনের কনফিগারেশন স্থাপন এবং আপডেট করতে পারবেন + আপনার কন্টেইনার চিত্রগুলি পুনর্নির্মাণ করা ছাড়াই, এবং আপনার স্ট্যাক কনফিগারেশনের গোপনীয়তা প্রকাশ না করে। +* **ব্যাচ এক্সেকিউশন** + পরিষেবাগুলি ছাড়াও, কুবারনেটস আপনার ব্যাচ এবং সিআই ওয়ার্কলোডগুলি পরিচালনা করতে পারে, যদি ইচ্ছা হয় তবে ব্যর্থ কন্টেইনারগুলি প্রতিস্থাপন করতে পারে। +* **অনুভূমিক স্কেলিং** + একটি সাধারণ কমান্ডের সাহায্যে, একটি UI সহ, বা স্বয়ংক্রিয়ভাবে CPU ব্যবহারের উপর ভিত্তি করে আপনার অ্যাপ্লিকেশনকে উপরে এবং নীচে স্কেল করুন। +* **IPv4/IPv6 ডুয়াল-স্ট্যাক** + পড এবং পরিষেবাগুলিতে IPv4 এবং IPv6 ঠিকানাগুলির বরাদ্দ৷ +* **এক্সটেনসিবিলিটির জন্য ডিজাইন** + আপস্ট্রিম (upstream) সোর্স কোড পরিবর্তন না করে আপনার কুবারনেটিস ক্লাস্টারে বৈশিষ্ট্য যোগ করুন। + +## কুবারনেটিস কি নয় + +কুবারনেটিস একটি ঐতিহ্যগত, সর্ব-অন্তর্ভুক্ত PaaS (পরিষেবা হিসাবে প্ল্যাটফর্ম) সিস্টেম নয়। +যেহেতু Kubernetes হার্ডওয়্যার স্তরের পরিবর্তে কন্টেইনার স্তরে কাজ করে, +তাই এটি PaaS অফারগুলির জন্য সাধারণভাবে কিছু প্রযোজ্য বৈশিষ্ট্য প্রদান করে, যেমন +ডিপ্লয়মেন্ট, স্কেলিং, লোড ব্যালেন্সিং, এবং ব্যবহারকারীদের তাদের লগিং, পর্যবেক্ষণ +এবং সতর্কতা সমাধানগুলিকে একীভূত করতে দেয়৷ যাইহোক, কুবারনেটিস একচেটিয়া নয়, এবং এই ডিফল্ট সমাধান +ঐচ্ছিক এবং প্লাগযোগ্য। কুবারনেটিস বিকাশকারী প্ল্যাটফর্ম তৈরির জন্য বিল্ডিং ব্লক সরবরাহ করে, +কিন্তু যেখানে এটি গুরুত্বপূর্ণ সেখানে ব্যবহারকারীর পছন্দ এবং নমনীয়তা সংরক্ষণ করে। + +কুবারনেটিস: + +* সমর্থিত অ্যাপ্লিকেশনের ধরন সীমাবদ্ধ করে না। কুবারনেটিস একটি লক্ষ্য হল + স্টেটলেস, স্টেটফুল এবং ডেটা-প্রসেসিং সহ 
+
+## কুবারনেটিস কি নয়
+
+কুবারনেটিস কোনো ঐতিহ্যবাহী, সর্ব-অন্তর্ভুক্ত PaaS (পরিষেবা হিসাবে প্ল্যাটফর্ম) সিস্টেম নয়।
+যেহেতু কুবারনেটিস হার্ডওয়্যার স্তরের পরিবর্তে কন্টেইনার স্তরে কাজ করে,
+তাই এটি PaaS অফারগুলোর সাধারণ কিছু প্রযোজ্য বৈশিষ্ট্য প্রদান করে, যেমন
+ডিপ্লয়মেন্ট, স্কেলিং, লোড ব্যালেন্সিং, এবং ব্যবহারকারীদের তাদের লগিং, মনিটরিং
+এবং সতর্কতা সমাধানগুলো একীভূত করতে দেয়৷ তবে, কুবারনেটিস মনোলিথিক নয়, এবং এই ডিফল্ট সমাধানগুলো
+ঐচ্ছিক এবং প্লাগযোগ্য। কুবারনেটিস ডেভেলপার প্ল্যাটফর্ম তৈরির জন্য বিল্ডিং ব্লক সরবরাহ করে,
+কিন্তু যেখানে এটি গুরুত্বপূর্ণ সেখানে ব্যবহারকারীর পছন্দ এবং নমনীয়তা সংরক্ষণ করে।
+
+কুবারনেটিস:
+
+* সমর্থিত অ্যাপ্লিকেশনের ধরন সীমাবদ্ধ করে না। কুবারনেটিসের একটি লক্ষ্য হলো
+  স্টেটলেস, স্টেটফুল এবং ডেটা-প্রসেসিং সহ অত্যন্ত বৈচিত্র্যময় ওয়ার্কলোড
+  সমর্থন করা। যদি একটি অ্যাপ্লিকেশন একটি কন্টেইনারে চলতে পারে, তাহলে সেটির কুবারনেটিসেও চমৎকারভাবে চলার কথা।
+* সোর্স কোড ডিপ্লয় করে না এবং আপনার অ্যাপ্লিকেশন বিল্ডও করে না। ক্রমাগত ইন্টিগ্রেশন,
+  ডেলিভারি এবং ডিপ্লয়মেন্ট (CI/CD) ওয়ার্কফ্লো প্রতিষ্ঠানের সংস্কৃতি এবং
+  পছন্দের পাশাপাশি প্রযুক্তিগত প্রয়োজনীয়তা দ্বারা নির্ধারিত হয়।
+* অ্যাপ্লিকেশন-স্তরের পরিষেবা প্রদান করে না, যেমন মিডলওয়্যার (উদাহরণস্বরূপ, মেসেজ বাস),
+  ডেটা-প্রসেসিং ফ্রেমওয়ার্ক (উদাহরণস্বরূপ, স্পার্ক), ডাটাবেস (উদাহরণস্বরূপ, মাইএসকিউএল), ক্যাশে, বা
+  বিল্ট-ইন পরিষেবা হিসাবে ক্লাস্টার স্টোরেজ সিস্টেম (উদাহরণস্বরূপ, Ceph)। এই ধরনের উপাদান
+  কুবারনেটিসে চলতে পারে, এবং/অথবা পোর্টেবল মেকানিজমের মাধ্যমে কুবারনেটিসে চলমান অ্যাপ্লিকেশন দ্বারা অ্যাক্সেস করা
+  যেতে পারে, যেমন [ওপেন সার্ভিস ব্রোকার](https://openservicebrokerapi.org/)।
+* লগিং, মনিটরিং বা সতর্কতা সমাধান নির্দেশ করে না। এটি প্রুফ অফ কনসেপ্ট হিসাবে কিছু ইন্টিগ্রেশন
+  এবং মেট্রিক্স সংগ্রহ ও এক্সপোর্ট করার মেকানিজম প্রদান করে।
+* কোনো কনফিগারেশন ভাষা/সিস্টেম প্রদান বা বাধ্যতামূলক করে না (উদাহরণস্বরূপ, Jsonnet)। এটি একটি ঘোষণামূলক
+  API প্রদান করে, যা যেকোনো ধরনের ঘোষণামূলক স্পেসিফিকেশন দিয়ে টার্গেট করা যেতে পারে।
+* কোনো ব্যাপক মেশিন কনফিগারেশন, রক্ষণাবেক্ষণ, ব্যবস্থাপনা বা স্ব-নিরাময় সিস্টেম
+  প্রদান করে না বা গ্রহণ করে না।
+* উপরন্তু, কুবারনেটিস নিছক একটি অর্কেস্ট্রেশন সিস্টেম নয়। আসলে, এটি অর্কেস্ট্রেশনের
+  প্রয়োজনীয়তাই দূর করে। অর্কেস্ট্রেশনের প্রযুক্তিগত সংজ্ঞা হলো একটি সংজ্ঞায়িত ওয়ার্কফ্লো কার্যকর করা:
+  প্রথমে A, তারপর B, তারপর C করুন। বিপরীতে, কুবারনেটিস একগুচ্ছ স্বাধীন, কম্পোজযোগ্য
+  নিয়ন্ত্রণ প্রক্রিয়া নিয়ে গঠিত, যা ক্রমাগত বর্তমান অবস্থাকে প্রদত্ত কাঙ্ক্ষিত অবস্থার দিকে চালিত করে।
+  আপনি A থেকে C পর্যন্ত কীভাবে যাবেন তা বিবেচ্য নয়। কেন্দ্রীভূত নিয়ন্ত্রণেরও প্রয়োজন নেই। এর
+  ফলে এমন একটি সিস্টেম পাওয়া যায়, যা ব্যবহার করা সহজ এবং আরও শক্তিশালী, মজবুত, স্থিতিস্থাপক ও এক্সটেনসিবল।
+
+## {{% heading "whatsnext" %}}
+
+* [কুবারনেটিস উপাদান](/bn/docs/concepts/overview/components/) একবার দেখুন
+* [কুবারনেটিস এপিআই](/bn/docs/concepts/overview/kubernetes-api/) একবার দেখুন
+* [ক্লাস্টার আর্কিটেকচার](/bn/docs/concepts/architecture/) একবার দেখুন
+* আপনি কি [শুরু করতে](/bn/docs/setup/) প্রস্তুত?
diff --git a/content/bn/docs/concepts/overview/working-with-objects/_index.md b/content/bn/docs/concepts/overview/working-with-objects/_index.md
new file mode 100644
index 0000000000000..2f3f6bdb8c55c
--- /dev/null
+++ b/content/bn/docs/concepts/overview/working-with-objects/_index.md
@@ -0,0 +1,175 @@
+---
+title: কুবারনেটিসে অবজেক্ট
+content_type: concept
+weight: 10
+description: >
+  কুবারনেটিস অবজেক্ট হলো কুবারনেটিস সিস্টেমের স্থায়ী সত্তা।
+  কুবারনেটিস আপনার ক্লাস্টারের অবস্থার প্রতিনিধিত্ব করতে এই সত্তাগুলো ব্যবহার করে।
+  কুবারনেটিস অবজেক্ট মডেল এবং এই অবজেক্টগুলোর সাথে কীভাবে কাজ করতে হয় সে সম্পর্কে জানুন।
+simple_list: true
+card:
+  name: concepts
+  weight: 40
+---
+
+
+এই পৃষ্ঠাটি ব্যাখ্যা করে কুবারনেটিস API-তে কুবারনেটিস অবজেক্টগুলো কীভাবে উপস্থাপন করা হয় এবং
+আপনি কীভাবে সেগুলো `.yaml` ফরম্যাটে প্রকাশ করতে পারেন।
+
+
+## কুবারনেটিস অবজেক্ট বোঝা {#kubernetes-objects}
+
+*কুবারনেটিস অবজেক্ট* হলো কুবারনেটিস সিস্টেমের স্থায়ী এন্টিটি। কুবারনেটিস এই
+এন্টিটিগুলো ব্যবহার করে আপনার ক্লাস্টারের অবস্থা প্রকাশ করে। বিশেষভাবে, এগুলো বর্ণনা করতে পারে:
+
+- কোন কন্টেইনারাইজড অ্যাপ্লিকেশনগুলো চলছে (এবং কোন নোডে)
+- ঐ অ্যাপ্লিকেশনগুলোর জন্য উপলব্ধ রিসোর্স
+- ঐ অ্যাপ্লিকেশনগুলো কীভাবে আচরণ করবে সে সম্পর্কিত নীতি, যেমন রিস্টার্ট নীতি, আপগ্রেড এবং ফল্ট-টলারেন্স
+
+একটি কুবারনেটিস অবজেক্ট হলো একটি "উদ্দেশ্যের রেকর্ড" - একবার আপনি অবজেক্টটি তৈরি করলে,
+কুবারনেটিস সিস্টেম নিরন্তর কাজ করবে যাতে সেই অবজেক্টটির অস্তিত্ব বজায় থাকে। একটি অবজেক্ট তৈরি করে
+আপনি কার্যত কুবারনেটিস সিস্টেমকে বলে দিচ্ছেন আপনার ক্লাস্টারের ওয়ার্কলোড দেখতে কেমন হবে; এটাই
+আপনার ক্লাস্টারের *কাঙ্ক্ষিত অবস্থা*।
+
+কুবারনেটিস অবজেক্টগুলোর সাথে কাজ করতে - সেগুলো তৈরি, পরিবর্তন বা মুছে ফেলতে - আপনাকে
+[কুবারনেটিস API](/bn/docs/concepts/overview/kubernetes-api/) ব্যবহার করতে হবে। উদাহরণস্বরূপ,
+যখন আপনি `kubectl` কমান্ড-লাইন ইন্টারফেস ব্যবহার করেন, তখন CLI আপনার জন্য প্রয়োজনীয়
+কুবারনেটিস API কলগুলো করে। আপনি একটি
+[ক্লায়েন্ট লাইব্রেরি](/docs/reference/using-api/client-libraries/) ব্যবহার করে নিজের প্রোগ্রামে সরাসরি কুবারনেটিস API ব্যবহার করতে পারেন।
+
+### অবজেক্ট স্পেক এবং স্ট্যাটাস
+
+প্রায় সব কুবারনেটিস অবজেক্টের দুটি নেস্টেড অবজেক্ট ফিল্ড রয়েছে,
+যা অবজেক্টের কনফিগারেশন নিয়ন্ত্রণ করে: অবজেক্টের _`spec`_ এবং _`status`_।
+যে অবজেক্টগুলোর `spec` থাকে, অবজেক্ট তৈরি করার সময় আপনাকে সেটি নির্ধারণ করে দিতে হবে,
+অর্থাৎ রিসোর্সটির কাঙ্ক্ষিত বৈশিষ্ট্যের বর্ণনা দিতে হবে: এর _কাঙ্ক্ষিত অবস্থা_।
+
+`status` অবজেক্টের _বর্তমান অবস্থা_ বর্ণনা করে, যা কুবারনেটিস সিস্টেম এবং এর উপাদানগুলো
+সরবরাহ এবং আপডেট করে। কুবারনেটিস
+{{< glossary_tooltip text="control plane" term_id="control-plane" >}}
+নিরন্তর এবং সক্রিয়ভাবে প্রতিটি অবজেক্টের
+বর্তমান অবস্থা পরিচালনা করে, যাতে তা আপনার প্রদত্ত কাঙ্ক্ষিত অবস্থার সাথে মেলে।
+
+উদাহরণস্বরূপ: কুবারনেটিসে, একটি ডিপ্লয়মেন্ট এমন একটি অবজেক্ট যা আপনার ক্লাস্টারে চলমান একটি
+অ্যাপ্লিকেশনের প্রতিনিধিত্ব করতে পারে। ডিপ্লয়মেন্ট তৈরি করার সময় আপনি
+ডিপ্লয়মেন্টের `spec`-এ নির্ধারণ করে দিতে পারেন যে
+আপনি অ্যাপ্লিকেশনটির তিনটি রেপ্লিকা চালু রাখতে চান।
+কুবারনেটিস সিস্টেম ডিপ্লয়মেন্ট স্পেক পড়ে, আপনার কাঙ্ক্ষিত অ্যাপ্লিকেশনটির
+তিনটি ইনস্ট্যান্স চালু করে এবং স্ট্যাটাস আপডেট করে আপনার স্পেকের সাথে মিলিয়ে দেয়।
+যদি সেই ইনস্ট্যান্সগুলোর মধ্যে কোনোটি ব্যর্থ হয় (একটি স্ট্যাটাস পরিবর্তন),
+কুবারনেটিস সিস্টেম স্পেক এবং স্ট্যাটাসের মধ্যে পার্থক্যের প্রতিক্রিয়ায় একটি
+সংশোধন করে - এই ক্ষেত্রে, একটি প্রতিস্থাপন ইনস্ট্যান্স চালু করে।
+
+অবজেক্ট স্পেক, স্ট্যাটাস এবং মেটাডেটা সম্পর্কে বিস্তারিত তথ্যের জন্য
+[Kubernetes API Conventions](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md) দেখুন।
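+
+উদাহরণস্বরূপ, একটি চলমান ডিপ্লয়মেন্টের `spec` এবং `status` একসাথে দেখতে আপনি `kubectl` ব্যবহার করতে পারেন।
+নিচের `nginx-deployment` নামটি এবং আউটপুটটি কেবল একটি অনুমানমূলক, সংক্ষেপিত নমুনা:
+
+```shell
+kubectl get deployment nginx-deployment -o yaml
+```
+
+আউটপুটের প্রাসঙ্গিক অংশটি অনেকটা এরকম দেখাতে পারে:
+
+```
+spec:
+  replicas: 3            # কাঙ্ক্ষিত অবস্থা
+status:
+  availableReplicas: 3   # কন্ট্রোল প্লেনের আপডেট করা বর্তমান অবস্থা
+```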
+
+### একটি কুবারনেটিস অবজেক্টের বর্ণনা
+
+যখন আপনি কুবারনেটিসে একটি অবজেক্ট তৈরি করেন, তখন আপনাকে অবজেক্টের spec প্রদান করতে হবে,
+যা এর কাঙ্ক্ষিত অবস্থা বর্ণনা করে, এবং অবজেক্ট সম্পর্কে
+কিছু মৌলিক তথ্য (যেমন নাম) দিতে হবে। যখন আপনি অবজেক্ট তৈরি
+করতে কুবারনেটিস API ব্যবহার করেন (সরাসরি বা `kubectl` এর মাধ্যমে),
+তখন সেই API অনুরোধে এই তথ্য একটি JSON রিকোয়েস্ট বডি হিসেবে অন্তর্ভুক্ত করতে হবে।
+সাধারণত, আপনি _ম্যানিফেস্ট_ নামে পরিচিত একটি ফাইলে kubectl-কে তথ্য প্রদান করেন। নিয়ম অনুসারে, ম্যানিফেস্ট হলো YAML (আপনি JSON
+ফরম্যাটও ব্যবহার করতে পারেন)। HTTP-এর মাধ্যমে API অনুরোধ করার সময় kubectl-এর মতো টুল ম্যানিফেস্টের তথ্যকে JSON বা অন্য
+সমর্থিত সিরিয়ালাইজেশন ফরম্যাটে রূপান্তর করে।
+
+এখানে একটি উদাহরণ ম্যানিফেস্ট দেওয়া হলো, যাতে একটি কুবারনেটিস ডিপ্লয়মেন্টের জন্য প্রয়োজনীয়
+ক্ষেত্রগুলো এবং অবজেক্ট স্পেকের একটি নমুনা রয়েছে:
+
+{{% code_sample file="application/deployment.yaml" %}}
+
+উপরের মতো একটি ম্যানিফেস্ট ফাইল ব্যবহার করে ডিপ্লয়মেন্ট তৈরি করার একটি উপায় হলো
+[`kubectl apply`](/bn/docs/reference/generated/kubectl/kubectl-commands#apply) কমান্ড ব্যবহার
+করা, `kubectl` এর কমান্ড-লাইন ইন্টারফেসে `.yaml` ফাইলটি আর্গুমেন্ট হিসেবে দিয়ে। একটি উদাহরণ:
+
+```shell
+kubectl apply -f https://k8s.io/examples/application/deployment.yaml
+```
+
+আউটপুট এর অনুরূপ:
+
+```
+deployment.apps/nginx-deployment created
+```
+
+
+### প্রয়োজনীয় ক্ষেত্র
+
+আপনার কুবারনেটিস অবজেক্টের ম্যানিফেস্টে (YAML বা JSON ফাইল) নিম্নলিখিত ক্ষেত্রগুলোর মান নির্ধারণ করতে হবে:
+
+* `apiVersion` - আপনি কোন ভার্সনের কুবারনেটিস API ব্যবহার করছেন
+* `kind` - আপনি কোন ধরনের অবজেক্ট তৈরি করতে চান
+* `metadata` - যে ডেটা অবজেক্টটিকে অনন্যভাবে শনাক্ত করতে সাহায্য করে, যার মধ্যে রয়েছে একটি `name` স্ট্রিং, `UID` এবং ঐচ্ছিক `namespace`
+* `spec` - অবজেক্টটির জন্য আপনি কোন অবস্থা চান
+
+অবজেক্ট spec-এর সুনির্দিষ্ট ফরম্যাট প্রতিটি কুবারনেটিস অবজেক্টের জন্য আলাদা, এবং
+সেই অবজেক্টের জন্য নির্দিষ্ট নেস্টেড ক্ষেত্র রয়েছে। [Kubernetes API রেফারেন্স](/bn/docs/reference/kubernetes-api/)
+আপনি যে সমস্ত অবজেক্ট তৈরি করতে পারেন তার নির্দিষ্ট ফরম্যাট খুঁজে পেতে সাহায্য করতে পারে।
+
+উদাহরণস্বরূপ, Pod API রেফারেন্সের [`spec` ফিল্ড](/bn/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec)
+দেখুন।
+প্রতিটি Pod এর জন্য, `.spec` ক্ষেত্রটি পড এবং তার কাঙ্ক্ষিত অবস্থা (যেমন সেই পডের মধ্যে প্রতিটি কন্টেইনারের জন্য কন্টেইনার ইমেজের নাম)
+নির্দিষ্ট করে৷
+অবজেক্ট স্পেসিফিকেশনের আরও একটি উদাহরণ হলো
+StatefulSet API-এর [`spec` ফিল্ড](/bn/docs/reference/kubernetes-api/workload-resources/stateful-set-v1/#StatefulSetSpec)।
+StatefulSet এর ক্ষেত্রে, `.spec` ফিল্ডটি StatefulSet এবং
+এর কাঙ্ক্ষিত অবস্থা নির্দিষ্ট করে।
+StatefulSet এর `.spec` এর মধ্যে পড অবজেক্টগুলোর জন্য একটি [টেমপ্লেট](/bn/docs/concepts/workloads/pods/#pod-templates)
+রয়েছে। সেই টেমপ্লেটটি সেই পডগুলো বর্ণনা করে, যা
+StatefulSet কন্ট্রোলার স্টেটফুলসেট স্পেসিফিকেশন পূরণ করার জন্য তৈরি করবে।
+বিভিন্ন ধরনের অবজেক্টের ভিন্ন ভিন্ন `.status` থাকতে পারে; আবারও, API রেফারেন্স পৃষ্ঠাগুলো
+সেই `.status` ফিল্ডের গঠন এবং প্রতিটি ভিন্ন ধরনের অবজেক্টের জন্য এর বিষয়বস্তুর বিবরণ দেয়।
+
+{{< note >}}
+YAML কনফিগারেশন ফাইল লেখার অতিরিক্ত তথ্যের জন্য
+[কনফিগারেশন সেরা অনুশীলন](/bn/docs/concepts/configuration/overview/) দেখুন।
+{{< /note >}}
+
+## সার্ভার সাইড ফিল্ড ভ্যালিডেশন
+
+কুবারনেটিস v1.25 থেকে শুরু করে, API সার্ভার সার্ভার-সাইড
+[ফিল্ড ভ্যালিডেশন](/bn/docs/reference/using-api/api-concepts/#field-validation)
+প্রদান করে, যা একটি অবজেক্টে অচেনা বা ডুপ্লিকেট ফিল্ড শনাক্ত করে। এটি
+`kubectl --validate` এর সমস্ত কার্যকারিতা সার্ভার সাইডে প্রদান করে।
+
+`kubectl` টুলটি ফিল্ড ভ্যালিডেশনের স্তর নির্ধারণ করতে `--validate` ফ্ল্যাগ ব্যবহার করে। এটি
+`ignore`, `warn` এবং `strict` মান গ্রহণ করে, পাশাপাশি `true` (`strict` এর সমতুল্য)
+এবং `false` (`ignore` এর সমতুল্য) মানও গ্রহণ করে। `kubectl` এর ডিফল্ট ভ্যালিডেশন সেটিং হলো `--validate=true`।
+
+`Strict`
+: কঠোর ফিল্ড ভ্যালিডেশন; ভ্যালিডেশন ব্যর্থ হলে অনুরোধটি ত্রুটিসহ ব্যর্থ হয়
+
+`Warn`
+: ফিল্ড ভ্যালিডেশন করা হয়, কিন্তু ত্রুটিগুলো অনুরোধ ব্যর্থ করার পরিবর্তে সতর্কবার্তা হিসেবে প্রকাশ করা হয়
+
+`Ignore`
+: কোনো সার্ভার সাইড ফিল্ড ভ্যালিডেশন করা হয় না
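+
+উদাহরণস্বরূপ, একটি ম্যানিফেস্ট প্রয়োগ করার সময় আপনি ভ্যালিডেশনের স্তর স্পষ্টভাবে বেছে নিতে পারেন
+(ফাইলের URL-টি এই পৃষ্ঠার আগের উদাহরণ থেকে নেওয়া):
+
+```shell
+# অচেনা বা ডুপ্লিকেট ফিল্ড থাকলে অনুরোধটি ব্যর্থ হবে
+kubectl apply --validate=strict -f https://k8s.io/examples/application/deployment.yaml
+
+# ত্রুটির বদলে শুধু সতর্কবার্তা দেখানো হবে
+kubectl apply --validate=warn -f https://k8s.io/examples/application/deployment.yaml
+```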
+
+`kubectl` যখন এমন একটি API সার্ভারে সংযোগ করতে পারে না যা ফিল্ড ভ্যালিডেশন সমর্থন করে, তখন এটি
+ক্লায়েন্ট-সাইড ভ্যালিডেশন ব্যবহারে ফিরে যায়। কুবারনেটিস 1.27 এবং পরবর্তী সংস্করণগুলো সবসময় ফিল্ড ভ্যালিডেশন প্রদান করে;
+পুরোনো কুবারনেটিস রিলিজগুলোতে এটি নাও থাকতে পারে। আপনার ক্লাস্টার v1.27 এর চেয়ে পুরোনো হলে আপনার কুবারনেটিস
+সংস্করণের ডকুমেন্টেশন দেখুন।
+
+## {{% heading "whatsnext" %}}
+
+আপনি যদি কুবারনেটিসে নতুন হন, তাহলে নিম্নলিখিত বিষয়গুলো সম্পর্কে আরও পড়ুন:
+
+* [Pods](/bn/docs/concepts/workloads/pods/), যা হলো সবচেয়ে গুরুত্বপূর্ণ মৌলিক কুবারনেটিস অবজেক্ট।
+* [Deployment](/bn/docs/concepts/workloads/controllers/deployment/) অবজেক্টগুলো।
+* কুবারনেটিসে [Controllers](/bn/docs/concepts/architecture/controller/)।
+* [kubectl](/bn/docs/reference/kubectl/) এবং [kubectl কমান্ড](/bn/docs/reference/generated/kubectl/kubectl-commands)।
+
+[কুবারনেটিস অবজেক্ট ম্যানেজমেন্ট](/bn/docs/concepts/overview/working-with-objects/object-management/)
+`kubectl` ব্যবহার করে অবজেক্ট পরিচালনা করার উপায়গুলো বিস্তারিতভাবে বর্ণনা করে।
+আপনার কাছে আগে থেকে না থাকলে [kubectl ইনস্টল করুন](/bn/docs/tasks/tools/#kubectl)।
+
+কুবারনেটিস API সম্পর্কে সাধারণভাবে জানতে, পড়ুন:
+
+* [Kubernetes API overview](/bn/docs/reference/using-api/)
+
+কুবারনেটিসে অবজেক্টগুলো সম্পর্কে বিস্তারিত জানতে, এই বিভাগের অন্যান্য পৃষ্ঠাগুলো পড়ুন:
\ No newline at end of file
diff --git a/content/bn/docs/concepts/policy/_index.md b/content/bn/docs/concepts/policy/_index.md
new file mode 100644
index 0000000000000..c769903b11c66
--- /dev/null
+++ b/content/bn/docs/concepts/policy/_index.md
@@ -0,0 +1,68 @@
+---
+title: "নীতিমালা"
+weight: 90
+no_list: true
+description: >
+  নীতির মাধ্যমে নিরাপত্তা এবং সর্বোত্তম-অনুশীলন পরিচালনা করুন
+---
+
+
+
+কুবারনেটিস নীতিগুলো এমন কনফিগারেশন, যা অন্যান্য কনফিগারেশন বা রানটাইম আচরণ পরিচালনা করে। কুবারনেটিস বিভিন্ন ধরনের নীতি সরবরাহ করে, যা নিচে বর্ণিত হলো:
+
+
+
+## এপিআই (API) অবজেক্ট ব্যবহার করে পলিসি প্রয়োগ করুন
+
+কিছু API অবজেক্ট নীতি হিসাবে কাজ করে। এখানে কিছু উদাহরণ দেওয়া হলো:
+* [নেটওয়ার্ক নীতি](/bn/docs/concepts/services-networking/network-policies/) একটি ওয়ার্কলোডের প্রবেশ (ingress) এবং প্রস্থান (egress) ট্র্যাফিক সীমাবদ্ধ করতে ব্যবহার করা যেতে পারে।
+* [লিমিট রেঞ্জ](/bn/docs/concepts/policy/limit-range/) বিভিন্ন অবজেক্টের ধরন জুড়ে রিসোর্স বরাদ্দের সীমাবদ্ধতা পরিচালনা করে।
+* [রিসোর্স কোটা](/bn/docs/concepts/policy/resource-quotas/) একটি {{< glossary_tooltip text="নেমস্পেস" term_id="namespace" >}}-এর রিসোর্স খরচ সীমাবদ্ধ করে (নিচের নমুনাটি দেখুন)।
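+
+একটি নমুনা হিসেবে, একটি অনুমানমূলক `team-a` নেমস্পেসে রিসোর্স খরচ সীমাবদ্ধ করার রিসোর্স কোটা ম্যানিফেস্ট
+এরকম দেখাতে পারে (নাম এবং সীমাগুলো কেবল উদাহরণ):
+
+```yaml
+apiVersion: v1
+kind: ResourceQuota
+metadata:
+  name: compute-quota    # নমুনা নাম
+  namespace: team-a      # অনুমানমূলক নেমস্পেস
+spec:
+  hard:
+    requests.cpu: "4"
+    requests.memory: 8Gi
+    limits.cpu: "8"
+    limits.memory: 16Gi
+```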
+
+## ভর্তি নিয়ন্ত্রক ব্যবহার করে নীতিমালা প্রয়োগ করুন
+
+একটি {{< glossary_tooltip text="ভর্তি নিয়ন্ত্রক" term_id="admission-controller" >}}
+API সার্ভারে চলে
+এবং API অনুরোধগুলোকে যাচাই বা পরিবর্তন করতে পারে। কিছু ভর্তি নিয়ন্ত্রক নীতি প্রয়োগের কাজ করে।
+উদাহরণস্বরূপ, [AlwaysPullImages](/bn/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages) ভর্তি নিয়ন্ত্রক একটি নতুন পড পরিবর্তন করে এর ইমেজ পুল পলিসি `Always`-এ সেট করে।
+
+কুবারনেটিসে বেশ কয়েকটি অন্তর্নির্মিত ভর্তি নিয়ন্ত্রক রয়েছে, যেগুলো API সার্ভারের `--enable-admission-plugins` ফ্ল্যাগের মাধ্যমে কনফিগার করা যায়।
+
+উপলব্ধ ভর্তি নিয়ন্ত্রকদের সম্পূর্ণ তালিকাসহ তাদের বিবরণ একটি ডেডিকেটেড অংশে নথিভুক্ত (ডকুমেন্ট) করা হয়েছে:
+
+* [ভর্তি নিয়ন্ত্রকরা](/bn/docs/reference/access-authn-authz/admission-controllers/)
+
+## ভ্যালিডেটিংঅ্যাডমিশনপলিসি ব্যবহার করে নীতিগুলো প্রয়োগ করুন
+
+ভ্যালিডেটিং অ্যাডমিশন পলিসি কমন এক্সপ্রেশন ল্যাঙ্গুয়েজ (CEL) ব্যবহার করে API সার্ভারে কনফিগারযোগ্য ভ্যালিডেশন চেক কার্যকর করার সুযোগ দেয়। উদাহরণস্বরূপ, `latest` ইমেজ ট্যাগের ব্যবহার নিষিদ্ধ করতে একটি `ValidatingAdmissionPolicy` ব্যবহার করা যেতে পারে (নিচে একটি খসড়া নমুনা দেওয়া হলো)।
+
+একটি `ValidatingAdmissionPolicy` একটি API অনুরোধের উপর কাজ করে এবং অ-সম্মতিযুক্ত (non-compliant) কনফিগারেশন ব্লক করতে, নিরীক্ষণ (audit) করতে এবং সে সম্পর্কে ব্যবহারকারীদের সতর্ক করতে ব্যবহার করা যেতে পারে।
+
+উদাহরণসহ `ValidatingAdmissionPolicy` API সম্পর্কে বিস্তারিত বিবরণ একটি ডেডিকেটেড অংশে নথিভুক্ত (ডকুমেন্ট) করা হয়েছে:
+* [ভ্যালিডেটিং অ্যাডমিশন পলিসি](/bn/docs/reference/access-authn-authz/validating-admission-policy/)
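+
+নিচে এমন একটি নীতির একটি খসড়া স্কেচ দেওয়া হলো, যা ডিপ্লয়মেন্টে `latest` ট্যাগ নিষিদ্ধ করে।
+নাম, রিসোর্স নির্বাচন এবং এক্সপ্রেশনটি অনুমানমূলক উদাহরণ মাত্র; API সংস্করণ আপনার ক্লাস্টারের সংস্করণ অনুযায়ী ভিন্ন হতে পারে:
+
+```yaml
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingAdmissionPolicy
+metadata:
+  name: disallow-latest-tag        # নমুনা নাম
+spec:
+  matchConstraints:
+    resourceRules:
+    - apiGroups: ["apps"]
+      apiVersions: ["v1"]
+      operations: ["CREATE", "UPDATE"]
+      resources: ["deployments"]
+  validations:
+  - expression: "object.spec.template.spec.containers.all(c, !c.image.endsWith(':latest'))"
+    message: "কন্টেইনার ইমেজে latest ট্যাগ ব্যবহার করা যাবে না"
+```
+
+মনে রাখবেন, নীতিটি কার্যকর করতে একটি আলাদা `ValidatingAdmissionPolicyBinding`-ও প্রয়োজন হয়।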
+
+## ডাইনামিক ভর্তি নিয়ন্ত্রণ ব্যবহার করে নীতিমালা প্রয়োগ করুন
+
+ডায়নামিক অ্যাডমিশন কন্ট্রোলার (বা অ্যাডমিশন ওয়েবহুক) API সার্ভারের বাইরে পৃথক অ্যাপ্লিকেশন হিসাবে চলে, যা API অনুরোধের ভ্যালিডেশন বা মিউটেশন সম্পাদনের জন্য ওয়েবহুক অনুরোধ গ্রহণ করতে নিবন্ধিত হয়।
+
+ডায়নামিক অ্যাডমিশন কন্ট্রোলারগুলো API অনুরোধে নীতি প্রয়োগ করতে এবং অন্যান্য নীতি-ভিত্তিক ওয়ার্কফ্লো ট্রিগার করতে ব্যবহার করা যেতে পারে। একটি ডায়নামিক ভর্তি নিয়ন্ত্রক জটিল চেক সম্পাদন করতে পারে, যার জন্য অন্যান্য ক্লাস্টার রিসোর্স এবং বাইরের ডেটা আনার প্রয়োজন হতে পারে। উদাহরণস্বরূপ, একটি ইমেজ যাচাইকরণ চেক কন্টেইনার ইমেজের স্বাক্ষর এবং প্রত্যয়ন যাচাই করতে ওসিআই (OCI) রেজিস্ট্রি থেকে ডেটা খুঁজতে পারে।
+
+ডায়নামিক ভর্তি নিয়ন্ত্রণের বিস্তারিত বিবরণ একটি ডেডিকেটেড অংশে নথিভুক্ত (ডকুমেন্ট) করা হয়েছে:
+* [ডাইনামিক ভর্তি নিয়ন্ত্রণ](/bn/docs/reference/access-authn-authz/extensible-admission-controllers/)
+
+### বাস্তবায়ন {#implementations-admission-control}
+
+{{% thirdparty-content %}}
+
+নমনীয় নীতি ইঞ্জিন হিসাবে কাজ করে এমন ডায়নামিক অ্যাডমিশন কন্ট্রোলার কুবারনেটিস ইকোসিস্টেমে তৈরি (ডেভেলপ) করা হচ্ছে, যেমন:
+- [Kubewarden](https://github.com/kubewarden)
+- [Kyverno](https://kyverno.io)
+- [OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper)
+- [Polaris](https://polaris.docs.fairwinds.com/admission-controller/)
+
+## Kubelet কনফিগারেশন ব্যবহার করে নীতি প্রয়োগ করুন
+
+কুবারনেটিস প্রতিটি ওয়ার্কার নোডে Kubelet কনফিগার করার অনুমতি দেয়। কিছু Kubelet কনফিগারেশন নীতি হিসাবে কাজ করে:
+* [প্রক্রিয়া আইডি সীমা এবং সংরক্ষণ](/bn/docs/concepts/policy/pid-limiting/) বরাদ্দযোগ্য পিআইডি (PID) সীমাবদ্ধ এবং সংরক্ষণ করতে ব্যবহৃত হয়।
+* [নোড রিসোর্স ম্যানেজার](/bn/docs/concepts/policy/node-resource-managers/) লেটেন্সি-সংবেদনশীল এবং উচ্চ-থ্রুপুট ওয়ার্কলোডের জন্য কম্পিউট, মেমরি এবং ডিভাইস রিসোর্স পরিচালনা করতে পারে।
diff --git a/content/bn/docs/concepts/scheduling-eviction/_index.md b/content/bn/docs/concepts/scheduling-eviction/_index.md
new file mode 100644
index 0000000000000..abce2f356c5c6
--- /dev/null
+++ b/content/bn/docs/concepts/scheduling-eviction/_index.md
@@ -0,0 +1,41 @@
+---
+title: "শিডিউলিং, প্রিএম্পশন এবং ইভিকশন (Scheduling, Preemption and Eviction)"
+weight: 95
+content_type: concept
+description: >
+  কুবারনেটিসে, শিডিউলিং মানে হলো নিশ্চিত করা যে পডগুলো নোডের
+  সাথে মিলেছে, যাতে kubelet সেগুলো চালাতে পারে।
+  প্রিএম্পশন হলো নিম্ন অগ্রাধিকারের পডগুলো বন্ধ করার প্রক্রিয়া, যাতে উচ্চ
+  অগ্রাধিকারের পডগুলো নোডে শিডিউল হতে পারে। ইভিকশন হলো
+  রিসোর্স-ক্ষুধার্ত নোডে এক বা একাধিক পড সক্রিয়ভাবে বন্ধ করার প্রক্রিয়া।
+no_list: True
+---
+
+কুবারনেটিসে, শিডিউলিং মানে হলো নিশ্চিত করা যে {{< glossary_tooltip text="পডগুলো" term_id="pod" >}}
+{{< glossary_tooltip text="নোডের" term_id="node" >}} সাথে মিলেছে, যাতে
+{{< glossary_tooltip term_id="kubelet" >}} সেগুলো চালাতে পারে। প্রিএম্পশন হলো নিম্ন
+{{< glossary_tooltip text="অগ্রাধিকারের" term_id="pod-priority" >}} পডগুলো বন্ধ করার প্রক্রিয়া, যাতে উচ্চ
+অগ্রাধিকারের পডগুলো নোডে শিডিউল হতে পারে। ইভিকশন হলো রিসোর্স-ক্ষুধার্ত নোডে এক বা একাধিক
+পড সক্রিয়ভাবে বন্ধ করার প্রক্রিয়া।
+
+## শিডিউলিং
+
+* [কুবারনেটিস এর শিডিউলার](/bn/docs/concepts/scheduling-eviction/kube-scheduler/)
+* [নোডগুলিতে পডস বরাদ্দ করা](/bn/docs/concepts/scheduling-eviction/assign-pod-node/)
+* [পডসের অতিরিক্ত ব্যয়](/bn/docs/concepts/scheduling-eviction/pod-overhead/)
+* [পডস এর টপোলজি ছড়িয়ে যাওয়ার সীমাবদ্ধতা](/bn/docs/concepts/scheduling-eviction/topology-spread-constraints/)
+* [টেইন্টস এবং টলারেশনস](/bn/docs/concepts/scheduling-eviction/taint-and-toleration/)
+* [শিডিউলিং ফ্রেমওয়ার্ক](/bn/docs/concepts/scheduling-eviction/scheduling-framework)
+* [ডাইনামিক রিসোর্স বরাদ্দ করা](/bn/docs/concepts/scheduling-eviction/dynamic-resource-allocation)
+* [শিডিউলার পারফরমেন্স টিউনিং](/bn/docs/concepts/scheduling-eviction/scheduler-perf-tuning/)
+* [সম্প্রসারিত রিসোর্স এর জন্য রিসোর্স বিন প্যাকিং](/bn/docs/concepts/scheduling-eviction/resource-bin-packing/)
+* [পড শিডিউলিং প্রস্তুতি](/bn/docs/concepts/scheduling-eviction/pod-scheduling-readiness/)
+* [ডিশেডিউলার](https://github.com/kubernetes-sigs/descheduler#descheduler-for-kubernetes)
+
+## পড ডিসরাপশন (ভাঙ্গন)
+
+{{< glossary_definition term_id="pod-disruption" length="all" >}}
+
+* [পড অগ্রাধিকার এবং প্রিএম্পশন](/bn/docs/concepts/scheduling-eviction/pod-priority-preemption/)
+* [নোড-প্রেসার ইভিকশন](/bn/docs/concepts/scheduling-eviction/node-pressure-eviction/)
+* [API-প্রবর্তিত ইভিকশন](/bn/docs/concepts/scheduling-eviction/api-eviction/)
diff --git a/content/bn/docs/concepts/security/_index.md b/content/bn/docs/concepts/security/_index.md
new file mode 100644
index 0000000000000..cc87ce22cd062
--- /dev/null
+++ b/content/bn/docs/concepts/security/_index.md
@@ -0,0 +1,129 @@
+---
+title: "নিরাপত্তা"
+weight: 85
+description: >
+  ক্লাউড-নেটিভ ওয়ার্কলোডকে নিরাপদ রাখার ধারণাগুলো।
+simple_list: true
+---
+
+কুবারনেটিস ডকুমেন্টেশনের এই অংশের উদ্দেশ্য হলো আপনাকে ক্লাউড-নেটিভ প্রযুক্তিতে
+ওয়ার্কলোডগুলো আরও নিরাপদভাবে চালাতে শেখানো এবং একটি কুবারনেটিস ক্লাস্টার
+নিরাপদ রাখার গুরুত্বপূর্ণ দিকগুলো সম্পর্কে জানানো।
+
+কুবারনেটিস ক্লাউড-নেটিভ আর্কিটেকচারের উপর ভিত্তি করে তৈরি এবং ক্লাউড-নেটিভ তথ্য নিরাপত্তার
+ভালো অনুশীলনের জন্য {{< glossary_tooltip text="CNCF" term_id="cncf" >}} থেকে
+পরামর্শ গ্রহণ করে।
+
+আপনার ক্লাস্টার এবং অ্যাপ্লিকেশনগুলোকে কীভাবে সুরক্ষিত করবেন
+সে সম্পর্কে বিস্তৃত প্রেক্ষাপটের জন্য
+[ক্লাউড নেটিভ নিরাপত্তা এবং কুবারনেটিস](/bn/docs/concepts/security/cloud-native-security/) পড়ুন।
+
+## কুবারনেটিসের নিরাপত্তা ব্যবস্থা {#security-mechanisms}
+
+কুবারনেটিসের মধ্যে বেশ কয়েকটি API এবং নিরাপত্তা কন্ট্রোল রয়েছে,
+সেইসাথে [পলিসি](#পলিসি) সংজ্ঞায়িত করার উপায়, যা আপনি কীভাবে তথ্য নিরাপত্তা পরিচালনা করেন তার অংশ হতে পারে।
+
+### কন্ট্রোল প্লেন সুরক্ষা
+
+যেকোনো কুবারনেটিস ক্লাস্টারের জন্য একটি প্রধান নিরাপত্তা ব্যবস্থা হলো
+[কুবারনেটিস API-এ অ্যাক্সেস নিয়ন্ত্রণ](/bn/docs/concepts/security/controlling-access) করা।
+
+কুবারনেটিস আশা করে যে আপনি কন্ট্রোল প্লেনের মধ্যে এবং কন্ট্রোল প্লেন ও এর ক্লায়েন্টদের মধ্যে
+[ট্রানজিটে ডেটা এনক্রিপশন](/bn/docs/tasks/tls/managing-tls-in-a-cluster/)
+প্রদান করতে TLS কনফিগার করবেন এবং ব্যবহার করবেন। আপনি কুবারনেটিস কন্ট্রোল প্লেনের মধ্যে
+সংরক্ষিত ডেটার জন্য [এনক্রিপশন এট রেস্ট (encryption at
rest)](/bn/docs/tasks/administer-cluster/encrypt-data/) সক্ষম করতে পারেন; +এটি আপনার নিজের ওয়ার্কলোডের ডেটার জন্য এনক্রিপশন এট রেস্ট ব্যবহার করা থেকে আলাদা, +যা একটি ভাল আইডিয়াও হতে পারে + +### সিক্রেট + +[সিক্রেট](/bn/docs/concepts/configuration/secret/) API কনফিগারেশন ভ্যালুগুলির জন্য মৌলিক +সুরক্ষা প্রদান করে যার জন্য গোপনীয়তা প্রয়োজন । + +### ওয়ার্কলোড সুরক্ষা + +পড এবং তাদের কন্টেনারগুলি যথাযথভাবে আইসোলেট নিশ্চিত করতে +[পড নিরাপত্তা স্ট্যান্ডার্ডস](/bn/docs/concepts/security/pod-security-standards/) +আপনার প্রয়োজন হলে কাস্টম আইসোলেশন নির্ধারণ করার জন্য আপনি +[RuntimeClasses](/bn/docs/concepts/containers/runtime-class) ব্যবহার করতে পারেন। + +[নেটওয়ার্ক পলিসি](/bn/docs/concepts/services-networking/network-policies/) আপনাকে +পডগুলির মধ্যে, অথবা আপনার ক্লাস্টারের বাইরের নেটওয়ার্ক মধ্যে নেটওয়ার্ক ট্রাফিক নিয়ন্ত্রণ করতে দেয়। + +আপনি পড, তাদের কন্টেনারগুলি এবং তাদের মধ্যে চলা ইমেজগুলির চারপাশে প্রতিরোধমূলক বা ডিটেক্টিভ +কন্ট্রোলগুলি প্রয়োগ করতে বিস্তৃত ইকোসিস্টেম থেকে সিকিউরিটি কন্ট্রোল স্থাপন করতে পারেন । + +### অডিটিং + +কুবারনেটিস [অডিটিং লগিং](/bn/docs/tasks/debug/debug-cluster/audit/) একটি নিরাপত্তা-সংশ্লিষ্ট, +সময়ানুক্রমিক সেট অফ রেকর্ড সরবরাহ করে যা ক্লাস্টারের ক্রিয়াকলাপের অনুক্রমিক ডকুমেন্ট করে। ক্লাস্টার +ব্যবহারকারীদের দ্বারা উত্পন্ন ক্রিয়াকলাপ, কুবার্নিটিস API ব্যবহার করা অ্যাপ্লিকেশন এবং নিয়ন্ত্রণ প্লেন নিজস্ব +ক্রিয়াকলাপগুলি অডিট করে। + +## ক্লাউড প্রোভাইডার নিরাপত্তা + +{{% thirdparty-content vendor="true" %}} + +আপনি যদি আপনার নিজের হার্ডওয়্যার বা অন্য কোনো ক্লাউড প্রোভাইডার এ একটি কুবারনেটিস ক্লাস্টার চালান, +তাহলে নিরাপত্তার সর্বোত্তম অনুশীলনের জন্য আপনার ডকুমেন্টেশনের সাথে পরামর্শ করুন। +এখানে কিছু জনপ্রিয় ক্লাউড প্রোভাইডার এর নিরাপত্তা ডকুমেন্টেশনের লিঙ্ক রয়েছে : + +{{< table caption="ক্লাউড প্রদায়কের নিরাপত্তা" >}} + +IaaS প্রদায়ক | লিঙ্ক | +-------------------- | ------------ | +আলিবাবা ক্লাউড | https://www.alibabacloud.com/trust-center | +আমাজন ওয়েব সার্ভিস | https://aws.amazon.com/security | +গুগল ক্লাউড প্ল্যাটফর্ম | https://cloud.google.com/security | +হুয়াওয়ে ক্লাউড | https://www.huaweicloud.com/intl/en-us/securecenter/overallsafety | +আইবিএম ক্লাউড | https://www.ibm.com/cloud/security | +মাইক্রোসফট আজওর | https://docs.microsoft.com/en-us/azure/security/azure-security | +অরাকেল ক্লাউড ইন্ফ্রাস্ট্রাকচার | https://www.oracle.com/security | +VMware vSphere | https://www.vmware.com/security/hardening-guides | + +{{< /table >}} + +## পলিসি + +আপনি কুবারনেটিস-নেটিভ মেকানিজম ব্যবহার করে নিরাপত্তা পলিসি নির্ধারণ করতে পারেন, +যেমন [NetworkPolicy](/bn/docs/concepts/services-networking/network-policies/) +(নেটওয়ার্ক প্যাকেট ফিল্টারিং উপর ঘোষণামূলক কন্ট্রোল) বা +[ValidatingAdmisisonPolicy](/bn/docs/reference/access-authn-authz/validating-admission-policy/) +(কুবারনেটিস API ব্যবহার করে কেউ কী পরিবর্তন করতে পারে তার ঘোষণামূলক সীমাবদ্ধতা)। + +তবে, আপনি কুবারনেটিস পরিবেশের চারপাশে পলিসি কার্যান্বয়নে নির্ভর করতে পারেন। কুবারনেটিস এক্সটেনশন মেকানিজম সরবরাহ করে +এই পরিবেশ প্রকল্পগুলির উপর তাদের নিজস্ব পলিসি নিয়ন্ত্রণ সাধারণের জন্য +উন্মোচনের সুযোগ প্রদান করতে। এগুলি উদাহরণ হিসেবে উল্লেখ করা যেতে পারে: +সোর্স কোড পর্যালোচনা, কন্টেনার ইমেজ অনুমোদন, এপিআই অ্যাক্সেস নিয়ন্ত্রণ, +নেটওয়ার্কিং, এবং অন্যান্য। + +পলিসি মেকানিজম এবং কুবারনেটিসের সম্পর্কে আরও তথ্য জানতে, +[পলিসি](/bn/docs/concepts/policy/) পড়ুন। + +## {{% heading "whatsnext" %}} + +সম্পর্কিত কুবারনেটিস নিরাপত্তা বিষয়গুলি জানুন: + +* [আপনার ক্লাস্টার নিরাপত্তা সুরক্ষা করা](/bn/docs/tasks/administer-cluster/securing-a-cluster/) +* [পরিচিত 
দুর্বলতা](/bn/docs/reference/issues-security/official-cve-feed/) + কুবারনেটিসে (এবং আরও তথ্যের লিঙ্ক) +* [ট্রানজিটে ডেটা এনক্রিপশন](/bn/docs/tasks/tls/managing-tls-in-a-cluster/) কন্ট্রোল প্লেনের জন্য +* [ডেটা এনক্রিপশন এট রেস্ট](/bn/docs/tasks/administer-cluster/encrypt-data/) +* [কুবারনেটিস API অ্যাক্সেস নিয়ন্ত্রণ](/bn/docs/concepts/security/controlling-access) +* [নেটওয়ার্ক পলিসি](/bn/docs/concepts/services-networking/network-policies/) পড এর জন্য +* [কুবারনেটিসে সিক্রেট](/bn/docs/concepts/configuration/secret/) +* [পডগুলির নিরাপত্তা স্ট্যান্ডার্ডস](/bn/docs/concepts/security/pod-security-standards/) +* [RuntimeClasses](/bn/docs/concepts/containers/runtime-class) + +প্রসঙ্গ জানুন: + + +* [ক্লাউড নেটিভ নিরাপত্তা এবং কুবারনেটিস](/bn/docs/concepts/security/cloud-native-security/) পড়ুন। + +সার্টিফাইড: + +* [সার্টিফাইড কুবারনেটিস নিরাপত্তা বিশেষজ্ঞ](https://training.linuxfoundation.org/certification/certified-kubernetes-security-specialist/) + সার্টিফিকেশন এবং অফিসিয়াল প্রশিক্ষণ কোর্স। + +এই অধ্যায়ে আরো পড়ুন: + diff --git a/content/bn/docs/concepts/services-networking/_index.md b/content/bn/docs/concepts/services-networking/_index.md new file mode 100644 index 0000000000000..ebb1e3e374959 --- /dev/null +++ b/content/bn/docs/concepts/services-networking/_index.md @@ -0,0 +1,65 @@ +--- +title: "সার্ভিস, লোড ব্যালেন্সিং এবং নেটওয়ার্কিং" +weight: 60 +description: > + কুবারনেটিসে নেটওয়ার্কিংয়ের পিছনে থাকা ধারণা এবং রিসোর্স। +--- + +## কুবারনেটিস নেটওয়ার্ক মডেল + +একটি ক্লাস্টারের প্রতিটি [`পড`](/bn/docs/concepts/workloads/pods/) তার নিজস্ব ক্লাস্টার-ওয়াইড আইপি ঠিকানা পায়। +এর অর্থ হলো আপনাকে `পডের` মধ্যে স্পষ্টভাবে লিঙ্ক তৈরি করার দরকার নেই +এবং পোর্টগুলো হোস্ট করার জন্য আপনাকে ম্যাপিং কন্টেইনার পোর্টগুলোর সাথে মোকাবিলা করতে হবে না। +এটি একটি পরিষ্কার, পিছনের-সামঞ্জস্যপূর্ণ মডেল (backwards-compatible model) তৈরি করে +যেখানে পোর্ট বরাদ্দকরণ, নামকরণ, সার্ভিস আবিষ্কার (service discovery), [লোড ব্যালেন্সিং](/bn/docs/concepts/services-networking/ingress/#load-balancing), অ্যাপ্লিকেশন কনফিগারেশন এবং মাইগ্রেশনের +দৃষ্টিকোণ থেকে `পডগুলোকে` অনেকটা ভিএম (Virtual Machine) বা ফিজিক্যাল হোস্টের মতোই +বিবেচনা করা যেতে পারে। + +কুবারনেটিস যেকোন নেটওয়ার্কিং বাস্তবায়নে নিম্নলিখিত মৌলিক প্রয়োজনীয়তাগুলো আরোপ করে +(যেকোনো ইচ্ছাকৃত নেটওয়ার্ক বিভাজন নীতি ব্যতীত): + + * পড NAT ছাড়া অন্য কোনো [নোডে](/bn/docs/concepts/architecture/nodes/) + অন্য সব পডের সঙ্গে যোগাযোগ করতে পারে + * একটি নোডের এজেন্ট (যেমন system daemons, kubelet) সেই নোডের সমস্ত + পডের সাথে যোগাযোগ করতে পারে + +দ্রষ্টব্য: হোস্ট নেটওয়ার্কে (যেমন লিনাক্স) চলমান `পডগুলোকে` সমর্থন করে এমন প্ল্যাটফর্মগুলোর জন্য, +যখন পডগুলো একটি নোডের হোস্ট নেটওয়ার্কের সাথে সংযুক্ত থাকে তখনও +তারা সমস্ত নোডের সমস্ত পডের সাথে যোগাযোগ করতে পারে NAT ছাড়া ৷ + +এই মডেলটি শুধুমাত্র সামগ্রিকভাবে কম জটিল নয়, +এটি প্রধানত কুবারনেটিসের ইচ্ছার সাথে সামঞ্জস্যপূর্ণ যাতে ভিএম থেকে কন্টেইনারে +অ্যাপের লো-ফ্রিকশন পোর্টিং সক্ষম করা যায়। যদি আপনার কাজ আগে কোনো ভিএম-এ চলত, তাহলে আপনার ভিএম-এর IP ছিল এবং +আপনার প্রোজেক্টের অন্যান্য ভিএম-এর সাথে কথা বলতে পারে। এটি একই মৌলিক মডেল। + +কুবারনেটিস আইপি ঠিকানাগুলো `পড` স্কোপে বিদ্যমান - একটি `পডের` মধ্যে থাকা কন্টেনারগুলো +তাদের নেটওয়ার্ক নেমস্পেসগুলো ভাগ করে - তাদের IP ঠিকানা এবং MAC ঠিকানা সহ। +এর মানে হলো যে একটি `পডের` মধ্যে থাকা কন্টেইনারগুলো একে অপরের পোর্টে `লোকালহোস্টে` পৌঁছাতে পারে। +এটি আরো বোঝায় যে একটি `পডের` মধ্যে থাকা কন্টেইনারগুলোকে পোর্ট ব্যবহারের সমন্বয় করতে হবে, +তবে এটি একটি ভিএম-এর প্রক্রিয়াগুলোর থেকে আলাদা নয়। +এটিকে "IP-per-pod" মডেল বলা হয়। + +এটি কীভাবে প্রয়োগ করা হয় তা ব্যবহার করা নির্দিষ্ট 
কন্টেইনার রানটাইমের একটি ডিটেইল।
+
+`নোডেই` পোর্টের জন্য অনুরোধ করা সম্ভব, যা আপনার `পডে` ফরোয়ার্ড করা হয়
+(যাকে হোস্ট পোর্ট বলা হয়), কিন্তু এটি খুবই বিশেষ (niche) একটি অপারেশন।
+সেই ফরোয়ার্ডিং কীভাবে বাস্তবায়িত হয় তাও কন্টেইনার রানটাইমের একটি ডিটেইল।
+`পড` নিজে হোস্ট পোর্টের অস্তিত্ব বা অনস্তিত্ব সম্পর্কে কিছুই জানে না।
+
+কুবারনেটিস নেটওয়ার্কিং চারটি উদ্বেগের সমাধান করে:
+- একটি পডের মধ্যে কন্টেইনারগুলো লুপব্যাকের মাধ্যমে [যোগাযোগের জন্য নেটওয়ার্কিং ব্যবহার করে](/bn/docs/concepts/services-networking/dns-pod-service/)।
+- ক্লাস্টার নেটওয়ার্কিং বিভিন্ন পডের মধ্যে যোগাযোগ প্রদান করে।
+- [সার্ভিস](/bn/docs/concepts/services-networking/service/) API আপনাকে [পডে চলমান একটি অ্যাপ্লিকেশন প্রকাশ](/bn/docs/tutorials/services/connect-applications-service/)
+  করতে দেয়, যাতে তা আপনার ক্লাস্টারের বাইরে থেকেও পৌঁছানো যায়।
+  - [ইনগ্রেস](/bn/docs/concepts/services-networking/ingress/) বিশেষত HTTP অ্যাপ্লিকেশন, ওয়েবসাইট
+    এবং এপিআই প্রকাশ করার জন্য অতিরিক্ত কার্যকারিতা প্রদান করে।
+  - [গেটওয়ে API](/bn/docs/concepts/services-networking/gateway/) হলো একটি {{< glossary_tooltip text="অ্যাড-অন" term_id="addons" >}},
+    যা সার্ভিস নেটওয়ার্কিং মডেল করার জন্য API ধরনের একটি অভিব্যক্তিপূর্ণ (expressive), এক্সটেনসিবল এবং ভূমিকা-ভিত্তিক পরিবার প্রদান করে।
+- এছাড়া আপনি [শুধুমাত্র আপনার ক্লাস্টারের মধ্যে ব্যবহারের জন্য সার্ভিসগুলো প্রকাশ করতে](/bn/docs/concepts/services-networking/service-traffic-policy/)
+  সার্ভিস ব্যবহার করতে পারেন।
+
+[কানেক্টিং অ্যাপ্লিকেশানস উইথ সার্ভিস](/bn/docs/tutorials/services/connect-applications-service/) টিউটোরিয়ালটি একটি হ্যান্ডস-অন উদাহরণসহ সার্ভিস এবং কুবারনেটিস নেটওয়ার্কিং সম্পর্কে শিখতে সাহায্য করে।
+
+[ক্লাস্টার নেটওয়ার্কিং](/bn/docs/concepts/cluster-administration/networking/) ব্যাখ্যা করে
+কীভাবে আপনার ক্লাস্টারের জন্য নেটওয়ার্কিং সেট আপ করতে হয় এবং এর সাথে জড়িত প্রযুক্তিগুলোর একটি ওভারভিউ প্রদান করে।
diff --git a/content/bn/docs/concepts/storage/_index.md b/content/bn/docs/concepts/storage/_index.md
new file mode 100644
index 0000000000000..96a52c44f4945
--- /dev/null
+++ b/content/bn/docs/concepts/storage/_index.md
@@ -0,0 +1,6 @@
+---
+title: "স্টোরেজ"
+weight: 70
+description: >
+  আপনার ক্লাস্টারে পডগুলোতে দীর্ঘমেয়াদী এবং অস্থায়ী উভয় স্টোরেজ সরবরাহ করার উপায়।
+---
diff --git a/content/bn/docs/concepts/windows/_index.md b/content/bn/docs/concepts/windows/_index.md
new file mode 100644
index 0000000000000..c0d75ab5a3238
--- /dev/null
+++ b/content/bn/docs/concepts/windows/_index.md
@@ -0,0 +1,33 @@
+---
+title: "কুবারনেটিসে উইন্ডোজ"
+simple_list: true
+weight: 200 # late in list
+description: >-
+  কুবারনেটিস এমন নোড সমর্থন করে, যা মাইক্রোসফ্ট উইন্ডোজ চালায়।
+---
+
+কুবারনেটিস এমন ওয়ার্কার {{< glossary_tooltip text="নোড" term_id="node" >}} সমর্থন করে, যা লিনাক্স বা মাইক্রোসফ্ট
+উইন্ডোজ চালায়।
+
+{{% thirdparty-content single="true" %}}
+
+CNCF এবং এর মূল সংস্থা লিনাক্স ফাউন্ডেশন সামঞ্জস্যের প্রশ্নে একটি বিক্রেতা-নিরপেক্ষ
+পদ্ধতি গ্রহণ করে। আপনার [উইন্ডোজ সার্ভার](https://www.microsoft.com/en-us/windows-server)-কে
+একটি কুবারনেটিস ক্লাস্টারে ওয়ার্কার নোড হিসাবে যুক্ত করা সম্ভব।
+
+আপনার ক্লাস্টারে যে অপারেটিং সিস্টেমই ব্যবহার করুন না কেন, আপনি
+[উইন্ডোজে kubectl ইনস্টল এবং সেট আপ](/bn/docs/tasks/tools/install-kubectl-windows/) করতে পারেন।
+
+আপনি যদি উইন্ডোজে নোড ব্যবহার করেন তবে আপনি পড়তে পারেন:
+
+* [উইন্ডোজে নেটওয়ার্কিং](/bn/docs/concepts/services-networking/windows-networking/)
+* [কুবারনেটিসে উইন্ডোজ স্টোরেজ](/bn/docs/concepts/storage/windows-storage/)
+* [উইন্ডোজ নোডের জন্য রিসোর্স ব্যবস্থাপনা](/bn/docs/concepts/configuration/windows-resource-management/)
+* [উইন্ডোজ পড এবং কন্টেইনারগুলির জন্য RunAsUserName কনফিগার
করুন](/bn/docs/tasks/configure-pod-container/configure-runasusername/) +* [একটি উইন্ডোজ হোস্টপ্রসেস(HostProcess) পড তৈরি করুন](/bn/docs/tasks/configure-pod-container/create-hostprocess-pod/) +* [উইন্ডোজ পড এবং কন্টেইনারগুলির জন্য গ্রুপ পরিচালিত পরিষেবা অ্যাকাউন্টগুলি কনফিগার করুন](/bn/docs/tasks/configure-pod-container/configure-gmsa/) +* [উইন্ডোজ নোডের জন্য নিরাপত্তা](/bn/docs/concepts/security/windows-security/) +* [উইন্ডোজ ডিবাগিং টিপস](/bn/docs/tasks/debug/debug-cluster/windows/) +* [কুবারনেটিসে উইন্ডোজ কন্টেইনার নির্ধারণের জন্য নির্দেশিকা](/bn/docs/concepts/windows/user-guide) + +অথবা, একটি ওভারভিউ জন্য, পড়ুন: diff --git a/content/bn/docs/concepts/workloads/_index.md b/content/bn/docs/concepts/workloads/_index.md new file mode 100644 index 0000000000000..ec2432c66a9a2 --- /dev/null +++ b/content/bn/docs/concepts/workloads/_index.md @@ -0,0 +1,91 @@ +--- +title: "ওয়ার্কলোড" +weight: 55 +description: > + পড বুঝুন, কুবারনেটিসের সবচেয়ে ছোট ডেপ্লয়বল কম্পিউট অবজেক্ট এবং উচ্চ-লেভেল অবস্ট্রাক্শন যা আপনাকে সেগুলো চালাতে সাহায্য করে । +no_list: true +card: + title: ওয়ার্কলোড এবং পড + name: concepts + weight: 60 +--- + +{{< glossary_definition term_id="workload" length="short" >}} +আপনার ওয়ার্কলোড একটি একক উপাদান বা একাধিক যা একসাথে কাজ করে, কুবারনেটিসে আপনি এটিকে +[_pods_](/bn/docs/concepts/workloads/pods) এর একটি সেটের মধ্যে চালান। +কুবারনেটিসে, একটি পড আপনার ক্লাস্টারে +{{< glossary_tooltip text="কন্টেইনারগুলোর" term_id="container" >}} চলমান একটি সেট উপস্থাপন করে৷ + +কুবারনেটিস পডের একটি [সংজ্ঞায়িত জীবনচক্র](/bn/docs/concepts/workloads/pods/pod-lifecycle/). +উদাহরণস্বরূপ, একবার আপনার ক্লাস্টারে একটি পড চলমান হলে +{{< glossary_tooltip text="নোড" term_id="node" >}} যেখানে সেই পডটি চলছে +সেখানে একটি গুরুতর ত্রুটির অর্থ হলো সেই নোডের সমস্ত পড ব্যর্থ হয়েছে ৷ কুবারনেটিস ব্যর্থতার সেই লেভেলটিকে চূড়ান্ত হিসাবে বিবেচনা করে: +আপনাকে পুনরুদ্ধার করার জন্য একটি নতুন পড তৈরি করতে হবে, এমনকি যদি নোডটি পরে সুস্থ হয়ে ওঠে। + +যাইহোক, জীবনকে যথেষ্ট সহজ করতে, আপনাকে প্রতিটি পড সরাসরি পরিচালনা করতে হবে না। +পরিবর্তে, আপনি _ওয়ার্কলোড রিসোর্স_ ব্যবহার করতে পারেন যা আপনার পক্ষ থেকে পডের একটি সেট পরিচালনা করে। +এই রিসোর্সগুলো {{< glossary_tooltip term_id="controller" text="কন্ট্রোলার" >}} কনফিগার করে +যা নিশ্চিত করে যে সঠিক সংখ্যক পড চলছে, আপনার নির্দিষ্ট +স্টেটের সাথে মেলে৷ + +কুবারনেটিস বেশ কয়েকটি বিল্ট ইন ওয়ার্কলোড রিসোর্স সরবরাহ করে: + +* [ডিপ্লয়মেন্ট](/bn/docs/concepts/workloads/controllers/deployment/) এবং [রেপ্লিকাসেট](/bn/docs/concepts/workloads/controllers/replicaset/), + (ডিপ্লয়মেন্ট হলো একটি প্রতিস্থাপন ব্যবস্থা লিগেসি + {{< glossary_tooltip text="ReplicationController" term_id="replication-controller" >}} API এর) + ক্লাস্টারে একটি স্টেটলেস অ্যাপ্লিকেশান ওয়ার্কলোড পরিচালনা করার জন্য ডিপ্লয়মেন্ট একটি উপযুক্ত ব্যবস্থা , যেখানে + ডিপ্লয়মেন্টের যেকোনো পড বিনিময়যোগ্য এবং প্রয়োজনে প্রতিস্থাপন করা যেতে পারে। +* [স্টেটফুলসেট](/bn/docs/concepts/workloads/controllers/statefulset/) + আপনাকে এক বা একাধিক সম্পর্কিত পড চালাতে দেয় যা কোনোভাবে ট্র্যাক স্টেট করে। উদাহরণস্বরূপ, যদি আপনার + ওয়ার্কলোড ক্রমাগতভাবে ডেটা রেকর্ড করে, আপনি একটি স্টেটফুলসেট চালাতে পারেন যা প্রতিটি পডের সাথে একটি + [PersistentVolume](/bn/docs/concepts/storage/persistent-volumes/) এর সাথে মেলে। আপনার কোড, সেই স্টেটফুলসেটের + জন্য পডগুলিতে চলমান, সামগ্রিক স্থিতিস্থাপকতা উন্নত করতে একই স্টেটফুলসেটের অন্যান্য + পডগুলিতে ডেটা প্রতিলিপি করতে পারে। +* একটি [ডেমনসেট](/bn/docs/concepts/workloads/controllers/daemonset/) পডগুলোকে সংজ্ঞায়িত করে + যা একটি নির্দিষ্ট {{< glossary_tooltip text="নোড" term_id="node" >}} লোকাল সুবিধা প্রদান করে; 
+ প্রতিবার আপনি আপনার ক্লাস্টারে একটি নোড যুক্ত করেন যা একটি ডেমনসেটের স্পেসিফিকেশনের সাথে মেলে, + কন্ট্রোল প্লেন সেই ডেমনসেটের জন্য একটি পডকে নতুন নোডে নির্ধারণ করে। + ডেমনসেটের প্রতিটি পড একটি ক্লাসিক Unix / POSIX সার্ভারে সিস্টেম ডেমনের মতো একই ভূমিকা পালন + করে। একটি ডেমনসেট আপনার ক্লাস্টার পরিচালনার জন্য গুরুত্বপূর্ণ হতে পারে, যেমন একটি প্লাগইন নোড অ্যাক্সেস করতে দেয় + [ক্লাস্টার নেটওয়ার্কিং](/bn/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model), + এটি আপনাকে নোড পরিচালনা করতে সাহায্য করতে পারে, + অথবা এটি ঐচ্ছিক আচরণ প্রদান করতে পারে যা আপনি যে কন্টেইনার প্ল্যাটফর্মটি চালাচ্ছেন তা উন্নত করে। +* আপনি একটি + [Job](/bn/docs/concepts/workloads/controllers/job/) + এবং / বা একটি [CronJob](/bn/docs/concepts/workloads/controllers/cron-jobs/) + ব্যবহার করতে পারেন যা কাজগুলোকে চিহ্নিত করবে + যা সমাপ্তির জন্য চলবে পরে থামবে। + একটি Job একটি একক টাস্কের প্রতিনিধিত্ব করে, + যেখানে প্রতিটি CronJob একটি শিডিউল অনুযায়ী পুনরাবৃত্তি করে। + +বৃহত্তর কুবারনেটস ইকোসিস্টেমে, আপনি তৃতীয় পক্ষের ওয়ার্কলোডের রিসোর্সগুলো খুঁজে পেতে পারেন +যা অতিরিক্ত আচরণ প্রদান করে। একটি +[কাস্টম রিসোর্স ডেফিনিশন](/bn/docs/concepts/extend-kubernetes/api-extension/custom-resources/) ব্যবহার করে, +আপনি যদি কুবারনেটসের মূল অংশ নয় এমন একটি নির্দিষ্ট আচরণ চান তাহলে আপনি একটি তৃতীয় পক্ষের ওয়ার্কলোডের রিসোর্স যোগ করতে পারেন । +উদাহরণস্বরূপ, আপনি যদি আপনার অ্যাপ্লিকেশনের জন্য পডগুলির একটি গ্রুপ চালাতে চান +কিন্তু কাজ বন্ধ করতে চান যদি না _সব_ পডগুলো উপলব্ধ হয় (সম্ভবত কিছু উচ্চ-থ্রুপুট ডিস্ট্রিবিউট করা কাজের জন্য), +তাহলে আপনি সেই ফিচারটি প্রদান করে এমন একটি এক্সটেনশন বাস্তবায়ন বা ইনস্টল করতে পারেন। + +## {{% heading "whatsnext" %}} + +ওয়ার্কলোড ম্যানেজমেন্টের জন্য প্রতিটি API ধরণের সম্পর্কে পড়ার পাশাপাশি, +আপনি কীভাবে নির্দিষ্ট কাজগুলো করতে হবে তা পড়তে পারেন: + +* [একটি ডিপ্লয়মেন্ট ব্যবহার করে একটি স্টেটলেস অ্যাপ্লিকেশন চালান](/bn/docs/tasks/run-application/run-stateless-application-deployment/) +* একটি [একক উদাহরণ](/bn/docs/tasks/run-application/run-single-instance-stateful-application/) হিসাবে একটি স্টেটফুল অ্যাপ্লিকেশন চালান + অথবা একটি [রেপ্লিকেটেড সেট](/bn/docs/tasks/run-application/run-replicated-stateful-application/) হিসাবে +* [CronJob দিয়ে স্বয়ংক্রিয় কাজ চালান](/bn/docs/tasks/job/automated-tasks-with-cron-jobs/) + +কনফিগারেশন থেকে কোড আলাদা করার জন্য কুবারনেটিসের প্রক্রিয়া সম্পর্কে জানতে, +[কনফিগারেশন](/bn/docs/concepts/configuration/) দেখুন। + +কুবারনেটিস কীভাবে অ্যাপ্লিকেশনগুলির জন্য পডগুলো পরিচালনা করে +সে সম্পর্কে পটভূমি প্রদান করে এমন দুটি সাপোর্টকারী ধারণা রয়েছে: +* [গার্বেজ কালেকশন](/bn/docs/concepts/architecture/garbage-collection/) আপনার ক্লাস্টার + থেকে বস্তুগুলিকে তাদের _মালিকানাধীন রিসোর্স_ সরিয়ে ফেলার পরে পরিষ্কার করে। +* [_time-to-live after finished_ কন্ট্রোলার](/bn/docs/concepts/workloads/controllers/ttlafterfinished/) + কাজগুলো সম্পূর্ণ করার পর একটি নির্দিষ্ট সময় পেরিয়ে গেলে তা সরিয়ে দেয়। + +একবার আপনার অ্যাপ্লিকেশানটি চালু হয়ে গেলে, আপনি এটিকে একটি [সার্ভিস](/bn/docs/concepts/services-networking/service/) +হিসাবে ইন্টারনেটে উপলব্ধ করতে চাইতে পারেন বা, শুধুমাত্র ওয়েব অ্যাপ্লিকেশনের জন্য, +একটি [ইনগ্রেস](/bn/docs/concepts/services-networking/ingress) ব্যবহার করে । diff --git a/content/bn/docs/concepts/workloads/controllers/_index.md b/content/bn/docs/concepts/workloads/controllers/_index.md new file mode 100644 index 0000000000000..fa6a96f6830da --- /dev/null +++ b/content/bn/docs/concepts/workloads/controllers/_index.md @@ -0,0 +1,61 @@ +--- +title: "ওয়ার্কলোড ম্যানেজমেন্ট" +weight: 20 +simple_list: true +--- + +কুবারনেটিস বিভিন্ন বিল্ট 
ইন API দেয় ঘোষণামূলক ম্যানেজমেন্ট +{{< glossary_tooltip text="ওয়ার্কলোড" term_id="workload" >}} +এবং ওয়ার্কলোডের উপাদানের জন্য। + +অবশেষে, আপনার অ্যাপ্লিকেশন {{< glossary_tooltip term_id="Pod" text="পডের" >}} +মধ্যে রান হয় কন্টেইনার হিসেবে; যাইহোক, একক পড ম্যানেজ করা কষ্টসাধ্য। +উদাহরণস্বরূপ, যদি একটি পড ব্যর্থ হয়, আপনি তাহলে নতুন পড চালিয়ে এটিকে +রিপ্লেস করতে চাইবেন। কুবারনেটিস আপনার জন্য এটি করে দিবে। + +আপনি ওয়ার্ক লোড তৈরি করার জন্য কুবারনেটিস API ব্যবহার করতে পারেন +{{< glossary_tooltip text="অবজেক্ট" term_id="object" >}} যা পড +থেকে বেশি অবস্ট্রাক্শন লেভেল প্রদর্শন করে, তারপর কুবারনেটিস +{{< glossary_tooltip text="কন্ট্রোল প্লেন" term_id="control-plane" >}} স্বয়ংক্রিয়ভাবে আপনার +পক্ষ থেকে পড অবজেক্ট পরিচালনা করে, আপনার সংজ্ঞায়িত ওয়ার্কলোড অবজেক্টের স্পেসিফিকেশনের উপর ভিত্তি করে। + +ওয়ার্কলোড পরিচালনার জন্য বিল্ট ইন API গুলো হলো: + +[ডিপ্লয়মেন্ট](/bn/docs/concepts/workloads/controllers/deployment/) (এবং, পরোক্ষভাবে, [রেপ্লিকাসেট](/bn/docs/concepts/workloads/controllers/replicaset/)), +আপনার ক্লাস্টারে একটি অ্যাপ্লিকেশন চালানোর সবচেয়ে সাধারণ উপায়। +ক্লাস্টারে একটি স্টেটলেস অ্যাপ্লিকেশান ওয়ার্কলোড পরিচালনা করার জন্য ডিপ্লয়মেন্ট একটি উপযুক্ত ব্যবস্থা , যেখানে +ডিপ্লয়মেন্টের যেকোনো পড বিনিময়যোগ্য এবং প্রয়োজনে প্রতিস্থাপন করা যেতে পারে। +(ডিপ্লয়মেন্ট হলো একটি প্রতিস্থাপন ব্যবস্থা লিগেসি +{{< glossary_tooltip text="ReplicationController" term_id="replication-controller" >}} API এর). + +একটি [স্টেটফুলসেট](/bn/docs/concepts/workloads/controllers/statefulset/) আপনাকে অনুমতি দেয় +এক বা একাধিক পড পরিচালনা করার – সব একই অ্যাপ্লিকেশন কোড চালায় – যেখানে পড +একটি স্বতন্ত্র পরিচয় রাখতে চায়। এটি ডিপ্লয়মেন্ট থেকে ভিন্ন যেখানে +পড বিনিময়যোগ্য হয়ে থাকে । +স্টেটফুলসেটের সাধারণ কাজ হলো পড এবং পারসিসটেন্ট স্টোরেজ এর মধ্যে একটি লিঙ্ক তৈরি করা। +উদাহরণস্বরূপ, আপনি একটি স্টেটফুল সেট চালাতে পারেন যা প্রতিটি পডকে সংযুক্ত করে +একটি [PersistentVolume](/bn/docs/concepts/storage/persistent-volumes/) এর সাহায্যে।যদি +স্টেটফুলসেটের একটি পডও ব্যর্থ হয়, তাহলে কুবারনেটিস একটি প্রতিস্থাপন পড তৈরি করে +যা একই PersistentVolume এর সাথে সংযুক্ত থাকে। + +একটি [ডেমনসেট](/bn/docs/concepts/workloads/controllers/daemonset/) পডগুলোকে সংজ্ঞায়িত করে +যা একটি নির্দিষ্ট {{< glossary_tooltip text="নোড" term_id="node" >}} লোকাল সুবিধা প্রদান করে; +উদাহরণস্বরূপ, ড্রাইভার নোডের কন্টেইনারগুলোকে স্টোরেজ সিস্টেম অ্যাক্সেস করতে দেয়। আপনি তখন ডেমনসেট ব্যবহার করতে পারেন +যখন ড্রাইভার, বা অন্যান্য নোড-লেভেলের সার্ভিস,নোডে চালাতে হবে যেখানে এটি দরকারী। +ডেমনসেটের প্রতিটি পড একটি ক্লাসিক Unix / POSIX সার্ভারে সিস্টেম ডেমনের মতো +একই ভূমিকা পালন করে। +একটি ডেমনসেট আপনার ক্লাস্টার পরিচালনার জন্য গুরুত্বপূর্ণ হতে পারে, +যেমন একটি প্লাগইন নোড অ্যাক্সেস করতে দেয় +[ক্লাস্টার নেটওয়ার্কিং](/bn/docs/concepts/cluster-administration/networking/#how-to-implement-the-kubernetes-network-model), +এটি আপনাকে নোড পরিচালনা করতে সাহায্য করতে পারে, +বা এটি কম সুবিধা প্রদান করতে পারে যা আপনি যে কন্টেইনার প্ল্যাটফর্মটি চালাচ্ছেন তা উন্নত করে। +আপনি আপনার ক্লাস্টারের প্রতিটি নোড জুড়ে বা শুধুমাত্র একটি উপসেট জুড়ে ডেমনসেট (এবং তাদের পড) চালাতে পারেন (উদাহরণস্বরূপ, +শুধুমাত্র GPU ইনস্টল করা নোডগুলিতে GPU এক্সিলারেটর ড্রাইভার ইনস্টল করুন)। + +আপনি একটি [Job](/bn/docs/concepts/workloads/controllers/job/) এবং / বা +একটি [CronJob](/bn/docs/concepts/workloads/controllers/cron-jobs/) ব্যবহার করতে পারেন +যা কাজগুলোকে চিহ্নিত করবে যা সমাপ্তির জন্য চলবে পরে থামবে। একটি Job একটি +একক টাস্কের প্রতিনিধিত্ব করে, যেখানে প্রতিটি CronJob একটি শিডিউল অনুযায়ী পুনরাবৃত্তি করে। + +এই বিভাগে অন্যান্য বিষয়: + diff --git 
a/content/bn/docs/concepts/workloads/pods/_index.md b/content/bn/docs/concepts/workloads/pods/_index.md new file mode 100644 index 0000000000000..a64e891ba7ed3 --- /dev/null +++ b/content/bn/docs/concepts/workloads/pods/_index.md @@ -0,0 +1,398 @@ +--- +title: পড +content_type: concept +weight: 10 +no_list: true +--- + + + +_পড_ হলো কম্পিউটিংয়ের ক্ষুদ্রতম স্থাপনযোগ্য একক যা আপনি কুবারনেটিসে তৈরি এবং পরিচালনা করতে পারেন। + +একটি পড (তিমি বা মটর শুঁটির একটি পডের মতো) এক বা একাধিক +{{< glossary_tooltip text="কন্টেইনার" term_id="container" >}} এর একটি গ্রুপ , শেয়ার্ড স্টোরেজ এবং নেটওয়ার্ক রিসোর্স গুলির সাথে, এবং কন্টেইনার কীভাবে চালানো যায় তার জন্য একটি স্পেসিফিকেশন। একটি পডের সামগ্রী সর্বদা সহ-অবস্থিত এবং +সহ-নির্ধারিত, এবং একটি শেয়ার্ড প্রসঙ্গে চালে। একটি পড মডেল একটি +অ্যাপ্লিকেশন-নির্দিষ্ট "লজিক্যাল হোস্ট": এটিতে এক বা একাধিক অ্যাপ্লিকেশন রয়েছে +কন্টেইনার যা তুলনামূলকভাবে শক্তভাবে মিলিত হয়। +নন-ক্লাউড প্রেক্ষাপটে, একই ভৌত (ফিজিক্যাল) বা ভার্চুয়াল মেশিনে সঞ্চালিত অ্যাপ্লিকেশনগুলি একই লজিক্যাল হোস্টে নির্বাহিত ক্লাউড অ্যাপ্লিকেশনগুলির সাথে সাদৃশ্যপূর্ণ। + +পাশাপাশি অ্যাপ্লিকেশন কন্টেইনারে, একটি পড থাকতে পারে +{{< glossary_tooltip text="init কন্টেইনার" term_id="init-container" >}} যেটি চলে +পড স্টার্টআপের সময়। আপনি ইনজেকশনও করতে পারেন +{{< glossary_tooltip text="ephemeral কন্টেইনার" term_id="ephemeral-container" >}} +একটি চলমান পড ডিবাগ করার জন্য। + + + +## পড কি ? + +{{< note >}} +ক্লাস্টারের প্রতিটি নোডে আপনাকে একটি [কন্টেইনার রানটাইম](/bn/docs/setup/production-environment/container-runtimes/) ইনস্টল +করতে হবে যাতে পডগুলি সেখানে চলতে পারে। +{{< /note >}} + +একটি পডের শেয়ার্ড প্রসঙ্গ হল লিনাক্স নেমস্পেস(namespaces), cgroups এবং +বিচ্ছিন্নতার সম্ভাব্য অন্যান্য দিক - একই জিনিস যা একটি {{< glossary_tooltip text="কন্টেইনার" term_id="container" >}} বিচ্ছিন্ন করে। একটি পডের প্রেক্ষাপটের মধ্যে, পৃথক অ্যাপ্লিকেশন থাকতে পারে +আরও উপ-বিচ্ছিন্নতা প্রয়োগ করা হয়েছে। + +একটি পড শেয়ার্ড নেমস্পেস এবং শেয়ার্ড ফাইল সিস্টেম ভলিউম সহ কন্টেইনারগুলির সেটের অনুরূপ। + +কুবারনেটিস ক্লাস্টারের পড প্রধানত দুটি উপায়ে ব্যবহৃত হয়: + +* **পড যা একটি একক কন্টেইনার চালায়**" এক-কন্টেইনার-প্রতি-পড" মডেলটি + কুবারনেটিসের সবচেয়ে সাধারণ ব্যবহার; এই ক্ষেত্রে, আপনি একটি একক কন্টেইনারর চারপাশে একটি মোড়ক হিসাবে + একটি পডকে ভাবতে পারেন; কুবারনেটিস সরাসরি কন্টেইনারগুলি পরিচালনা করার পরিবর্তে + পডগুলি পরিচালনা করে। +* **পডগুলি যা একাধিক কন্টেইনার চালায় যা একসাথে কাজ করা দরকার** একটি পড + [একাধিক সহ-অবস্থিত কন্টেইনার](#how-pods-manage-multiple-containers) + দ্বারা গঠিত একটি অ্যাপ্লিকেশনকে এনক্যাপসুলেট(encapsulate) করতে পারে + যেগুলি শক্তভাবে সংযুক্ত এবং রিসোর্স ভাগ করতে হবে৷ এই সহ-অবস্থিত কন্টেইনারগুলি + একটি একক সমন্বিত ইউনিট গঠন করে। + + একটি একক পডে একাধিক সহ-অবস্থিত এবং সহ-পরিচালিত কন্টেইনারে গোষ্ঠীবদ্ধ করা একটি + অপেক্ষাকৃত উন্নত ব্যবহারের ক্ষেত্রে। আপনার এই প্যাটার্নটি কেবলমাত্র নির্দিষ্ট ক্ষেত্রে ব্যবহার করা উচিত + যেখানে আপনার কন্টেইনারে শক্তভাবে সংযুক্ত করা হয়েছে। + + প্রতিলিপি প্রদানের জন্য আপনাকে একাধিক কন্টেইনার চালানোর দরকার নেই (স্থিতিস্থাপকতার জন্য + বা ক্ষমতা); আপনার একাধিক প্রতিলিপি প্রয়োজন হলে, দেখুন + [ওয়ার্কলোড ম্যানেজমেন্ট](/bn/docs/concepts/workloads/controllers/)। + +## পডের ব্যবহার + +নিচে একটি পডের উদাহরণ দেওয়া হল যেটিতে একটি কন্টেইনার রয়েছে যা `nginx:1.14.2` ইমেজটি চালাচ্ছে। + +{{% code_sample file="pods/simple-pod.yaml" %}} + +উপরে দেখানো পড তৈরি করতে, নিম্নলিখিত কমান্ডটি চালান: +```shell +kubectl apply -f https://k8s.io/examples/pods/simple-pod.yaml +``` + +পড সাধারণত সরাসরি তৈরি করা হয় না এবং ওয়ার্কলোড রিসোর্স ব্যবহার করে তৈরি করা হয়। +কিভাবে পডগুলিব্যবহার করা হয় ওয়ার্কলোড 
রিসোর্স সহ সে সম্পর্কে আরও তথ্যের জন্য
+[পডগুলির সাথে কাজ](#working-with-pods) দেখুন।
+
+### পড পরিচালনার জন্য ওয়ার্কলোড রিসোর্স
+
+সাধারণত আপনাকে সরাসরি পড তৈরি করতে হয় না, এমনকি সিঙ্গেলটন পডও নয়। পরিবর্তে, {{< glossary_tooltip text="ডিপ্লয়মেন্ট" term_id="deployment" >}}
+বা {{< glossary_tooltip text="জব" term_id="job" >}}-এর মতো ওয়ার্কলোড রিসোর্স ব্যবহার করে সেগুলো তৈরি করুন।
+আপনার পডের অবস্থা (state) ট্র্যাক করার প্রয়োজন হলে
+{{< glossary_tooltip text="স্টেটফুলসেট" term_id="statefulset" >}} রিসোর্স বিবেচনা করুন।
+
+
+প্রতিটি পড একটি প্রদত্ত অ্যাপ্লিকেশনের একটি একক ইনস্ট্যান্স চালানোর জন্য তৈরি। আপনি যদি
+আপনার অ্যাপ্লিকেশনকে অনুভূমিকভাবে স্কেল করতে চান (আরও ইনস্ট্যান্স চালিয়ে আরও সামগ্রিক রিসোর্স
+প্রদান করতে), তাহলে আপনার একাধিক পড ব্যবহার করা উচিত, প্রতিটি ইনস্ট্যান্সের জন্য একটি।
+কুবারনেটিসে, এটিকে সাধারণত _রেপ্লিকেশন_ হিসাবে উল্লেখ করা হয়।
+রেপ্লিকেট করা পডগুলো সাধারণত একটি ওয়ার্কলোড রিসোর্স এবং এর
+{{< glossary_tooltip text="কন্ট্রোলার" term_id="controller" >}} দ্বারা একটি গ্রুপ হিসাবে তৈরি এবং পরিচালিত হয়।
+
+কুবারনেটিস কীভাবে অ্যাপ্লিকেশন স্কেলিং এবং অটো-হিলিং বাস্তবায়নে ওয়ার্কলোড রিসোর্স এবং তাদের কন্ট্রোলার
+ব্যবহার করে, সে সম্পর্কে আরও তথ্যের জন্য [পড এবং কন্ট্রোলার](#pods-and-controllers)
+দেখুন।
+
+পডগুলো স্থানীয়ভাবে তাদের অন্তর্ভুক্ত কন্টেইনারগুলোর জন্য দুই ধরনের শেয়ার করা রিসোর্স সরবরাহ করে:
+[নেটওয়ার্কিং](#pod-networking) এবং [স্টোরেজ](#pod-storage)।
+
+
+## পডগুলো নিয়ে কাজ করা {#working-with-pods}
+
+কুবারনেটিসে আপনি খুব কমই সরাসরি পৃথক পড তৈরি করবেন, এমনকি সিঙ্গেলটন পডও। এর
+কারণ হলো পডগুলো তুলনামূলকভাবে ক্ষণস্থায়ী এবং বাতিলযোগ্য (disposable) এন্টিটি হিসাবে ডিজাইন করা হয়েছে। যখন
+একটি পড তৈরি করা হয় (সরাসরি আপনার দ্বারা, বা পরোক্ষভাবে একটি
+{{< glossary_tooltip text="কন্ট্রোলার" term_id="controller" >}} দ্বারা), নতুন পডটি
+আপনার ক্লাস্টারের একটি {{< glossary_tooltip term_id="node" >}}-এ চালানোর জন্য শিডিউল করা হয়।
+পডটি সেই নোডেই থাকে, যতক্ষণ না পডের এক্সিকিউশন শেষ হয়, পড অবজেক্টটি মুছে ফেলা হয়,
+রিসোর্সের অভাবে পডটিকে *উচ্ছেদ (evict)* করা হয়, বা নোডটি ব্যর্থ হয়।
+
+{{< note >}}
+একটি পডের মধ্যে একটি কন্টেইনার পুনরায় চালু করাকে পড পুনরায় চালু করার সাথে গুলিয়ে ফেলা উচিত নয়। একটি পড
+কোনো প্রক্রিয়া নয়, বরং কন্টেইনার(গুলো) চালানোর একটি পরিবেশ। মুছে ফেলা না হওয়া পর্যন্ত
+একটি পড টিকে থাকে।
+{{< /note >}}
+
+একটি পডের নাম অবশ্যই একটি বৈধ
+[DNS সাবডোমেন](/bn/docs/concepts/overview/working-with-objects/names#dns-subdomain-names)
+মান হতে হবে, তবে এটি পড হোস্টনামের ক্ষেত্রে অপ্রত্যাশিত ফলাফল তৈরি করতে পারে। সর্বোত্তম সামঞ্জস্যের জন্য,
+নামটির [DNS লেবেল](/bn/docs/concepts/overview/working-with-objects/names#dns-label-names)-এর
+আরও সীমাবদ্ধ নিয়ম অনুসরণ করা উচিত।
+
+### পড অপারেটিং সিস্টেম (OS)
+
+{{< feature-state state="stable" for_k8s_version="v1.25" >}}
+
+আপনি কোন OS-এ পড চালাতে চান তা নির্দেশ করতে আপনার `.spec.os.name` ফিল্ডটি `windows`
+বা `linux`-এ সেট করা উচিত। এই দুটিই বর্তমানে কুবারনেটিস দ্বারা সমর্থিত
+একমাত্র অপারেটিং সিস্টেম। ভবিষ্যতে এই তালিকা প্রসারিত হতে পারে।
+
+কুবারনেটিস v{{< skew currentVersion >}}-এ, এই ফিল্ডে আপনি যে
+মান সেট করেন তা পডগুলোর {{< glossary_tooltip text="শিডিউলিং" term_id="kube-scheduler" >}}-এ কোনও প্রভাব ফেলে না৷
+`.spec.os.name` সেট করা পডের অপারেটিং সিস্টেমকে প্রামাণিকভাবে
+শনাক্ত করতে সাহায্য করে এবং ভ্যালিডেশনের জন্য ব্যবহৃত হয়। আপনি যদি এমন একটি পডের
+অপারেটিং সিস্টেম নির্দিষ্ট করেন, যা সেই নোডের অপারেটিং সিস্টেমের মতো নয়
+যেখানে kubelet চলছে, তাহলে kubelet পডটি চালাতে প্রত্যাখ্যান করে।
+[পডের নিরাপত্তা মান](/bn/docs/concepts/security/pod-security-standards/)ও এই ফিল্ডটি ব্যবহার করে,
+যাতে সেই অপারেটিং সিস্টেমের সাথে প্রাসঙ্গিক নয় এমন নীতি প্রয়োগ করা এড়ানো যায়।
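+
+উদাহরণস্বরূপ, একটি পডে অপারেটিং সিস্টেম নির্দেশ করা দেখতে এরকম হতে পারে (পডের নাম ও ইমেজটি নমুনা মাত্র):
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: linux-pod-demo    # নমুনা নাম
+spec:
+  os:
+    name: linux           # উইন্ডোজ নোডের জন্য windows
+  containers:
+  - name: app
+    image: nginx:1.14.2   # নমুনা ইমেজ
+```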
+
+### পড এবং কন্ট্রোলার
+
+আপনি আপনার হয়ে একাধিক পড তৈরি এবং পরিচালনা করতে ওয়ার্কলোড রিসোর্স ব্যবহার করতে পারেন। রিসোর্সটির
+কন্ট্রোলার রেপ্লিকেশন ও রোলআউট পরিচালনা করে এবং পড ব্যর্থ হলে স্বয়ংক্রিয়
+নিরাময়ের ব্যবস্থা করে। উদাহরণস্বরূপ, যদি একটি নোড ব্যর্থ হয়, একটি কন্ট্রোলার লক্ষ্য করে যে
+সেই নোডের পডগুলো কাজ করা বন্ধ করে দিয়েছে এবং একটি প্রতিস্থাপন পড তৈরি করে। শিডিউলার একটি সুস্থ নোডে
+প্রতিস্থাপন পডটি স্থাপন করে।
+
+এখানে ওয়ার্কলোড রিসোর্সের কিছু উদাহরণ রয়েছে, যেগুলো এক বা একাধিক পড পরিচালনা করে:
+
+* {{< glossary_tooltip text="ডিপ্লয়মেন্ট" term_id="deployment" >}}
+* {{< glossary_tooltip text="স্টেটফুলসেট" term_id="statefulset" >}}
+* {{< glossary_tooltip text="ডেমনসেট" term_id="daemonset" >}}
+
+### পড টেমপ্লেট
+
+{{< glossary_tooltip text="ওয়ার্কলোড" term_id="workload" >}} রিসোর্সের কন্ট্রোলাররা একটি _পড টেমপ্লেট_ থেকে পড তৈরি করে
+এবং আপনার পক্ষে সেই পডগুলো পরিচালনা করে৷
+
+পড টেমপ্লেট হলো পড তৈরির স্পেসিফিকেশন, এবং এগুলো
+[ডিপ্লয়মেন্ট](/bn/docs/concepts/workloads/controllers/deployment/),
+[জব](/bn/docs/concepts/workloads/controllers/job/) এবং
+[ডেমনসেট](/bn/docs/concepts/workloads/controllers/daemonset/)-এর মতো ওয়ার্কলোড রিসোর্সের অন্তর্ভুক্ত।
+
+ওয়ার্কলোড রিসোর্সের প্রতিটি কন্ট্রোলার প্রকৃত পড তৈরি করতে ওয়ার্কলোড অবজেক্টের ভিতরের `PodTemplate`
+ব্যবহার করে। `PodTemplate` হলো আপনার অ্যাপ চালানোর জন্য ব্যবহৃত যে কোনো ওয়ার্কলোড রিসোর্সের
+কাঙ্ক্ষিত অবস্থার অংশ।
+
+নিচের নমুনাটি একটি সাধারণ Job-এর ম্যানিফেস্ট, যাতে একটি কন্টেইনার শুরু করার একটি `template` রয়েছে।
+সেই পডের কন্টেইনারটি একটি বার্তা প্রিন্ট করে, তারপর বিরতি দেয়।
+
+```yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: hello
+spec:
+  template:
+    # This is the pod template
+    spec:
+      containers:
+      - name: hello
+        image: busybox:1.28
+        command: ['sh', '-c', 'echo "Hello, Kubernetes!"
&& sleep 3600'] + restartPolicy: OnFailure + # The pod template ends here +``` + +পড টেমপ্লেট পরিবর্তন করা বা একটি নতুন পড টেমপ্লেটে স্যুইচ করা আগে থেকেই বিদ্যমান পডগুলিতে +সরাসরি প্রভাব ফেলে না। আপনি যদি কোনও ওয়ার্কলোড রিসোর্সের জন্য পড টেমপ্লেট পরিবর্তন করেন তবে সেই রিসোর্সটি এর +প্রতিস্থাপন পড তৈরি করতে হবে যা আপডেট করা টেমপ্লেট ব্যবহার করে। + +উদাহরণস্বরূপ, স্টেটফুলসেট কন্ট্রোলার নিশ্চিত করে যে চলমান পডগুলি প্রতিটি স্টেটফুলসেট অবজেক্টের +বর্তমান পড টেমপ্লেটের একই । আপনি যদি স্টেটফুলসেট ইডিট করেন তার পড টেমপ্লেট পরিবর্তন করতে, +স্টেটফুলসেট আপডেট করা টেমপ্লেটের উপর ভিত্তি করে নতুন পড তৈরি করা শুরু করে। +অবশেষে, সমস্ত পুরানো পড নতুন পড দিয়ে প্রতিস্থাপিত হয় এবং আপডেট সম্পূর্ণ হয়। + +প্রতিটি ওয়ার্কলোড রিসোর্স পড টেমপ্লেটের পরিবর্তনগুলি পরিচালনা করার জন্য নিজস্ব নিয়মগুলি প্রয়োগ করে৷ +আপনি যদি বিশেষভাবে স্টেটফুলসেট সম্পর্কে আরও পড়তে চান, স্টেটফুলসেট বেসিক টিউটোরিয়ালটিতে +[আপডেট কৌশল](/bn/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets) পড়ুন। + +নোডগুলিতে, {{< glossary_tooltip term_id="kubelet" text="kubelet">}} +পড টেমপ্লেট এবং আপডেটগুলির আশেপাশে কোনও বিবরণ সরাসরি পর্যবেক্ষণ বা পরিচালনা করে না; +এই বিবরণগুলি দূরে এব্যস্ট্রাক হয় ৷ উদ্বেগের এব্যস্ট্রাকশন এবং বিচ্ছেদ সিস্টেমের +শব্দার্থিকে সরল করে, এবং বিদ্যমান কোড পরিবর্তন না করে ক্লাস্টারের আচরণকে প্রসারিত করা +সম্ভবপর করে তোলে। + +## পড আপডেট এবং প্রতিস্থাপন + +পূর্ববর্তী সেকশনে উল্লিখিত হিসাবে, যখন একটি ওয়ার্কলোড রিসোর্সের জন্য পড +টেমপ্লেট পরিবর্তন করা হয়, তখন কন্ট্রোলার বিদ্যমান পডগুলিকে আপডেট বা প্যাচ করার পরিবর্তে আপডেট করা টেমপ্লেটের +উপর ভিত্তি করে নতুন পড তৈরি করে। + +কুবারনেটিস আপনাকে সরাসরি পড পরিচালনা করতে বাধা দেয় না। এটি একটি +চলমান পডের কিছু ক্ষেত্র আপডেট করা সম্ভব। যাইহোক, পড আপডেটের ক্রিয়াকলাপ +যেমন +[`প্যাচ`](/bn/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#patch-pod-v1-core), এবং +[`প্রতিস্থাপন`](/bn/docs/reference/generated/kubernetes-api/{{< param "version" >}}/#replace-pod-v1-core) +কিছু সীমাবদ্ধতা রয়েছে : + +- একটি পড সম্পর্কে বেশিরভাগ মেটাডেটা অপরিবর্তনীয়। উদাহরণস্বরূপ + আপনি `namespace`, `name`, `uid`, বা `creationTimestamp` ক্ষেত্র + পরিবর্তন করতে পারবেন না; `generation` ক্ষেত্রটি অনন্য। এটি কেবলমাত্র সেই আপডেটগুলি গ্রহণ করে যা ক্ষেত্রের বর্তমান + মান বৃদ্ধি করে। +- যদি `metadata.deletionTimestamp` সেট করা থাকে, তবে `metadata.finalizers` তালিকায় কোনো + নতুন এন্ট্রি যোগ করা যাবে না। +- পড আপডেটগুলি `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` বা + `spec.tolerations` ব্যতীত অন্য কোনো ক্ষেত্র পরিবর্তন করতে পারে না। + `spec.tolerations` এর জন্য, আপনি শুধুমাত্র নতুন এন্ট্রি যোগ করতে পারেন। +- `spec.activeDeadlineSeconds` ক্ষেত্র আপডেট করার সময়, দুই ধরনের আপডেটের + অনুমতি দেওয়া হয়: + + 1. একটি ধনাত্মক সংখ্যায় আনঅ্যাসাইন করা ক্ষেত্র সেট করা; + 2. 
ক্ষেত্রটিকে একটি ধনাত্মক সংখ্যা থেকে একটি ছোট, অ-ঋণাত্মক সংখ্যায় + আপডেট করা। + +## রিসোর্স ভাগাভাগি এবং যোগাযোগ + +পডগুলি তাদের উপাদান কন্টেইনারর মধ্যে ডেটা ভাগ করে নেওয়া এবং যোগাযোগ করতে +সক্ষম করে। + +### পডগুলিতে স্টোরেজ {#pod-storage} + +একটি পড শেয়ার্ড স্টোরেজ {{< glossary_tooltip text="ভলিউম" term_id="volume" >}} +এর একটি সেট নির্দিষ্ট করতে পারে। পডের সমস্ত +কন্টেইনারে ভাগ করা ভলিউমগুলি অ্যাক্সেস করতে পারে, সেই কন্টেইনারগুলিকে ডেটা +ভাগ করতে দেয় ৷ ভলিউমগুলি একটি পডের মধ্যে স্থায়ী ডেটাকে টিকে থাকার +অনুমতি দেয় যদি এর মধ্যে থাকা একটি কন্টেইনারকে পুনরায় চালু করতে হয় । দেখুন +[স্টোরেজ](/bn/docs/concepts/storage/) কুবারনেটিস কীভাবে শেয়ার্ড স্টোরেজ +প্রয়োগ করে এবং এটি পডের কাছে উপলব্ধ করে সে বিষয়ে আরও তথ্যের জন্য। + +### পড নেটওয়ার্কিং + +প্রতিটি পড প্রতিটি এড্রেস পরিবারের জন্য একটি একক আইপি এড্রেস বরাদ্দ করা হয় । একটি পডের +প্রতিটি কন্টেইনার আইপি এড্রেস এবং নেটওয়ার্ক পোর্ট সহ নেটওয়ার্ক +নেমস্পেস শেয়ার করে । একটি পডের ভিতরে (এবং **শুধুমাত্র** তখন), পডের অন্তর্গত কন্টেইনারগুলি +`লোকালহোস্ট` ব্যবহার করে একে অপরের সাথে যোগাযোগ করতে পারে। যখন একটি পডের কন্টেইনারগুলি *পডের বাইরে* এনটিটির সাথে যোগাযোগ করে, +তখন তাদের অবশ্যই সমন্বয় করতে হবে যে +তারা কীভাবে ভাগ করা নেটওয়ার্ক রিসোর্সগুলি (যেমন পোর্ট) ব্যবহার করে। +একটি পডের মধ্যে, কন্টেইনারগুলি একটি আইপি ঠিকানা এবং পোর্ট স্পেস ভাগ করে এবং +একে অপরকে `লোকালহোস্ট` এর মাধ্যমে খুঁজে পেতে পারে। একটি পডের কন্টেইনারগুলি যেমন SystemV semaphores বা +POSIX শেয়ার্ড মেমরির সাথে স্ট্যান্ডার্ড ইন্টার-প্রসেস ব্যবহার করে +একে অপরের সাথে যোগাযোগ করতে পারে। বিভিন্ন পডের কন্টেইনারগুলির ইউনিক IP এড্রেস থাকে +এবং বিশেষ কনফিগারেশন ছাড়া OS-লেভেলের IPC দ্বারা যোগাযোগ করতে পারে না। +যে কন্টেইনারগুলি একটি ভিন্ন পডে চলমান একটি কন্টেইনারের সাথে ইন্টারঅ্যাক্ট করতে চায় তারা যোগাযোগের জন্য +আইপি নেটওয়ার্কিং ব্যবহার করতে পারে। + +পডের মধ্যে থাকা কন্টেইনারগুলি সিস্টেমের হোস্টনামটিকে পডের জন্য কনফিগার করা +`নাম`-এর মতই দেখতে পায়। এই সেকশনে [networking](/bn/docs/concepts/cluster-administration/networking/) এই বিষয়ে +আরো আছে। + +## কন্টেইনারগুলির জন্য বিশেষাধিকার মোড + +{{< note >}} +আপনার {{< glossary_tooltip text="কন্টেইনার রানটাইম" term_id="container-runtime">}} এই সেটিংটি প্রাসঙ্গিক হওয়ার জন্য একটি বিশেষাধিকারপ্রাপ্ত কন্টেইনারের ধারণাকে সমর্থন করতে হবে৷ +{{< /note >}} + +অপারেটিং সিস্টেমের প্রশাসনিক ক্ষমতা ব্যবহার করার জন্য একটি পডের যেকোনো কন্টেইনার বিশেষ সুবিধাপ্রাপ্ত মোডে চলতে পারে +যা অন্যথায় অ্যাক্সেসযোগ্য হবে না। এটি উইন্ডোজ এবং লিনাক্স উভয়ের জন্যই সহজলভ্য। + +### লিনাক্স বিশেষাধিকার কন্টেইনার + +লিনাক্সে, পডের যেকোনো কনটেইনার স্পেকের +[নিরাপত্তা প্রসঙ্গ](/bn/docs/tasks/configure-pod-container/security-context/) তে `privileged` (লিনাক্স) ফ্লাগ ব্যবহার করে সুবিধাপ্রাপ্ত মোড +সক্রিয় করতে পারে। এটি এমন কন্টেইনারগুলির জন্য দরকারী যেগুলি অপারেটিং সিস্টেমের প্রশাসনিক +ক্ষমতাগুলি ব্যবহার করতে চায় যেমন নেটওয়ার্ক স্ট্যাক নিপূণভাবে ব্যবহার করা বা হার্ডওয়্যার ডিভাইসগুলি অ্যাক্সেস করা। + +### উইন্ডোজ বিশেষাধিকার কন্টেইনার + +{{< feature-state for_k8s_version="v1.26" state="stable" >}} + +উইন্ডোজে, আপনি পড স্পেকের নিরাপত্তা প্রসঙ্গে `windowsOptions.hostProcess` ফ্লাগ সেট করে একটি +[উইন্ডোজে HostProcess পড](/bn/docs/tasks/configure-pod-container/create-hostprocess-pod) তৈরি করতে পারেন। এই পডের সমস্ত কন্টেইনার +অবশ্যই উইন্ডোজ HostProcess কন্টেইনার হিসাবে চালাতে হবে। HostProcess পডগুলি সরাসরি হোস্টে চলে এবং লিনাক্স সুবিধাপ্রাপ্ত +কন্টেইনারগুলির মতো প্রশাসনিক কাজ সম্পাদন করতেও ব্যবহার করা যেতে পারে। + +## স্ট্যাটিক পডগুলি + +_স্ট্যাটিক পডগুলি_ একটি নির্দিষ্ট নোডে kubelet daemon দ্বারা +সরাসরি পরিচালিত হয়, {{< glossary_tooltip text="API 
সার্ভার"term_id="kube-apiserver" >}} +তাদের পর্যবেক্ষণ করে না। +যেখানে বেশিরভাগ পড নিয়ন্ত্রণ কন্ট্রোল প্লেন দ্বারা পরিচালিত হয় (উদাহরণস্বরূপ, একটি +{{< glossary_tooltip text="ডিপ্লয়মেন্ট" term_id="deployment">}}), +স্ট্যাটিক পডগুলির জন্য, kubelet সরাসরি প্রতিটি স্ট্যাটিক পডের তত্ত্বাবধান করে (এবং এটি ব্যর্থ হলে পুনরায় চালু করে)। + +একটি নির্দিষ্ট নোডে স্ট্যাটিক পডগুলি সবসময় একটি {{< glossary_tooltip term_id="kubelet">}} এর সাথে আবদ্ধ থাকে। +স্ট্যাটিক পডের প্রধান ব্যবহার হল একটি স্ব-হোস্টেড কন্ট্রোল প্লেনে চালানো: অন্য কথায়, +ব্যক্তিগত [কন্ট্রোল প্লেন উপাদান](/bn/docs/concepts/overview/components/#control-plane-components) তত্ত্বাবধানে kubelet ব্যবহার করা। + +kubelet স্বয়ংক্রিয়ভাবে প্রতিটি স্ট্যাটিক পডের জন্য কুবারনেটিস API সার্ভারে একটি +{{< glossary_tooltip text="মিরর পড" term_id="mirror-pod" >}} তৈরির চেষ্টা করে। +এর মানে হল যে একটি নোডে চলমান পডগুলি API সার্ভারে দৃশ্যমান, +তবে সেখান থেকে নিয়ন্ত্রণ করা যায় না। আরও তথ্যের জন্য [স্থির পড তৈরি করুন](/bn/docs/tasks/configure-pod-container/static-pod) গাইডটি দেখুন। + +{{< note >}} +একটি স্ট্যাটিক পডের `spec` অন্যান্য API অবজেক্টের উল্লেখ করতে পারে না +(উদাহরণস্বরূপ, {{< glossary_tooltip text="ServiceAccount" term_id="service-account" >}}, +{{< glossary_tooltip text="ConfigMap" term_id="configmap" >}}, +{{< glossary_tooltip text="Secret" term_id="secret" >}}, ইত্যাদি). +{{< /note >}} + +## একাধিক কন্টেইনার সহ পড {#how-pods-manage-multiple-containers} + +পডগুলি একাধিক সহযোগিতা প্রক্রিয়া (কন্টেইনার হিসাবে) সমর্থন করার জন্য ডিজাইন করা হয়েছে যা পরিষেবার একটি +সমন্বিত একক গঠন করে। একটি পডের কন্টেইনারগুলি ক্লাস্টারের একই ফিজিক্যাল বা ভার্চুয়াল মেশিনে +স্বয়ংক্রিয়ভাবে সহ-অবস্থিত এবং সহ-নির্ধারিত। কন্টেইনারগুলি +রিসোর্স এবং নির্ভরতা ভাগ করে নিতে পারে, একে অপরের সাথে যোগাযোগ করতে পারে এবং কখন এবং কীভাবে সেগুলি +বন্ধ করা হয় তা সমন্বয় করতে পারে। + + +কুবারনেটিস ক্লাস্টারের পড দুটি প্রধান উপায়ে ব্যবহৃত হয়: + +* **পড যা একটি একক কন্টেইনার চালায়**। "এক-কন্টেইনার-প্রতি-পড" মডেলটি কুবারনেটিসের + সবচেয়ে সাধারণ ব্যবহারের ক্ষেত্রে; এই ক্ষেত্রে, আপনি একটি একক কন্টেইনারের + চারপাশে একটি মোড়ক হিসাবে একটি পডকে ভাবতে পারেন; কুবারনেটস সরাসরি কন্টেইনারগুলি পরিচালনা + করার পরিবর্তে পডগুলি পরিচালনা করে। +* **পড যা একাধিক কন্টেইনার চালায় যেগুলি একসাথে কাজ করতে হবে**। একটি পড + একাধিক সহ-অবস্থিত কন্টেইনারে গঠিত একটি অ্যাপ্লিকেশনকে + এনক্যাপসুলেট করতে পারে যা শক্তভাবে সংযুক্ত থাকে এবং + রিসোর্স ভাগ করে নেওয়ার প্রয়োজন হয়। এই সহ-অবস্থিত কন্টেইনারগুলি + পরিষেবার একটি একক সমন্বিত ইউনিট গঠন করে—উদাহরণস্বরূপ, + একটি কন্টেইনার জনসাধারণের কাছে ভাগ করা ভলিউমে ডেটা সংরক্ষণ করে, যখন একটি পৃথক + {{< glossary_tooltip text="সাইডকার কন্টেইনার" term_id="sidecar-container" >}} + সেই ফাইলগুলিকে রিফ্রেশ বা আপডেট করে৷ + পড এই কন্টেইনার, স্টোরেজ রিসোর্স এবং একটি ক্ষণস্থায়ী নেটওয়ার্ক পরিচয়কে একক + ইউনিট হিসাবে একত্রে মোড়ক করে। + +উদাহরণস্বরূপ, আপনার কাছে একটি কন্টেইনার থাকতে পারে যেটি +একটি শেয়ার্ড ভলিউমের ফাইলগুলির জন্য একটি ওয়েব সার্ভার হিসাবে কাজ করে এবং একটি পৃথক +[সাইডকার কন্টেইনার](/bn/docs/concepts/workloads/pods/sidecar-containers/) +যা একটি দূরবর্তী উৎস থেকে সেই ফাইলগুলিকে আপডেট করে, যেমনটি নিম্নলিখিত চিত্রে রয়েছে: + +{{< figure src="/images/docs/pod.svg" alt="পড তৈরির চিত্র" class="diagram-medium" >}} + +কিছু পডের আছে {{< glossary_tooltip text="init কন্টেইনার" term_id="init-container" >}} +পাশাপাশি {{< glossary_tooltip text="অ্যাপ কন্টেইনার" term_id="app-container" >}}। +ডিফল্টরূপে, init কন্টেইনারগুলি অ্যাপ কন্টেইনারগুলি শুরু হওয়ার আগে চলে এবং সম্পূর্ণ হয়। + +আপনার কাছে [সাইডকার কন্টেইনার](/bn/docs/concepts/workloads/pods/sidecar-containers/) থাকতে 
+
+## একাধিক কন্টেইনার সহ পড {#how-pods-manage-multiple-containers}
+
+পডগুলি একাধিক সহযোগী প্রক্রিয়াকে (কন্টেইনার হিসাবে) সমর্থন করার জন্য ডিজাইন করা হয়েছে, যা পরিষেবার একটি
+সমন্বিত একক গঠন করে। একটি পডের কন্টেইনারগুলি ক্লাস্টারের একই ফিজিক্যাল বা ভার্চুয়াল মেশিনে
+স্বয়ংক্রিয়ভাবে সহ-অবস্থিত এবং সহ-নির্ধারিত হয়। কন্টেইনারগুলি
+রিসোর্স এবং নির্ভরতা ভাগ করে নিতে পারে, একে অপরের সাথে যোগাযোগ করতে পারে এবং কখন ও কীভাবে সেগুলি
+বন্ধ করা হবে তা সমন্বয় করতে পারে।
+
+কুবারনেটিস ক্লাস্টারের পড দুটি প্রধান উপায়ে ব্যবহৃত হয়:
+
+* **পড যা একটি একক কন্টেইনার চালায়**। "এক-কন্টেইনার-প্রতি-পড" মডেলটি কুবারনেটিসের
+  সবচেয়ে সাধারণ ব্যবহারের ক্ষেত্র; এই ক্ষেত্রে, আপনি একটি একক কন্টেইনারের
+  চারপাশে একটি মোড়ক হিসাবে একটি পডকে ভাবতে পারেন; কুবারনেটিস সরাসরি কন্টেইনারগুলি পরিচালনা
+  করার পরিবর্তে পডগুলি পরিচালনা করে।
+* **পড যা একাধিক কন্টেইনার চালায় যেগুলিকে একসাথে কাজ করতে হয়**। একটি পড
+  একাধিক সহ-অবস্থিত কন্টেইনারে গঠিত একটি অ্যাপ্লিকেশনকে
+  এনক্যাপসুলেট করতে পারে, যেগুলি শক্তভাবে সংযুক্ত থাকে এবং
+  রিসোর্স ভাগ করে নেওয়ার প্রয়োজন হয়। এই সহ-অবস্থিত কন্টেইনারগুলি
+  পরিষেবার একটি একক সমন্বিত ইউনিট গঠন করে; উদাহরণস্বরূপ,
+  একটি কন্টেইনার ভাগ করা ভলিউমে সংরক্ষিত ডেটা জনসাধারণের কাছে পরিবেশন করে, যখন একটি পৃথক
+  {{< glossary_tooltip text="সাইডকার কন্টেইনার" term_id="sidecar-container" >}}
+  সেই ফাইলগুলিকে রিফ্রেশ বা আপডেট করে৷
+  পড এই কন্টেইনার, স্টোরেজ রিসোর্স এবং একটি ক্ষণস্থায়ী নেটওয়ার্ক পরিচয়কে একক
+  ইউনিট হিসাবে একত্রে মোড়ক করে।
+
+উদাহরণস্বরূপ, আপনার কাছে একটি কন্টেইনার থাকতে পারে যেটি
+একটি শেয়ার্ড ভলিউমের ফাইলগুলির জন্য একটি ওয়েব সার্ভার হিসাবে কাজ করে এবং একটি পৃথক
+[সাইডকার কন্টেইনার](/bn/docs/concepts/workloads/pods/sidecar-containers/)
+যা একটি দূরবর্তী উৎস থেকে সেই ফাইলগুলিকে আপডেট করে, যেমনটি নিম্নলিখিত চিত্রে রয়েছে:
+
+{{< figure src="/images/docs/pod.svg" alt="পড তৈরির চিত্র" class="diagram-medium" >}}
+
+কিছু পডে {{< glossary_tooltip text="অ্যাপ কন্টেইনারের" term_id="app-container" >}}
+পাশাপাশি {{< glossary_tooltip text="init কন্টেইনার" term_id="init-container" >}} থাকে।
+ডিফল্টরূপে, init কন্টেইনারগুলি অ্যাপ কন্টেইনারগুলি শুরু হওয়ার আগে চলে এবং সম্পূর্ণ হয়।
+
+আপনার কাছে [সাইডকার কন্টেইনার](/bn/docs/concepts/workloads/pods/sidecar-containers/) থাকতে পারে,
+যেগুলি প্রধান অ্যাপ্লিকেশন পডকে সহায়ক পরিষেবা প্রদান করে (উদাহরণস্বরূপ: একটি পরিষেবা মেশ)।
+
+{{< feature-state for_k8s_version="v1.29" state="beta" >}}
+
+ডিফল্টরূপে সক্রিয় `SidecarContainers` [ফিচার গেট](/bn/docs/reference/command-line-tools-reference/feature-gates/)
+init কন্টেইনারগুলির জন্য আপনাকে `restartPolicy: Always` নির্দিষ্ট করতে দেয়।
+`Always` পুনঃসূচনা নীতি সেট করা নিশ্চিত করে যে, যে কন্টেইনারগুলিতে আপনি এটি সেট করেছেন
+সেগুলিকে _sidecars_ হিসাবে গণ্য করা হয়, যেগুলি পডের পুরো জীবনকাল জুড়ে চলমান থাকে।
+যে কন্টেইনারগুলিকে আপনি স্পষ্টভাবে সাইডকার কন্টেইনার
+হিসাবে সংজ্ঞায়িত করেছেন সেগুলি মূল অ্যাপ্লিকেশন পডের আগে শুরু হয় এবং পড বন্ধ না
+হওয়া পর্যন্ত চলমান থাকে।
+
+## কন্টেইনার probes
+
+একটি _probe_ হলো একটি ডায়াগনস্টিক, যা kubelet দ্বারা একটি কন্টেইনারের উপর পর্যায়ক্রমে সম্পাদিত হয়। একটি ডায়াগনস্টিক সঞ্চালনের জন্য kubelet বিভিন্ন ক্রিয়াকলাপ করতে পারে:
+
+- `ExecAction` (কন্টেইনার রানটাইমের সাহায্যে সম্পাদিত হয়)
+- `TCPSocketAction` (kubelet দ্বারা সরাসরি চেক করা হয়)
+- `HTTPGetAction` (kubelet দ্বারা সরাসরি চেক করা হয়)
+
+আপনি পডের জীবনচক্র ডকুমেন্টেশনে [probes](/bn/docs/concepts/workloads/pods/pod-lifecycle/#container-probes)
+সম্পর্কে আরও পড়তে পারেন।
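+
+তিন ধরনের probe অ্যাকশন একটি পড স্পেকে কেমন দেখায় তার একটি ধারণামূলক স্কেচ নিচে
+দেওয়া হলো; নাম, পোর্ট ও মানগুলি শুধুই উদাহরণ হিসাবে ধরা হয়েছে:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: probe-demo   # hypothetical name
+spec:
+  containers:
+  - name: web
+    image: nginx
+    startupProbe:            # ExecAction: কন্টেইনারের ভিতরে একটি কমান্ড চালায়
+      exec:
+        command: ["cat", "/usr/share/nginx/html/index.html"]
+      failureThreshold: 30
+      periodSeconds: 2
+    readinessProbe:          # TCPSocketAction: পোর্টে TCP সংযোগ পরীক্ষা করে
+      tcpSocket:
+        port: 80
+      periodSeconds: 10
+    livenessProbe:           # HTTPGetAction: HTTP GET রেসপন্স কোড পরীক্ষা করে
+      httpGet:
+        path: /
+        port: 80
+      periodSeconds: 10
+```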
+
+## {{% heading "whatsnext" %}}
+
+* [একটি পডের জীবনচক্র](/bn/docs/concepts/workloads/pods/pod-lifecycle/) সম্পর্কে জানুন।
+* [RuntimeClass](/bn/docs/concepts/containers/runtime-class/) সম্পর্কে জানুন, এবং কীভাবে আপনি এটি ব্যবহার করে
+  বিভিন্ন কন্টেইনার রানটাইম কনফিগারেশন সহ বিভিন্ন পড কনফিগার করতে পারেন।
+* [PodDisruptionBudget](/bn/docs/concepts/workloads/pods/disruptions/) সম্পর্কে পড়ুন, এবং বিঘ্নের সময় অ্যাপ্লিকেশনের প্রাপ্যতা পরিচালনা করার জন্য আপনি কীভাবে এটি ব্যবহার করতে পারেন তা জানুন।
+* Pod হলো কুবারনেটিস REST API-এর একটি শীর্ষ-স্তরের রিসোর্স।
+  {{< api-reference page="workload-resources/pod-v1" >}}
+  অবজেক্ট সংজ্ঞাটি অবজেক্টটিকে বিস্তারিতভাবে বর্ণনা করে।
+* [ডিস্ট্রিবিউটেড সিস্টেম টুলকিট: কম্পোজিট কন্টেইনারগুলির জন্য প্যাটার্ন](/blog/2015/06/the-distributed-system-toolkit-patterns/) একাধিক কন্টেইনার সহ পডগুলির জন্য সাধারণ লেআউটগুলি ব্যাখ্যা করে।
+* [পড টপোলজি স্প্রেড সীমাবদ্ধতা](/docs/concepts/scheduling-eviction/topology-spread-constraints/) সম্পর্কে পড়ুন।
+
+কুবারনেটিস কেন একটি সাধারণ পড API-কে অন্যান্য রিসোর্সে (যেমন {{< glossary_tooltip text="স্টেটফুলসেট" term_id="statefulset" >}} বা {{< glossary_tooltip text="ডিপ্লয়মেন্ট" term_id="deployment">}}) মুড়ে দেয়, সেই প্রসঙ্গটি বোঝার জন্য আপনি পূর্ববর্তী কাজ সম্পর্কে পড়তে পারেন, যার মধ্যে রয়েছে:
+
+* [Aurora](https://aurora.apache.org/documentation/latest/reference/configuration/#job-schema)
+* [Borg](https://research.google.com/pubs/pub43438.html)
+* [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html)
+* [Omega](https://research.google/pubs/pub41684/)
+* [Tupperware](https://engineering.fb.com/data-center-engineering/tupperware/)।
diff --git a/content/bn/docs/concepts/workloads/pods/sidecar-containers.md b/content/bn/docs/concepts/workloads/pods/sidecar-containers.md new file mode 100644 index 0000000000000..17a94f0d48ada --- /dev/null +++ b/content/bn/docs/concepts/workloads/pods/sidecar-containers.md @@ -0,0 +1,149 @@
+---
+title: সাইডকার কন্টেইনার
+content_type: concept
+weight: 50
+---
+
+{{< feature-state for_k8s_version="v1.29" state="beta" >}}
+
+সাইডকার কন্টেইনার হলো সেকেন্ডারি কন্টেইনার যা একই {{< glossary_tooltip text="পড" term_id="pod" >}}
+এর মধ্যে প্রধান অ্যাপ্লিকেশন কন্টেইনারের সাথে চলে।
+এই কন্টেইনারগুলি প্রাথমিক অ্যাপ্লিকেশন কোড সরাসরি পরিবর্তন না করেই লগিং, মনিটরিং, নিরাপত্তা
+বা ডেটা সিঙ্ক্রোনাইজেশনের মতো অতিরিক্ত পরিষেবা বা কার্যকারিতা প্রদান করে
+প্রধান অ্যাপ্লিকেশন কন্টেইনারের কার্যকারিতা বাড়াতে বা প্রসারিত করতে ব্যবহৃত হয়।
+
+সাধারণত, আপনার একটি পডে শুধুমাত্র একটি অ্যাপ কন্টেইনার থাকে। উদাহরণস্বরূপ, যদি আপনার কাছে একটি ওয়েব
+অ্যাপ্লিকেশন থাকে যার জন্য একটি লোকাল ওয়েবসার্ভার প্রয়োজন, লোকাল ওয়েব সার্ভারটি একটি সাইডকার এবং
+ওয়েব অ্যাপ্লিকেশনটি নিজেই অ্যাপ কন্টেইনার৷
+
+## কুবারনেটিসের মধ্যে সাইডকার কন্টেইনার {#pod-sidecar-containers}
+
+কুবারনেটিস সাইডকার কন্টেইনারগুলিকে [init কন্টেইনারের](/bn/docs/concepts/workloads/pods/init-containers/)
+একটি বিশেষ ক্ষেত্র হিসাবে প্রয়োগ করে; পড স্টার্টআপের পরেও
+সাইডকার কন্টেইনারগুলি চলমান থাকে। এই নথিটি _regular init containers_ শব্দটি ব্যবহার করে স্পষ্টভাবে
+সেই কন্টেইনারগুলিকে বোঝাতে যা শুধুমাত্র পড স্টার্টআপের সময় চলে।
+
+আপনার ক্লাস্টারে `SidecarContainers`
+[ফিচার গেট](/bn/docs/reference/command-line-tools-reference/feature-gates/) এনেবল করা
+থাকলে (কুবারনেটিস v1.29 থেকে ফিচারটি ডিফল্টরূপে সক্রিয় থাকে), আপনি পডের `initContainers` ক্ষেত্রে
+তালিকাভুক্ত কন্টেইনারগুলির জন্য একটি `restartPolicy` নির্দিষ্ট করতে পারেন।
+এই রিস্টার্টেবল _sidecar_ কন্টেইনারগুলি অন্যান্য init কন্টেইনার এবং
+একই পডের মধ্যে প্রধান অ্যাপ্লিকেশন কন্টেইনার(গুলি) থেকে স্বাধীন।
+এগুলি মূল অ্যাপ্লিকেশন কন্টেইনার এবং অন্যান্য init কন্টেইনারগুলিকে প্রভাবিত না করেই শুরু, বন্ধ
+এবং পুনরায় চালু করা যেতে পারে।
+
+আপনি এমন একাধিক কন্টেইনার নিয়েও একটি পড চালাতে পারেন যেগুলি init বা সাইডকার কন্টেইনার হিসাবে
+চিহ্নিত নয়৷ এটি উপযুক্ত, যদি পডের সামগ্রিক কাজের জন্য কন্টেইনারগুলির সবগুলিই
+প্রয়োজন হয়, কিন্তু কোন কন্টেইনার প্রথমে শুরু হবে বা থামবে তা নিয়ন্ত্রণ করার দরকার না থাকে৷
+আপনি যদি কুবারনেটিসের এমন পুরনো ভার্শনগুলি সমর্থন করতে চান যেগুলি কন্টেইনার-স্তরের `restartPolicy`
+ক্ষেত্র সমর্থন করে না, তবেও আপনি এটি করতে পারেন।
+
+### উদাহরণ অ্যাপ্লিকেশন {#sidecar-example}
+
+এখানে দুটি কন্টেইনার সহ একটি ডেপ্লয়মেন্টের উদাহরণ রয়েছে, যার মধ্যে একটি সাইডকার:
+
+{{% code_sample language="yaml" file="application/deployment-sidecar.yaml" %}}
+
+## সাইডকার কন্টেইনার এবং পড জীবনচক্র (Pod lifecycle)
+
+যদি একটি init কন্টেইনার তার `restartPolicy` `Always` সেট করে তৈরি করা হয়,
+তাহলে এটি শুরু হবে এবং পডের পুরো জীবনকাল জুড়ে চলতে থাকবে। এটি প্রধান অ্যাপ্লিকেশন
+কন্টেইনারগুলি থেকে আলাদা করে সহায়ক পরিষেবা চালানোর জন্য সহায়ক হতে পারে৷
+
+যদি এই init কন্টেইনারের জন্য একটি `readinessProbe` নির্দিষ্ট করা হয়, তাহলে এর ফলাফল
+পডের `Ready` অবস্থা নির্ধারণ করতে ব্যবহার করা হবে।
+
+যেহেতু এই কন্টেইনারগুলিকে init কন্টেইনার হিসাবে সংজ্ঞায়িত করা হয়, তাই তারা অন্যান্য
+init কন্টেইনারগুলির মতো একই ক্রম এবং অনুক্রমিক গ্যারান্টির সুবিধা পায়,
+যাতে সেগুলিকে জটিল পড ইনিশিয়ালাইজেশন প্রবাহে অন্যান্য init কন্টেইনারের সাথে মিশ্রিত করা যায়।
+
+রেগুলার init কন্টেইনারগুলির তুলনায়, `initContainers`-এর মধ্যে সংজ্ঞায়িত সাইডকারগুলি
+শুরু হওয়ার পরেও চলতে থাকে। এটি গুরুত্বপূর্ণ যখন একটি পডের `.spec.initContainers`
+এর ভিতরে একাধিক এন্ট্রি থাকে। একটি সাইডকার-স্টাইলের init কন্টেইনার চালু হওয়ার পরে (kubelet
+সেই init কন্টেইনারের জন্য `started` স্ট্যাটাসটিকে true সেট করেছে), kubelet তারপর অর্ডারকৃত
+`.spec.initContainers` তালিকা থেকে পরবর্তী init কন্টেইনার শুরু করে।
+সেই স্ট্যাটাসটি হয় true হয়ে যায় কারণ কন্টেইনারে একটি প্রক্রিয়া চলছে এবং কোনো স্টার্টআপ
+প্রোব (probe) সংজ্ঞায়িত করা হয়নি, অথবা এর `startupProbe` সফল হওয়ার ফলে।
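+
+`restartPolicy: Always` সহ একটি init কন্টেইনার (অর্থাৎ একটি সাইডকার) কেমন দেখায় তার একটি
+ছোট স্কেচ নিচে দেওয়া হলো; নাম, ইমেজ ও কমান্ডগুলি শুধুই অনুমানমূলক উদাহরণ:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sidecar-demo   # hypothetical name
+spec:
+  volumes:
+  - name: logs
+    emptyDir: {}
+  initContainers:
+  - name: log-shipper            # restartPolicy: Always এটিকে সাইডকার করে তোলে
+    image: busybox:1.28
+    restartPolicy: Always
+    command: ["sh", "-c", "tail -F /var/log/app/app.log"]
+    volumeMounts:
+    - name: logs
+      mountPath: /var/log/app
+  containers:
+  - name: app
+    image: busybox:1.28
+    command: ["sh", "-c", "while true; do date >> /var/log/app/app.log; sleep 5; done"]
+    volumeMounts:
+    - name: logs
+      mountPath: /var/log/app
+```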
+
+### সাইডকার কন্টেইনার সহ জবস (Jobs with sidecar containers)
+
+আপনি যদি কুবারনেটিস-স্টাইলের init কন্টেইনার ব্যবহার করে সাইডকার ব্যবহার করে এমন একটি জব (Job) ডিফাইন করেন,
+তবে প্রতিটি পডের সাইডকার কন্টেইনার মূল কন্টেইনার শেষ হওয়ার পরে জবটিকে
+সম্পূর্ণ হতে বাধা দেয় না।
+
+এখানে দুটি কন্টেইনার সহ একটি জবের উদাহরণ রয়েছে, যার মধ্যে একটি সাইডকার:
+
+{{% code_sample language="yaml" file="application/job/job-sidecar.yaml" %}}
+
+## অ্যাপ্লিকেশন কন্টেইনার থেকে পার্থক্য
+
+সাইডকার কন্টেইনারগুলি একই পডে _app containers_-এর পাশাপাশি চলে৷ যাইহোক, তারা প্রাইমারি
+অ্যাপ্লিকেশন লজিক এক্সিকিউট করে না; পরিবর্তে, তারা প্রধান অ্যাপ্লিকেশনকে সহায়ক
+কার্যকারিতা প্রদান করে।
+
+সাইডকার কন্টেইনারগুলির নিজস্ব স্বাধীন জীবনচক্র আছে। এগুলি রেগুলার কন্টেইনার থেকে স্বাধীনভাবে শুরু, বন্ধ
+এবং পুনরায় চালু করা যেতে পারে। এর মানে আপনি প্রাইমারি অ্যাপ্লিকেশনকে
+প্রভাবিত না করেই সাইডকার কন্টেইনার আপডেট, স্কেল বা মেইনটেইন করতে পারবেন।
+
+সাইডকার কন্টেইনার প্রাইমারি কন্টেইনারের সাথে একই নেটওয়ার্ক এবং স্টোরেজ নেমস্পেস
+শেয়ার করে। এই সহ-অবস্থান তাদের ঘনিষ্ঠভাবে ইন্টারঅ্যাক্ট করতে এবং রিসোর্স শেয়ার করতে দেয়।
+
+## init কন্টেইনার থেকে পার্থক্য
+
+সাইডকার কন্টেইনার প্রধান কন্টেইনারের পাশাপাশি কাজ করে, এর কার্যকারিতা প্রসারিত করে এবং
+অতিরিক্ত পরিষেবা প্রদান করে।
+
+সাইডকার কন্টেইনার প্রধান অ্যাপ্লিকেশন কন্টেইনারের সাথে একযোগে চলে। তারা পডের
+জীবনচক্র জুড়ে সক্রিয় থাকে এবং মূল কন্টেইনার থেকে স্বাধীনভাবে শুরু এবং
+বন্ধ করা যেতে পারে। [init কন্টেইনারের](/bn/docs/concepts/workloads/pods/init-containers/) থেকে ভিন্ন,
+সাইডকার কন্টেইনারগুলি তাদের জীবনচক্র নিয়ন্ত্রণ করতে [probe](/bn/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe) সমর্থন করে।
+
+সাইডকার কন্টেইনারগুলি প্রধান অ্যাপ্লিকেশন কন্টেইনারগুলির সাথে সরাসরি ইন্টারঅ্যাক্ট করতে পারে, কারণ
+init কন্টেইনারগুলির মতো তারা সবসময় একই নেটওয়ার্ক ভাগ করে এবং ঐচ্ছিকভাবে ভলিউম (ফাইলসিস্টেম)
+ভাগ করতে পারে।
+
+init কন্টেইনারগুলি মূল কন্টেইনারগুলি শুরু হওয়ার আগে বন্ধ হয়ে যায়, তাই init কন্টেইনারগুলি একটি পডে
+অ্যাপ কন্টেইনারের সাথে মেসেজ বিনিময় করতে পারে না। যেকোনো ডেটা পাসিং একমুখী হয়
+(উদাহরণস্বরূপ, একটি init কন্টেইনার একটি `emptyDir` ভলিউমের মধ্যে তথ্য রাখতে পারে)।
+
+## কন্টেইনারগুলির মধ্যে রিসোর্স ভাগাভাগি
+
+{{< comment >}}
+এই বিভাগটি [init কন্টেইনার](/bn/docs/concepts/workloads/pods/init-containers/) পৃষ্ঠাতেও রয়েছে।
+আপনি যদি এই বিভাগটি সম্পাদনা করেন তবে উভয় স্থানে পরিবর্তন করুন৷
+{{< /comment >}}
+
+init, সাইডকার এবং অ্যাপ কন্টেইনারগুলির এক্সিকিউশনের ক্রম বিবেচনায়, রিসোর্স ব্যবহারের জন্য
+নিম্নলিখিত নিয়মগুলি প্রযোজ্য (এই তালিকার পরের সংখ্যাগত স্কেচটিও দেখুন):
+
+* সমস্ত init কন্টেইনারে সংজ্ঞায়িত কোনো নির্দিষ্ট রিসোর্স রিকোয়েস্ট বা লিমিটের মধ্যে
+  সর্বোচ্চটি হলো *এফেক্টিভ init রিকোয়েস্ট/লিমিট*। যদি কোনো রিসোর্সের জন্য কোনো
+  লিমিট নির্দিষ্ট না থাকে, তবে সেটিই সর্বোচ্চ লিমিট হিসাবে বিবেচিত হয়।
+* একটি রিসোর্সের জন্য পডের *এফেক্টিভ রিকোয়েস্ট/লিমিট* হলো
+  [pod overhead](/bn/docs/concepts/scheduling-eviction/pod-overhead/) এবং নিচের দুটির মধ্যে উচ্চতরটির সমষ্টি:
+  * সেই রিসোর্সের জন্য সমস্ত non-init কন্টেইনারের (অ্যাপ এবং সাইডকার কন্টেইনার)
+    রিকোয়েস্ট/লিমিটের সমষ্টি
+  * সেই রিসোর্সের জন্য এফেক্টিভ init রিকোয়েস্ট/লিমিট
+* শিডিউলিং এফেক্টিভ রিকোয়েস্ট/লিমিটের উপর ভিত্তি করে করা হয়, যার অর্থ init কন্টেইনারগুলি
+  ইনিশিয়ালাইজেশনের জন্য এমন রিসোর্স সংরক্ষণ করতে পারে যা পডের বাকি জীবদ্দশায়
+  ব্যবহৃত হয় না।
+* পডের *এফেক্টিভ QoS (পরিষেবার গুণমান) টিয়ার* হলো init, সাইডকার এবং অ্যাপ
+  কন্টেইনার সবগুলির জন্য একই QoS টিয়ার।
+
+এফেক্টিভ পড রিকোয়েস্ট এবং লিমিটের উপর ভিত্তি করে কোটা এবং লিমিট প্রয়োগ
+করা হয়।
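+
+উপরের নিয়মগুলি কীভাবে কাজ করে তা বোঝাতে একটি অনুমানমূলক সংখ্যাগত স্কেচ; নাম ও মানগুলি
+শুধুই উদাহরণ:
+
+```yaml
+# অনুমানমূলক পড স্পেকের অংশ; শুধুমাত্র CPU রিকোয়েস্ট দেখানো হয়েছে
+spec:
+  initContainers:
+  - name: init-db              # রেগুলার init কন্টেইনার
+    resources:
+      requests:
+        cpu: 200m
+  - name: log-sidecar          # restartPolicy: Always, অর্থাৎ সাইডকার
+    restartPolicy: Always
+    resources:
+      requests:
+        cpu: 100m
+  containers:
+  - name: app
+    resources:
+      requests:
+        cpu: 300m
+# এফেক্টিভ CPU রিকোয়েস্ট = max(রেগুলার init 200m, সাইডকার 100m + অ্যাপ 300m = 400m)
+#                        = 400m (এর সাথে pod overhead, যদি থাকে, যোগ হয়)
+```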
+
+### সাইডকার কন্টেইনার এবং লিনাক্স cgroups {#cgroups}
+
+লিনাক্সে, পড-লেভেল কন্ট্রোল গ্রুপের (cgroups) জন্য রিসোর্স বরাদ্দ এফেক্টিভ পড রিকোয়েস্ট ও লিমিটের
+উপর ভিত্তি করে হয়, ঠিক শিডিউলারের মতোই।
+
+## {{% heading "whatsnext" %}}
+
+* [নেটিভ সাইডকার কন্টেইনার](/bn/blog/2023/08/25/native-sidecar-containers/) নিয়ে একটি ব্লগ পোস্ট পড়ুন।
+* [একটি পড তৈরি করা যাতে একটি init কন্টেইনার রয়েছে](/bn/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) সম্পর্কে পড়ুন।
+* [প্রোবের প্রকার](/bn/docs/concepts/workloads/pods/pod-lifecycle/#types-of-probe) সম্পর্কে জানুন: লাইভনেস, রেডিনেস এবং স্টার্টআপ প্রোব।
+* [pod overhead](/bn/docs/concepts/scheduling-eviction/pod-overhead/) সম্পর্কে জানুন।
diff --git a/content/bn/docs/contribute/_index.md b/content/bn/docs/contribute/_index.md new file mode 100644 index 0000000000000..db7d22f881f6b --- /dev/null +++ b/content/bn/docs/contribute/_index.md @@ -0,0 +1,31 @@
+---
+content_type: concept
+title: কুবারনেটিসে অবদান
+linktitle: অবদান
+main_menu: true
+no_list: true
+weight: 80
+card:
+  name: অবদান
+  weight: 10
+  title: কুবারনেটিসে অবদান
+---
+
+কুবারনেটিসে অবদান রাখার অনেক উপায় আছে। আপনি নতুন ফিচারগুলোর জন্য ডিজাইনে কাজ করতে পারেন,
+আমাদের কাছে ইতিমধ্যে থাকা কোড ডকুমেন্ট করতে পারেন, কিংবা আমাদের [ব্লগের](/bn/blog) জন্য লিখতে পারেন।
+আরও আছে: আপনি সেই নতুন ফিচারগুলো বাস্তবায়ন করতে পারেন বা বাগ ঠিক করতে পারেন৷ আপনি লোকেদের আমাদের
+অবদানকারী কমিউনিটিতে যোগ দিতে সাহায্য করতে পারেন, বা বিদ্যমান অবদানকারীদের সাপোর্ট করতে পারেন৷
+
+এই সমস্ত ভিন্ন উপায়ে প্রকল্পে পার্থক্য আনার বিষয়ে জানাতে, আমরা - কুবারনেটিস -
+একটি ডেডিকেটেড ওয়েবসাইট তৈরি করেছি: https://k8s.dev/।
+কুবারনেটিসে অবদান রাখার বিষয়ে আরও জানতে আপনি সেখানে যেতে পারেন।
+
+আপনি যদি বিশেষভাবে _এই_ ডকুমেন্টেশনে অবদান রাখার বিষয়ে জানতে চান, তাহলে পড়ুন
+[কুবারনেটিস ডকুমেন্টেশনে অবদান রাখুন](/docs/contribute/docs/)।
+
+এছাড়াও আপনি কুবারনেটিসে অবদান সম্পর্কে
+{{< glossary_tooltip text="CNCF" term_id="cncf" >}}
+এর [পৃষ্ঠাটি](https://contribute.cncf.io/contributors/projects/#kubernetes)
+পড়তে পারেন।
diff --git a/content/bn/docs/home/_index.md b/content/bn/docs/home/_index.md new file mode 100644 index 0000000000000..35ba8b18e96d0 --- /dev/null +++ b/content/bn/docs/home/_index.md @@ -0,0 +1,67 @@
+---
+# approvers:
+# - chenopis ( The list of approvers is not necessary for the localized version. However, it is included because it helps maintain a certain line break, which further aids in updating a file. That's why it's kept in comment form.
 )
+title: কুবারনেটিস ডকুমেন্টেশন
+noedit: true
+cid: docsHome
+layout: docsportal_home
+class: gridPage gridPageHome
+linkTitle: "ডকুমেন্টেশন"
+main_menu: true
+weight: 10
+hide_feedback: true
+menu:
+  main:
+    title: "ডকুমেন্টেশন"
+    weight: 10
+description: >
+  কুবারনেটিস হলো একটি ওপেন সোর্স কন্টেইনার অর্কেস্ট্রেশন ইঞ্জিন, যা দিয়ে কন্টেইনারাইজড অ্যাপ্লিকেশনের ডিপ্লয়মেন্ট, স্কেলিং এবং ম্যানেজমেন্ট করা যায়। ওপেন সোর্স প্রকল্পটি Cloud Native Computing Foundation দ্বারা হোস্ট করা হয়।
+overview: >
+  কুবারনেটিস হলো একটি ওপেন সোর্স কন্টেইনার অর্কেস্ট্রেশন ইঞ্জিন, যা দিয়ে কন্টেইনারাইজড অ্যাপ্লিকেশনের ডিপ্লয়মেন্ট, স্কেলিং এবং ম্যানেজমেন্ট করা যায়। ওপেন সোর্স প্রকল্পটি Cloud Native Computing Foundation (CNCF) দ্বারা হোস্ট করা হয়।
+cards:
+- name: concepts
+  title: "কুবারনেটিস বুঝুন"
+  description: "কুবারনেটিস এবং এর মৌলিক ধারণা সম্পর্কে জানুন।"
+  button: "ধারণা দেখুন"
+  button_path: "/bn/docs/concepts"
+- name: tutorials
+  title: "কুবারনেটিস ব্যবহার করুন"
+  description: "কুবারনেটিসে কীভাবে অ্যাপ্লিকেশন স্থাপন করতে হয় তা শিখতে টিউটোরিয়াল অনুসরণ করুন।"
+  button: "টিউটোরিয়াল দেখুন"
+  button_path: "/bn/docs/tutorials"
+- name: setup
+  title: "একটি K8s ক্লাস্টার সেট আপ করুন"
+  description: "আপনার রিসোর্স এবং প্রয়োজনের উপর ভিত্তি করে কুবারনেটিস চালান।"
+  button: "কুবারনেটিস সেট আপ করুন"
+  button_path: "/bn/docs/setup"
+- name: tasks
+  title: "কুবারনেটিস কীভাবে ব্যবহার করবেন তা শিখুন"
+  description: "সাধারণ টাস্কগুলো এবং পদক্ষেপগুলোর একটি সংক্ষিপ্ত ক্রম ব্যবহার করে কীভাবে সেগুলো সম্পাদন করা যায় তা দেখুন।"
+  button: "টাস্ক দেখুন"
+  button_path: "/bn/docs/tasks"
+- name: reference
+  title: রেফারেন্স তথ্য দেখুন
+  description: পরিভাষা, কমান্ড লাইন সিনট্যাক্স, API রিসোর্স প্রকার এবং সেটআপ টুল ডকুমেন্টেশন ব্রাউজ করুন।
+  button: "রেফারেন্স দেখুন"
+  button_path: "/bn/docs/reference"
+- name: contribute
+  title: কুবারনেটিসে অবদান
+  description: আপনি কীভাবে কুবারনেটিসকে আরও ভালো করতে সাহায্য করতে পারেন তা খুঁজে বের করুন।
+  button: "অবদান রাখার উপায় দেখুন"
+  button_path: "/bn/docs/contribute"
+- name: training
+  title: "প্রশিক্ষণ"
+  description: "কুবারনেটিসে সার্টিফাইড হন এবং আপনার ক্লাউড নেটিভ প্রকল্পগুলিকে সফল করুন!"
+  button: "প্রশিক্ষণ দেখুন"
+  button_path: "/bn/training"
+- name: Download
+  title: কুবারনেটিস ডাউনলোড করুন
+  description: কুবারনেটিস ইনস্টল করুন বা নতুন সংস্করণে আপগ্রেড করুন।
+  button: "কুবারনেটিস ডাউনলোড করুন"
+  button_path: "/bn/releases/download"
+- name: about
+  title: ডকুমেন্টেশন সম্পর্কে
+  description: এই ওয়েবসাইটটিতে কুবারনেটিসের বর্তমান এবং পূর্ববর্তী চারটি সংস্করণের ডকুমেন্টেশন রয়েছে।
+  button: "উপলব্ধ সংস্করণ দেখুন"
+  button_path: "/bn/docs/home/supported-doc-versions"
+---
diff --git a/content/bn/docs/home/supported-doc-versions.md b/content/bn/docs/home/supported-doc-versions.md new file mode 100644 index 0000000000000..1b9831ad463d6 --- /dev/null +++ b/content/bn/docs/home/supported-doc-versions.md @@ -0,0 +1,14 @@
+---
+title: উপলব্ধ ডকুমেন্টেশনের সংস্করণ
+content_type: custom
+layout: supported-versions
+weight: 10
+---
+
+এই ওয়েবসাইটটিতে কুবারনেটিসের বর্তমান সংস্করণ এবং
+কুবারনেটিসের আগের চারটি সংস্করণের ডকুমেন্টেশন রয়েছে।
+
+একটি কুবারনেটিস সংস্করণের ডকুমেন্টেশনের প্রাপ্যতা, এবং সেই রিলিজটি
+বর্তমানে সাপোর্টেড কিনা, এই দুটি আলাদা বিষয়।
+কুবারনেটিসের কোন সংস্করণ আনুষ্ঠানিকভাবে সাপোর্টেড, এবং কতদিনের জন্য,
+তা জানতে [সাপোর্ট সময়কাল](/bn/releases/patch-releases/#support-period) পড়ুন।
diff --git a/content/bn/docs/images/diagram-guide-example-3.svg b/content/bn/docs/images/diagram-guide-example-3.svg new file mode 100644 index 0000000000000..f72b797fe3130 --- /dev/null +++ b/content/bn/docs/images/diagram-guide-example-3.svg @@ -0,0 +1 @@
+[SVG সিকোয়েন্স ডায়াগ্রাম টেক্সট: me, control plane (api-server, etcd datastore, controller manager, scheduler), node (kubelet, container runtime); ধাপ: 1. kubectl create -f pod.yaml, 2. save new state, 3. check for changes, 4. watch for unassigned pod(s), 5. notify about pod w nodename=" ", 6. assign pod to node, 7. save new state, 8. look for newly assigned pod(s), 9. bind pod to node, 10. start container, 11. update pod status, 12. save new state]
diff --git a/content/bn/docs/images/gateway-kind-relationships.svg b/content/bn/docs/images/gateway-kind-relationships.svg new file mode 100644 index 0000000000000..f5ed657cf733b --- /dev/null +++ b/content/bn/docs/images/gateway-kind-relationships.svg @@ -0,0 +1 @@ +
[SVG ডায়াগ্রাম টেক্সট লেবেল: cluster, GatewayClass, Gateway, HTTPRoute]
diff --git a/content/bn/docs/images/gateway-request-flow.svg b/content/bn/docs/images/gateway-request-flow.svg new file mode 100644 index 0000000000000..e6b4c666b45f9 --- /dev/null +++ b/content/bn/docs/images/gateway-request-flow.svg @@ -0,0 +1 @@ + diff --git a/content/bn/docs/images/ha-control-plane-(bn-version).svg b/content/bn/docs/images/ha-control-plane-(bn-version).svg new file mode 100644 index 0000000000000..e680077825146 --- /dev/null +++ b/content/bn/docs/images/ha-control-plane-(bn-version).svg @@ -0,0 +1,5 @@ + + + + + diff --git a/content/bn/docs/images/ingress.svg b/content/bn/docs/images/ingress.svg new file mode 100644 index 0000000000000..ffd1b4c72e1d1 --- /dev/null +++ b/content/bn/docs/images/ingress.svg @@ -0,0 +1 @@ +
[SVG ডায়াগ্রাম টেক্সট লেবেল: cluster, Ingress-managed load balancer, routing rule, Ingress, Service, Pod, client]
diff --git a/content/bn/docs/images/ingressFanOut.svg b/content/bn/docs/images/ingressFanOut.svg new file mode 100644 index 0000000000000..d65422c663d66 --- /dev/null +++ b/content/bn/docs/images/ingressFanOut.svg @@ -0,0 +1 @@ +
[SVG ডায়াগ্রাম টেক্সট লেবেল: cluster, Ingress-managed load balancer, /foo, /bar, Ingress 178.91.123.132, Service service1:4200, Service service2:8080, Pod, client]
diff --git a/content/bn/docs/images/ingressNameBased.svg b/content/bn/docs/images/ingressNameBased.svg new file mode 100644 index 0000000000000..51a355d45923c --- /dev/null +++ b/content/bn/docs/images/ingressNameBased.svg @@ -0,0 +1 @@ +
[SVG ডায়াগ্রাম টেক্সট লেবেল: cluster, Ingress-managed load balancer, Host: foo.bar.com, Host: bar.foo.com, Ingress 178.91.123.132, Service service1:80, Service service2:80, Pod, client]
diff --git a/content/bn/docs/images/kubernetes-cluster-network.svg b/content/bn/docs/images/kubernetes-cluster-network.svg new file mode 100644 index 0000000000000..03add4e256018 --- /dev/null +++ b/content/bn/docs/images/kubernetes-cluster-network.svg @@ -0,0 +1 @@ + diff --git a/content/bn/docs/images/podSchedulingGates.svg b/content/bn/docs/images/podSchedulingGates.svg new file mode 100644 index 0000000000000..2d08f823ddf61 --- /dev/null +++ b/content/bn/docs/images/podSchedulingGates.svg @@ -0,0 +1 @@ +
[SVG ফ্লোচার্ট টেক্সট লেবেল: pod created, empty scheduling gates? (yes: pod scheduling ready, pod running; no: pod scheduling gated), scheduling gate removed]
diff --git a/content/bn/docs/images/tutor-service-nodePort-fig01.svg b/content/bn/docs/images/tutor-service-nodePort-fig01.svg new file mode 100644 index 0000000000000..b84ebb6f9919a --- /dev/null +++ b/content/bn/docs/images/tutor-service-nodePort-fig01.svg @@ -0,0 +1 @@ +
[SVG ডায়াগ্রাম টেক্সট লেবেল: client, Node 1, Node 2, SNAT, Endpoint]
diff --git a/content/bn/docs/images/tutor-service-nodePort-fig02.svg b/content/bn/docs/images/tutor-service-nodePort-fig02.svg new file mode 100644 index 0000000000000..7d084b859708b --- /dev/null +++ b/content/bn/docs/images/tutor-service-nodePort-fig02.svg @@ -0,0 +1 @@ +
[SVG ডায়াগ্রাম টেক্সট লেবেল: client, Node 1, Node 2, endpoint]
diff --git a/content/bn/docs/reference/_index.md b/content/bn/docs/reference/_index.md new file mode 100644 index 0000000000000..144e09b0844a2 --- /dev/null +++ b/content/bn/docs/reference/_index.md @@ -0,0 +1,115 @@
+---
+title: রেফারেন্স
+# approvers:
+# - chenopis ( The list of approvers is not necessary for the localized version. However, it is included because it helps maintain a certain line break, which further aids in updating a file. That's why it's kept in comment form. )
+linkTitle: "রেফারেন্স"
+main_menu: true
+weight: 70
+content_type: concept
+no_list: true
+---
+
+কুবারনেটিস ডকুমেন্টেশনের এই বিভাগে রেফারেন্স রয়েছে।
+
+## API রেফারেন্স
+
+- [শব্দকোষ](/bn/docs/reference/glossary/) - কুবারনেটিস পরিভাষার একটি ব্যাপক, প্রমিত তালিকা
+
+- [কুবারনেটিস API রেফারেন্স](/bn/docs/reference/kubernetes-api/)
+- [কুবারনেটিস {{< param "version" >}} এর জন্য এক-পৃষ্ঠা API রেফারেন্স](/docs/reference/generated/kubernetes-api/{{< param "version" >}}/)
+- [কুবারনেটিস API-এর ব্যবহার](/bn/docs/reference/using-api/) - কুবারনেটিস API-এর ওভারভিউ
+- [API অ্যাক্সেস কন্ট্রোল](/bn/docs/reference/access-authn-authz/) - কুবারনেটিস কীভাবে API অ্যাক্সেস কন্ট্রোল করে তার বিশদ বিবরণ
+- [সুপরিচিত লেবেল, Annotations এবং Taints](/bn/docs/reference/labels-annotations-taints/)
+
+## অফিসিয়ালি সাপোর্টেড ক্লায়েন্ট লাইব্রেরি
+
+একটি প্রোগ্রামিং ভাষা থেকে কুবারনেটিস API কল করতে, আপনি
+[ক্লায়েন্ট লাইব্রেরি](/bn/docs/reference/using-api/client-libraries/) ব্যবহার করতে পারেন। অফিসিয়ালি সাপোর্টেড
+ক্লায়েন্ট লাইব্রেরিগুলো:
+
+- [কুবারনেটিস Go ক্লায়েন্ট লাইব্রেরি](https://github.com/kubernetes/client-go/)
+- [কুবারনেটিস Python ক্লায়েন্ট লাইব্রেরি](https://github.com/kubernetes-client/python)
+- [কুবারনেটিস Java ক্লায়েন্ট লাইব্রেরি](https://github.com/kubernetes-client/java)
+- [কুবারনেটিস JavaScript ক্লায়েন্ট লাইব্রেরি](https://github.com/kubernetes-client/javascript)
+- [কুবারনেটিস C# ক্লায়েন্ট লাইব্রেরি](https://github.com/kubernetes-client/csharp)
+- [কুবারনেটিস Haskell ক্লায়েন্ট লাইব্রেরি](https://github.com/kubernetes-client/haskell)
+
+## CLI
+
+* [kubectl](/bn/docs/reference/kubectl/) - কমান্ড চালানো এবং কুবারনেটিস ক্লাস্টার পরিচালনার জন্য প্রধান CLI টুল।
+  * [JSONPath](/bn/docs/reference/kubectl/jsonpath/) - kubectl এর সাথে [JSONPath expressions](https://goessner.net/articles/JsonPath/) ব্যবহারের সিনট্যাক্স গাইড।
+* [kubeadm](/bn/docs/reference/setup-tools/kubeadm/) - CLI টুল যা সহজে একটি নিরাপদ কুবারনেটিস ক্লাস্টার প্রভিশন করতে পারে।
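+
+উদাহরণস্বরূপ, kubectl-এর সাথে একটি JSONPath এক্সপ্রেশন এভাবে ব্যবহার করা যেতে পারে
+(আউটপুট আপনার নিজের ক্লাস্টারের উপর নির্ভর করবে):
+
+```bash
+# ডিফল্ট নেমস্পেসের সব পডের নাম একটি JSONPath এক্সপ্রেশন দিয়ে বের করা
+kubectl get pods -o jsonpath='{.items[*].metadata.name}'
+```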
+
+## উপাদান
+
+* [kubelet](/bn/docs/reference/command-line-tools-reference/kubelet/) - প্রাথমিক
+  এজেন্ট যা প্রতিটি নোডে চলে। kubelet পডস্পেকসের একটি সেট নেয়
+  এবং নিশ্চিত করে যে বর্ণিত কন্টেইনারগুলো চলমান এবং সুস্থ আছে।
+* [kube-apiserver](/bn/docs/reference/command-line-tools-reference/kube-apiserver/) -
+  REST API যা API অবজেক্ট যেমন পড, সার্ভিস, রেপ্লিকেশন কন্ট্রোলারের জন্য
+  ডেটা যাচাই করে এবং কনফিগার করে।
+* [kube-controller-manager](/bn/docs/reference/command-line-tools-reference/kube-controller-manager/) -
+  ডেমন(daemon) যা কুবারনেটিসের সাথে পাঠানো মূল কন্ট্রোল লুপগুলোকে এম্বেড করে।
+* [kube-proxy](/bn/docs/reference/command-line-tools-reference/kube-proxy/) -
+  ব্যাক-এন্ডের একটি সেট জুড়ে সাধারণ TCP/UDP স্ট্রিম ফরওয়ার্ডিং বা রাউন্ড-রবিন TCP/UDP
+  ফরওয়ার্ডিং করতে পারে।
+* [kube-scheduler](/bn/docs/reference/command-line-tools-reference/kube-scheduler/) -
+  শিডিউলার যা প্রাপ্যতা, পারফরমেন্স এবং ক্ষমতা পরিচালনা করে।
+
+  * [শিডিউলার পলিসি](/bn/docs/reference/scheduling/policies)
+  * [শিডিউলার প্রোফাইল](/bn/docs/reference/scheduling/config#profiles)
+
+- [পোর্ট এবং প্রোটোকলের](/bn/docs/reference/ports-and-protocols/) তালিকা, যা
+  কন্ট্রোল প্লেন এবং ওয়ার্কার নোডগুলোতে খোলা রাখা উচিত
+
+## কনফিগ API গুলো
+
+এই বিভাগটি "unpublished" API-এর জন্য ডকুমেন্টেশন হোস্ট করে,
+যা কুবারনেটিস উপাদান বা টুল কনফিগার করতে ব্যবহৃত হয়।
+এই API গুলোর বেশিরভাগই API সার্ভার দ্বারা RESTful উপায়ে প্রকাশ করা হয় না,
+যদিও সেগুলো একটি ব্যবহারকারী বা অপারেটরের জন্য একটি ক্লাস্টার ব্যবহার বা পরিচালনা করার জন্য অপরিহার্য।
+
+* [kubeconfig (v1)](/bn/docs/reference/config-api/kubeconfig.v1/)
+* [kube-apiserver admission (v1)](/bn/docs/reference/config-api/apiserver-admission.v1/)
+* [kube-apiserver configuration (v1alpha1)](/bn/docs/reference/config-api/apiserver-config.v1alpha1/),
+  [kube-apiserver configuration (v1beta1)](/bn/docs/reference/config-api/apiserver-config.v1beta1/) এবং
+  [kube-apiserver configuration (v1)](/bn/docs/reference/config-api/apiserver-config.v1/)
+* [kube-apiserver event rate limit (v1alpha1)](/bn/docs/reference/config-api/apiserver-eventratelimit.v1alpha1/)
+* [kubelet configuration (v1alpha1)](/bn/docs/reference/config-api/kubelet-config.v1alpha1/),
+  [kubelet configuration (v1beta1)](/bn/docs/reference/config-api/kubelet-config.v1beta1/) এবং
+  [kubelet configuration (v1)](/bn/docs/reference/config-api/kubelet-config.v1/)
+* [kubelet credential providers (v1)](/bn/docs/reference/config-api/kubelet-credentialprovider.v1/)
+* [kube-scheduler configuration (v1beta3)](/bn/docs/reference/config-api/kube-scheduler-config.v1beta3/) এবং
+  [kube-scheduler configuration (v1)](/bn/docs/reference/config-api/kube-scheduler-config.v1/)
+* [kube-controller-manager configuration (v1alpha1)](/bn/docs/reference/config-api/kube-controller-manager-config.v1alpha1/)
+* [kube-proxy configuration (v1alpha1)](/bn/docs/reference/config-api/kube-proxy-config.v1alpha1/)
+* [`audit.k8s.io/v1` API](/bn/docs/reference/config-api/apiserver-audit.v1/)
+* [Client authentication API (v1beta1)](/bn/docs/reference/config-api/client-authentication.v1beta1/) এবং
+  [Client authentication API (v1)](/bn/docs/reference/config-api/client-authentication.v1/)
+* [WebhookAdmission configuration (v1)](/bn/docs/reference/config-api/apiserver-webhookadmission.v1/)
+* [ImagePolicy API (v1alpha1)](/bn/docs/reference/config-api/imagepolicy.v1alpha1/)
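+
+উপরের তালিকার কনফিগ API গুলোর একটির ব্যবহার কেমন দেখায় তার একটি ছোট স্কেচ: একটি
+ন্যূনতম kubelet কনফিগারেশন ফাইল। মানগুলি শুধুই অনুমানমূলক উদাহরণ:
+
+```yaml
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+address: "192.168.0.8"        # অনুমানমূলক নোড এড্রেস
+port: 20250
+serializeImagePulls: false
+evictionHard:
+  memory.available: "100Mi"
+```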
+
+## kubeadm এর জন্য কনফিগ API
+
+* [v1beta3](/bn/docs/reference/config-api/kubeadm-config.v1beta3/)
+* [v1beta4](/bn/docs/reference/config-api/kubeadm-config.v1beta4/)
+
+## এক্সটার্নাল API গুলো
+
+এগুলো হলো কুবারনেটিস প্রকল্প দ্বারা সংজ্ঞায়িত API, কিন্তু মূল প্রকল্প দ্বারা
+বাস্তবায়িত হয় না:
+
+* [Metrics API (v1beta1)](/bn/docs/reference/external-api/metrics.v1beta1/)
+* [Custom Metrics API (v1beta2)](/bn/docs/reference/external-api/custom-metrics.v1beta2)
+* [External Metrics API (v1beta1)](/bn/docs/reference/external-api/external-metrics.v1beta1)
+
+## ডিজাইন ডক্স
+
+কুবারনেটিস কার্যকারিতার জন্য ডিজাইন ডক্সের একটি সংরক্ষণাগার। শুরু করার জন্য ভালো জায়গা হলো
+[কুবারনেটিস আর্কিটেকচার](https://git.k8s.io/design-proposals-archive/architecture/architecture.md) এবং
+[কুবারনেটিস ডিজাইন ওভারভিউ](https://git.k8s.io/design-proposals-archive)।
diff --git a/content/bn/docs/reference/glossary/addons.md b/content/bn/docs/reference/glossary/addons.md new file mode 100644 index 0000000000000..03021f212c8d7 --- /dev/null +++ b/content/bn/docs/reference/glossary/addons.md @@ -0,0 +1,16 @@
+---
+title: অ্যাড-অন
+id: addons
+date: 2019-12-15
+full_link: /bn/docs/concepts/cluster-administration/addons/
+short_description: >
+  কুবারনেটিসের কার্যকারিতা প্রসারিত করে এমন রিসোর্স।
+
+aka:
+tags:
+- tool
+---
+ কুবারনেটিসের কার্যকারিতা প্রসারিত করে এমন রিসোর্স।
+
+[অ্যাডঅন ইনস্টল করা](/bn/docs/concepts/cluster-administration/addons/) আপনার ক্লাস্টারের সাথে অ্যাড-অন ব্যবহার করার বিষয়ে আরও ব্যাখ্যা করে এবং কিছু জনপ্রিয় অ্যাড-অন তালিকাভুক্ত করে।
diff --git a/content/bn/docs/reference/glossary/admission-controller.md b/content/bn/docs/reference/glossary/admission-controller.md new file mode 100644 index 0000000000000..b8ccf61c3e30a --- /dev/null +++ b/content/bn/docs/reference/glossary/admission-controller.md @@ -0,0 +1,22 @@
+---
+title: অ্যাডমিশন কন্ট্রোলার
+id: admission-controller
+date: 2019-06-28
+full_link: /bn/docs/reference/access-authn-authz/admission-controllers/
+short_description: >
+  কোডের একটি অংশ যা অবজেক্টের পার্সিস্টেন্সের(persistence) পূর্বে কুবারনেটিস API সার্ভারের অনুরোধগুলিকে বাধা দেয়।
+
+aka:
+tags:
+- extension
+- security
+---
+কোডের একটি অংশ যা অবজেক্টের পার্সিস্টেন্সের(persistence) পূর্বে কুবারনেটিস API সার্ভারের অনুরোধগুলিকে বাধা দেয়।
+
+অ্যাডমিশন কন্ট্রোলার কুবারনেটিস API সার্ভারের জন্য কনফিগারযোগ্য এবং "ভ্যালিডেটিং", "মিউটেটিং" বা
+উভয়ই হতে পারে। যেকোনো অ্যাডমিশন কন্ট্রোলার অনুরোধ প্রত্যাখ্যান করতে পারে। মিউটেটিং কন্ট্রোলারগুলি যে অবজেক্টগুলি স্বীকার করে সেগুলি পরিবর্তন করতে পারে;
+ভ্যালিডেটিং কন্ট্রোলারগুলি তা পারে না।
+
+* [কুবারনেটিস ডকুমেন্টেশনে অ্যাডমিশন কন্ট্রোলার](/bn/docs/reference/access-authn-authz/admission-controllers/)
diff --git a/content/bn/docs/reference/glossary/affinity.md b/content/bn/docs/reference/glossary/affinity.md new file mode 100644 index 0000000000000..45dbfd8cb1a3f --- /dev/null +++ b/content/bn/docs/reference/glossary/affinity.md @@ -0,0 +1,22 @@
+---
+title: অ্যাফিনিটি (Affinity)
+id: affinity
+date: 2019-01-11
+full_link: /bn/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+short_description: >
+  পড কোথায় রাখতে হবে তা নির্ধারণ করতে শিডিউলার দ্বারা ব্যবহৃত নিয়ম
+aka:
+tags:
+- fundamental
+---
+
+কুবারনেটিসে, _অ্যাফিনিটি_ হলো নিয়মগুলির একটি সেট, যা পডগুলি কোথায় রাখতে হবে সে সম্পর্কে শিডিউলারকে ইঙ্গিত দেয়।
+
+দুই ধরণের অ্যাফিনিটি রয়েছে:
+* [নোড অ্যাফিনিটি](/bn/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)
+* [পড-টু-পড অ্যাফিনিটি](/bn/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity)
+
+নিয়মগুলি কুবারনেটিস {{< glossary_tooltip term_id="label" text="লেবেল">}} এবং
+{{< glossary_tooltip term_id="pod" text="পডে" >}} উল্লেখিত {{< glossary_tooltip term_id="selector" text="নির্বাচক">}} ব্যবহার করে সংজ্ঞায়িত করা হয়,
+এবং আপনি শিডিউলারকে সেগুলি কতটা কঠোরভাবে প্রয়োগ করাতে চান তার উপর নির্ভর করে সেগুলি আবশ্যিক বা পছন্দসই হতে পারে।
diff --git a/content/bn/docs/reference/glossary/annotation.md b/content/bn/docs/reference/glossary/annotation.md new file mode 100644 index 0000000000000..b1e073e6bb9b2 --- /dev/null +++ b/content/bn/docs/reference/glossary/annotation.md @@ -0,0 +1,17 @@
+---
+title: এনোটেশন
+id: annotation
+date: 2018-04-12
+full_link: /docs/concepts/overview/working-with-objects/annotations
+short_description: >
+  একটি কী-ভ্যালু(key-value) জোড় যা অবজেক্টের সাথে ইচ্ছামত অ-শনাক্তকারী মেটাডেটা সংযুক্ত করতে ব্যবহৃত হয়।
+
+aka:
+tags:
+- fundamental
+---
+একটি কী-ভ্যালু(key-value) জোড় যা অবজেক্টের সাথে ইচ্ছামত অ-শনাক্তকারী মেটাডেটা সংযুক্ত করতে ব্যবহৃত হয়।
+
+এনোটেশনের মেটাডেটা ছোট অথবা বড়, বিন্যাসিত বা অবিন্যাসিত হতে পারে, এবং এটিতে {{< glossary_tooltip text="লেবেল" term_id="label" >}} দ্বারা অনুমোদিত নয় এমন ক্যারেক্টার থাকতে পারে। টুল এবং লাইব্রেরির মতো ক্লায়েন্ট এই মেটাডেটা পুনরুদ্ধার করতে পারে।
diff --git a/content/bn/docs/reference/glossary/app-container.md b/content/bn/docs/reference/glossary/app-container.md new file mode 100644 index 0000000000000..4f31f513e50bf --- /dev/null +++ b/content/bn/docs/reference/glossary/app-container.md @@ -0,0 +1,18 @@
+---
+title: অ্যাপ কন্টেইনার
+id: app-container
+date: 2019-02-12
+full_link:
+short_description: >
+  কোনো ওয়ার্কলোডের একটি অংশ চালানোর জন্য ব্যবহৃত কন্টেইনার। init কন্টেইনারের সাথে তুলনা করুন।
+
+aka:
+tags:
+- workload
+---
+অ্যাপ্লিকেশন কন্টেইনার (অথবা অ্যাপ কন্টেইনার) হলো একটি {{< glossary_tooltip text="পড" term_id="pod" >}}-এর মধ্যে থাকা সেই {{< glossary_tooltip text="কন্টেইনারগুলি" term_id="container" >}}, যা যেকোনো {{< glossary_tooltip text="ইনিশিয়ালাইজেশন কন্টেইনার" term_id="init-container" >}} সম্পন্ন হওয়ার পর চালু হয়।
+
+একটি init কন্টেইনার আপনাকে এমন ইনিশিয়ালাইজেশনের বিবরণ পৃথক করতে দেয়, যা সার্বিকভাবে {{< glossary_tooltip text="ওয়ার্কলোডের" term_id="workload" >}} জন্য গুরুত্বপূর্ণ, কিন্তু অ্যাপ্লিকেশন কন্টেইনার চালু হওয়ার পরে যার আর চলতে থাকার প্রয়োজন নেই।
+যদি একটি পডে কোনো init কন্টেইনার কনফিগার করা না থাকে, তবে সেই পডের সব কন্টেইনারই অ্যাপ কন্টেইনার।
diff --git a/content/bn/docs/reference/glossary/cluster.md b/content/bn/docs/reference/glossary/cluster.md new file mode 100644 index 0000000000000..1495610814ea8 --- /dev/null +++ b/content/bn/docs/reference/glossary/cluster.md @@ -0,0 +1,23 @@
+---
+title: ক্লাস্টার(Cluster)
+id: cluster
+date: 2019-06-15
+full_link:
+short_description: >
+  ওয়ার্কার(worker) মেশিনের একটি সেট, যাকে নোড বলা হয়, যা কন্টেইনারাইজড অ্যাপ্লিকেশন চালায়। প্রতিটি ক্লাস্টারে কমপক্ষে একটি ওয়ার্কার নোড থাকে।
+
+aka:
+tags:
+- fundamental
+- operation
+---
+ওয়ার্কার(worker) মেশিনের একটি সেট, যাকে {{< glossary_tooltip text="নোড" term_id="node" >}} বলা হয়,
+যা কন্টেইনারাইজড অ্যাপ্লিকেশন চালায়। প্রতিটি ক্লাস্টারে কমপক্ষে একটি ওয়ার্কার নোড থাকে।
+
+ওয়ার্কার নোড(গুলো) {{< glossary_tooltip text="পড" term_id="pod" >}} হোস্ট করে, যা
+অ্যাপ্লিকেশন ওয়ার্কলোডের উপাদান।
+{{< glossary_tooltip text="কন্ট্রোল প্লেন" term_id="control-plane" >}} ক্লাস্টারে থাকা
+ওয়ার্কার নোডগুলো এবং পডগুলো পরিচালনা করে। প্রোডাকশন পরিবেশে, কন্ট্রোল প্লেন সাধারণত একাধিক
+কম্পিউটার জুড়ে চলে এবং একটি ক্লাস্টার সাধারণত একাধিক নোড চালায়, যা
+ত্রুটি-সহনশীলতা(fault-tolerance) এবং উচ্চ প্রাপ্যতা(high-availability) প্রদান করে।
diff --git a/content/bn/docs/reference/glossary/configmap.md b/content/bn/docs/reference/glossary/configmap.md new file mode 100644 index 0000000000000..8d542977ecd7c --- /dev/null +++ b/content/bn/docs/reference/glossary/configmap.md @@ -0,0 +1,20 @@
+---
+title: কনফিগম্যাপ (ConfigMap)
+id: configmap
+date: 2018-04-12
+full_link: /bn/docs/concepts/configuration/configmap/
+short_description: >
+  একটি API অবজেক্ট যা কী-ভ্যালু (key-value) জোড়ায় অ-গোপনীয় ডেটা সংরক্ষণ করতে ব্যবহৃত হয়। এটি এনভায়রনমেন্ট ভেরিয়েবল, কমান্ড-লাইন আর্গুমেন্ট বা একটি ভলিউমে কনফিগারেশন ফাইল হিসাবে ব্যবহার করা যেতে পারে।
+
+aka:
+tags:
+- core-object
+---
+একটি API অবজেক্ট যা কী-ভ্যালু(key-value) জোড়ায় অ-গোপনীয় ডেটা সংরক্ষণ করতে ব্যবহৃত হয়।
+{{< glossary_tooltip text="পডগুলো" term_id="pod" >}} কনফিগম্যাপকে এনভায়রনমেন্ট ভেরিয়েবল,
+কমান্ড-লাইন আর্গুমেন্ট বা {{< glossary_tooltip text="ভলিউমে" term_id="volume" >}}
+কনফিগারেশন ফাইল হিসেবে ব্যবহার করতে পারে।
+
+একটি কনফিগম্যাপ (ConfigMap) আপনাকে আপনার {{< glossary_tooltip text="কন্টেইনার ইমেজ" term_id="image" >}} থেকে এনভায়রনমেন্ট-নির্দিষ্ট কনফিগারেশন আলাদা (decouple) করার সুযোগ দেয়, যাতে আপনার অ্যাপ্লিকেশনগুলি সহজেই পোর্টেবল হয়৷
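+
+কনফিগম্যাপ কীভাবে ব্যবহৃত হয় তার একটি ন্যূনতম স্কেচ; নাম ও মানগুলি শুধুই অনুমানমূলক উদাহরণ:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: app-config            # hypothetical name
+data:
+  APP_MODE: "production"
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: configmap-demo
+spec:
+  containers:
+  - name: app
+    image: busybox:1.28
+    # কনফিগম্যাপের কী-ভ্যালু জোড়গুলি এনভায়রনমেন্ট ভেরিয়েবল হিসাবে পাওয়া যাবে
+    command: ["sh", "-c", "echo $APP_MODE && sleep 3600"]
+    envFrom:
+    - configMapRef:
+        name: app-config
+```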
diff --git a/content/bn/docs/reference/glossary/container-runtime.md b/content/bn/docs/reference/glossary/container-runtime.md new file mode 100644 index 0000000000000..aa3c681a790ab --- /dev/null +++ b/content/bn/docs/reference/glossary/container-runtime.md @@ -0,0 +1,22 @@
+---
+title: কন্টেইনার রানটাইম
+id: container-runtime
+date: 2019-06-05
+full_link: /docs/setup/production-environment/container-runtimes
+short_description: >
+  কন্টেইনার রানটাইম হলো সেই সফটওয়্যার যা কন্টেইনার চালানোর জন্য দায়ী।
+
+aka:
+tags:
+- fundamental
+- workload
+---
+ একটি মৌলিক উপাদান যা কুবারনেটিসকে কার্যকরভাবে কন্টেইনার চালানোর ক্ষমতা দেয়।
+ এটি কুবারনেটিস পরিবেশের মধ্যে কন্টেইনারগুলির এক্সিকিউশন এবং জীবনচক্র পরিচালনার জন্য দায়ী।
+
+কুবারনেটিস বিভিন্ন কন্টেইনার রানটাইম সমর্থন করে, যেমন
+{{< glossary_tooltip term_id="containerd" >}}, {{< glossary_tooltip term_id="cri-o" >}},
+এবং [কুবারনেটিস CRI (কন্টেইনার রানটাইম
+ইন্টারফেস)](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/container-runtime-interface.md) এর অন্য যেকোনো বাস্তবায়ন।
\ No newline at end of file
diff --git a/content/bn/docs/reference/glossary/container.md b/content/bn/docs/reference/glossary/container.md new file mode 100644 index 0000000000000..58cb674855621 --- /dev/null +++ b/content/bn/docs/reference/glossary/container.md @@ -0,0 +1,19 @@
+---
+title: কন্টেইনার(Container)
+id: container
+date: 2018-04-12
+full_link: /bn/docs/concepts/containers/
+short_description: >
+  একটি লাইটওয়েট এবং পোর্টেবল এক্সিকিউটেবল ইমেজ যা সফ্টওয়্যার এবং এর সকল ডিপেন্ডেন্সি ধারণ করে।
+
+aka:
+tags:
+- fundamental
+- workload
+---
+ একটি লাইটওয়েট এবং পোর্টেবল এক্সিকিউটেবল ইমেজ যা সফ্টওয়্যার এবং এর সকল ডিপেন্ডেন্সি ধারণ করে।
+
+কন্টেইনার হোস্ট ইনফ্রাস্ট্রাকচার থেকে অ্যাপ্লিকেশনকে আলাদা(decouple) করে, যাতে বিভিন্ন ক্লাউড এবং OS এনভায়রনমেন্টে ডিপ্লয়মেন্ট সহজ হয় এবং সহজে স্কেলিং করা যায়।
+যে অ্যাপ্লিকেশনগুলি কন্টেইনারের ভিতরে চলে তাদের কন্টেইনারাইজড অ্যাপ্লিকেশন বলা হয়। এই অ্যাপ্লিকেশনগুলো এবং তাদের ডিপেন্ডেন্সিগুলোকে বান্ডেল করার প্রক্রিয়াকে কন্টেইনারাইজেশন বলা হয়।
diff --git a/content/bn/docs/reference/glossary/control-plane.md b/content/bn/docs/reference/glossary/control-plane.md new file mode 100644 index 0000000000000..8d6d0cc85e9c0 --- /dev/null +++ b/content/bn/docs/reference/glossary/control-plane.md @@ -0,0 +1,25 @@
+---
+title: কন্ট্রোল প্লেন
+id: control-plane
+date: 2019-05-12
+full_link:
+short_description: >
+  কন্টেইনার অর্কেস্ট্রেশন লেয়ার যা কন্টেইনারের জীবনচক্র সংজ্ঞায়িত, ডেপ্লয় এবং পরিচালনা করতে API এবং ইন্টারফেসগুলিকে প্রকাশ করে।
+
+aka:
+tags:
+- fundamental
+---
+কন্টেইনার অর্কেস্ট্রেশন লেয়ার যা কন্টেইনারের জীবনচক্র সংজ্ঞায়িত, ডেপ্লয় এবং পরিচালনা করতে API এবং ইন্টারফেসগুলিকে প্রকাশ করে।
+
+ এই লেয়ারটি বিভিন্ন উপাদান দ্বারা গঠিত, যেমন (কিন্তু এতে সীমাবদ্ধ নয়):
+
+ * {{< glossary_tooltip text="etcd" term_id="etcd" >}}
+ * {{< glossary_tooltip text="API সার্ভার" term_id="kube-apiserver" >}}
+ * {{< glossary_tooltip text="শিডিউলার" term_id="kube-scheduler" >}}
+ * {{< glossary_tooltip text="কন্ট্রোলার ম্যানেজার" term_id="kube-controller-manager" >}}
+ * {{< glossary_tooltip text="ক্লাউড কন্ট্রোলার ম্যানেজার" term_id="cloud-controller-manager" >}}
+
+ এই উপাদানগুলি ট্রাডিশনাল অপারেটিং সিস্টেম সার্ভিস (daemons) বা কন্টেইনার হিসাবে চালানো যেতে পারে। এই উপাদানগুলি চালানো হোস্টগুলিকে ঐতিহাসিকভাবে {{< glossary_tooltip text="masters" term_id="master" >}} বলা হত।
diff --git a/content/bn/docs/reference/glossary/daemonset.md b/content/bn/docs/reference/glossary/daemonset.md new file mode 100644 index 0000000000000..592b08d3d9d8a --- /dev/null +++ b/content/bn/docs/reference/glossary/daemonset.md @@ -0,0 +1,19 @@
+---
+title: ডেমনসেট
+id: daemonset
+date: 2018-04-12
+full_link: /bn/docs/concepts/workloads/controllers/daemonset
+short_description: >
+  একটি পডের একটি কপি একটি ক্লাস্টারে নোডগুলির একটি সেট জুড়ে চলছে তা নিশ্চিত করে৷
+
+aka:
+tags:
+- fundamental
+- core-object
+- workload
+---
+ একটি {{< glossary_tooltip text="পডের" term_id="pod" >}} একটি কপি একটি {{< glossary_tooltip text="ক্লাস্টারে" term_id="cluster" >}} নোডগুলির একটি সেট জুড়ে চলছে তা নিশ্চিত করে৷
+
+লগ কালেক্টর এবং মনিটরিং এজেন্টের মতো সিস্টেম ডেমন(daemon) ডেপ্লয় করতে ব্যবহৃত হয়, যেগুলিকে সাধারণত প্রতিটি {{< glossary_tooltip term_id="node" >}}-এ চলতে হয়।
diff --git a/content/bn/docs/reference/glossary/deployment.md b/content/bn/docs/reference/glossary/deployment.md new file mode 100644 index 0000000000000..496194415ce59 --- /dev/null +++ b/content/bn/docs/reference/glossary/deployment.md @@ -0,0 +1,21 @@
+---
+title: ডিপ্লয়মেন্ট(Deployment)
+id: deployment
+date: 2018-04-12
+full_link: /bn/docs/concepts/workloads/controllers/deployment/
+short_description: >
+  আপনার ক্লাস্টারে একটি প্রতিলিপিকৃত(replicated) অ্যাপ্লিকেশন পরিচালনা করে।
+
+aka:
+tags:
+- fundamental
+- core-object
+- workload
+---
+ একটি API অবজেক্ট যা একটি প্রতিলিপিকৃত অ্যাপ্লিকেশন পরিচালনা করে, সাধারণত লোকাল স্টেট ব্যতীত পড চালানোর মাধ্যমে।
+
+প্রতিটি প্রতিলিপি একটি {{< glossary_tooltip term_id="pod" >}} দ্বারা উপস্থাপিত হয়, এবং পডগুলি
+একটি ক্লাস্টারের {{< glossary_tooltip text="নোড" term_id="node" >}}গুলির মধ্যে বিতরণ করা হয়।
+যেসকল ওয়ার্কলোডের জন্য লোকাল স্টেট প্রয়োজন, তাদের জন্য {{< glossary_tooltip term_id="StatefulSet" >}} ব্যবহার করা বিবেচনা করুন।
diff --git a/content/bn/docs/reference/glossary/ephemeral-container.md b/content/bn/docs/reference/glossary/ephemeral-container.md new file mode 100644 index 0000000000000..8b15c76c763d1 --- /dev/null +++ b/content/bn/docs/reference/glossary/ephemeral-container.md @@ -0,0 +1,19 @@
+---
+title: ইফেমেরাল কন্টেইনার(Ephemeral Container)
+id: ephemeral-container
+date: 2019-08-26
+full_link: /bn/docs/concepts/workloads/pods/ephemeral-containers/
+short_description: >
+  এক ধরনের কন্টেইনার যা আপনি অস্থায়ীভাবে একটি পডের ভিতরে চালাতে পারেন।
+
+aka:
+tags:
+- fundamental
+---
+এক ধরনের {{< glossary_tooltip term_id="container" >}} যা আপনি অস্থায়ীভাবে একটি {{< glossary_tooltip term_id="pod" >}}-এর ভিতরে চালাতে পারেন।
+
+আপনি যদি সমস্যা নিয়ে চলমান একটি পড তদন্ত করতে চান, তবে আপনি সেই পডে একটি ইফেমেরাল কন্টেইনার যোগ করতে পারেন এবং ডায়াগনস্টিকস চালাতে পারেন। ইফেমেরাল কন্টেইনারগুলির কোনও রিসোর্স বা শিডিউলিং গ্যারান্টি নেই এবং ওয়ার্কলোডের কোনও অংশ চালানোর জন্য সেগুলি আপনার ব্যবহার করা উচিত নয়।
+
+ইফেমেরাল কনটেইনার {{< glossary_tooltip text="স্ট্যাটিক পড" term_id="static-pod" >}} দ্বারা সাপোর্টেড নয়৷
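+
+একটি চলমান পডে কীভাবে একটি ইফেমেরাল ডিবাগ কন্টেইনার যোগ করা যায় তার একটি ছোট উদাহরণ;
+পড ও টার্গেট কন্টেইনারের নামগুলি অনুমানমূলক:
+
+```bash
+# my-pod পডে একটি ডিবাগ কন্টেইনার যোগ করুন, app কন্টেইনারকে টার্গেট করে
+kubectl debug -it my-pod --image=busybox:1.28 --target=app
+```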
diff --git a/content/bn/docs/reference/glossary/gateway.md b/content/bn/docs/reference/glossary/gateway.md new file mode 100644 index 0000000000000..a803065d25499 --- /dev/null +++ b/content/bn/docs/reference/glossary/gateway.md @@ -0,0 +1,18 @@
+---
+title: গেটওয়ে API
+id: gateway-api
+date: 2023-10-19
+full_link: /docs/concepts/services-networking/gateway/
+short_description: >
+  কুবারনেটিসে সার্ভিস নেটওয়ার্কিং মডেল করার জন্য একটি API।
+
+aka:
+tags:
+- networking
+- architecture
+- extension
+---
+কুবারনেটিসে সার্ভিস নেটওয়ার্কিং মডেল করার জন্য API ধরণের একটি ফ্যামিলি।
+
+গেটওয়ে API কুবারনেটিসে সার্ভিস নেটওয়ার্কিং মডেল করার জন্য এক্সটেনসিবল, রোল-ভিত্তিক, প্রোটোকল-সচেতন
+API ধরণের একটি ফ্যামিলি সরবরাহ করে।
diff --git a/content/bn/docs/reference/glossary/index.md b/content/bn/docs/reference/glossary/index.md new file mode 100644 index 0000000000000..a7ba4589ea824 --- /dev/null +++ b/content/bn/docs/reference/glossary/index.md @@ -0,0 +1,15 @@
+---
+# approvers:
+# - chenopis
+# - abiogenesis-now ( The list of approvers is not necessary for the localized version. However, it is included because it helps maintain a certain line break, which further aids in updating a file. That's why it's kept in comment form. )
+title: শব্দকোষ
+layout: glossary
+noedit: true
+body_class: glossary
+default_active_tag: fundamental
+weight: 5
+card:
+  name: reference
+  weight: 10
+  title: শব্দকোষ
+---
diff --git a/content/bn/docs/reference/glossary/init-container.md b/content/bn/docs/reference/glossary/init-container.md new file mode 100644 index 0000000000000..9ff121cc66349 --- /dev/null +++ b/content/bn/docs/reference/glossary/init-container.md @@ -0,0 +1,20 @@
+---
+title: ইনিট কন্টেইনার(Init Container)
+id: init-container
+date: 2018-04-12
+full_link: /bn/docs/concepts/workloads/pods/init-containers/
+short_description: >
+  এক বা একাধিক ইনিশিয়ালাইজেশন কন্টেইনার, যেগুলিকে কোনো অ্যাপ কন্টেইনার চালু হওয়ার আগে অবশ্যই সম্পূর্ণভাবে চলে শেষ হতে হবে।
+aka:
+tags:
+- fundamental
+---
+ এক বা একাধিক ইনিশিয়ালাইজেশন {{< glossary_tooltip text="কন্টেইনার" term_id="container" >}}, যেগুলিকে কোনো অ্যাপ কন্টেইনার চালু হওয়ার আগে অবশ্যই সম্পূর্ণভাবে চলে শেষ হতে হবে।
+
+ইনিশিয়ালাইজেশন(ইনিট) কন্টেইনারগুলি সাধারণ অ্যাপ কন্টেইনারগুলির মতোই, একটি পার্থক্য সহ: কোনো অ্যাপ কন্টেইনার শুরু হওয়ার আগে ইনিট কন্টেইনারগুলিকে অবশ্যই সম্পূর্ণ হতে হবে৷ ইনিট কন্টেইনারগুলো ধারাবাহিকভাবে চলে: প্রতিটি ইনিট কন্টেইনারকে অবশ্যই পরবর্তীটি শুরু হওয়ার আগে সফলভাবে সম্পূর্ণ হতে হবে।
+
+{{< glossary_tooltip text="সাইডকার কন্টেইনার" term_id="sidecar-container" >}}-এর বিপরীতে, ইনিট কন্টেইনারগুলি পড স্টার্টআপের পরে চলতে থাকে না।
+
+আরও তথ্যের জন্য, পড়ুন [ইনিট কন্টেইনার](/bn/docs/concepts/workloads/pods/init-containers/)।
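+
+ইনিট কন্টেইনার কীভাবে সংজ্ঞায়িত করা হয় তার একটি ন্যূনতম স্কেচ; নাম, ইমেজ ও কমান্ডগুলি
+শুধুই অনুমানমূলক উদাহরণ:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: init-demo
+spec:
+  initContainers:
+  - name: wait-for-db          # অ্যাপ কন্টেইনার শুরুর আগে সফলভাবে শেষ হতে হবে
+    image: busybox:1.28
+    command: ["sh", "-c", "until nslookup mydb; do echo waiting; sleep 2; done"]
+  containers:
+  - name: app
+    image: busybox:1.28
+    command: ["sh", "-c", "echo app started && sleep 3600"]
+```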
diff --git a/content/bn/docs/reference/glossary/job.md b/content/bn/docs/reference/glossary/job.md new file mode 100644 index 0000000000000..0ff5d633866f1 --- /dev/null +++ b/content/bn/docs/reference/glossary/job.md @@ -0,0 +1,19 @@
+---
+title: জব
+id: job
+date: 2018-04-12
+full_link: /bn/docs/concepts/workloads/controllers/job/
+short_description: >
+  একটি সীমিত বা ব্যাচ টাস্ক যা সম্পূর্ণ হওয়া পর্যন্ত চলে৷
+
+aka:
+tags:
+- fundamental
+- core-object
+- workload
+---
+ একটি সীমিত বা ব্যাচ টাস্ক যা সম্পূর্ণ হওয়া পর্যন্ত চলে৷
+
+এক বা একাধিক {{< glossary_tooltip term_id="pod" >}} অবজেক্ট তৈরি করে এবং নিশ্চিত করে যে তাদের একটি নির্দিষ্ট সংখ্যা সফলভাবে সমাপ্ত হয়েছে। পডগুলো সফলভাবে সম্পন্ন হওয়ার সাথে সাথে জব(Job) সফল সমাপ্তিগুলিকে ট্র্যাক করে৷
diff --git a/content/bn/docs/reference/glossary/kube-apiserver.md b/content/bn/docs/reference/glossary/kube-apiserver.md new file mode 100644 index 0000000000000..5d0a74c8df35b --- /dev/null +++ b/content/bn/docs/reference/glossary/kube-apiserver.md @@ -0,0 +1,23 @@
+---
+title: API সার্ভার
+id: kube-apiserver
+date: 2018-04-12
+full_link: /bn/docs/concepts/overview/components/#kube-apiserver
+short_description: >
+  কন্ট্রোল প্লেন উপাদান যা কুবারনেটিস API পরিবেশন করে।
+
+aka:
+- kube-apiserver
+tags:
+- architecture
+- fundamental
+---
+ API সার্ভার হলো কুবারনেটিস {{< glossary_tooltip text="কন্ট্রোল প্লেন" term_id="control-plane" >}}
+এর একটি উপাদান যা কুবারনেটিস API-কে প্রকাশ করে৷
+API সার্ভার কুবারনেটিস কন্ট্রোল প্লেনের ফ্রন্ট এন্ড হিসেবে কাজ করে।
+
+কুবারনেটিস API সার্ভারের প্রধান বাস্তবায়ন হলো [kube-apiserver](/docs/reference/generated/kube-apiserver/)।
+kube-apiserver অনুভূমিকভাবে স্কেল করার জন্য ডিজাইন করা হয়েছে, অর্থাৎ এটি আরও বেশি ইনস্ট্যান্স ডিপ্লয় করে স্কেল করে।
+আপনি একাধিক kube-apiserver ইনস্ট্যান্স চালাতে পারেন এবং সেই ইনস্ট্যান্সগুলির মধ্যে ট্রাফিক ব্যালেন্স করতে পারেন।
diff --git a/content/bn/docs/reference/glossary/mirror-pod.md b/content/bn/docs/reference/glossary/mirror-pod.md new file mode 100644 index 0000000000000..56ba89ba383bd --- /dev/null +++ b/content/bn/docs/reference/glossary/mirror-pod.md @@ -0,0 +1,22 @@
+---
+title: মিরর-পড
+id: mirror-pod
+date: 2019-08-06
+short_description: >
+  API সার্ভারের একটি অবজেক্ট যা একটি kubelet-এ একটি স্ট্যাটিক পড ট্র্যাক করে।
+
+aka:
+tags:
+- fundamental
+---
+
+একটি {{< glossary_tooltip text="পড" term_id="pod" >}} অবজেক্ট, যা একটি {{< glossary_tooltip text="kubelet" term_id="kubelet" >}}
+একটি {{< glossary_tooltip text="স্ট্যাটিক পড" term_id="static-pod" >}} উপস্থাপন করতে ব্যবহার করে।
+
+যখন kubelet তার কনফিগারেশনে একটি স্ট্যাটিক পড খুঁজে পায়, তখন এটি স্বয়ংক্রিয়ভাবে
+এটির জন্য কুবারনেটিস API সার্ভারে একটি পড অবজেক্ট তৈরি করার চেষ্টা করে।
+এর মানে হলো যে পডটি API সার্ভারে দৃশ্যমান হবে, কিন্তু সেখান থেকে কন্ট্রোল করা যাবে না।
+
+(উদাহরণস্বরূপ, একটি মিরর পড অপসারণ করলে kubelet ডেমন(daemon) এটি চালানো বন্ধ করবে না।)
diff --git a/content/bn/docs/reference/glossary/namespace.md b/content/bn/docs/reference/glossary/namespace.md new file mode 100644 index 0000000000000..05de1bb9c367a --- /dev/null +++ b/content/bn/docs/reference/glossary/namespace.md @@ -0,0 +1,17 @@
+---
+title: নেমস্পেস(namespace)
+id: namespace
+date: 2018-04-12
+full_link: /bn/docs/concepts/overview/working-with-objects/namespaces
+short_description: >
+  একটি একক ক্লাস্টারের মধ্যে রিসোর্স গ্রুপের আইসোলেশন সাপোর্ট করতে কুবারনেটিস যে অ্যাবস্ট্রাকশন ব্যবহার করে।
+
+aka:
+tags:
+- fundamental
+---
+একটি একক {{< glossary_tooltip
text="ক্লাস্টার" term_id="cluster" >}}-এর মধ্যে রিসোর্স গ্রুপের আইসোলেশন সাপোর্ট করতে কুবারনেটিস যে অ্যাবস্ট্রাকশন ব্যবহার করে।
+
+নেমস্পেস একটি ক্লাস্টারে অবজেক্টগুলিকে সংগঠিত করতে এবং ক্লাস্টার রিসোর্সগুলোকে বিভক্ত করার উপায় প্রদান করতে ব্যবহৃত হয়। রিসোর্সের নাম একটি নেমস্পেসের মধ্যে অনন্য(unique) হতে হবে, কিন্তু নেমস্পেস জুড়ে নয়। নেমস্পেস-ভিত্তিক স্কোপিং শুধুমাত্র নেমস্পেসের মধ্যে থাকা অবজেক্টের জন্য প্রযোজ্য (যেমন ডিপ্লয়মেন্ট, সার্ভিস, ইত্যাদি), ক্লাস্টার-ওয়াইড অবজেক্টের জন্য নয় (যেমন স্টোরেজক্লাস, নোড, পারসিস্টেন্ট ভলিউম, ইত্যাদি)।
diff --git a/content/bn/docs/reference/glossary/node.md b/content/bn/docs/reference/glossary/node.md new file mode 100644 index 0000000000000..6d7b9f87d100a --- /dev/null +++ b/content/bn/docs/reference/glossary/node.md @@ -0,0 +1,19 @@
+---
+title: নোড
+id: node
+date: 2018-04-12
+full_link: /docs/concepts/architecture/nodes/
+short_description: >
+  নোড হলো কুবারনেটিসে একটি ওয়ার্কার মেশিন।
+
+aka:
+tags:
+- fundamental
+---
+নোড হলো কুবারনেটিসে একটি ওয়ার্কার মেশিন।
+
+ক্লাস্টারের উপর নির্ভর করে একটি ওয়ার্কার নোড একটি ভার্চুয়াল মেশিন বা ফিজিক্যাল মেশিন হতে পারে। এটিতে {{< glossary_tooltip text="পডগুলোকে" term_id="pod" >}} চালানোর জন্য প্রয়োজনীয় সকল লোকাল ডেমন(daemons) বা সার্ভিস আছে এবং এটি কন্ট্রোল প্লেন দ্বারা পরিচালিত হয়। একটি নোডের ডেমনগুলোর মধ্যে থাকে {{< glossary_tooltip text="kubelet" term_id="kubelet" >}}, {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}}, এবং একটি কন্টেইনার রানটাইম যা {{< glossary_tooltip text="CRI" term_id="cri" >}} বাস্তবায়ন করে, যেমন {{< glossary_tooltip term_id="docker" >}}।
+
+প্রাথমিক কুবারনেটিস সংস্করণে, নোডগুলি "Minions" হিসেবে পরিচিত ছিল।
diff --git a/content/bn/docs/reference/glossary/object.md b/content/bn/docs/reference/glossary/object.md new file mode 100644 index 0000000000000..2ba25d021bbcb --- /dev/null +++ b/content/bn/docs/reference/glossary/object.md @@ -0,0 +1,21 @@
+---
+title: অবজেক্ট
+id: object
+date: 2020-10-12
+full_link: /bn/docs/concepts/overview/working-with-objects/#kubernetes-objects
+short_description: >
+  কুবারনেটিস সিস্টেমের একটি সত্তা, যা আপনার ক্লাস্টারের অবস্থার অংশ প্রতিনিধিত্ব করে।
+aka:
+tags:
+- fundamental
+---
+ কুবারনেটিস সিস্টেমের একটি সত্তা। কুবারনেটিস API আপনার ক্লাস্টারের অবস্থা উপস্থাপন করতে
+এই সত্তাগুলি ব্যবহার করে।
+
+একটি কুবারনেটিস অবজেক্ট সাধারণত একটি "record of intent" - একবার আপনি অবজেক্টটি তৈরি করলে,
+কুবারনেটিস {{< glossary_tooltip text="কন্ট্রোল প্লেনটি" term_id="control-plane" >}}
+এটি যে আইটেমের প্রতিনিধিত্ব করে সেটি আসলে বিদ্যমান আছে কিনা তা নিশ্চিত করতে ক্রমাগত কাজ করে।
+একটি অবজেক্ট তৈরি করে, আপনি কুবারনেটিস সিস্টেমকে কার্যকরভাবে বলছেন যে আপনি আপনার ক্লাস্টারের
+ওয়ার্কলোডের সেই অংশটি কেমন দেখতে চান; এটি আপনার ক্লাস্টারের কাঙ্ক্ষিত অবস্থা।
diff --git a/content/bn/docs/reference/glossary/pod-disruption.md b/content/bn/docs/reference/glossary/pod-disruption.md new file mode 100644 index 0000000000000..af80c0d9181ed --- /dev/null +++ b/content/bn/docs/reference/glossary/pod-disruption.md @@ -0,0 +1,24 @@
+---
+id: pod-disruption
+title: পড ব্যাঘাত
+full_link: /bn/docs/concepts/workloads/pods/disruptions/
+date: 2021-05-12
+short_description: >
+  প্রক্রিয়া যার মাধ্যমে নোডের পডগুলো স্বেচ্ছায় বা অনিচ্ছাকৃতভাবে বন্ধ করা হয়।
+
+aka:
+related:
+  - pod
+  - container
+tags:
+  - operation
+---
+
+[পড-ব্যাঘাত](/bn/docs/concepts/workloads/pods/disruptions/) হলো সেই প্রক্রিয়া যার
+মাধ্যমে নোডের পডগুলো স্বেচ্ছায় বা অনিচ্ছাকৃতভাবে বন্ধ করা হয়।
+
+অ্যাপ্লিকেশন মালিক বা ক্লাস্টার অ্যাডমিনিস্ট্রেটররা ইচ্ছাকৃতভাবে স্বেচ্ছাকৃত ব্যাঘাত শুরু করেন।
+অনিচ্ছাকৃত ব্যাঘাতগুলি উদ্দেশ্যপ্রণোদিত নয়; সেগুলি নোডের রিসোর্স ফুরিয়ে যাওয়ার 
মতো অনিবার্য
+সমস্যার কারণে বা দুর্ঘটনাবশত মুছে ফেলার কারণে ট্রিগার হতে পারে।
diff --git a/content/bn/docs/reference/glossary/pod.md b/content/bn/docs/reference/glossary/pod.md new file mode 100644 index 0000000000000..e6b6ebc0d2d6d --- /dev/null +++ b/content/bn/docs/reference/glossary/pod.md @@ -0,0 +1,18 @@
+---
+title: পড
+id: pod
+date: 2018-04-12
+full_link: /bn/docs/concepts/workloads/pods/
+short_description: >
+  একটি পড আপনার ক্লাস্টারে চলমান কন্টেইনারগুলোর একটি সেট উপস্থাপন করে।
+
+aka:
+tags:
+- core-object
+- fundamental
+---
+ সবচেয়ে ছোট এবং সরল কুবারনেটিস অবজেক্ট। একটি পড আপনার ক্লাস্টারে চলমান {{< glossary_tooltip text="কন্টেইনারগুলোর" term_id="container" >}} একটি সেট উপস্থাপন করে।
+
+ একটি পড সাধারণত একটি একক প্রাথমিক কন্টেইনার চালানোর জন্য সেট আপ করা হয়। এটি ঐচ্ছিক সাইডকার কন্টেইনারগুলোও চালাতে পারে, যা লগিংয়ের মতো পরিপূরক ফিচার যুক্ত করে। পডগুলি সাধারণত একটি {{< glossary_tooltip term_id="deployment" >}} দ্বারা পরিচালিত হয়৷
diff --git a/content/bn/docs/reference/glossary/replication-controller.md b/content/bn/docs/reference/glossary/replication-controller.md new file mode 100644 index 0000000000000..0e542650a274f --- /dev/null +++ b/content/bn/docs/reference/glossary/replication-controller.md @@ -0,0 +1,25 @@
+---
+title: ReplicationController
+id: replication-controller
+date: 2018-04-12
+full_link:
+short_description: >
+  একটি (ডেপ্রিকেটেড) API অবজেক্ট যা একটি প্রতিলিপিকৃত অ্যাপ্লিকেশন পরিচালনা করে।
+
+aka:
+tags:
+- workload
+- core-object
+---
+ একটি ওয়ার্কলোড রিসোর্স যা একটি প্রতিলিপিকৃত অ্যাপ্লিকেশন পরিচালনা করে,
+নিশ্চিত করে যে একটি {{< glossary_tooltip text="পডের" term_id="pod" >}} একটি নির্দিষ্ট সংখ্যক ইনস্ট্যান্স চলছে৷
+
+কন্ট্রোল প্লেন নিশ্চিত করে যে নির্দিষ্ট সংখ্যক পড চলছে, এমনকি যদি কিছু পড ব্যর্থ হয়,
+যদি আপনি ম্যানুয়ালি পড মুছে ফেলেন, বা যদি ভুল করে অনেকগুলি শুরু করা হয়।
+
+{{< note >}}
+ReplicationController ডেপ্রিকেটেড করা হয়েছে। দেখুন
+{{< glossary_tooltip text="ডিপ্লয়মেন্ট" term_id="deployment" >}}, যা একই রকম।
+{{< /note >}}
diff --git a/content/bn/docs/reference/glossary/secret.md b/content/bn/docs/reference/glossary/secret.md new file mode 100644 index 0000000000000..f3a94ee75b01c --- /dev/null +++ b/content/bn/docs/reference/glossary/secret.md @@ -0,0 +1,27 @@
+---
+title: সিক্রেট
+id: secret
+date: 2018-04-12
+full_link: /bn/docs/concepts/configuration/secret/
+short_description: >
+  সংবেদনশীল তথ্য, যেমন পাসওয়ার্ড, OAuth টোকেন এবং ssh কী(keys) গুলো সংরক্ষণ করে।
+
+aka:
+tags:
+- core-object
+- security
+---
+সংবেদনশীল তথ্য, যেমন পাসওয়ার্ড, OAuth টোকেন এবং ssh কী(keys) গুলো সংরক্ষণ করে।
+
+সিক্রেটগুলো আপনাকে সংবেদনশীল তথ্য কীভাবে ব্যবহার করা হয় তার উপর আরও নিয়ন্ত্রণ দেয়
+এবং দুর্ঘটনাজনিত এক্সপোজারের ঝুঁকি হ্রাস করে। সিক্রেট ভ্যালুগুলি base64 স্ট্রিং হিসাবে এনকোড
+করা হয় এবং ডিফল্টরূপে এনক্রিপ্ট না করে সংরক্ষণ করা হয়, তবে
+[এনক্রিপশন অ্যাট রেস্ট(encryption at rest)](/bn/docs/tasks/administer-cluster/encrypt-data/#ensure-all-secrets-are-encrypted) এর জন্য কনফিগার করা যেতে পারে।
+
+একটি {{< glossary_tooltip text="পড" term_id="pod" >}} বিভিন্ন উপায়ে সিক্রেটকে উল্লেখ করতে পারে,
+যেমন একটি ভলিউম মাউন্ট বা এনভায়রনমেন্ট ভেরিয়েবল হিসাবে।
+সিক্রেটগুলি গোপনীয় ডেটার জন্য ডিজাইন করা হয়েছে এবং
+[কনফিগম্যাপগুলি](/bn/docs/tasks/configure-pod-container/configure-pod-configmap/)
+অ-গোপনীয় ডেটার জন্য ডিজাইন করা হয়েছে৷
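+
+কমান্ড লাইনে একটি সিক্রেট কীভাবে তৈরি করা যায় তার একটি ছোট উদাহরণ; নাম ও মানগুলি
+শুধুই অনুমানমূলক:
+
+```bash
+# লিটারাল মান থেকে একটি generic সিক্রেট তৈরি; kubectl মানগুলো base64 এনকোড করে সংরক্ষণ করে
+kubectl create secret generic db-credentials \
+  --from-literal=username=admin \
+  --from-literal=password='S3cr3t!'
+```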
diff --git a/content/bn/docs/reference/glossary/service-account.md b/content/bn/docs/reference/glossary/service-account.md new file mode 100644 index 0000000000000..b85c8d2b9b61b --- /dev/null +++ b/content/bn/docs/reference/glossary/service-account.md @@ -0,0 +1,19 @@
+---
+title: সার্ভিস অ্যাকাউন্ট
+id: service-account
+date: 2018-04-12
+full_link: /docs/tasks/configure-pod-container/configure-service-account/
+short_description: >
+  পডে চলমান প্রক্রিয়াগুলোর জন্য একটি পরিচিতি সরবরাহ করে।
+
+aka:
+tags:
+- fundamental
+- core-object
+---
+
+{{< glossary_tooltip text="পডে" term_id="pod" >}} চলমান প্রক্রিয়াগুলোর জন্য একটি পরিচিতি সরবরাহ করে।
+
+যখন পডের ভেতরের প্রক্রিয়াগুলি ক্লাস্টার অ্যাক্সেস করে, তখন তাদেরকে প্রাসঙ্গিক সার্ভিস অ্যাকাউন্ট হিসেবে API সার্ভার দ্বারা অথেনটিকেট(authenticate) করা হয়, উদাহরণস্বরূপ, `default`। যদি আপনি সার্ভিস অ্যাকাউন্ট নির্দিষ্ট না করে একটি পড তৈরি করেন, তাহলে স্বয়ংক্রিয়ভাবে একই {{< glossary_tooltip text="নেমস্পেস" term_id="namespace" >}}-এর ডিফল্ট সার্ভিস অ্যাকাউন্টটি নির্ধারণ করে দেওয়া হয়।
diff --git a/content/bn/docs/reference/glossary/sidecar-container.md b/content/bn/docs/reference/glossary/sidecar-container.md new file mode 100644 index 0000000000000..c0f1a78863a5d --- /dev/null +++ b/content/bn/docs/reference/glossary/sidecar-container.md @@ -0,0 +1,20 @@
+---
+title: সাইডকার কন্টেইনার (Sidecar Container)
+id: sidecar-container
+date: 2018-04-12
+full_link: /bn/docs/concepts/workloads/pods/sidecar-containers/
+short_description: >
+  একটি সহায়ক কন্টেইনার যা একটি পডের জীবনচক্র জুড়ে চলতে থাকে।
+tags:
+- fundamental
+---
+ এক বা একাধিক {{< glossary_tooltip text="কন্টেইনার" term_id="container">}}, যেগুলি সাধারণত কোনো অ্যাপ কন্টেইনার চলার আগে শুরু হয়।
+
+সাইডকার কন্টেইনারগুলি রেগুলার অ্যাপ কন্টেইনারগুলির মতো, কিন্তু একটি ভিন্ন উদ্দেশ্যে: সাইডকার প্রধান অ্যাপ কন্টেইনারকে একটি পড-লোকাল সার্ভিস প্রদান করে।
+{{< glossary_tooltip text="init কন্টেইনার" term_id="init-container" >}}-এর বিপরীতে,
+পড স্টার্টআপের পরেও সাইডকার কন্টেইনার চলতে থাকে।
+
+আরও তথ্যের জন্য [সাইডকার কন্টেইনার](/bn/docs/concepts/workloads/pods/sidecar-containers/) পড়ুন।
diff --git a/content/bn/docs/reference/glossary/statefulset.md b/content/bn/docs/reference/glossary/statefulset.md new file mode 100644 index 0000000000000..7371b69a77f48 --- /dev/null +++ b/content/bn/docs/reference/glossary/statefulset.md @@ -0,0 +1,22 @@
+---
+title: স্টেটফুলসেট
+id: statefulset
+date: 2018-04-12
+full_link: /bn/docs/concepts/workloads/controllers/statefulset/
+short_description: >
+  একটি স্টেটফুলসেট প্রতিটি পডের জন্য টেকসই স্টোরেজ এবং ক্রমাগত শনাক্তকারী সহ পডের একটি সেটের ডিপ্লয়মেন্ট এবং স্কেলিং পরিচালনা করে।
+
+aka:
+tags:
+- fundamental
+- core-object
+- workload
+- storage
+---
+ এটি একটি {{< glossary_tooltip text="পড" term_id="pod">}} সেটের ডিপ্লয়মেন্ট এবং স্কেলিং পরিচালনা করে, এবং পডগুলির *ক্রম এবং অনন্যতা সম্পর্কে গ্যারান্টি প্রদান করে*৷
+
+একটি {{< glossary_tooltip term_id="deployment" >}}-এর মতো, একটি স্টেটফুলসেট এমন পডগুলি পরিচালনা করে যা একটি অভিন্ন কন্টেইনার স্পেকের উপর ভিত্তি করে। একটি ডিপ্লয়মেন্টের থেকে ভিন্ন, একটি স্টেটফুলসেট তার প্রতিটি পডের জন্য একটি স্টিকি পরিচয় বজায় রাখে। এই পডগুলি একই স্পেক (spec) থেকে তৈরি করা হয়, কিন্তু বিনিময়যোগ্য নয়: প্রতিটিরই একটি ক্রমাগত শনাক্তকারী থাকে, যা এটি যেকোনো রিশিডিউলিং জুড়ে বজায় রাখে।
+
+আপনি যদি আপনার ওয়ার্কলোডের জন্য পার্সিস্টেন্স (persistence) প্রদান করতে স্টোরেজ ভলিউম ব্যবহার করতে চান, আপনি সমাধানের অংশ হিসাবে স্টেটফুলসেট ব্যবহার করতে পারেন। যদিও স্টেটফুলসেটে পৃথক পড ব্যর্থতার জন্য সংবেদনশীল, ক্রমাগত পড শনাক্তকারী ব্যর্থ হওয়া পডের প্রতিস্থাপন হিসাবে আসা নতুন পডের সাথে বিদ্যমান ভলিউমগুলিকে মেলানো সহজ করে তোলে।
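+
+volumeClaimTemplates সহ একটি স্টেটফুলসেট কেমন দেখায় তার একটি ন্যূনতম স্কেচ; নাম ও
+মানগুলি অনুমানমূলক উদাহরণ:
+
+```yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: web                    # পডগুলির স্থায়ী নাম হবে web-0, web-1, ...
+spec:
+  serviceName: "nginx"
+  replicas: 2
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+        volumeMounts:
+        - name: www
+          mountPath: /usr/share/nginx/html
+  volumeClaimTemplates:        # প্রতিটি পডের জন্য একটি করে PersistentVolumeClaim
+  - metadata:
+      name: www
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      resources:
+        requests:
+          storage: 1Gi
+```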
file mode 100644 index 0000000000000..b2b9fb395d432 --- /dev/null +++ b/content/bn/docs/reference/glossary/volume.md @@ -0,0 +1,20 @@ +--- +title: ভলিউম +id: volume +date: 2018-04-12 +full_link: /bn/docs/concepts/storage/volumes/ +short_description: > + ডাটা ধারণকারী একটি ডিরেক্টরি, একটি পডের কন্টেইনারে অ্যাক্সেসযোগ্য। + +aka: +tags: +- core-object +- fundamental +--- +ডাটা ধারণকারী একটি ডিরেক্টরি, একটি {{< glossary_tooltip term_id="pod" >}} এর {{< glossary_tooltip text="কন্টেইনারে" term_id="container" >}} অ্যাক্সেসযোগ্য। + + + +একটি কুবারনেটিস ভলিউম যতক্ষণ পড এটিকে ঘিরে থাকে ততক্ষণ বেঁচে থাকে। ফলস্বরূপ, একটি ভলিউম পডের মধ্যে চলা যেকোন কন্টেইনারের মধ্যে দিয়ে যায়, এবং ভলিউমের ডেটা কন্টেইনার রিস্টার্ট জুড়ে সংরক্ষণ করা হয়। + +আরও তথ্যের জন্য [স্টোরেজ](/bn/docs/concepts/storage/) দেখুন। diff --git a/content/bn/docs/reference/glossary/workload.md b/content/bn/docs/reference/glossary/workload.md new file mode 100644 index 0000000000000..7dd37c9a33a3c --- /dev/null +++ b/content/bn/docs/reference/glossary/workload.md @@ -0,0 +1,22 @@ +--- +title: ওয়ার্কলোড +id: workload +date: 2019-02-13 +full_link: /bn/docs/concepts/workloads/ +short_description: > + ওয়ার্কলোড হলো কুবারনেটিস-এ চলমান একটি অ্যাপ্লিকেশন। + +aka: +tags: +- fundamental +--- + ওয়ার্কলোড হলো কুবারনেটিস-এ চলমান একটি অ্যাপ্লিকেশন। + + + +বিভিন্ন কোর অবজেক্ট যা ওয়ার্কলোডের বিভিন্ন ধরণ বা অংশগুলির প্রতিনিধিত্ব করে তাদের মধ্যে ডেমনসেট(DaemonSet), +ডিপ্লয়মেন্ট, জব, রেপ্লিকাসেট(ReplicaSet), এবং স্টেটফুলসেট অবজেক্ট(StatefulSet objects) অন্তর্ভুক্ত। + +উদাহরণস্বরূপ, একটি ওয়ার্কলোড যেখানে একটি ওয়েব সার্ভার এবং একটি ডেটাবেস রয়েছে +তারা ডেটাবেসটি একটি {{< glossary_tooltip term_id="StatefulSet" >}} এ +এবং ওয়েব সার্ভারটি একটি {{< glossary_tooltip term_id="Deployment" >}} এ চালাতে পারে। diff --git a/content/bn/docs/setup/_index.md b/content/bn/docs/setup/_index.md new file mode 100644 index 0000000000000..fdb4f570fa970 --- /dev/null +++ b/content/bn/docs/setup/_index.md @@ -0,0 +1,67 @@ +--- +title: শুরু করা যাক +main_menu: true +weight: 20 +content_type: concept +no_list: true +card: + name: setup + weight: 20 + anchors: + - anchor: "#learning-environment" + title: লার্নিং পরিবেশ + - anchor: "#production-environment" + title: প্রোডাকশন পরিবেশ +--- + + + +এই বিভাগে কুবারনেটিস সেট আপ এবং চালানোর বিভিন্ন উপায় তালিকাভুক্ত করা হয়েছে। +আপনি যখন কুবারনেটিস ইনস্টল করেন, তখন : রক্ষণাবেক্ষণের সহজতা, নিরাপত্তা, +কন্ট্রোল, উপলব্ধ রিসোর্স, এবং একটি ক্লাস্টার পরিচালনা ও পরিচালনার জন্য প্রয়োজনীয় দক্ষতা এর উপর ভিত্তি করে একটি ইনস্টলেশন প্রকার নির্বাচন করুন । + +আপনি একটি কুবারনেটিস ক্লাস্টার স্থাপন করতে [কুবারনেটিস ডাউনলোড](/bn/releases/download/) করতে পারেন +একটি স্থানীয় মেশিনে, ক্লাউডে বা আপনার নিজস্ব ডেটাসেন্টারের জন্য। + +বেশ কিছু [কুবারনেটিস উপাদান](/bn/docs/concepts/overview/components/) যেমন {{< glossary_tooltip text="kube-apiserver" term_id="kube-apiserver" >}} বা {{< glossary_tooltip text="kube-proxy" term_id="kube-proxy" >}} +[কন্টেইনার ইমেজ](/bn/releases/download/#container-images) হিসাবেও ক্লাস্টারের মধ্যে স্থাপন করা যায়। + +যেখানেই সম্ভব কুবারনেটিস উপাদানগুলি কন্টেইনার ইমেজ হিসাবে রান করা , +এবং কুবারনেটিস দ্বারা সেই উপাদানগুলি পরিচালনা করার **সুপারিশ** করা হয়েছে । +যে উপাদানগুলি কন্টেইনার রান করে - বিশেষভাবে, kubelet - সেগুলিকে এই বিভাগে অন্তর্ভুক্ত করা যায় না। + +আপনি যদি নিজে একটি কুবারনেটিস ক্লাস্টার পরিচালনা করতে না চান, তাহলে আপনি একটি পরিচালিত পরিষেবা বাছাই করতে পারেন, যার মধ্যে রয়েছে +[সার্টিফাইড প্ল্যাটফর্ম](/bn/docs/setup/production-environment/turnkey-solutions/)। +এছাড়াও ক্লাউড এবং বিস্তৃত পরিসর জুড়ে অন্যান্য 
স্ট্যান্ডার্ডাইজেড এবং বেয়ার মেটাল পরিবেশ +সম্বলিত কাস্টম সমাধান রয়েছে। + + + +## লার্নিং পরিবেশ + +আপনি যদি কুবারনেটিস শিখে থাকেন, তাহলে কুবারনেটিস কমিউনিটি দ্বারা সমর্থিত টুল +বা ইকোসিস্টেমের টুল ব্যবহার করে লোকাল মেশিনে একটি কুবারনেটিস ক্লাস্টার +সেট আপ করতে [ইনস্টল টুলস](/bn/docs/tasks/tools/) দেখুন। + +## প্রোডাকশন পরিবেশ + +একটি [প্রোডাকশন পরিবেশ](/bn/docs/setup/production-environment/) এর সমাধান মূল্যায়ন করার সময় +বিবেচনা করুন একটি কুবারনেটিস ক্লাস্টার পরিচালনার কোন দিকগুলো (বা _abstractions_) আপনি +নিজে পরিচালনা করতে চান এবং কোন দিকগুলো আপনি +একটি প্রদানকারীর কাছে হস্তান্তর করতে পছন্দ করেন। + +নিজে একটি ক্লাস্টার পরিচালনার জন্য কুবারনেটিস-এর +আনুষ্ঠানিকভাবে সমর্থিত টুল [kubeadm](/bn/docs/setup/production-environment/tools/kubeadm/) রয়েছে। + +## {{% heading "whatsnext" %}} + +- [কুবারনেটিস ডাউনলোড করুন](/bn/releases/download/) +- `kubectl` সহ টুলগুলো ডাউনলোড এবং [ইনস্টল](/bn/docs/tasks/tools/) করুন +- আপনার নতুন ক্লাস্টারের জন্য একটি [কন্টেইনার রানটাইম](/bn/docs/setup/production-environment/container-runtimes/) নির্বাচন করুন +- ক্লাস্টার সেটআপের জন্য [সর্বোত্তম অনুশীলন](/bn/docs/setup/best-practices/) সম্পর্কে জানুন + +কুবারনেটিস-এর {{< glossary_tooltip term_id="control-plane" text="কন্ট্রোল প্লেন" >}} +লিনাক্সে চালানোর জন্য ডিজাইন করা হয়েছে। আপনার ক্লাস্টারের মধ্যে আপনি লিনাক্স বা উইন্ডোজসহ অন্যান্য অপারেটিং সিস্টেমে +অ্যাপ্লিকেশন চালাতে পারেন। + +- [উইন্ডোজ নোডের সাথে ক্লাস্টার সেট আপ](/bn/docs/concepts/windows/) শিখুন diff --git a/content/bn/docs/setup/best-practices/_index.md b/content/bn/docs/setup/best-practices/_index.md new file mode 100644 index 0000000000000..2a07325ea3ee7 --- /dev/null +++ b/content/bn/docs/setup/best-practices/_index.md @@ -0,0 +1,4 @@ +--- +title: সেরা অনুশীলন +weight: 40 +--- diff --git a/content/bn/docs/setup/learning-environment/_index.md b/content/bn/docs/setup/learning-environment/_index.md new file mode 100644 index 0000000000000..6e5a8a25b64ed --- /dev/null +++ b/content/bn/docs/setup/learning-environment/_index.md @@ -0,0 +1,28 @@ +--- +title: লার্নিং এনভায়রনমেন্ট +weight: 20 +--- + + + + diff --git a/content/bn/docs/setup/production-environment/_index.md b/content/bn/docs/setup/production-environment/_index.md new file mode 100644 index 0000000000000..3453b38a736dd --- /dev/null +++ b/content/bn/docs/setup/production-environment/_index.md @@ -0,0 +1,301 @@ +--- +title: "প্রোডাকশন এনভায়রনমেন্ট" +description: একটি প্রোডাকশন-কোয়ালিটির কুবারনেটিস ক্লাস্টার তৈরি করুন +weight: 30 +no_list: true +--- + + +একটি প্রোডাকশন-কোয়ালিটির কুবারনেটিস ক্লাস্টারের জন্য পরিকল্পনা এবং প্রস্তুতি প্রয়োজন। +আপনার কুবারনেটিস ক্লাস্টারে যদি গুরুত্বপূর্ণ ওয়ার্কলোড চালাতে হয়, তাহলে এটিকে অবশ্যই স্থিতিস্থাপক হওয়ার জন্য কনফিগার করতে হবে। +এই পৃষ্ঠাটি ব্যাখ্যা করে একটি প্রোডাকশন-রেডি ক্লাস্টার সেট আপ করতে আপনি কী কী পদক্ষেপ নিতে পারেন, +অথবা একটি বিদ্যমান ক্লাস্টারকে কীভাবে প্রোডাকশন ব্যবহারের জন্য উন্নীত করতে পারেন। +আপনি যদি ইতিমধ্যেই প্রোডাকশন সেটআপের সাথে পরিচিত হন এবং লিঙ্কগুলি চান তবে সরাসরি +[পরবর্তী কি](#what-s-next) তে যান। + + + +## প্রোডাকশন বিবেচনা + +সাধারণত, একটি প্রোডাকশন কুবারনেটিস ক্লাস্টার এনভায়রনমেন্টে ব্যক্তিগত শিক্ষা, ডেভেলপমেন্ট বা টেস্ট +এনভায়রনমেন্টের কুবারনেটিস-এর চেয়ে বেশি প্রয়োজনীয়তা থাকে। একটি প্রোডাকশন এনভায়রনমেন্টে প্রয়োজন হতে পারে +অনেক ব্যবহারকারীর নিরাপদ অ্যাক্সেস, ধারাবাহিক প্রাপ্যতা, এবং চাহিদার পরিবর্তনে মানিয়ে নেওয়ার +জন্য রিসোর্স। + +আপনার প্রোডাকশন কুবারনেটিস এনভায়রনমেন্ট কোথায় থাকবে তা নির্ধারণ করুন +(অন-প্রেমিসেস বা একটি ক্লাউডে) এবং আপনি যে পরিমাণ ব্যবস্থাপনা নিজে নিতে চান বা অন্যদের হাতে দিতে চান, 
+বিবেচনা করুন কিভাবে একটি কুবারনেটিস ক্লাস্টারের জন্য আপনার প্রয়োজনীয়তাগুলি +নিম্নলিখিত সমস্যাগুলির দ্বারা প্রভাবিত হয়: + +- *প্রাপ্যতা*: একটি একক-মেশিন কুবারনেটিস [লার্নিং এনভায়রনমেন্ট](/bn/docs/setup/#learning-environment) + ব্যর্থতার একক পয়েন্ট আছে। একটি অত্যন্ত প্রাপ্য ক্লাস্টার তৈরি করার অর্থ বিবেচনা করা: + - ওয়ার্কার নোড থেকে কন্ট্রোল প্লেন আলাদা করা। + - মাল্টিপল নোডের উপর কন্ট্রোল প্লেন কম্পোনেন্ট প্রতিলিপি করা। + - ক্লাস্টারের {{< glossary_tooltip term_id="kube-apiserver" text="API সার্ভার" >}}-এ ব্যালেন্সিং ট্রাফিক লোড করুন। + - পর্যাপ্ত ওয়ার্কার নোড এভেইল্যাবল থাকা, বা দ্রুত এভেইল্যাবল হতে সক্ষম, কারণ পরিবর্তিত ওয়ার্কলোড এটি নিশ্চিত করে। + +- *স্কেল*: আপনি যদি আশা করেন আপনার প্রোডাকশন কুবারনেটিস এনভায়রনমেন্ট একটি স্থিতিশীল পরিমাণে + চাহিদা পাবে, তাহলে আপনি আপনার প্রয়োজনীয় ক্ষমতার জন্য সেট আপ করতে সক্ষম হবেন এবং সম্পন্ন হবেন। কিন্তু, + যদি সময়ের সাথে সাথে আপনার চাহিদা বাড়ে বা নাটকীয়ভাবে পরিবর্তন করে সিজন বা বিশেষ ইভেন্টের + মতো জিনিসের উপর ভিত্তি করে, তাহলে আপনাকে পরিকল্পনা করতে হবে কিভাবে কন্ট্রোল প্লেন এবং ওয়ার্কার নোডগুলিতে + আরও অনুরোধ থেকে বর্ধিত চাপ কমাতে বা অব্যবহৃত রিসোর্সগুলি কমাতে স্কেল + কমাতে হবে। + +- *নিরাপত্তা এবং অ্যাক্সেস ব্যবস্থাপনা*: আপনার নিজের কুবারনেটিস লার্নিং ক্লাস্টারে আপনার সম্পূর্ণ + অ্যাডমিন সুবিধা রয়েছে। কিন্তু গুরুত্বপূর্ণ ওয়ার্কলোড সহ ভাগ করা ক্লাস্টার, এবং + এক বা দুইজনের বেশি ব্যবহারকারীর জন্য কে এবং কি ক্লাস্টার রিসোর্স অ্যাক্সেস করতে পারে তার + জন্য আরও পরিমার্জিত পদ্ধতির প্রয়োজন। আপনি রোল-বেসড অ্যাক্সেস কন্ট্রোল + ([RBAC](/bn/docs/reference/access-authn-authz/rbac/)) এবং অন্যান্য + নিরাপত্তা ব্যবস্থা ব্যবহার করতে পারেন যাতে ব্যবহারকারী এবং ওয়ার্কলোড তাদের প্রয়োজনীয় + রিসোর্সগুলিতে অ্যাক্সেস পেতে পারে ওয়ার্কলোড, এবং ক্লাস্টার নিজেই নিরাপদ। + [পলিসি](/bn/docs/concepts/policy/) এবং + [কন্টেইনার রিসোর্স](/bn/docs/concepts/configuration/manage-resources-containers/) পরিচালনার মাধ্যমে + ব্যবহারকারী এবং ওয়ার্কলোড যে রিসোর্সগুলি অ্যাক্সেস করতে পারে তার উপর আপনি সীমা নির্ধারণ করতে পারেন। + +কুবারনেটিস প্রোডাকশন এনভায়রনমেন্ট নিজে থেকে তৈরি করার আগে, এই +কাজের কিছু বা সমস্ত কিছু +[টার্নকি ক্লাউড সলিউশন](/bn/docs/setup/production-environment/turnkey-solutions/) +প্রদানকারী বা অন্যান্য [কুবারনেটিস পার্টনারস](/bn/partners/) এর কাছে হস্তান্তর করার কথা বিবেচনা করুন। +বিকল্প অন্তর্ভুক্ত: + +- *সার্ভারহীন*: কোনও ক্লাস্টার পরিচালনা না করেই কেবল তৃতীয় পক্ষের + টুলসগুলিতে ওয়ার্কলোড চালান। CPU ব্যবহার, মেমরি এবং ডিস্ক অনুরোধের মতো জিনিসগুলির জন্য আপনাকে + চার্জ করা হবে। +- *ম্যানেজড কন্ট্রোল প্লেন*: প্রদানকারীকে স্কেল এবং প্রাপ্যতা পরিচালনা করতে দিন + ক্লাস্টারের কন্ট্রোল প্লেন, সেইসাথে প্যাচ এবং আপগ্রেড হ্যান্ডেল করতে দিন। +- *ম্যানেজড ওয়ার্কার নোড*: আপনার প্রয়োজন মেটাতে নোডের পুল কনফিগার করুন, + তারপর প্রদানকারী নিশ্চিত করে যে সেই নোডগুলি আয়ত্তগত এবং প্রয়োজনে আপগ্রেড + বাস্তবায়নের জন্য প্রস্তুত। +- *ইন্টিগ্রেশন*: এমন প্রদানকারী আছে যারা কুবারনেটিসকে অন্য সার্ভিসের সাথে একীভূত করে + যা আপনার প্রয়োজন হতে পারে, যেমন স্টোরেজ, কন্টেইনার রেজিস্ট্রি, অথেনটিকেশন + পদ্ধতি, এবং ডেভেলপমেন্ট টুলস। + +আপনি নিজে একটি প্রোডাকশন কুবারনেটিস ক্লাস্টার তৈরি করুন বা অংশীদার এর সাথে +কাজ করুন, নিম্নলিখিত বিভাগগুলি পর্যালোচনা করুন যাতে আপনার প্রয়োজনগুলি মূল্যায়ন করা যায় +আপনার ক্লাস্টারের *কন্ট্রোল প্লেন*, *ওয়ার্কার নোড*, *ইউজার অ্যাক্সেস*, এবং +*ওয়ার্কলোড রিসোর্স*। + +## প্রোডাকশন ক্লাস্টার সেটআপ + +একটি প্রোডাকশন-কোয়ালিটির কুবারনেটিস ক্লাস্টারে, কন্ট্রোল প্লেন সার্ভিসগুলি থেকে +ক্লাস্টার পরিচালনা করে যা মাল্টিপল কম্পিউটারে ছড়িয়ে দেওয়া যেতে পারে +বিভিন্ন উপায়ে. 
প্রতিটি ওয়ার্কার নোড, যাইহোক, একটি একক সত্তা প্রতিনিধিত্ব করে যে +কুবারনেটিস পড চালানোর জন্য কনফিগার করা হয়েছে। + +### প্রোডাকশন কন্ট্রোল প্লেন + +সহজতম কুবারনেটিস ক্লাস্টারে একই মেশিনে পুরো কন্ট্রোল প্লেন এবং ওয়ার্কার +নোড সার্ভিসগুলি চলে। আপনি ওয়ার্কার নোডগুলি যোগ করে সেই এনভায়রনমেন্ট +বাড়াতে পারেন, যেমনটি [কুবারনেটিস কম্পোনেন্ট](/bn/docs/concepts/overview/components/) এ +চিত্রিত চিত্রে প্রতিফলিত হয়েছে। +যদি ক্লাস্টারটি অল্প সময়ের জন্য পাওয়া যায় বা কিছু গুরুতর ভুল হয়ে গেলে তা +বাতিল করা যেতে পারে, তাহলে এটি আপনার প্রয়োজন মেটাতে পারে। + +আপনার যদি আরও স্থায়ী, অত্যন্ত সহজলভ্য ক্লাস্টারের প্রয়োজন হয় তবে, আপনার কন্ট্রোল প্লেন +প্রসারিত করার উপায়গুলি বিবেচনা করা উচিত। নকশা অনুসারে, একক মেশিনে চলমান +এক-মেশিন কন্ট্রোল প্লেন সার্ভিসগুলি খুব বেশি সহজলভ্য নয়। +যদি ক্লাস্টার চালু রাখা এবং চালানো +এবং কিছু ভুল হয়ে গেলে তা মেরামত করা যায় তা নিশ্চিত করা গুরুত্বপূর্ণ, +এই পদক্ষেপগুলি বিবেচনা করুন: + +- *ডিপ্লয়মেন্ট টুল নির্বাচন করুন*: আপনি kubeadm, kops, এবং kubespray-এর মতো + টুল ব্যবহার করে একটি কন্ট্রোল প্লেন স্থাপন করতে পারেন। প্রতিটি + ডিপ্লয়মেন্ট পদ্ধতি ব্যবহার করে প্রোডাকশন-কোয়ালিটির ডিপ্লয়মেন্টের জন্য টিপস শিখতে + [ডিপ্লয়মেন্ট টুলের সাথে কুবারনেটিস ইনস্টল করার](/bn/docs/setup/production-environment/tools/) পদ্ধতি + দেখুন। বিভিন্ন [কন্টেইনার রানটাইম](/bn/docs/setup/production-environment/container-runtimes/) + আপনার ডিপ্লয়মেন্টের সাথে ব্যবহার করার জন্য সহজলভ্য। +- *সার্টিফিকেট পরিচালনা করুন*: কন্ট্রোল প্লেন সার্ভিসগুলির মধ্যে সুরক্ষিত যোগাযোগ সার্টিফিকেট + ব্যবহার করে প্রয়োগ করা হয়। সার্টিফিকেটগুলি স্থাপনের সময় স্বয়ংক্রিয়ভাবে তৈরি হয় বা + আপনি আপনার নিজের সার্টিফিকেট কর্তৃপক্ষ ব্যবহার করে সেগুলি তৈরি করতে পারেন৷ + বিস্তারিত জানার জন্য [PKI সার্টিফিকেট এবং প্রয়োজনীয়তা](/bn/docs/setup/best-practices/certificates/) দেখুন। +- *apiserver এর জন্য লোড ব্যালেন্সার কনফিগার করুন*: বিভিন্ন নোডে চলমান apiserver + সার্ভিস দৃষ্টান্তগুলিতে বহিরাগত API অনুরোধগুলি বিতরণ করতে একটি লোড ব্যালেন্সার কনফিগার করুন। দেখুন + [একটি বাহ্যিক লোড ব্যালেন্সার তৈরি করুন](/bn/docs/tasks/access-application-cluster/create-external-load-balancer/) + বিস্তারিত জানার জন্য. 
+- *পৃথক এবং ব্যাকআপ etcd সার্ভিস*: etcd সার্ভিসগুলি হয় অন্যান্য কন্ট্রোল প্লেন + সার্ভিসগুলির মতো একই মেশিনে চলতে পারে বা অতিরিক্ত নিরাপত্তা এবং প্রাপ্যতার জন্য + আলাদা মেশিনে চলতে পারে। যেহেতু etcd ক্লাস্টার কনফিগারেশন ডেটা সঞ্চয় করে, + তাই etcd ডাটাবেসের ব্যাকআপ নিয়মিত করা উচিত যাতে আপনি প্রয়োজনে + সেই ডাটাবেসটি মেরামত করতে পারেন। + etcd কনফিগার করা এবং ব্যবহার করার বিষয়ে বিস্তারিত জানার জন্য [etcd FAQ](https://etcd.io/docs/v3.5/faq/) দেখুন। + [কুবারনেটিস-এর জন্য অপারেটিং etcd ক্লাস্টার](/bn/docs/tasks/administer-cluster/configure-upgrade-etcd/) + দেখুন এবং [kubeadm-এর সাথে একটি উচ্চ প্রাপ্যতা etcd ক্লাস্টার সেট আপ করুন](/bn/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) + বিস্তারিত জানার জন্য। +- *মাল্টিপল কন্ট্রোল প্লেন সিস্টেম তৈরি করুন*: উচ্চ প্রাপ্যতা জন্য, + কন্ট্রোল প্লেন একটি একক মেশিনে সীমাবদ্ধ করা উচিত নয়। যদি কন্ট্রোল প্লেন + সার্ভিসগুলি একটি init সার্ভিস দ্বারা চালিত হয় (যেমন systemd), প্রতিটি সার্ভিস কমপক্ষে + তিনটি মেশিনে চালানো উচিত। যাইহোক, কুবারনেটিস-এ পড হিসাবে কন্ট্রোল প্লেন সার্ভিসগুলি চালানো + নিশ্চিত করে যে আপনার অনুরোধ করা সার্ভিসগুলির প্রতিলিপিকৃত সংখ্যা + সর্বদা সহজলভ্য থাকবে৷ + সময়সূচী ত্রুটি সহনশীল হওয়া উচিত, + কিন্তু অত্যন্ত সহজলভ্য নয়। কুবারনেটিস সার্ভিসগুলির লিডার নির্বাচন করার জন্য + কিছু ডিপ্লয়মেন্টে টুলগুলি [Raft](https://raft.github.io/) সম্মতিমূলক অ্যালগরিদম সেট আপ করে৷ যদি + প্রাইমারিটা চলে যায়, অন্য একটি সার্ভিস নিজেকে নির্বাচন করে এবং দখল করে নেয়। +- *মাল্টিপল জোন স্প্যান করুন*: যদি আপনার ক্লাস্টারকে সর্বদা এভেইল্যাবল রাখা + গুরুত্বপূর্ণ হয়, তবে একটি ক্লাস্টার তৈরি করার কথা বিবেচনা করুন যা মাল্টিপল ডেটা সেন্টার জুড়ে চলে, + ক্লাউড এনভায়রনমেন্টে জোন হিসাবে উল্লেখ করা হয়। জোনের গ্রুপগুলিকে রিজিওন হিসাবে উল্লেখ করা হয়। + একই রিজিওনে মাল্টিপল জোনে + একটি ক্লাস্টার ছড়িয়ে দেওয়ার মাধ্যমে, এটি একটি জোন অপ্রাপ্য হয়ে গেলেও + আপনার ক্লাস্টারটি কাজ করা চালিয়ে যাওয়ার সম্ভাবনাকে উন্নত করতে পারে। + বিস্তারিত জানার জন্য [মাল্টিপল জোনে চলমান](/bn/docs/setup/best-practices/multiple-zones/) দেখুন। +- *চলমান ফিচারগুলি পরিচালনা করুন*: আপনি যদি সময়ের সাথে সাথে আপনার ক্লাস্টার রাখার পরিকল্পনা করেন + তবে এর স্বাস্থ্য এবং নিরাপত্তা বজায় রাখার জন্য আপনাকে কিছু কাজ করতে হবে। উদাহরণস্বরূপ, + আপনি যদি kubeadm-এর সাথে ইনস্টল করেন, তাহলে আপনাকে + [সার্টিফিকেট ম্যানেজমেন্ট](/bn/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/) + এবং [kubeadm ক্লাস্টার আপগ্রেড করা](/bn/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/) সাহায্য করার জন্য নির্দেশাবলী রয়েছে। + কুবারনেটিস অ্যাডমিনিস্ট্রেটিভ কাজগুলির একটি দীর্ঘ তালিকার জন্য + [একটি ক্লাস্টার পরিচালনা](/bn/docs/tasks/administer-cluster/) দেখুন। + +আপনি যখন কন্ট্রোল প্লেন সার্ভিসগুলি চালান তখন এভেইল্যাবল বিকল্পগুলি সম্পর্কে জানতে, +[kube-apiserver](/bn/docs/reference/command-line-tools-reference/kube-apiserver/), +[kube-controller-manager](/bn/docs/reference/command-line-tools-reference/kube-controller-manager/) দেখুন, +এবং [kube-scheduler](/bn/docs/reference/command-line-tools-reference/kube-scheduler/) +কম্পোনেন্ট পৃষ্ঠা। হাইলি এভেইল্যাবল কন্ট্রোল প্লেন উদাহরণের জন্য, +[হাইলি এভেইল্যাবল টপোলজির বিকল্পগুলি](/bn/docs/setup/production-environment/tools/kubeadm/ha-topology/), +[kubeadm-এর সাহায্যে হাইলি এভেইল্যাবল ক্লাস্টার তৈরি করা](/bn/docs/setup/production-environment/tools/kubeadm/high-availability/) দেখুন, +এবং [অপারেটিং etcd ক্লাস্টার-এর জন্য কুবারনেটিস](/bn/docs/tasks/administer-cluster/configure-upgrade-etcd/)। +একটি etcd ব্যাকআপ প্ল্যান তৈরির তথ্যের জন্য +[একটি etcd ক্লাস্টার ব্যাক আপ 
করা](/bn/docs/tasks/administer-cluster/configure-upgrade-etcd/#backing-up-an-etcd-cluster) দেখুন। + +### প্রোডাকশন ওয়ার্কার নোড + +প্রোডাকশন-কোয়ালিটির ওয়ার্কলোড স্থিতিস্থাপক হতে হবে এবং তারা যেকোন কিছুর উপর নির্ভর করে +স্থিতিস্থাপক হতে হবে (যেমন CoreDNS)। আপনি নিজের কন্ট্রোল প্লেন +পরিচালনা করুন বা একটি ক্লাউড প্রদানকারী আপনার জন্য এটা করে, আপনাকে +বিবেচনা করতে হবে কিভাবে আপনি আপনার ওয়ার্কার নোডগুলি পরিচালনা করতে চান (এছাড়াও সহজভাবে +*নোড* হিসাবে উল্লেখ করা হয়)। + +- *নোডগুলি কনফিগার করুন*: নোডগুলি ফিজিক্যাল বা ভার্চুয়াল মেশিন হতে পারে। আপনি যদি + নিজের নোডগুলি তৈরি করতে এবং পরিচালনা করতে চান তবে আপনি একটি সমর্থিত অপারেটিং সিস্টেম ইনস্টল করতে পারেন, + তারপরে উপযুক্ত [নোড সার্ভিসগুলি](/bn/docs/concepts/overview/components/#node-components) + যোগ করুন এবং চালান। বিবেচনা করুন: + - উপযুক্ত মেমরি, সিপি উ, এবং ডিস্কের গতি এবং স্টোরেজ ক্ষমতা এভেইল্যাবল থাকার মাধ্যমে আপনি যখন নোড সেট আপ করেন তখন আপনার ওয়ার্কলোডের চাহিদা। + - জেনেরিক কম্পিউটার সিস্টেমগুলি করবে কিনা বা আপনার কাছে এমন ওয়ার্কলোড আছে যেগুলির জন্য জিপিউ প্রসেসর, উইন্ডোজ নোড, বা ভিএম আইসোলেশন প্রয়োজন। +- *ভ্যালিডেট নোড*: কিভাবে একটি নোড একটি কুবারনেটিস ক্লাস্টারে + যোগদানের প্রয়োজনীয়তা পূরণ করে তা নিশ্চিত করার তথ্যের জন্য + [ভ্যালিড নোড সেটআপ](/bn/docs/setup/best-practices/node-conformance/) দেখুন। +- *ক্লাস্টারে নোড যোগ করুন*: আপনি যদি নিজের ক্লাস্টার পরিচালনা করেন তাহলে আপনি + আপনার নিজস্ব মেশিন সেট আপ করে নোড যোগ করতে পারেন এবং হয় সেগুলিকে ম্যানুয়ালি যোগ করে অথবা + ক্লাস্টারের apiserver এ নিজেদের নিবন্ধন করতে পারেন। এই উপায়ে + নোড যোগ করার জন্য কুবারনেটিস কিভাবে সেট আপ করতে হয় সে সম্পর্কে তথ্যের জন্য [নোডসমুহ](/bn/docs/concepts/architecture/nodes/) বিভাগটি দেখুন। +- *নোড স্কেল করুন*: আপনার ক্লাস্টারের শেষ পর্যন্ত যে ক্ষমতা প্রয়োজন তা প্রসারিত করার জন্য একটি + পরিকল্পনা করুন। আপনার চালানোর জন্য কতগুলি পড এবং কন্টেইনার প্রয়োজন + তার উপর ভিত্তি করে আপনার কতগুলি নোড প্রয়োজন তা নির্ধারণ করতে সাহায্য করতে + [বড় ক্লাস্টারগুলির জন্য বিবেচনা](/bn/docs/setup/best-practices/cluster-large/) দেখুন। আপনি যদি নিজে নোড পরিচালনা করেন, তাহলে এর অর্থ + হতে পারে আপনার নিজের ফিজিক্যাল টুল কেনা এবং ইনস্টল করা। +- *নোড অটোস্কেল করুন*: আপনার নোডগুলি স্বয়ংক্রিয়ভাবে পরিচালনা করার জন্য উপলব্ধ সরঞ্জামগুলি এবং + তাদের সরবরাহ করা ক্ষমতা সম্পর্কে জানতে + [Cluster Autoscaling](/docs/concepts/cluster-administration/cluster-autoscaling) পড়ুন। +- *নোড স্বাস্থ্য পরীক্ষা সেট আপ করুন*: গুরুত্বপূর্ণ ওয়ার্কলোডের জন্য, আপনি নিশ্চিত করতে চান যে + সেই নোডগুলিতে চলমান নোড এবং পডগুলি স্বাস্থ্যকর। + [নোড প্রবলেম ডিটেক্টর](/bn/docs/tasks/debug-application-cluster/monitor-node-health/) + daemon ব্যবহার করে, আপনি নিশ্চিত করতে পারেন আপনার নোডগুলি সুস্থ। + +## প্রোডাকশন ব্যবহারকারী ব্যবস্থাপনা + +প্রোডাকশনে, আপনি হয়ত এমন একটি মডেল থেকে সরে যাচ্ছেন যেখানে আপনি বা একটি ছোট গ্রুপ +ক্লাস্টারে প্রবেশ করছেন যেখানে সম্ভাব্য কয়েক ডজন বা +শত শত লোক থাকতে পারে। একটি লার্নিং এনভায়রনমেন্ট বা প্ল্যাটফর্ম প্রোটোটাইপে, আপনি যা করেন তার জন্য +আপনার একটি একক অ্যাডমিনিস্ট্রেটিভ অ্যাকাউন্ট থাকতে পারে । প্রোডাকশনে, আপনি +বিভিন্ন নেমস্পেসে বিভিন্ন স্তরের অ্যাক্সেস সহ আরও অ্যাকাউন্ট চাইবেন। + +একটি প্রোডাকশন-কোয়ালিটির ক্লাস্টার নেওয়ার অর্থ হল আপনি কীভাবে +বেছে বেছে অন্য ব্যবহারকারীদের অ্যাক্সেসের অনুমতি দিতে চান তা নির্ধারণ করা। বিশেষ করে, যারা আপনার +ক্লাস্টার (অথেনটিকেশন) অ্যাক্সেস করার চেষ্টা করে তাদের পরিচয় যাচাই +করার জন্য এবং তারা যা জিজ্ঞাসা করছে (অথোরাইজেশন) করার অনুমতি আছে কিনা তা নির্ধারণ করার জন্য আপনাকে +কৌশল নির্বাচন করতে হবেঃ + +- *অথেনটিকেশন*: apiserver ব্যবহারকারীদের অথেনটিকেশন করতে পারে ক্লায়েন্ট সার্টিফিকেট, + bearer টোকেন, একটি অথেনটিকেশন প্রক্সি, বা HTTP 
মৌলিক অথেনটিকেশন ব্যবহার করে। + আপনি কোন অথেনটিকেশন পদ্ধতি ব্যবহার করতে চান তা নির্বাচন করতে পারেন। + প্লাগইন ব্যবহার করে, apiserver আপনার প্রতিষ্ঠানের বিদ্যমান সুবিধা নিতে পারে + অথেনটিকেশন পদ্ধতি, যেমন LDAP বা Kerberos। দেখা + [অথেনটিকেশন](/bn/docs/reference/access-authn-authz/authentication/) + কুবারনেটিস ব্যবহারকারীদের অথেনটিকেশনের এই বিভিন্ন পদ্ধতির বর্ণনার জন্য। +- *অথোরাইজেশন*: আপনি যখন আপনার নিয়মিত ব্যবহারকারীদের অথোরাইজেশন করার জন্য প্রস্তুত হন, আপনি সম্ভবত + RBAC এবং ABAC অথোরাইজেশনের মধ্যে বেছে নেবেন। ব্যবহারকারীর অ্যাকাউন্ট অথোরাইজেশনের জন্য বিভিন্ন মোড পর্যালোচনা করতে + [অথোরাইজেশন ওভারভিউ](/bn/docs/reference/access-authn-authz/authorization/) দেখুন (সেইসাথে আপনার ক্লাস্টারে সার্ভিস + অ্যাকাউন্ট অ্যাক্সেস): + - *রোল-বেসড অ্যাক্সেস কন্ট্রোল* ([RBAC](/bn/docs/reference/access-authn-authz/rbac/)): + অথেন্টিকেটেড ব্যবহারকারীদের নির্দিষ্ট সেটের অনুমতি প্রদান করে আপনাকে আপনার ক্লাস্টারে অ্যাক্সেস বরাদ্দ করতে দেয়। + একটি নির্দিষ্ট নেমস্পেস (Role) বা সমগ্র ক্লাস্টার জুড়ে (ClusterRole) অনুমতিগুলি বরাদ্দ + করা যেতে পারে। তারপর RoleBindings এবং ClusterRoleBindings ব্যবহার করে, সেই অনুমতিগুলি নির্দিষ্ট ব্যবহারকারীদের সাথে + সংযুক্ত করা যেতে পারে। + - *অ্যাট্রিবিউট-বেসড অ্যাক্সেস কন্ট্রোল* ([ABAC](/bn/docs/reference/access-authn-authz/abac/)): + আপনাকে ক্লাস্টারে রিসোর্স অ্যাট্রিবিউটের উপর ভিত্তি করে পলিসি তৈরি করতে দেয় এবং সেই অ্যাট্রিবিউটগুলির উপর ভিত্তি করে অ্যাক্সেসের + অনুমতি দেয় বা অস্বীকার করে। একটি পলিসি ফাইলের প্রতিটি লাইন ভার্শনিং বৈশিষ্ট্য (apiVersion + এবং kind) এবং বিষয় (ব্যবহারকারী বা গ্রুপ), রিসোর্স বৈশিষ্ট্য, + নন-রিসোর্সে বৈশিষ্ট্য (/version বা /apis) এবং শুধুমাত্র পঠনযোগ্য বৈশিষ্ট্যের সাথে মেলে বিশেষ বৈশিষ্ট্যগুলির একটি মানচিত্র সনাক্ত করে। + বিস্তারিত জানার জন্য [উদাহরণ](/bn/docs/reference/access-authn-authz/abac/#examples) দেখুন. 
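+ +উপরের RBAC পদ্ধতিটি বাস্তবে কেমন দেখায় তার একটি ন্যূনতম স্কেচ নিচে দেওয়া হলো: একটি নির্দিষ্ট নেমস্পেসে পড পড়ার অনুমতিসহ একটি Role, এবং সেই Role-টিকে একজন ব্যবহারকারীর সাথে যুক্ত করার একটি RoleBinding। এখানে `jane` ব্যবহারকারী, `default` নেমস্পেস এবং `pod-reader`/`read-pods` নামগুলো শুধুই উদাহরণ হিসেবে ধরে নেওয়া হয়েছে: + +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: +  namespace: default # অনুমতিগুলো শুধু এই নেমস্পেসেই প্রযোজ্য +  name: pod-reader +rules: +- apiGroups: [""] # "" মানে কোর API গ্রুপ +  resources: ["pods"] +  verbs: ["get", "watch", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: +  name: read-pods +  namespace: default +subjects: +- kind: User +  name: jane # উদাহরণ হিসেবে ধরে নেওয়া একজন ব্যবহারকারী +  apiGroup: rbac.authorization.k8s.io +roleRef: +  kind: Role # উপরের pod-reader Role-টির সাথে বাইন্ড করা হচ্ছে +  name: pod-reader +  apiGroup: rbac.authorization.k8s.io +``` + +এই ম্যানিফেস্ট প্রয়োগ করলে (যেমন `kubectl apply -f` দিয়ে) `jane` শুধুমাত্র `default` নেমস্পেসের পডগুলো পড়তে পারবেন; পরিবর্তন বা মুছে ফেলতে পারবেন না।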
+ +যেহেতু কেউ আপনার প্রোডাকশন কুবারনেটস ক্লাস্টারে অথেনটিকেশন এবং অথোরাইজেশন সেট আপ করছে, এখানে কিছু বিষয় বিবেচনা করার আছেঃ + +- *অথোরাইজেশন মোড সেট করুন*: যখন Kubernetes API সার্ভার + ([kube-apiserver](/bn/docs/reference/command-line-tools-reference/kube-apiserver/)) শুরু হয়, + তখন সমর্থিত অথেনটিকেশন মোডগুলি *--authorization-mode* ফ্ল্যাগ ব্যবহার করে সেট করতে হবে। + উদাহরণস্বরূপ, *kube-adminserver.yaml* ফাইলে (*/etc/kubernetes/manifests*-এ) সেই ফ্ল্যাগটি + Node,RBAC-তে সেট করা যেতে পারে। এটি অথেন্টিকেটেড অনুরোধের জন্য নোড এবং RBAC অথোরাইজেশনের অনুমতি দেবে। +- *ব্যবহারকারী সার্টিফিকেট এবং রোল বাইন্ডিং (RBAC)ভর্তি নিয়ন্ত্রক বিবেচনা করুন*: আপনি যদি RBAC অথোরাইজেশন ব্যবহার করেন, + ব্যবহারকারীরা একটি CertificateSigningRequest (CSR) তৈরি করতে পারে যা ক্লাস্টার + CA দ্বারা স্বাক্ষরিত হতে পারে। তারপর আপনি প্রতিটি ব্যবহারকারীর রোল এবং ক্লাস্টার রোল বাইন্ড করতে পারেন। + বিস্তারিত জানার জন্য [সার্টিফিকেট স্বাক্ষরের অনুরোধ](/bn/docs/reference/access-authn-authz/certificate-signing-requests/) + দেখুন। +- *অ্যাট্রিবিউটগুলিকে একত্রিত করে এমন পলিসি তৈরি করুন (ABAC)*: আপনি যদি ABAC অথোরাইজেশন ব্যবহার করেন, + আপনি নির্দিষ্ট রিসোর্সগুলি (যেমন একটি পড), নেমস্পেস, বা apiGroup অ্যাক্সেস করার জন্য নির্বাচিত + ব্যবহারকারী বা গ্রুপগুলিকে অথোরাইজেশন করার জন্য পলিসিগুলি গঠনের জন্য অ্যাট্রিবিউটগুলিকে সংমিশ্রণ + বরাদ্দ করতে পারেন। আরও তথ্যের জন্য, + [উদাহরণ](/bn/docs/reference/access-authn-authz/abac/#examples) দেখুন। +- *অ্যাডমিশন কন্ট্রোলারদের বিবেচনা করুন*: API সার্ভারের মাধ্যমে যে অনুরোধগুলি + আসতে পারে তার অথোরাইজেশনের অতিরিক্ত ফর্মগুলির মধ্যে রয়েছে + [ওয়েবহুক টোকেন অথেনটিকেশন](/bn/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)। + API সার্ভারে [অ্যাডমিশন কন্ট্রোলার](/bn/docs/reference/access-authn-authz/admission-controllers/) + যোগ করে ওয়েবহুক এবং অন্যান্য বিশেষ অথোরাইজেশনের ধরন + সক্ষম করতে হবে। + +## ওয়ার্কলোডের রিসোর্সের সীমা নির্ধারণ করুন + +প্রোডাকশন ওয়ার্কলোডের চাহিদা কুবারনেটিস কন্ট্রোল প্লেনের ভিতরে এবং বাইরে +উভয় ক্ষেত্রেই চাপ সৃষ্টি করতে পারে। আপনার ক্লাস্টারের ওয়ার্কলোডের প্রয়োজনের জন্য সেট আপ করার সময় +এই আইটেমগুলি বিবেচনা করুনঃ + +- *নেমস্পেস সীমা সেট করুন*: মেমরি এবং সিপিইউ এর মত জিনিসগুলিতে প্রতি-নেমস্পেস কোটা সেট করুন। বিস্তারিত + জানার জন্য [Manage Memory, CPU, and API Resources](/bn/docs/tasks/administer-cluster/manage-resources/) + দেখুন। এছাড়াও আপনি উত্তরাধিকার + সীমার জন্য [হায়ারার্কিক্যাল নেমস্পেস](/blog/2020/08/14/introducing-hierarchical-namespaces/) + সেট করতে পারেন। +- *ডিএনএস চাহিদার জন্য প্রস্তুত করুন*: আপনি যদি আশা করেন যে ওয়ার্কলোড ব্যাপকভাবে বৃদ্ধি পাবে, + আপনার DNS সার্ভিসটিও স্কেল বাড়াতে প্রস্তুত থাকতে হবে। দেখুন + [একটি ক্লাস্টারে DNS সার্ভিসটি অটোস্কেল করুন](/bn/docs/tasks/administer-cluster/dns-horizontal-autoscaling/)। +- *অতিরিক্ত সার্ভিস অ্যাকাউন্ট তৈরি করুন*: ব্যবহারকারীর অ্যাকাউন্টগুলি নির্ধারণ করে যে ব্যবহারকারীরা + একটি ক্লাস্টারে কী করতে পারে, যখন একটি সার্ভিস অ্যাকাউন্ট একটি নির্দিষ্ট নেমস্পেসের মধ্যে পড + অ্যাক্সেসকে সংজ্ঞায়িত করে। ডিফল্টরূপে, একটি পড তার নেমস্পেস থেকে ডিফল্ট সার্ভিস অ্যাকাউন্টে নেয়। + একটি নতুন সার্ভিস অ্যাকাউন্ট তৈরি করার তথ্যের জন্য [সার্ভিস অ্যাকাউন্ট পরিচালনা করা](/bn/docs/reference/access-authn-authz/service-accounts-admin/) + দেখুন। উদাহরণস্বরূপ, আপনি চাইতে পারেন: + - সিক্রেট যোগ করুন যা একটি পড একটি নির্দিষ্ট কন্টেইনার রেজিস্ট্রি থেকে ইমেজ পুল করতে ব্যবহার করতে পারে। + উদাহরণের জন্য [পডের জন্য সার্ভিস অ্যাকাউন্ট কনফিগার করুন](/bn/docs/tasks/configure-pod-container/configure-service-account/) + দেখুন। + - একটি সার্ভিস অ্যাকাউন্টে RBAC অনুমতিগুলি বরাদ্দ করুন৷ বিস্তারিত জানার জন্য + [সার্ভিস 
অ্যাকাউন্ট অনুমতি](/bn/docs/reference/access-authn-authz/rbac/#service-account-permissions) + দেখুন। + +## {{% heading "whatsnext" %}} + +- আপনি নিজের প্রোডাকশন তৈরি করতে চান কিনা তা স্থির করুন কুবারনেটিস + বা এভেইল্যাবল [টার্নকি ক্লাউড সলিউশন](/bn/docs/setup/production-environment/turnkey-solutions/) + অথবা [কুবারনেটিস অংশীদার](/bn/partners/) থেকে একটি পেতে চান। +- আপনি যদি নিজের ক্লাস্টার তৈরি করতে চান, + তাহলে পরিকল্পনা করুন আপনি কীভাবে + [সার্টিফিকেট](/bn/docs/setup/best-practices/certificates/) পরিচালনা করতে চান এবং + [etcd](/bn/docs/setup/production-environment/tools/kubeadm/setup-ha-etcd-with-kubeadm/) + এর মতো ফিচারগুলির জন্য উচ্চ প্রাপ্যতা সেট আপ করুন) + এবং [API সার্ভার](/bn/docs/setup/production-environment/tools/kubeadm/ha-topology/)। +- [kubeadm](/bn/docs/setup/production-environment/tools/kubeadm/) থেকে ডিপ্লয়মেন্ট পদ্ধতি বেছে নিন, + [kops](https://kops.sigs.k8s.io/) অথবা + [Kubespray](https://kubespray.io/)। +- আপনার [অথেনটিকেশন](/bn/docs/reference/access-authn-authz/authentication/) + এবং [অথোরাইজেশন](/bn/docs/reference/access-authn-authz/authorization/) + পদ্ধতি নির্ধারণ করে ব্যবহারকারী ব্যবস্থাপনা কনফিগার করুন। +- [রিসোর্স লিমিট](/bn/docs/tasks/administer-cluster/manage-resources/), + [DNS autoscaling](/bn/docs/tasks/administer-cluster/dns-horizontal-autoscaling) + সেট আপ করে অ্যাপ্লিকেশন ওয়ার্কলোডের জন্য প্রস্তুত করুন /) এবং + [সার্ভিস অ্যাকাউন্ট](/bn/docs/reference/access-authn-authz/service-accounts-admin/)। diff --git a/content/bn/docs/setup/production-environment/tools/_index.md b/content/bn/docs/setup/production-environment/tools/_index.md new file mode 100644 index 0000000000000..d274fb6da782c --- /dev/null +++ b/content/bn/docs/setup/production-environment/tools/_index.md @@ -0,0 +1,22 @@ +--- +title: ডিপ্লয়মেন্টের টুল সহ কুবারনেটিস ইনস্টল করা +weight: 30 +no_list: true +--- + +আপনার নিজস্ব প্রোডাকশন কুবারনেটিস ক্লাস্টার সেট আপ করার জন্য অনেক পদ্ধতি এবং সরঞ্জাম আছে। +উদাহরণ স্বরূপ: + +- [kubeadm](/bn/docs/setup/production-environment/tools/kubeadm/) + +- [kops](https://kops.sigs.k8s.io/): একটি স্বয়ংক্রিয় ক্লাস্টার প্রভিশনিং টুল। + টিউটোরিয়াল, সর্বোত্তম অনুশীলন, কনফিগারেশন বিকল্প এবং কমিউনিটির + কাছে পৌঁছানো তথ্যের জন্য, অনুগ্রহ করে চেক করুন + [`kOps` ওয়েবসাইট](https://kops.sigs.k8s.io/) বিস্তারিত জানতে। + +- [Kubespray](https://kubespray.io/): + [Ansible](https://docs.ansible.com/) প্লেবুকের একটি রচনা, + [ইনভেন্টরি](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/ansible.md#inventory), + প্রভিশনিং টুলস, এবং জেনেরিক ওস/কুবারনেটিস ক্লাস্টার কনফিগারেশন ব্যবস্থাপনা কাজের + জন্য ডোমেন জ্ঞান । আপনি স্ল্যাক চ্যানেলে সম্প্রদায়ের সাথে যোগাযোগ করতে পারেন + [#kubespray](https://kubernetes.slack.com/messages/kubespray/)। diff --git a/content/bn/docs/setup/production-environment/tools/kubeadm/_index.md b/content/bn/docs/setup/production-environment/tools/kubeadm/_index.md new file mode 100644 index 0000000000000..8b10351e4933e --- /dev/null +++ b/content/bn/docs/setup/production-environment/tools/kubeadm/_index.md @@ -0,0 +1,4 @@ +--- +title: "kubeadm এর সাহায্যে ক্লাস্টার বুটস্ট্রাপ করা" +weight: 10 +--- diff --git a/content/bn/docs/setup/production-environment/turnkey-solutions.md b/content/bn/docs/setup/production-environment/turnkey-solutions.md new file mode 100644 index 0000000000000..310c1abe57a1b --- /dev/null +++ b/content/bn/docs/setup/production-environment/turnkey-solutions.md @@ -0,0 +1,14 @@ +--- +title: Turnkey ক্লাউড সলিউশন +content_type: concept +weight: 40 +--- + + +এই পৃষ্ঠাটি কুবারনেটিস প্রত্যয়িত সমাধান প্রদানকারীদের একটি তালিকা 
প্রদান করে। প্রতিটি প্রদানকারীর পৃষ্ঠা থেকে, +আপনি প্রোডাকশন এর জন্য প্রস্তুত ক্লাস্টার ইনস্টল এবং +সেটআপ করা শিখতে পারেন। + + + +{{< cncf-landscape helpers=true category="platform--certified-kubernetes-hosted" >}} diff --git a/content/bn/docs/tasks/_index.md b/content/bn/docs/tasks/_index.md new file mode 100644 index 0000000000000..8ab034e3ae986 --- /dev/null +++ b/content/bn/docs/tasks/_index.md @@ -0,0 +1,15 @@ +--- +title: টাস্ক +main_menu: true +weight: 50 +content_type: concept +--- + + + +কুবারনেটিস ডকুমেন্টেশনের এই বিভাগে এমন পৃষ্ঠা রয়েছে +যা দেখায় কিভাবে পৃথক টাস্ক করতে হয়। সাধারণত পদক্ষেপের একটি +সংক্ষিপ্ত ক্রম দিয়ে একটি টাস্ক পেজ দেখায় কিভাবে একটি একক জিনিস করতে হয়। + +আপনি যদি একটি টাস্ক পৃষ্ঠা লিখতে চান, দেখুন কীভাবে +[একটি ডকুমেন্টেশন পুল রিকোয়েস্ট তৈরি করা](/docs/contribute/new-content/open-a-pr/) যায়। diff --git a/content/bn/docs/tasks/tools/_index.md b/content/bn/docs/tasks/tools/_index.md new file mode 100644 index 0000000000000..06d71204b8ce1 --- /dev/null +++ b/content/bn/docs/tasks/tools/_index.md @@ -0,0 +1,65 @@ +--- +title: "টুল ইনস্টল করুন" +description: আপনার কম্পিউটারে কুবারনেটিস টুল সেট আপ করুন। +weight: 10 +no_list: true +card: + name: tasks + weight: 20 + anchors: + - anchor: "#kubectl" + title: kubectl ইনস্টল করুন +--- + +## kubectl + + +কুবারনেটিস কমান্ড-লাইন টুল, [kubectl](/docs/reference/kubectl/kubectl/), +আপনাকে কুবারনেটিস ক্লাস্টারে কমান্ড চালাতে দেয়। +আপনি অ্যাপ্লিকেশন স্থাপন করতে, ক্লাস্টার রিসোর্স পরিদর্শন ও পরিচালনা করতে এবং লগ দেখতে kubectl ব্যবহার করতে পারেন। +kubectl অপারেশনগুলোর সম্পূর্ণ তালিকাসহ আরও তথ্যের জন্য +[`kubectl` রেফারেন্স ডকুমেন্টেশন](/docs/reference/kubectl/) দেখুন। + +kubectl বিভিন্ন লিনাক্স প্ল্যাটফর্ম, macOS এবং Windows এ ইনস্টলযোগ্য। +নীচে আপনার পছন্দের অপারেটিং সিস্টেম খুঁজুন। + +- [লিনাক্সে kubectl ইনস্টল করুন](/bn/docs/tasks/tools/install-kubectl-linux) +- [macOS-এ kubectl ইনস্টল করুন](/bn/docs/tasks/tools/install-kubectl-macos) +- [উইন্ডোজে kubectl ইনস্টল করুন](/bn/docs/tasks/tools/install-kubectl-windows) + +## kind + +[`kind`](https://kind.sigs.k8s.io/) আপনাকে আপনার লোকাল কম্পিউটারে +কুবারনেটিস চালাতে দেয়। এই টুলটির জন্য +[ডকার](https://www.docker.com/) অথবা [পডম্যান](https://podman.io/) ইনস্টল এবং কনফিগার করা থাকতে হবে। + +kind-এর [কুইক স্টার্ট](https://kind.sigs.k8s.io/docs/user/quick-start/) পৃষ্ঠা আপনাকে +kind চালু করতে কী করতে হবে তা দেখায়। + +kind-এর কুইক স্টার্ট গাইড দেখুন + +## minikube + +`kind` এর মতো, [`minikube`](https://minikube.sigs.k8s.io/) একটি টুল যা আপনাকে স্থানীয়ভাবে কুবারনেটিস চালাতে দেয়। +`minikube` আপনার ব্যক্তিগত কম্পিউটারে (উইন্ডোজ, ম্যাকোস এবং লিনাক্স পিসি সহ) একটি একক-নোড কুবারনেটিস ক্লাস্টার চালায়, +যাতে আপনি কুবারনেটিস পরখ করে দেখতে পারেন বা দৈনন্দিন ডেভেলপমেন্ট কাজে ব্যবহার করতে পারেন। + +আপনার ফোকাস যদি টুলটি ইনস্টল করার উপর হয়, তাহলে আপনি অফিসিয়াল +[শুরু করুন!](https://minikube.sigs.k8s.io/docs/start/) নির্দেশিকা অনুসরণ করতে পারেন। + +minikube শুরু করুন! 
গাইডটি দেখুন + +একবার আপনার `minikube` কাজ করলে, আপনি এটি [একটি নমুনা অ্যাপ্লিকেশন চালাতে](/docs/tutorials/hello-minikube/) ব্যবহার করতে পারেন । + +## kubeadm + + +আপনি কুবারনেটিস ক্লাস্টার তৈরি এবং পরিচালনা করতে {{< glossary_tooltip term_id="kubeadm" text="kubeadm" >}} টুল ব্যবহার করতে পারেন। +এটি একটি ন্যূনতম কার্যকর, নিরাপদ ক্লাস্টার আপ এবং ব্যবহারকারী বান্ধব উপায়ে চালানোর জন্য প্রয়োজনীয় ক্রিয়া সম্পাদন করে। + +[kubeadm ইনস্টল করা](/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) আপনাকে দেখায় কিভাবে kubeadm ইনস্টল করতে হয়। +একবার ইনস্টল হয়ে গেলে আপনি এটিকে [একটি ক্লাস্টার তৈরি করতে](/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) ব্যবহার করতে পারেন। + + + +kubeadm ইনস্টল গাইড দেখুন diff --git a/content/bn/docs/tasks/tools/included/_index.md b/content/bn/docs/tasks/tools/included/_index.md new file mode 100644 index 0000000000000..7aab4049c960d --- /dev/null +++ b/content/bn/docs/tasks/tools/included/_index.md @@ -0,0 +1,10 @@ +--- +title: "অন্তর্ভুক্ত টুলস" +description: "স্নিপেট গুলো সংযুক্ত করতে হবে প্রধান kubectl-installs-*.md পেজগুলোতে ।" +headless: true +toc_hide: true +_build: + list: never + render: never + publishResources: false +--- diff --git a/content/bn/docs/tasks/tools/included/kubectl-convert-overview.md b/content/bn/docs/tasks/tools/included/kubectl-convert-overview.md new file mode 100644 index 0000000000000..0a155193c1b78 --- /dev/null +++ b/content/bn/docs/tasks/tools/included/kubectl-convert-overview.md @@ -0,0 +1,14 @@ +--- +title: "kubectl-convert পরিদর্শন" +description: >- + একটি kubectl প্লাগইন যা আপনাকে একটি Kubernetes আপিআই এর সংস্করণ থেকে একটি ভিন্ন সংস্করণে রূপান্তর করতে দেয়। +headless: true +_build: + list: never + render: never + publishResources: false +--- + +কুবারনেটিস কমান্ড-লাইন টুল `kubectl` এর জন্য একটি প্লাগইন, যা আপনাকে বিভিন্ন আপিআই সংস্করণ এর মধ্যে রূপান্তর করতে দেয়। +এটি নতুন কুবারনেটিস রিলিজের সাথে একটি অ-বঞ্চিত আপিআই সংস্করণে স্থানান্তর করতে বিশেষভাবে সহায়ক হতে পারে। +আরও তথ্যের জন্য, [অপ্রচলিত apis-এ মাইগ্রেট করুন](/docs/reference/using-api/deprecation-guide/#migrate-to-non-deprecated-apis) diff --git a/content/bn/docs/tasks/tools/included/kubectl-whats-next.md b/content/bn/docs/tasks/tools/included/kubectl-whats-next.md new file mode 100644 index 0000000000000..24321bce6ece7 --- /dev/null +++ b/content/bn/docs/tasks/tools/included/kubectl-whats-next.md @@ -0,0 +1,16 @@ +--- +title: "এরপর কি?" +description: "kubectl ইন্সটল করার পর যা হবে" +headless: true +_build: + list: never + render: never + publishResources: false +--- + +* [ইনস্টল করুন Minikube](https://minikube.sigs.k8s.io/docs/start/) +* ক্লাস্টার তৈরি সম্পর্কে আরও জানতে দেখুন [গাইড শুরু করা](/bn/docs/setup/) ফাইলটি । +* [আপনার অ্যাপ্লিকেশানটি কীভাবে লঞ্চ করবেন এবং প্রকাশ করবেন তা জানুন ।](/docs/tasks/access-application-cluster/service-access-application-cluster/) +* আপনার যদি এমন একটি ক্লাস্টারে অ্যাক্সেসের প্রয়োজন হয় যা আপনি তৈরি করেননি, দেখুন + [ক্লাস্টার অ্যাক্সেস নথি ভাগ করেন](/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). 
+* [kubectl রেফারেন্স ডক্স](/docs/reference/kubectl/kubectl/) পড়ুন । diff --git a/content/bn/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md new file mode 100644 index 0000000000000..3a046fe50c168 --- /dev/null +++ b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-bash-linux.md @@ -0,0 +1,60 @@ +--- +title: "লিনাক্সে ব্যাশ স্বয়ংক্রিয় সমাপ্তি" +description: "লিনাক্সে ব্যাশ স্বয়ংক্রিয় সমাপ্তি এর জন্য কিছু ঐচ্ছিক কনফিগারেশন।" +headless: true +_build: + list: never + render: never + publishResources: false +--- + +### ভূমিকা + +ব্যাশ-এর জন্য kubectl কমপ্লিশন স্ক্রিপ্ট `kubectl completion bash` কমান্ড দিয়ে তৈরি করা যেতে পারে। আপনার শেলে সমাপ্তি স্ক্রিপ্ট সোর্স করা kubectl অটোকমপ্লিসন সক্ষম করে। + +যাইহোক, কমপ্লিসন স্ক্রিপ্ট [**ব্যাশ-কমপ্লিসন**](https://github.com/scop/bash-completion) এর উপর নির্ভর করে, তার মানে হচ্ছে আপনাকে প্রথমে এই সফ্টওয়্যারটি ইনস্টল করতে হবে (আপনার ব্যাশ-কমপ্লিসন ইতিমধ্যেই ইনস্টল করা আছে কিনা তা `type _init_completion` চালিয়ে পরীক্ষা করতে পারেন)। + +### ব্যাশ-কমপ্লিসন ইনস্টল করুন + +ব্যাশ-কমপ্লিসন অনেক প্যাকেজ ম্যানেজার দ্বারা প্রদান করা হয় ([এখানে](https://github.com/scop/bash-completion#installation) দেখুন)। আপনি এটিকে `apt-get install bash-completion` অথবা `yum install bash-completion`, ইত্যাদি দিয়ে ইনস্টল করতে পারেন। + +উপরের কমান্ডগুলি `/usr/share/bash-completion/bash_completion` তৈরি করে, যা ব্যাশ-কমপ্লিসন এর প্রধান স্ক্রিপ্ট। আপনার প্যাকেজ ম্যানেজারের উপর নির্ভর করে, আপনাকে ম্যানুয়ালি এই ফাইলটি আপনার `~/.bashrc` ফাইলে সোর্স করতে হবে। +জানতে চাইলে, আপনার শেল পুনরায় লোড করুন এবং `type_init_completion` চালান। কমান্ডটি সফল হলে, আপনি ইতিমধ্যেই সেট করেছেন, অন্যথায় আপনার `~/.bashrc` ফাইলে নিম্নলিখিত যোগ করুন: + +```bash +source /usr/share/bash-completion/bash_completion +``` + +আপনার শেল পুনরায় লোড করুন এবং `type _init_completion` লিখে ব্যাশ-কমপ্লিসন সঠিকভাবে ইনস্টল করা হয়েছে কিনা তা যাচাই করুন। + +### kubectl অটোকমপ্লিসন চালু করুন + +#### ব্যাশ + +আপনাকে এখন নিশ্চিত করতে হবে যে kubectl সমাপ্তি স্ক্রিপ্টটি আপনার সমস্ত শেল সেশনে পাওয়া যায়। আপনি এটি করতে পারেন যা দুটি উপায় আছেঃ + +{{< tabs name="kubectl_bash_autocompletion" >}} +{{< tab name="User" codelang="bash" >}} +echo 'source <(kubectl completion bash)' >>~/.bashrc +{{< /tab >}} +{{< tab name="System" codelang="bash" >}} +kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl > /dev/null +{{< /tab >}} +{{< /tabs >}} + +আপনার যদি kubectl এর জন্য একটি অ্যালিঅ্যাস থাকে তবে আপনি সেই অ্যালিঅ্যাসের সাথে কাজ করার জন্য শেল কমপ্লিসন বাড়াতে পারেনঃ + +```bash +echo 'alias k=kubectl' >>~/.bashrc +echo 'complete -o default -F __start_kubectl k' >>~/.bashrc +``` + +{{< note >}} +ব্যাশ-কমপ্লিসনের সূত্র `/etc/bash_completion.d`-এ সমস্ত কমপ্লিসন স্ক্রিপ্ট। +{{< /note >}} + +উভয় পন্থা সমতুল্য। আপনার শেল পুনরায় লোড করার পরে, kubectl অটোকমপ্লিসন কাজ করা উচিত। +শেলের বর্তমান সেশনে ব্যাশ অটোকমপ্লিসন সক্ষম করতে, ~/.bashrc ফাইলটি উৎস করুনঃ +```bash +source ~/.bashrc +``` diff --git a/content/bn/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md new file mode 100644 index 0000000000000..7c1af5bcc30ea --- /dev/null +++ b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-bash-mac.md @@ -0,0 +1,92 @@ +--- +title: "macOS এ ব্যাশ স্বয়ংক্রিয় সমাপ্তি" +description: "macOS-এ ব্যাশ স্বয়ংক্রিয় সমাপ্তি এর জন্য কিছু ঐচ্ছিক কনফিগারেশন।" +headless: true +_build: + list: never + render: never + publishResources: false 
+--- + +### ভূমিকা + +Bash-এর জন্য kubectl কমপ্লিশন স্ক্রিপ্ট `kubectl completion bash` দিয়ে তৈরি করা যেতে পারে। আপনার শেলে এই স্ক্রিপ্টটি সোর্স করা kubectl কমপ্লিসন সক্ষম করে। + +কিন্তু, kubectl কমপ্লিসন স্ক্রিপ্ট [**bash-completion**](https://github.com/scop/bash-completion)-এর উপর নির্ভর করে, যা আপনাকে আগে ইনস্টল করতে হবে। + +{{< warning>}} +bash-completion এর দুটি সংস্করণ আছে, v1 এবং v2। v1 হল Bash 3.2 এর জন্য (যা macOS-এ ডিফল্ট), এবং v2 হল Bash 4.1+ এর জন্য। kubectl কমপ্লিসন স্ক্রিপ্ট bash-completion v1 এবং Bash 3.2 এর সাথে সঠিকভাবে **কাজ করে না**। এর জন্য **bash-completion v2** এবং **Bash 4.1+** প্রয়োজন। সুতরাং, macOS-এ kubectl কমপ্লিসন সঠিকভাবে ব্যবহার করতে সক্ষম হতে, আপনাকে Bash 4.1+ ([*instructions*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)) ইনস্টল এবং ব্যবহার করতে হবে। নিম্নলিখিত নির্দেশাবলী অনুমান করে যে আপনি Bash 4.1+ ব্যবহার করেন (অর্থাৎ, 4.1 বা তার পরবর্তী যেকোনো Bash সংস্করণ)। +{{< /warning >}} + +### Bash আপগ্রেড করুন + +এখানকার নির্দেশাবলী অনুমান করে আপনি Bash 4.1+ ব্যবহার করছেন। আপনি নিচের কমান্ডটি চালিয়ে আপনার Bash সংস্করণ পরীক্ষা করতে পারেন: + +```bash +echo $BASH_VERSION +``` + +যদি এটি খুব পুরানো হয়, আপনি Homebrew ব্যবহার করে এটি ইনস্টল/আপগ্রেড করতে পারেন: + +```bash +brew install bash +``` + +আপনার শেল পুনরায় লোড করুন এবং যাচাই করুন যে পছন্দসই সংস্করণটি ব্যবহার করা হচ্ছে: + +```bash +echo $BASH_VERSION $SHELL +``` + +Homebrew সাধারণত `/usr/local/bin/bash` এ ইনস্টল হয়। + +### ব্যাশ-কমপ্লিসন ইনস্টল করুন + +{{< note >}} +উল্লিখিত হিসাবে, এই নির্দেশাবলী অনুমান করে আপনি Bash 4.1+ ব্যবহার করেন, যার মানে আপনি bash-completion v2 ইনস্টল করবেন (Bash 3.2 এবং bash-completion v1 এর বিপরীতে, যে ক্ষেত্রে kubectl কমপ্লিসন কাজ করবে না)। +{{< /note >}} + +`type _init_completion` চালিয়ে আপনি পরীক্ষা করতে পারেন আপনার bash-completion v2 ইতিমধ্যেই ইনস্টল করা আছে কিনা। যদি না হয়, আপনি Homebrew দিয়ে এটি ইনস্টল করতে পারেন: + +```bash +brew install bash-completion@2 +``` + +এই কমান্ডের আউটপুটে যেমন বলা হয়েছে, আপনার `~/.bash_profile` ফাইলে নিম্নলিখিত যোগ করুন: + +```bash +brew_etc="$(brew --prefix)/etc" && [[ -r "${brew_etc}/profile.d/bash_completion.sh" ]] && . "${brew_etc}/profile.d/bash_completion.sh" +``` + +আপনার শেল পুনরায় লোড করুন এবং যাচাই করুন যে bash-completion v2 সঠিকভাবে `type _init_completion` দিয়ে ইনস্টল করা আছে। + +### kubectl অটোকমপ্লিসন চালু করুন + +আপনাকে এখন নিশ্চিত করতে হবে যে আপনার সমস্ত শেল সেশনে kubectl কমপ্লিসনের স্ক্রিপ্টটি পাওয়া যায়। এটি অর্জন করার একাধিক উপায় রয়েছে: + +- আপনার `~/.bash_profile` ফাইলে কমপ্লিসনের স্ক্রিপ্ট সোর্স করুন: + + ```bash + echo 'source <(kubectl completion bash)' >>~/.bash_profile + ``` + +- `/usr/local/etc/bash_completion.d` ডিরেক্টরিতে কমপ্লিসনের স্ক্রিপ্ট যোগ করুন: + + ```bash + kubectl completion bash >/usr/local/etc/bash_completion.d/kubectl + ``` + +- আপনার যদি kubectl এর জন্য একটি উপনাম থাকে তবে আপনি সেই উপনামের সাথে কাজ করার জন্য শেল কমপ্লিসন বাড়াতে পারেন: + + ```bash + echo 'alias k=kubectl' >>~/.bash_profile + echo 'complete -o default -F __start_kubectl k' >>~/.bash_profile + ``` + +- আপনি যদি হোমব্রু দিয়ে kubectl ইনস্টল করেন (যেমন [এখানে ব্যাখ্যা করা হয়েছে](/docs/tasks/tools/install-kubectl-macos/#install-with-homebrew-on-macos)), তাহলে kubectl কমপ্লিসনের স্ক্রিপ্ট ইতিমধ্যেই `/usr/local/etc/bash_completion.d/kubectl`-এ থাকা উচিত। সেক্ষেত্রে আপনার কিছু করার দরকার নেই। + + {{< note >}} + bash-completion v2-এর Homebrew ইনস্টলেশনটি `BASH_COMPLETION_COMPAT_DIR` ডিরেক্টরির সমস্ত ফাইলকে সোর্স করে, তাই শেষের দুটি পদ্ধতি কাজ করে। + {{< /note >}} + +যে কোনো ক্ষেত্রে, আপনার শেল পুনরায় লোড করার পরে, kubectl সমাপ্তি কাজ করা উচিত। diff --git a/content/bn/docs/tasks/tools/included/optional-kubectl-configs-fish.md b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-fish.md new file mode 100644 index 0000000000000..27d6ef7b7e141 --- /dev/null +++ b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-fish.md @@ -0,0 +1,23 @@ +--- +title: "fish স্বয়ংক্রিয় সমাপ্তি" +description: "fish শেল স্বয়ংক্রিয় সমাপ্তি চালু করার জন্য ঐচ্ছিক কনফিগারেশন।" +headless: true +_build: + list: never + render: never + publishResources: false +--- + +{{< note >}} +fish এর স্বয়ংক্রিয় সমাপ্তির জন্য kubectl 1.23 বা তার পরের সংস্করণ প্রয়োজন। +{{< /note >}} + +fish এর জন্য kubectl সমাপ্তি স্ক্রিপ্ট `kubectl completion fish` কমান্ড দিয়ে তৈরি করা যেতে পারে। আপনার শেলের মধ্যে সমাপ্তি স্ক্রিপ্ট সোর্স করা kubectl স্বয়ংক্রিয় সমাপ্তি চালু করে। + +আপনার সমস্ত শেল সেশনে এটি করতে, আপনার `~/.config/fish/config.fish` ফাইলে নিম্নলিখিত লাইন যুক্ত করুন: + +```shell +kubectl completion fish | source +``` + +আপনার শেল পুনরায় লোড করার পরে, kubectl স্বয়ংক্রিয় সমাপ্তি কাজ করা উচিত। diff --git a/content/bn/docs/tasks/tools/included/optional-kubectl-configs-pwsh.md b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-pwsh.md new file mode 100644 index 0000000000000..159aa0eeb0c20 --- /dev/null +++ b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-pwsh.md @@ -0,0 +1,27 @@ +--- +title: "PowerShell স্বয়ংক্রিয় সমাপ্তি" +description: "PowerShell স্বয়ংক্রিয় সমাপ্তি এর জন্য কিছু ঐচ্ছিক কনফিগারেশন।" +headless: true +_build: + list: never + render: never + publishResources: false +--- + +PowerShell-এর জন্য kubectl সমাপ্তি স্ক্রিপ্ট `kubectl completion powershell` কমান্ড দিয়ে তৈরি করা যেতে পারে। + +আপনার সমস্ত শেল সেশনে এটি করতে, আপনার `$PROFILE` ফাইলে নিম্নলিখিত লাইন যোগ করুন: + +```powershell +kubectl completion powershell | Out-String | Invoke-Expression +``` + +এই কমান্ডটি প্রতিটি PowerShell স্টার্ট আপে স্বয়ংক্রিয় সমাপ্তি স্ক্রিপ্ট পুনরায় তৈরি করবে। আপনি চাইলে জেনারেট করা স্ক্রিপ্টটি সরাসরি আপনার `$PROFILE` ফাইলে যোগ করতে পারেন। + +আপনার `$PROFILE` ফাইলে জেনারেট করা স্ক্রিপ্ট 
যোগ করতে, আপনার পাওয়ারশেল প্রম্পটে নিম্নলিখিত লাইনটি চালান: + +```powershell +kubectl completion powershell >> $PROFILE +``` + +আপনার শেল পুনরায় লোড করার পরে, kubectl স্বয়ংক্রিয় সমাপ্তি কাজ করা উচিত। diff --git a/content/bn/docs/tasks/tools/included/optional-kubectl-configs-zsh.md b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-zsh.md new file mode 100644 index 0000000000000..166e8cd7f7aa8 --- /dev/null +++ b/content/bn/docs/tasks/tools/included/optional-kubectl-configs-zsh.md @@ -0,0 +1,28 @@ +--- +title: "zsh স্বয়ংক্রিয় সমাপ্তি" +description: "zsh স্বয়ংক্রিয় সমাপ্তি এর জন্য কিছু ঐচ্ছিক কনফিগারেশন।" +headless: true +_build: + list: never + render: never + publishResources: false +--- + +Zsh-এর জন্য kubectl কমপ্লিশন স্ক্রিপ্ট `kubectl completion zsh` কমান্ড দিয়ে তৈরি করা যেতে পারে। আপনার শেলে সমাপ্তি স্ক্রিপ্ট সোর্স করা kubectl স্বয়ংসম্পূর্ণতা সক্ষম করে। + +আপনার সমস্ত শেল সেশনে এটি করতে, আপনার `~/.zshrc` ফাইলে নিম্নলিখিত যোগ করুন: + +```zsh +source <(kubectl completion zsh) +``` + +আপনার যদি kubectl-এর একটি উপনাম থাকে, kubectl স্বয়ংসম্পূর্ণতা স্বয়ংক্রিয়ভাবে এটির সাথে কাজ করবে। + +আপনার শেল পুনরায় লোড করার পরে, kubectl স্বয়ংসম্পূর্ণতা কাজ করা উচিত। + +যদি আপনি একটি ত্রুটি পান যেমন `2: command not found: compdef`, তাহলে আপনার `~/.zshrc` ফাইলের শুরুতে নিম্নলিখিত যোগ করুন: + +```zsh +autoload -Uz compinit +compinit +``` diff --git a/content/bn/docs/tasks/tools/included/verify-kubectl.md b/content/bn/docs/tasks/tools/included/verify-kubectl.md new file mode 100644 index 0000000000000..3ee62d09790e6 --- /dev/null +++ b/content/bn/docs/tasks/tools/included/verify-kubectl.md @@ -0,0 +1,52 @@ +--- +title: "kubectl ইনস্টল যাচাই করুন" +description: "কিভাবে kubectl যাচাই করবেন।" +headless: true +_build: + list: never + render: never + publishResources: false +--- + +kubectl-এর একটি কুবারনেটিস ক্লাস্টার খুঁজে পেতে এবং অ্যাক্সেস করতে একটি +[kubeconfig ফাইল](/docs/concepts/configuration/organize-cluster-access-kubeconfig/) প্রয়োজন, +যা স্বয়ংক্রিয়ভাবে তৈরি হয় যখন আপনি +[kube-up.sh](https://github.com/kubernetes/kubernetes/blob/master/cluster/kube-up.sh) +ব্যবহার করে একটি ক্লাস্টার তৈরি করেন অথবা সফলভাবে একটি Minikube ক্লাস্টার স্থাপন করেন। +ডিফল্টরূপে, kubectl কনফিগারেশন `~/.kube/config`-এ অবস্থিত। + +ক্লাস্টারের অবস্থা দেখে kubectl সঠিকভাবে কনফিগার করা হয়েছে কিনা তা পরীক্ষা করুন: + +```shell +kubectl cluster-info +``` + +আপনি যদি একটি URL দেখতে পান, তাহলে আপনার ক্লাস্টার অ্যাক্সেস করার জন্য kubectl সঠিকভাবে কনফিগার করা হয়েছে। + +আপনি যদি নিম্নলিখিতগুলোর মতো একটি বার্তা দেখতে পান, তাহলে বুঝবেন যে kubectl সঠিকভাবে কনফিগার করা হয়নি +অথবা একটি Kubernetes ক্লাস্টারের সাথে সংযোগ করতে সক্ষম নয়। + +``` +সার্ভারের সাথে সংযোগ প্রত্যাখ্যান করা হয়েছিল - আপনি কি সঠিক হোস্ট বা পোর্ট উল্লেখ করেছেন? 
+``` + +উদাহরণস্বরূপ, আপনি যদি আপনার ল্যাপটপে (স্থানীয়ভাবে) একটি কুবারনেটিস ক্লাস্টার চালাতে চান, +তাহলে আপনাকে প্রথমে মিনিকুবের মতো একটি টুল ইনস্টল করতে হবে এবং তারপরে উপরে বর্ণিত কমান্ডগুলি পুনরায় চালাতে হবে। + +যদি kubectl ক্লাস্টার-তথ্য url প্রতিক্রিয়া প্রদান করে কিন্তু আপনি আপনার ক্লাস্টার অ্যাক্সেস করতে না পারেন, +এটি সঠিকভাবে কনফিগার করা হয়েছে কিনা তা পরীক্ষা করতে, ব্যবহার করুন: + +```shell +kubectl cluster-info dump +``` + +### 'No Auth Provider Found' ত্রুটি বার্তার সমস্যা সমাধান {#no-auth-provider-found} + +কুবারনেটিস 1.26-এ, kubectl নিম্নলিখিত ক্লাউড প্রদানকারীদের পরিচালিত কুবারনেটিস অফারগুলোর জন্য অন্তর্নির্মিত অথেনটিকেশন সরিয়ে দিয়েছে। +এই প্রদানকারীরা ক্লাউডের-নির্দিষ্ট অথেনটিকেশন প্রদানের জন্য kubectl প্লাগইন প্রকাশ করেছে। +নির্দেশাবলীর জন্য, নিম্নলিখিত প্রদানকারী ডকুমেন্টেশন পড়ুন: + +* Azure AKS: [kubelogin plugin](https://azure.github.io/kubelogin/) +* Google Kubernetes Engine: [gke-gcloud-auth-plugin](https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin) + +(একই ত্রুটির বার্তা দেখার অন্যান্য কারণও থাকতে পারে, এই পরিবর্তনের সাথে সম্পর্কিত নয়।) diff --git a/content/bn/docs/tasks/tools/install-kubectl-linux.md b/content/bn/docs/tasks/tools/install-kubectl-linux.md new file mode 100644 index 0000000000000..054c997f04c5f --- /dev/null +++ b/content/bn/docs/tasks/tools/install-kubectl-linux.md @@ -0,0 +1,279 @@ +--- +reviewers: +- mitul3737 +title: লিনাক্সে kubectl ইনস্টল এবং সেট আপ করুন +content_type: task +weight: 10 +card: + name: tasks + weight: 20 + title: লিনাক্সে kubectl ইনস্টল করুন +--- + +## {{% heading "prerequisites" %}} + +আপনাকে অবশ্যই একটি kubectl সংস্করণ ব্যবহার করতে হবে যা আপনার ক্লাস্টারের একটি ছোট সংস্করণের পার্থক্যের মধ্যে রয়েছে। উদাহরণস্বরূপ, একটি v{{< skew currentVersion >}} ক্লায়েন্ট v{{< skew currentVersionAddMinor -1 >}}, v{{< skew currentVersionAddMinor 0 >}}, এবং v{{< skew currentVersionAddMinor 1 >}} এর কন্ট্রোল প্লেনের সাথে যোগাযোগ করতে পারে। +kubectl এর সর্বশেষ সামঞ্জস্যপূর্ণ সংস্করণ ব্যবহার করা অপ্রত্যাশিত সমস্যাগুলি এড়াতে সাহায্য করে৷ + +## লিনাক্সে kubectl ইনস্টল করুন + +লিনাক্সে kubectl ইনস্টল করার জন্য নিম্নলিখিত পদ্ধতি বিদ্যমানঃ + +- [লিনাক্সে কার্ল দিয়ে kubectl বাইনারি ইনস্টল করুন](#install-kubectl-binary-with-curl-on-linux) +- [নেটিভ প্যাকেজ ম্যানেজমেন্ট দিয়ে ইনস্টল করুন](#install-using-native-package-management) +- [অন্যান্য প্যাকেজ ব্যবস্থাপনা ব্যবহার করে ইনস্টল করুন](#install-using-other-package-management) + +### লিনাক্সে কার্ল সহ kubectl বাইনারি ইনস্টল করুন + +১. কমান্ড সহ সর্বশেষ রিলিজ ডাউনলোড করুন: + + ```bash + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + ``` + + {{< note >}} +একটি নির্দিষ্ট সংস্করণ ডাউনলোড করতে, নির্দিষ্ট সংস্করণের সাথে কমান্ডের `$(curl -L -s https://dl.k8s.io/release/stable.txt)` অংশটি প্রতিস্থাপন করুন। + +উদাহরণস্বরূপ, লিনাক্সে সংস্করণ {{% skew currentPatchVersion %}} ডাউনলোড করতে, টাইপ করুন: + + ```bash + curl -LO https://dl.k8s.io/release/v{{% skew currentPatchVersion %}}/bin/linux/amd64/kubectl + ``` + {{< /note >}} + +২. 
বাইনারি যাচাই করুন (ঐচ্ছিক) + + kubectl চেকসাম ফাইল ডাউনলোড করুন: + + ```bash + curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" + ``` + + চেকসাম ফাইলের বিপরীতে kubectl বাইনারি যাচাই করুন: + + ```bash + echo "$(cat kubectl.sha256) kubectl" | sha256sum --check + ``` + + বৈধ হলে, আউটপুট হবে: + + ```console + kubectl: OK + ``` + + চেক ব্যর্থ হলে, `sha256` অশূন্য স্থিতি সহ প্রস্থান করে এবং অনুরূপ আউটপুট প্রিন্ট করে: + + ```bash + kubectl: FAILED + sha256sum: WARNING: 1 computed checksum did NOT match + ``` + + {{< note >}} + বাইনারি এবং চেকসামের একই সংস্করণ ডাউনলোড করুন। + {{< /note >}} + +৩. kubectl ইনস্টল করুন + + ```bash + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + ``` + + {{< note >}} + যদি আপনার টার্গেট সিস্টেমে রুট অ্যাক্সেস না থাকে, তাহলেও আপনি `~/.local/bin` ডিরেক্টরিতে kubectl ইনস্টল করতে পারেন: + + ```bash + chmod +x kubectl + mkdir -p ~/.local/bin + mv ./kubectl ~/.local/bin/kubectl + # and then append (or prepend) ~/.local/bin to $PATH + ``` + + {{< /note >}} + +৪. আপনার ইনস্টল করা সংস্করণ আপ-টু-ডেট কিনা তা নিশ্চিত করতে পরীক্ষা করুন: + + ```bash + kubectl version --client + ``` + {{< note >}} + উপরের কমান্ডটি একটি সতর্কতা তৈরি করবে: + ``` + WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. + ``` + আপনি এই সতর্কতা উপেক্ষা করতে পারেন। আপনি শুধুমাত্র `kubectl` এর সংস্করণটি পরীক্ষা করছেন যা আপনি ইনস্টল করেছেন। + + + {{< /note >}} + + অথবা সংস্করণের বিস্তারিত দেখার জন্য এটি ব্যবহার করুনঃ + + ```cmd + kubectl version --client --output=yaml + ``` + +### নেটিভ প্যাকেজ ম্যানেজমেন্ট ব্যবহার করে ইনস্টল করুন + +{{< tabs name="kubectl_install" >}} +{{% tab name="Debian-based distributions" %}} + +১. `apt` প্যাকেজ ইনডেক্স আপডেট করুন এবং Kubernetes `apt` রিপোযিটোরী ব্যবহার করার জন্য প্রয়োজনীয় প্যাকেজ ইনস্টল করুন: + + ```shell + sudo apt-get update + sudo apt-get install -y ca-certificates curl + ``` + আপনি যদি ডেবিয়ান ৯ (স্ট্রেচ) বা তার আগে ব্যবহার করেন তবে আপনাকে `apt-transport-https` ইনস্টল করতে হবে: + ```shell + sudo apt-get install -y apt-transport-https + ``` + +২. গুগল ক্লাউড পাবলিক সাইনিং কী ডাউনলোড করুন: + + ```shell + sudo curl -fsSLo /etc/apt/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg + ``` + +৩. কুবারনেটিস `apt` রিপোযিটোরী যোগ করুন: + + ```shell + echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list + ``` + +৪. 
নতুন রিপোযিটোরীর সাথে `apt` প্যাকেজ ইনডেক্স আপডেট করুন এবং kubectl ইনস্টল করুন: + + ```shell + sudo apt-get update + sudo apt-get install -y kubectl + ``` +{{< note >}} +ডেবিয়ান ১২ এবং উবুন্টু ২২.০৪ এর চেয়ে পুরানো রিলিজে, `/etc/apt/keyrings` ডিফল্টরূপে বিদ্যমান নেই। +আপনার প্রয়োজন হলে আপনি এই ডিরেক্টরিটি তৈরি করতে পারেন, এটি ওয়ার্ল্ড-রিডেবল কিন্তু শুধুমাত্র অ্যাডমিনদের দ্বারা লেখার যোগ্য। + +{{< /note >}} + +{{% /tab %}} + +{{% tab name="Red Hat-based distributions" %}} +```bash +cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo +[kubernetes] +name=Kubernetes +baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch +enabled=1 +gpgcheck=1 +gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg +EOF +sudo yum install -y kubectl +``` +{{% /tab %}} + +{{< /tabs >}} + +### অন্যান্য প্যাকেজ ব্যবস্থাপনা ব্যবহার করে ইনস্টল করুন + +{{< tabs name="other_kubectl_install" >}} +{{% tab name="Snap" %}} +আপনি যদি উবুন্টু বা অন্য একটি লিনাক্স ডিস্ট্রিবিউশনে থাকেন যা [স্ন্যাপ](https://snapcraft.io/docs/core/install) প্যাকেজ ম্যানেজার সমর্থন করে, তাহলে kubectl একটি [স্ন্যাপ](https://snapcraft.io/) অ্যাপ্লিকেশান হিসেবে পাওয়া যাবে। + + +```shell +snap install kubectl --classic +kubectl version --client +``` + +{{% /tab %}} + +{{% tab name="Homebrew" %}} +আপনি যদি লিনাক্সে থাকেন এবং [হোম্ব্রু](https://docs.brew.sh/Homebrew-on-Linux) প্যাকেজ ম্যানেজার ব্যবহার করেন, তাহলে kubectl [ইনস্টলেশন](https://docs.brew.sh/Homebrew-on-Linux#install) এর জন্য পাওয়া যাবে। + +```shell +brew install kubectl +kubectl version --client +``` + +{{% /tab %}} + +{{< /tabs >}} + +## kubectl কনফিগারেশন যাচাই করুন + +{{< include "included/verify-kubectl.md" >}} + +## ঐচ্ছিক kubectl কনফিগারেশন এবং প্লাগইন + +### শেল অটোকমপ্লিসন চালু করুন + +kubectl Bash, Zsh, Fish এবং PowerShell-এর জন্য অটোকমপ্লিসন সমর্থন প্রদান করে, যা আপনাকে অনেক টাইপিং বাঁচাতে পারে। + +নীচে Bash, Fish, এবং Zsh-এর জন্য স্বয়ংসম্পূর্ণতা সেট আপ করার পদ্ধতিগুলি রয়েছে৷ + +{{< tabs name="kubectl_autocompletion" >}} +{{< tab name="Bash" include="included/optional-kubectl-configs-bash-linux.md" />}} +{{< tab name="Fish" include="included/optional-kubectl-configs-fish.md" />}} +{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} +{{< /tabs >}} + +### `kubectl convert` প্লাগইন ইনস্টল করুন + +{{< include "included/kubectl-convert-overview.md" >}} + +১. কমান্ড সহ সর্বশেষ রিলিজ ডাউনলোড করুন: + + ```bash + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert" + ``` + +২. বাইনারি যাচাই করুন (ঐচ্ছিক) + + kubectl-convert চেকসাম ফাইলটি ডাউনলোড করুন: + + ```bash + curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl-convert.sha256" + ``` + + চেকসাম ফাইলের বিপরীতে kubectl-convert বাইনারি যাচাই করুন: + + ```bash + echo "$(cat kubectl-convert.sha256) kubectl-convert" | sha256sum --check + ``` + + বৈধ হলে, আউটপুট হল: + + ```console + kubectl-convert: OK + ``` + + চেক ব্যর্থ হলে, `sha256` অশূন্য স্থিতি সহ প্রস্থান করে এবং অনুরূপ আউটপুট প্রিন্ট করে: + + ```bash + kubectl-convert: FAILED + sha256sum: WARNING: 1 computed checksum did NOT match + ``` + + {{< note >}} + বাইনারি এবং চেকসামের একই সংস্করণ ডাউনলোড করুন। + {{< /note >}} + +৩. kubectl-convert ইনস্টল করুন + + ```bash + sudo install -o root -g root -m 0755 kubectl-convert /usr/local/bin/kubectl-convert + ``` + +৪. প্লাগইন সফলভাবে ইনস্টল করা হয়েছে যাচাই করুন + + ```shell + kubectl convert --help + ``` + + আপনি যদি একটি ত্রুটি দেখতে না পান, এর মানে হল প্লাগইনটি সফলভাবে ইনস্টল করা হয়েছে। + +৫. 
প্লাগইন ইনস্টল করার পরে, ইনস্টলেশন ফাইলগুলি পরিষ্কার করুন: + + ```bash + rm kubectl-convert kubectl-convert.sha256 + ``` + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} diff --git a/content/bn/docs/tasks/tools/install-kubectl-macos.md b/content/bn/docs/tasks/tools/install-kubectl-macos.md new file mode 100644 index 0000000000000..ad2f21b7c06b5 --- /dev/null +++ b/content/bn/docs/tasks/tools/install-kubectl-macos.md @@ -0,0 +1,282 @@ +--- +reviewers: +- mitul3737 +title: macOS এ kubectl ইনস্টল এবং সেট আপ করুন +content_type: task +weight: 10 +card: + name: tasks + weight: 20 + title: macOS এ kubectl ইনস্টল করুন +--- + +## {{% heading "prerequisites" %}} + +আপনাকে অবশ্যই একটি kubectl সংস্করণ ব্যবহার করতে হবে যা আপনার ক্লাস্টারের একটি ছোট সংস্করণের পার্থক্যের মধ্যে রয়েছে। উদাহরণস্বরূপ, একটি v{{< skew currentVersion >}} ক্লায়েন্ট v{{< skew currentVersionAddMinor -1 >}}, v{{< skew currentVersionAddMinor 0 >}}, এবং v{{< skew currentVersionAddMinor 1 >}} নিয়ন্ত্রণ প্লেন এর সাথে যোগাযোগ করতে পারে। +kubectl এর সর্বশেষ সামঞ্জস্যপূর্ণ সংস্করণ ব্যবহার করা অপ্রত্যাশিত সমস্যাগুলি এড়াতে সাহায্য করে৷ + +## macOS এ kubectl ইনস্টল করুন + +macOS এ kubectl ইনস্টল করার জন্য নিম্নলিখিত পদ্ধতি রয়েছেঃ + +- [macOS এ kubectl ইনস্টল করুন](#install-kubectl-on-macos) + - [macOS-এ কার্ল দিয়ে kubectl বাইনারি ইনস্টল করুন](#install-kubectl-binary-with-curl-on-macos) + - [MacOS এ Homebrew দিয়ে ইনস্টল করুন](#install-with-homebrew-on-macos) + - [MacOS এ Macports দিয়ে ইনস্টল করুন](#install-with-macports-on-macos) +- [kubectl কনফিগারেশন যাচাই করুন](#verify-kubectl-configuration) +- [বাড়তি kubectl কনফিগারেশন এবং প্লাগইন](#optional-kubectl-configurations-and-plugins) + - [শেল অটোকমপ্লিট সক্ষম করুন](#enable-shell-autocompletion) + - [`kubectl convert` প্লাগইন ইনস্টল করুন](#install-kubectl-convert-plugin) + +### macOS-এ কার্ল সহ kubectl বাইনারি ইনস্টল করুন + +১. সর্বশেষ রিলিজ ডাউনলোড করুন: + + {{< tabs name="download_binary_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl" + {{< /tab >}} + {{< /tabs >}} + + {{< note >}} + একটি নির্দিষ্ট সংস্করণ ডাউনলোড করতে, নির্দিষ্ট সংস্করণের সাথে কমান্ডের `$(curl -L -s https://dl.k8s.io/release/stable.txt)` অংশটি প্রতিস্থাপন করুন। + + উদাহরণস্বরূপ, Intel macOS-এ সংস্করণ {{% skew currentPatchVersion %}} ডাউনলোড করতে, টাইপ করুন: + + ```bash + curl -LO "https://dl.k8s.io/release/v{{% skew currentPatchVersion %}}/bin/darwin/amd64/kubectl" + ``` + + এবং অ্যাপল সিলিকনে macOS এর জন্য, টাইপ করুন: + + ```bash + curl -LO "https://dl.k8s.io/release/v{{% skew currentPatchVersion %}}/bin/darwin/arm64/kubectl" + ``` + + {{< /note >}} + +২. 
বাইনারি যাচাই করুন (ঐচ্ছিক) + + kubectl checksum ফাইল ডাউনলোড করুন: + + {{< tabs name="download_checksum_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl.sha256" + {{< /tab >}} + {{< /tabs >}} + + চেকসাম ফাইলের বিপরীতে kubectl বাইনারি যাচাই করুন: + + ```bash + echo "$(cat kubectl.sha256) kubectl" | shasum -a 256 --check + ``` + + বৈধ হলে, আউটপুট হল: + + ```console + kubectl: OK + ``` + + চেক ব্যর্থ হলে, `shasum` অশূন্য স্থিতি সহ প্রস্থান করে এবং অনুরূপ আউটপুট প্রিন্ট করে: + + ```bash + kubectl: FAILED + shasum: WARNING: 1 computed checksum did NOT match + ``` + + {{< note >}} + বাইনারি এবং চেকসামের একই সংস্করণ ডাউনলোড করুন। + {{< /note >}} + +৩. kubectl বাইনারি এক্সিকিউটেবল করুন। + + ```bash + chmod +x ./kubectl + ``` + +৪. আপনার সিস্টেম `PATH`-এ একটি ফাইল অবস্থানে kubectl বাইনারি সরান। + + ```bash + sudo mv ./kubectl /usr/local/bin/kubectl + sudo chown root: /usr/local/bin/kubectl + ``` + + {{< note >}} + নিশ্চিত করুন যে `/usr/local/bin` আপনার PATH এনভায়রনমেন্ট ভেরিয়েবলে আছে। + {{< /note >}} + +৫. আপনার ইনস্টল করা সংস্করণ আপ-টু-ডেট কিনা তা নিশ্চিত করতে পরীক্ষা করুন: + + ```bash + kubectl version --client + ``` + + {{< note >}} + উপরের কমান্ডটি একটি সতর্কতা তৈরি করবে: + ``` + WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. + ``` + আপনি এই সতর্কতা উপেক্ষা করতে পারেন. আপনি শুধুমাত্র `kubectl` এর যে সংস্করণটি ইনস্টল করেছেন তা পরীক্ষা করছেন। + + {{< /note >}} + + অথবা সংস্করণের বিস্তারিত দেখার জন্য এটি ব্যবহার করুন: + + ```cmd + kubectl version --client --output=yaml + ``` + +৬. প্লাগইন ইনস্টল করার পরে, ইনস্টলেশন ফাইলগুলি পরিষ্কার করুন: + + ```bash + rm kubectl kubectl.sha256 + ``` + +### MacOS এ Homebrew দিয়ে ইনস্টল করুন + +আপনি যদি macOS-এ থাকেন এবং [Homebrew](https://brew.sh/) প্যাকেজ ম্যানেজার ব্যবহার করেন, তাহলে আপনি Homebrew-এর সাথে kubectl ইনস্টল করতে পারেন। + +১. ইনস্টলেশন কমান্ড চালান: + + ```bash + brew install kubectl + ``` + + অথবা, + + ```bash + brew install kubernetes-cli + ``` + +২. আপনার ইনস্টল করা সংস্করণ আপ-টু-ডেট কিনা তা নিশ্চিত করতে পরীক্ষা করুন: + + ```bash + kubectl version --client + ``` + +### MacOS এ Macports দিয়ে ইনস্টল করুন + +আপনি যদি macOS এ থাকেন এবং [Macports](https://macports.org/) প্যাকেজ ম্যানেজার ব্যবহার করেন, তাহলে আপনি ম্যাকপোর্টের সাথে kubectl ইনস্টল করতে পারেন। + +১. ইনস্টলেশন কমান্ড চালান: + + ```bash + sudo port selfupdate + sudo port install kubectl + ``` + +২. 
আপনার ইনস্টল করা সংস্করণ আপ-টু-ডেট কিনা তা নিশ্চিত করতে পরীক্ষা করুন: + + ```bash + kubectl version --client + ``` + +## kubectl কনফিগারেশন যাচাই করুন + +{{< include "included/verify-kubectl.md" >}} + +## ঐচ্ছিক kubectl কনফিগারেশন এবং প্লাগইন + +### শেল অটোকমপ্লিট সক্ষম করুন + +kubectl Bash, Zsh, Fish এবং PowerShell-এর জন্য অটোকমপ্লিট সমর্থন প্রদান করে যা আপনাকে অনেক টাইপিং বাঁচাতে পারে। + +নীচে Bash, Fish, এবং Zsh-এর জন্য স্বয়ংসম্পূর্ণতা সেট আপ করার পদ্ধতিগুলি রয়েছে৷ + +{{< tabs name="kubectl_autocompletion" >}} +{{< tab name="Bash" include="included/optional-kubectl-configs-bash-mac.md" />}} +{{< tab name="Fish" include="included/optional-kubectl-configs-fish.md" />}} +{{< tab name="Zsh" include="included/optional-kubectl-configs-zsh.md" />}} +{{< /tabs >}} + +### `kubectl convert` প্লাগইন ইনস্টল করুন + +{{< include "included/kubectl-convert-overview.md" >}} + +১. কমান্ড সহ সর্বশেষ রিলিজ ডাউনলোড করুন: + + {{< tabs name="download_convert_binary_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert" + {{< /tab >}} + {{< /tabs >}} + +২. বাইনারি যাচাই করুন (ঐচ্ছিক) + + kubectl-convert checksum ফাইলটি ডাউনলোড করুন: + + {{< tabs name="download_convert_checksum_macos" >}} + {{< tab name="Intel" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl-convert.sha256" + {{< /tab >}} + {{< tab name="Apple Silicon" codelang="bash" >}} + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl-convert.sha256" + {{< /tab >}} + {{< /tabs >}} + + চেকসাম ফাইলের বিপরীতে kubectl-রূপান্তর বাইনারি যাচাই করুন: + + ```bash + echo "$(cat kubectl-convert.sha256) kubectl-convert" | shasum -a 256 --check + ``` + + বৈধ হলে, আউটপুট হল: + + ```console + kubectl-convert: OK + ``` + + চেক ব্যর্থ হলে, `shasum` অশূন্য স্থিতি সহ প্রস্থান করে এবং অনুরূপ আউটপুট প্রিন্ট করে: + + ```bash + kubectl-convert: FAILED + shasum: WARNING: 1 computed checksum did NOT match + ``` + + {{< note >}} + বাইনারি এবং চেকসামের একই সংস্করণ ডাউনলোড করুন। + {{< /note >}} + +৩. kubectl-রূপান্তর বাইনারি এক্সিকিউটেবল করুন + + ```bash + chmod +x ./kubectl-convert + ``` + +৪. আপনার সিস্টেম `PATH`-এ একটি ফাইল অবস্থানে kubectl-রূপান্তর বাইনারি সরান। + + ```bash + sudo mv ./kubectl-convert /usr/local/bin/kubectl-convert + sudo chown root: /usr/local/bin/kubectl-convert + ``` + + {{< note >}} + নিশ্চিত করুন যে `/usr/local/bin` আপনার PATH এনভায়রনমেন্ট ভেরিয়েবলে আছে। + {{< /note >}} + +৫. প্লাগইন সফলভাবে ইনস্টল করা হয়েছে যাচাই করুন + + ```shell + kubectl convert --help + ``` + + আপনি যদি একটি ত্রুটি দেখতে না পান, এর মানে হল প্লাগইনটি সফলভাবে ইনস্টল করা হয়েছে। + +৬. 
প্লাগইন ইনস্টল করার পরে, ইনস্টলেশন ফাইলগুলি পরিষ্কার করুন: + + ```bash + rm kubectl-convert kubectl-convert.sha256 + ``` + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} diff --git a/content/bn/docs/tasks/tools/install-kubectl-windows.md b/content/bn/docs/tasks/tools/install-kubectl-windows.md new file mode 100644 index 0000000000000..78de0ec3d6758 --- /dev/null +++ b/content/bn/docs/tasks/tools/install-kubectl-windows.md @@ -0,0 +1,217 @@ +--- +reviewers: +- mitul3737 +title: উইন্ডোজে kubectl ইনস্টল এবং সেট আপ করুন +content_type: task +weight: 10 +card: + name: tasks + weight: 20 + title: উইন্ডোজে kubectl ইনস্টল করুন +--- + +## {{% heading "prerequisites" %}} + +আপনাকে অবশ্যই একটি kubectl সংস্করণ ব্যবহার করতে হবে যা আপনার ক্লাস্টারের একটি ছোট সংস্করণের পার্থক্যের মধ্যে রয়েছে। উদাহরণ স্বরূপ, একটি v{{< skew currentVersion >}} ক্লায়েন্ট v{{< skew currentVersionAddMinor -1 >}}, v{{< skew currentVersionAddMinor 0 >}}, and v{{< skew currentVersionAddMinor 1 >}} কন্ট্রল প্লেনের সাথে যোগাযোগ করতে পারবে। +kubectl এর সর্বশেষ সামঞ্জস্যপূর্ণ সংস্করণ ব্যবহার করা অপ্রত্যাশিত সমস্যাগুলি এড়াতে সাহায্য করে৷ + +## উইন্ডোজে kubectl ইনস্টল করুন + +উইন্ডোজে kubectl ইনস্টল করার জন্য নিম্নলিখিত পদ্ধতিগুলো অনুসরণ করতে পারেন: + +- [উইন্ডোজে কার্ল ব্যাবহার kubectl বাইনারি ইনস্টল করুন](#install-kubectl-binary-with-curl-on-windows) +- [Chocolatey, Scoop, বা winget ব্যবহার করে Windows এ ইনস্টল করুন](#install-nonstandard-package-tools) + +### উইন্ডোজে কার্ল ব্যাবহার kubectl বাইনারি ইনস্টল করুন + +1. সর্বশেষ {{< skew currentVersion >}} প্যাচ রিলিজ ডাউনলোড করুন: [kubectl {{% skew currentPatchVersion %}}](https://dl.k8s.io/release/v{{% skew currentPatchVersion %}}/bin/windows/amd64/kubectl.exe)। + + অথবা যদি আপনার `curl` ইনস্টল থাকে, এই কমান্ডটি ব্যবহার করুন: + + ```powershell + curl.exe -LO "https://dl.k8s.io/release/v{{% skew currentPatchVersion %}}/bin/windows/amd64/kubectl.exe" + ``` + + {{< note >}} + সর্বশেষ স্থিতিশীল সংস্করণ খুঁজে বের করতে (উদাহরণস্বরূপ, স্ক্রিপ্টিংয়ের জন্য), [https://dl.k8s.io/release/stable.txt](https://dl.k8s.io/release/stable.txt) দেখতে পারেন। + {{< /note >}} + +1. বাইনারি যাচাই করুন (অপশনাল) + + `kubectl` চেকসাম ফাইলটি ডাউনলোড করুন: + + ```powershell + curl.exe -LO "https://dl.k8s.io/v{{% skew currentPatchVersion %}}/bin/windows/amd64/kubectl.exe.sha256" + ``` + + চেকসাম ফাইলের বিপরীতে `kubectl` বাইনারি যাচাই করুন: + + - ডাউনলোড করা চেকসাম ফাইলের সাথে ম্যানুয়ালি `CertUtil` এর আউটপুট তুলনা করতে কমান্ড প্রম্পট ব্যবহার করে: + + ```cmd + CertUtil -hashfile kubectl.exe SHA256 + type kubectl.exe.sha256 + ``` + + - একটি `True` বা `False` ফলাফল পেতে `-eq` অপারেটর ব্যবহার করে যাচাইকরণ স্বয়ংক্রিয় করতে PowerShell ব্যবহার করে: + + ```powershell + $(Get-FileHash -Algorithm SHA256 .\kubectl.exe).Hash -eq $(Get-Content .\kubectl.exe.sha256) + ``` + +1. আপনার `PATH` এনভায়রনমেন্ট ভেরিয়েবলে `kubectl` বাইনারি ফোল্ডারটি অ্যাপেন্ড বা প্রিপেন্ড করুন। + +1. `kubectl`-এর সংস্করণ ডাউনলোড করা একই রকম তা নিশ্চিত করতে পরীক্ষা করুন + + ```cmd + kubectl version --client + ``` + {{< note >}} + উপরের কমান্ডটি একটি সতর্ক বার্তা তৈরি করবে: + ``` + WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. + ``` + আপনি এই সতর্কতা উপেক্ষা করতে পারেন. আপনি শুধুমাত্র `kubectl` এর যে সংস্করণটি ইনস্টল করেছেন তা পরীক্ষা করছেন। + + {{< /note >}} + + অথবা সংস্করণের বিস্তারিত দেখার জন্য এটি ব্যবহার করুন: + + ```cmd + kubectl version --client --output=yaml + ``` + +1. 
প্লাগইন ইনস্টল করার পরে, ইনস্টলেশন ফাইলগুলি পরিষ্কার করুন: + + ```powershell + del kubectl.exe kubectl.exe.sha256 + ``` + +{{< note >}} +[উইন্ডোজের জন্য ডকার ডেস্কটপ](https://docs.docker.com/docker-for-windows/#kubernetes) `PATH`-এ `kubectl` এর নিজস্ব সংস্করণ যোগ করে। +আপনি যদি আগে ডকার ডেস্কটপ ইনস্টল করে থাকেন, তাহলে আপনাকে ডকার ডেস্কটপ ইনস্টলার দ্বারা যোগ করা একটির আগে আপনার `PATH` এন্ট্রি স্থাপন করতে হবে অথবা ডকার ডেস্কটপের `kubectl` সরিয়ে ফেলতে হবে। +{{< /note >}} + +### Chocolatey, Scoop, বা winget ব্যবহার করে Windows এ ইনস্টল করুন {#install-nonstandard-package-tools} + +1. উইন্ডোজে kubectl ইনস্টল করতে আপনি উভয় [Chocolatey](https://chocolatey.org) প্যাকেজ ম্যানেজার, [Scoop](https://scoop.sh) কমান্ড-লাইন ইনস্টলার, অথবা [winget](https://learn.microsoft.com/en-us/windows/package-manager/winget/) প্যাকেজ ম্যানেজার ব্যবহার করতে পারেন। + + {{< tabs name="kubectl_win_install" >}} + {{% tab name="choco" %}} + ```powershell + choco install kubernetes-cli + ``` + {{% /tab %}} + {{% tab name="scoop" %}} + ```powershell + scoop install kubectl + ``` + {{% /tab %}} + {{% tab name="winget" %}} + ```powershell + winget install -e --id Kubernetes.kubectl + ``` + {{% /tab %}} + {{< /tabs >}} + +1. আপনার ইনস্টল করা সংস্করণ আপ-টু-ডেট কিনা তা নিশ্চিত করতে পরীক্ষা করুন: + + ```powershell + kubectl version --client + ``` + +1. আপনার হোম ডিরেক্টরিতে নেভিগেট করুন: + + ```powershell + # If you're using cmd.exe, run: cd %USERPROFILE% + cd ~ + ``` + +1. `.kube` ডিরেক্টরি তৈরি করুন: + + ```powershell + mkdir .kube + ``` + +1. আপনার তৈরি করা `.kube` ডিরেক্টরিতে ঢুকে পড়ুন: + + ```powershell + cd .kube + ``` + +1. একটি দূরবর্তী Kubernetes ক্লাস্টার ব্যবহার করতে kubectl কনফিগার করুরু + + ```powershell + New-Item config -type file + ``` + +{{< note >}} +আপনার পছন্দের টেক্সট এডিটর, যেমন নোটপ্যাড দিয়ে কনফিগার ফাইলটি সম্পাদনা করুন। +{{< /note >}} + +## kubectl কনফিগারেশন যাচাই করুন + +{{< include "included/verify-kubectl.md" >}} + +## অপশনাল kubectl কনফিগারেশন এবং প্লাগইন + +### শেল ওটোকমপ্লিট চালু করুন + +kubectl Bash, Zsh, Fish এবং PowerShell-এর জন্য ওটোকম্পিট সমর্থন প্রদান করে, যা আপনাকে অনেক টাইপিং করা থেকে রক্ষা করতে পারে। + +পাওয়ারশেলের জন্য ওটোকম্পিট সেট আপ করার পদ্ধতিগুলি নীচে দেওয়া হল। + +{{< include "included/optional-kubectl-configs-pwsh.md" >}} + +### `kubectl convert` প্লাগইন ইনস্টল করুন + +{{< include "included/kubectl-convert-overview.md" >}} + +1. কমান্ড সহ সর্বশেষ রিলিজ ডাউনলোড করুন: + + ```powershell + curl.exe -LO "https://dl.k8s.io/release/v{{% skew currentPatchVersion %}}/bin/windows/amd64/kubectl-convert.exe" + ``` + +1. বাইনারি যাচাই করুন (অপশনাল)। + + `kubectl-convert` চেকসাম ফাইলটি ডাউনলোড কর্সনা + + ```powershell + curl.exe -LO "https://dl.k8s.io/v{{% skew currentPatchVersion %}}/bin/windows/amd64/kubectl-convert.exe.sha256" + ``` + + চেকসাম ফাইলের বিপরীতে `kubectl-convert` বাইনারি যাচাই করুন: + + - ডাউনলোড করা চেকসাম ফাইলের সাথে ম্যানুয়ালি `CertUtil` এর আউটপুট তুলনা করতে কমান্ড প্রম্পট ব্যবহার করে: + + ```cmd + CertUtil -hashfile kubectl-convert.exe SHA256 + type kubectl-convert.exe.sha256 + ``` + + - একটি `True` বা `False` ফলাফল পেতে `-eq` অপারেটর ব্যবহার করে যাচাইকরণ স্বয়ংক্রিয় করতে PowerShell ব্যবহার করে: + + ```powershell + $($(CertUtil -hashfile .\kubectl-convert.exe SHA256)[1] -replace " ", "") -eq $(type .\kubectl-convert.exe.sha256) + ``` + +1. আপনার `PATH` এনভায়রনমেন্ট ভেরিয়েবলের সাথে `kubectl-convert` বাইনারি ফোল্ডারটি অ্যাপেন্ড বা প্রিপেন্ড করুন। + +1. 
প্লাগইন সফলভাবে ইনস্টল করা হয়েছে যাচাই করুন। + + ```shell + kubectl convert --help + ``` + + আপনি যদি একটি ত্রুটি দেখতে না পান, এর মানে হল প্লাগইনটি সফলভাবে ইনস্টল করা হয়েছে। + +1. প্লাগইন ইনস্টল করার পরে, ইনস্টলেশন ফাইলগুলি পরিষ্কার করুন: + + ```powershell + del kubectl-convert.exe kubectl-convert.exe.sha256 + ``` + +## {{% heading "whatsnext" %}} + +{{< include "included/kubectl-whats-next.md" >}} diff --git a/content/bn/docs/tutorials/_index.md b/content/bn/docs/tutorials/_index.md new file mode 100644 index 0000000000000..4ff19ffd7a3dd --- /dev/null +++ b/content/bn/docs/tutorials/_index.md @@ -0,0 +1,58 @@ +--- +title: টিউটোরিয়াল +main_menu: true +no_list: true +weight: 60 +content_type: ধারণা +--- + + + +কুবারনেটিস ডকুমেন্টেশনের এই বিভাগে টিউটোরিয়াল রয়েছে। +একটি টিউটোরিয়াল দেখায় কিভাবে একটি লক্ষ্য অর্জন করতে হয় যা একটি একক থেকে বড় +[টাস্ক](/docs/tasks/)। সাধারণত একটি টিউটোরিয়ালের বেশ কয়েকটি বিভাগ থাকে +যার প্রতিটিরই ধাপের ক্রম রয়েছে। +প্রতিটি টিউটোরিয়ালের মাধ্যমে হাঁটার আগে, আপনি [প্রমিত শব্দকোষ](/docs/reference/glossary/) +পৃষ্ঠা বুকমার্ক করতে চাইতে পারেন পরবর্তী রেফারেন্সের জন্য । + + + +## বেসিক + +* [কুবারনেটিস বেসিক](/docs/tutorials/kubernetes-basics/) হলো একটি গভীর ইন্টারেক্টিভ টিউটোরিয়াল যা আপনাকে কুবারনেটিস সিস্টেম বুঝতে এবং কিছু মৌলিক কুবারনেটিস বৈশিষ্ট্যগুলো ব্যবহার করে দেখতে সাহায্য করে। +* [কুবারনেটিসের ভূমিকা (edX)](https://www.edx.org/course/introduction-kubernetes-linuxfoundationx-lfs158x#) +* [হ্যালো মিনিকুব](/docs/tutorials/hello-minikube/) + +## কনফিগারেশন + +* [উদাহরণ: একটি জাভা মাইক্রোসার্ভিস কনফিগার কর](/docs/tutorials/configuration/configure-java-microservice/) +* [কনফিগার ম্যাপ ব্যবহার করে রেডিস কনফিগার কর](/docs/tutorials/configuration/configure-redis-using-configmap/) + +## স্টেটলেস অ্যাপ্লিকেশন + +* [একটি ক্লাস্টারে একটি অ্যাপ্লিকেশন অ্যাক্সেস করার জন্য একটি বহিরাগত আইপি ঠিকানা প্রকাশ করয ](/docs/tutorials/stateless-application/expose-external-ip-address/) +* [উদাহরণ: Redis এর সাথে PHP গেস্টবুক অ্যাপ্লিকেশন স্থাপন কর](/docs/tutorials/stateless-application/guestbook/) + +## স্টেটফুল অ্যাপ্লিকেশন + +* [স্টেটফুল সেটের বেসিক](/docs/tutorials/stateful-application/basic-stateful-set/) +* [উদাহরণ: ওয়ার্ডপ্রেস এবং স্থায়ী ভলিউম সহ MySQL](/docs/tutorials/stateful-application/mysql-wordpress-persistent-volume/) +* [উদাহরণ: স্টেটফুল সেটের সাথে ক্যাসান্দ্রা স্থাপন কর](/docs/tutorials/stateful-application/cassandra/) +* [জুকিপার চালাই, একটি সিপি ডিস্ট্রিবিউটেড সিস্টেম](/docs/tutorials/stateful-application/zookeeper/) + +## সেবা + +* [পরিষেবাগুলোর সাথে অ্যাপ্লিকেশনগুলো সংযুক্ত কর](/docs/tutorials/services/connect-applications-service/) +* [উৎস আইপি ব্যবহার কর](/docs/tutorials/services/source-ip/) + +## নিরাপত্তা + +* [ক্লাস্টার স্তরে পড নিরাপত্তা মান প্রয়োগ করুন](/docs/tutorials/security/cluster-level-pss/) +* [নেমস্পেস স্তরে পড নিরাপত্তা মান প্রয়োগ করুন](/docs/tutorials/security/ns-level-pss/) +* [AppArmor](/docs/tutorials/security/apparmor/) +* [Seccomp](/docs/tutorials/security/seccomp/) +## {{% heading "whatsnext" %}} + +আপনি যদি একটি টিউটোরিয়াল লিখতে চান, +[সামগ্রী পৃষ্ঠার ধরন](/docs/contribute/style/page-content-types/) দেখুন +টিউটোরিয়াল পৃষ্ঠার ধরন সম্পর্কে তথ্যের জন্য। diff --git a/content/bn/docs/tutorials/configuration/_index.md b/content/bn/docs/tutorials/configuration/_index.md new file mode 100644 index 0000000000000..a688a7251e414 --- /dev/null +++ b/content/bn/docs/tutorials/configuration/_index.md @@ -0,0 +1,4 @@ +--- +title: "কনফিগারেশন" +weight: 30 +--- diff --git 
a/content/bn/docs/tutorials/configuration/configure-java-microservice/_index.md b/content/bn/docs/tutorials/configuration/configure-java-microservice/_index.md new file mode 100644 index 0000000000000..6614577d2d593 --- /dev/null +++ b/content/bn/docs/tutorials/configuration/configure-java-microservice/_index.md @@ -0,0 +1,4 @@ +--- +title: "উদাহরণ: একটি জাভা মাইক্রোসার্ভিস কনফিগার করা" +weight: 10 +--- diff --git a/content/bn/docs/tutorials/hello-minikube.md b/content/bn/docs/tutorials/hello-minikube.md new file mode 100644 index 0000000000000..af7f4137a6e82 --- /dev/null +++ b/content/bn/docs/tutorials/hello-minikube.md @@ -0,0 +1,279 @@ +--- +title: হ্যালো মিনিকুব (Hello Minikube) +content_type: tutorial +weight: 5 +card: + name: tutorials + weight: 10 +--- + + + +এই টিউটোরিয়ালটি আপনাকে দেখায় কিভাবে মিনিকুব ব্যবহার করে কুবারনেটিস এ একটি নমুনা অ্যাপ চালাতে হয়। +টিউটোরিয়ালটি একটি কন্টেইনার চিত্র প্রদান করে যা NGINX ব্যবহার করে সমস্ত অনুরোধগুলোকে প্রতিধ্বনিত করে ৷ + +## {{% heading "objectives" %}} + +* মিনিকিউবে একটি সরল অ্যাপ্লিকেশন স্থাপন করুন। +* অ্যাপ্লিকেশনটিকে চালান। +* অ্যাপ্লিকেশন লগ (log) দেখুন। + +## {{% heading "prerequisites" %}} + +এই টিউটোরিয়ালটি একটি কন্টেইনার ইমেজ প্রদান করে যা NGINX ব্যবহার করে সমস্ত অনুরোধে সাড়া দেয়। + + + +## একটি মিনিকিউব ক্লাস্টার তৈরি করা। + +1. ক্লিক করুন **Launch Terminal** এ + + {{< kat-button >}} + +{{< note >}} +মিনিকুব স্থানীয়ভাবে ইনস্টল করা থাকলে, `minikube start` চালান। `minikube dashboard` কমান্ড কার্যকর করার আগে, একটি নতুন টার্মিনাল খুলুন, সেই টার্মিনালে `minikube dashboard` কমান্ডটি চালান এবং মূল টার্মিনালে ফিরে যান। +{{< /note >}} + +2. একটি ব্রাউজারে কুবারনেটিস ড্যাশবোর্ড খুলুন: + + ```shell + minikube dashboard + ``` + +3. Katacoda পরিবেশ (Katacoda Environment): টার্মিনাল প্যানেলের শীর্ষে প্লাস ক্লিক করুন, তারপরে ক্লিক করুন **Select port to view on Host 1** । + +4. 
Katacoda পরিবেশ (Katacoda Environment): `30000` লিখুন এবং **Display Port** এ ক্লিক করুন। + +{{< note >}} +আপনি যখন `minikube dashboard` কমান্ড ইস্যু করেন, তখন ড্যাশবোর্ড অ্যাড-অন এবং প্রক্সি সক্রিয় হয় এবং প্রক্সিতে সংযোগ করার জন্য একটি ডিফল্ট ওয়েব ব্রাউজার উইন্ডো খোলে। +আপনি ড্যাশবোর্ড থেকে কুবারনেটিস সংস্থান তৈরি করতে পারেন যেমন ডিপ্লয়মেন্ট বা সার্ভিস। + +আপনি যদি `root` এনভায়রনমেন্টে কমান্ড নির্বাহ করছেন, তাহলে [URL ব্যবহার করে ড্যাশবোর্ড অ্যাক্সেস করা] (#open-dashboard-with-url) পড়ুন। + +ডিফল্টরূপে, ড্যাশবোর্ড শুধুমাত্র কুবারনেটিস অভ্যন্তরীণ ভার্চুয়াল নেটওয়ার্ক থেকে অ্যাক্সেসযোগ্য। + +`dashboard` কমান্ড কুবারনেটিস ভার্চুয়াল নেটওয়ার্কের বাইরে থেকে ড্যাশবোর্ড অ্যাক্সেস করার জন্য একটি অস্থায়ী প্রক্সি তৈরি করে। + +আপনি `Ctrl+C` টিপে প্রক্সি থেকে প্রস্থান করতে পারেন। +কমান্ডটি শেষ হওয়ার পরে, ড্যাশবোর্ডটি কুবারনেটিস ক্লাস্টারে চলতে থাকে। +আপনি আবার `dashboard` কমান্ড চালিয়ে ড্যাশবোর্ড অ্যাক্সেস করার জন্য আরেকটি প্রক্সি তৈরি করতে পারেন। +{{< /note >}} + +## URL ব্যবহার করে ড্যাশবোর্ড খুলুন + +আপনি যদি ওয়েব ব্রাউজারটি স্বয়ংক্রিয়ভাবে খুলতে না চান তবে আপনি `--url` ফ্ল্যাগ দিয়ে নিম্নলিখিত কমান্ডটি কার্যকর করে ড্যাশবোর্ড অ্যাক্সেস URL মুদ্রণ করতে পারেন : + +```shell +minikube dashboard --url +``` + +## ডিপ্লয়মেন্ট (Deployment) তৈরি করুন + +কুবারনেটিস [পডস](/bn/docs/concepts/workloads/pods/) নেটওয়ার্কিং উদ্দেশ্যে এক বা একাধিক পাত্রের একটি গ্রুপ একসাথে গোষ্ঠীবদ্ধ করে। +এই টিউটোরিয়ালের পডটিতে (pod) শুধুমাত্র একটি পাত্র রয়েছে। কুবারনেটিস +[Deployment](/bn/docs/concepts/workloads/controllers/deployment/) হলো পডের +একটি স্বাস্থ্য পরীক্ষা করে এবং পডের ধারকটি বন্ধ হয়ে গেলে পুনরায় চালু করে। পড তৈরি এবং স্কেলিং পরিচালনা করার উপায় হিসাবে স্থাপনের সুপারিশ করা হয়। + +1. পড পরিচালনা ও ডিপ্লয়মেন্ট তৈরি করতে `kubectl create` কমান্ডটি চালান। এই পডগুলি প্রদত্ত Docker ইমেজ এর উপর ভিত্তি করে কন্টেইনার চালায়। + + ```shell + kubectl create deployment hello-node --image=k8s.gcr.io/echoserver:1.4 + ``` + +2. ডিপ্লয়মেন্টটি দেখুন: + + ```shell + kubectl get deployments + ``` + + অনুরূপ আউটপুট দেখবেন: + + ``` + NAME READY UP-TO-DATE AVAILABLE AGE + hello-node 1/1 1 1 1m + ``` + +3. পডটি দেখুন: + + ```shell + kubectl get pods + ``` + + অনুরূপ আউটপুট দেখবেন: + + ``` + NAME READY STATUS RESTARTS AGE + hello-node-5f76cf6ccf-br9b5 1/1 Running 0 1m + ``` + +4. ক্লাস্টার ইভেন্ট দেখুন: + + ```shell + kubectl get events + ``` + +5. `kubectl` এর কনফিগারেশন দেখুন: + + ```shell + kubectl config view + ``` + +{{< note >}} +`kubectl` কমান্ড সম্পর্কে আরও তথ্যের জন্য, দেখুন [kubectl overview](/docs/reference/kubectl/). +{{< /note >}} + +## সার্ভিস (Service) তৈরি করুন + +সাধারণত, পড শুধুমাত্র কুবারনেটিস ক্লাস্টারের অভ্যন্তরীণ আইপি (Internal IP) ঠিকানা দ্বারা অ্যাক্সেসযোগ্য। কুবারনেটিস ভার্চুয়াল নেটওয়ার্কের বাইরে থেকে 'hello-node' কন্টেইনারকে অ্যাক্সেসযোগ্য করতে, আপনাকে কুবারনেটিস সার্ভিস হিসাবে পডটিকে প্রকাশ করতে হবে। + +1. সর্বজনীন ইন্টারনেটে (Public Internet) পডটি প্রকাশ করুন `kubectl expose` কমান্ড ব্যবহার করে: + + ```shell + kubectl expose deployment hello-node --type=LoadBalancer --port=8080 + ``` + + `--type=LoadBalancer` ফ্ল্যগটি নির্দেশ করে যে আপনি ক্লাস্টারের বাইরে আপনার পরিষেবা প্রকাশ করতে চান। + `k8s.gcr.io/echoserver` কনটেইনারের ভিতরের অ্যাপ্লিকেশন কোড শুধুমাত্র TCP port 8080 থেকেই শোনা হয়। আপনি যদি একটি ভিন্ন পোর্ট প্রকাশ করতে `kubectl expose` ব্যবহার করেন, তাহলে ক্লায়েন্টরা সেই অন্য পোর্টের সাথে সংযোগ করতে পারবে না। + +2. 
তৈরি করা সার্ভিসটি দেখুন: + + ```shell + kubectl get services + ``` + + অনুরূপ আউটপুট দেখবেন: + + ``` + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + hello-node LoadBalancer 10.108.144.78 8080:30369/TCP 21s + kubernetes ClusterIP 10.96.0.1 443/TCP 23m + ``` + + ক্লাউড প্রদানকারীরা (Cloud Providers) যারা লোড ব্যালেন্সার (Load Balancer) সমর্থন করে, তাতে একটি External IP Address ব্যবস্থা করা হয়, সার্ভিসটি অ্যাক্সেস করার জন্য। + মিনিকিউব-এ, `LoadBalancer` প্রকারটি `minikube service` কমান্ডের মাধ্যমে পরিষেবাটিকে অ্যাক্সেসযোগ্য করে তোলে। + +3. নিম্নলিখিত কমান্ড চালান: + + ```shell + minikube service hello-node + ``` + +4. Katacoda পরিবেশ (Katacoda Environment): টার্মিনাল প্যানেলের শীর্ষে প্লাস ক্লিক করুন, তারপরে ক্লিক করুন **Select port to view on Host 1** + +5. শুধুমাত্র Katacoda পরিবেশ (Katacoda Environment): সার্ভিস আউটপুটে `8080` এর বিপরীতে প্রদর্শিত ৫-সংখ্যার পোর্ট নম্বরটি নোট করুন। এই পোর্ট নম্বরটি এলোমেলোভাবে তৈরি করা হয়েছে এবং এটি আপনার জন্য আলাদা হতে পারে। পোর্ট নম্বর টেক্সট বক্সে আপনার নম্বর টাইপ করুন, তারপর ডিসপ্ল পোর্টে (default port) ক্লিক করুন। আগের উদাহরণটি ব্যবহার করে, আপনি `30369` টাইপ করবেন। + + এটি একটি ব্রাউজার উইন্ডো খোলে যা আপনার অ্যাপটি পরিবেশন করে এবং অ্যাপের প্রতিক্রিয়া দেখায়। + +## অ্যাডন সক্রিয় করুন (Addons) + +মিনিকিউব টুলটিতে অন্তর্নির্মিত অ্যাডনগুলির (Internal addons) একটি সেট রয়েছে যা স্থানীয় কুবারনেটিস পরিবেশে এনেবেল (enable), ডিজেবল (disable) এবং ওপেন (open) করা যেতে পারে। + +1. বর্তমানে সমর্থিত অ্যাডনগুলির তালিকা: + + ```shell + minikube addons list + ``` + + অনুরূপ আউটপুট দেখবেন: + + ``` + addon-manager: enabled + dashboard: enabled + default-storageclass: enabled + efk: disabled + freshpod: disabled + gvisor: disabled + helm-tiller: disabled + ingress: disabled + ingress-dns: disabled + logviewer: disabled + metrics-server: disabled + nvidia-driver-installer: disabled + nvidia-gpu-device-plugin: disabled + registry: disabled + registry-creds: disabled + storage-provisioner: enabled + storage-provisioner-gluster: disabled + ``` + +2. একটি অ্যাডন এনেবেল (enable) করুন, উদাহরণস্বরূপ `metrics-server`: + + ```shell + minikube addons enable metrics-server + ``` + + অনুরূপ আউটপুট দেখবেন: + + ``` + The 'metrics-server' addon is enabled + ``` + +3. আপনার তৈরি করা পড এবং সার্ভিস দেখুন: + + ```shell + kubectl get pod,svc -n kube-system + ``` + + অনুরূপ আউটপুট দেখবেন: + + ``` + NAME READY STATUS RESTARTS AGE + pod/coredns-5644d7b6d9-mh9ll 1/1 Running 0 34m + pod/coredns-5644d7b6d9-pqd2t 1/1 Running 0 34m + pod/metrics-server-67fb648c5 1/1 Running 0 26s + pod/etcd-minikube 1/1 Running 0 34m + pod/influxdb-grafana-b29w8 2/2 Running 0 26s + pod/kube-addon-manager-minikube 1/1 Running 0 34m + pod/kube-apiserver-minikube 1/1 Running 0 34m + pod/kube-controller-manager-minikube 1/1 Running 0 34m + pod/kube-proxy-rnlps 1/1 Running 0 34m + pod/kube-scheduler-minikube 1/1 Running 0 34m + pod/storage-provisioner 1/1 Running 0 34m + + NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE + service/metrics-server ClusterIP 10.96.241.45 80/TCP 26s + service/kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 34m + service/monitoring-grafana NodePort 10.99.24.54 80:30002/TCP 26s + service/monitoring-influxdb ClusterIP 10.111.169.94 8083/TCP,8086/TCP 26s + ``` + +4. 
ডিজেবল (disable) করুন `metrics-server`: + + ```shell + minikube addons disable metrics-server + ``` + + অনুরূপ আউটপুট দেখবেন: + + ``` + metrics-server was successfully disabled + ``` + +## পরিষ্কার করুন (Clean up) + +এখন আপনি আপনার ক্লাস্টারে তৈরি রিসোর্সগুলি পরিষ্কার করতে পারেন: + +```shell +kubectl delete service hello-node +kubectl delete deployment hello-node +``` + +ঐচ্ছিকভাবে, মিনিকিউব ভার্চুয়াল মেশিন (Minikube Virtual Machine) বন্ধ করুন: + +```shell +minikube stop +``` + +ঐচ্ছিকভাবে, মিনিকিউব ভার্চুয়াল মেশিন (Minikube Virtual Machine) মুছুন ফেলুন: + +```shell +minikube delete +``` + +## {{% heading "whatsnext" %}} + + +* _[kubectl এর সাথে কুবারনেটিসে আপনার প্রথম অ্যাপ স্থাপন](/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro/)_ করার টিউটোরিয়াল। +* [Deployment objects](/docs/concepts/workloads/controllers/deployment/) এর ব্যাপারে আরো জানুন। +* [Deploying applications](/docs/tasks/run-application/run-stateless-application-deployment/) এর ব্যাপারে আরো জানুন। +* [Service objects](/docs/concepts/services-networking/service/) এর ব্যাপারে আরো জানুন। diff --git a/content/bn/docs/tutorials/kubernetes-basics/_index.html b/content/bn/docs/tutorials/kubernetes-basics/_index.html new file mode 100644 index 0000000000000..a4c63bb368dd2 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/_index.html @@ -0,0 +1,117 @@ +--- +title: কুবারনেটিসের বেসিক শিখুন +linkTitle: কুবারনেটিসের বেসিক শিখুন +no_list: true +weight: 10 +card: + name: tutorials + weight: 20 + title: টিউটোরিয়াল +--- + + + + + + + + + +
+ +
+ +
+
+

কুবারনেটিস বেসিক

+

এই টিউটোরিয়ালটি কুবারনেটিস ক্লাস্টার অর্কেস্ট্রেশন সিস্টেমের মূল বিষয়গুলির একটি ধারণা প্রদান করে। প্রতিটি মডিউলে কুবারনেটিসের প্রধান বৈশিষ্ট্য এবং ধারণাগুলির কিছু পটভূমি তথ্য রয়েছে এবং একটি ইন্টারেক্টিভ অনলাইন টিউটোরিয়াল অন্তর্ভুক্ত রয়েছে। এই ইন্টারেক্টিভ টিউটোরিয়ালগুলি আপনাকে নিজের জন্য একটি সাধারণ ক্লাস্টার এবং এর ধারকযুক্ত অ্যাপ্লিকেশনগুলি পরিচালনা করতে দেয় ৷

+

ইন্টারেক্টিভ টিউটোরিয়াল ব্যবহার করে, আপনি এগুলো শিখতে পারেনঃ

+
    +
  • একটি ক্লাস্টারে একটি কন্টেইনারাইজড অ্যাপ্লিকেশন স্থাপন করুন।
  • +
  • ডিপ্লয়মেন্ট স্কেল করুন।
  • +
  • একটি নতুন সফ্টওয়্যার সংস্করণ দিয়ে কন্টেইনারাইজড অ্যাপ্লিকেশন আপডেট করুন।
  • +
  • কন্টেইনারাইজড অ্যাপ্লিকেশন ডিবাগ করুন।
  • +
+

টিউটোরিয়ালগুলি আপনার ওয়েব ব্রাউজারে একটি ভার্চুয়াল টার্মিনাল চালানোর জন্য Katacoda ব্যবহার করে যা Minikube চালায়, কুবারনেটের একটি ছোট আকারের স্থানীয় ডিপ্লয়মেন্ট যা যেকোনো জায়গায় চলতে পারে। কোন সফ্টওয়্যার ইনস্টল বা কিছু কনফিগার করার কোন প্রয়োজন নেই; প্রতিটি ইন্টারেক্টিভ টিউটোরিয়াল সরাসরি আপনার ওয়েব ব্রাউজার থেকে চলে।

+
+
+ +
+ +
+
+

কুবারনেটিস আপনার জন্য কী করতে পারে?

+

আধুনিক ওয়েব পরিষেবাগুলির সাথে, ব্যবহারকারীরা আশা করে যে অ্যাপ্লিকেশনগুলি ২৪/৭ উপলব্ধ থাকবে এবং বিকাশকারীরা সেই অ্যাপ্লিকেশনগুলির নতুন সংস্করণগুলি দিনে কয়েকবার স্থাপন করার আশা করে৷ কনটেইনারাইজেশন প্যাকেজ সফ্টওয়্যারকে এই লক্ষ্যগুলি পূরণ করতে সহায়তা করে, অ্যাপ্লিকেশনগুলিকে মুক্তি এবং ডাউনটাইম ছাড়াই আপডেট করতে সক্ষম করে। কুবারনেটিস আপনাকে নিশ্চিত করতে সাহায্য করে যে সেই কন্টেইনারাইজড অ্যাপ্লিকেশানগুলি আপনি যেখানে এবং যখন চান চালান, এবং তাদের কাজ করার জন্য প্রয়োজনীয় সংস্থান এবং সরঞ্জামগুলি খুঁজে পেতে সহায়তা করে৷ কুবারনেটিস হল একটি উৎপাদন-প্রস্তুত, ওপেন সোর্স প্ল্যাটফর্ম যা কনটেইনার অর্কেস্ট্রেশনে গুগল-এর সঞ্চিত অভিজ্ঞতার সাহায্যে ডিজাইন করা হয়েছে, যা সম্প্রদায়ের সেরা জাত ধারণাগুলির সাথে মিলিত হয়েছে

+
+
+ +
+ + + +
+ +
+ + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/create-cluster/_index.md b/content/bn/docs/tutorials/kubernetes-basics/create-cluster/_index.md new file mode 100644 index 0000000000000..f064c9eea8659 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/create-cluster/_index.md @@ -0,0 +1,6 @@ +--- +title: ক্লাস্টার তৈরি করুন +weight: 10 +--- + +কুবারনেটিস সম্পর্কে জানুন {{< glossary_tooltip text="ক্লাস্টার" term_id="cluster" length="all" >}} এবং মিনিকিউব দিয়ে সহজ ক্লাস্টার তৈরি করুন। \ No newline at end of file diff --git a/content/bn/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html b/content/bn/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html new file mode 100644 index 0000000000000..14f654772705d --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/create-cluster/cluster-interactive.html @@ -0,0 +1,33 @@ +--- +title: ইন্টারেক্টিভ টিউটোরিয়াল - একটি ক্লাস্টার তৈরি করা +weight: 20 +--- + + + + + + + +{{< katacoda-tutorial >}} + + + + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html b/content/bn/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html new file mode 100644 index 0000000000000..4ec583db41b6a --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/create-cluster/cluster-intro.html @@ -0,0 +1,109 @@ +--- +title: একটি ক্লাস্টার তৈরি করতে Minikube ব্যবহার করো +weight: 10 +--- + + + + + + + + + +
+ +
+ +
+ +
+

Objectives

+
    +
  • Kubernetes ক্লাস্টার কি তা জানুন.
  • +
  • Minikube কি তা জানুন.
  • +
  • একটি অনলাইন টার্মিনাল ব্যবহার করে একটি Kubernetes ক্লাস্টার শুরু করুন।
  • +
+
+ +
+

Kubernetes Clusters

+

+ কুবারনেটিস কম্পিউটারের একটি অত্যন্ত উপলব্ধ ক্লাস্টার সমন্বয় করে যা একটি একক হিসাবে কাজ করার জন্য সংযুক্ত। Kubernetes-এর বিমূর্ততা আপনাকে বিশেষভাবে পৃথক মেশিনে না বেঁধে একটি ক্লাস্টারে কন্টেইনারাইজড অ্যাপ্লিকেশন স্থাপন করতে দেয়। ডিপ্লয়মেন্টের এই নতুন মডেলটি ব্যবহার করার জন্য, অ্যাপ্লিকেশনগুলিকে এমনভাবে প্যাকেজ করা দরকার যাতে সেগুলিকে পৃথক হোস্ট থেকে আলাদা করা যায়: তাদের কন্টেইনারাইজ করা দরকার। কন্টেইনারাইজড অ্যাপ্লিকেশনগুলি অতীতের ডিপ্লয়মেন্টের মডেলগুলির তুলনায় আরও নমনীয় এবং উপলব্ধ, যেখানে অ্যাপ্লিকেশনগুলি সরাসরি হোস্টের সাথে গভীরভাবে একত্রিত প্যাকেজ হিসাবে নির্দিষ্ট মেশিনে ইনস্টল করা হয়েছিল। Kubernetes একটি ক্লাস্টার জুড়ে অ্যাপ্লিকেশন কন্টেইনারগুলির বিতরণ এবং সময়সূচীকে আরও দক্ষ উপায়ে স্বয়ংক্রিয় করে। Kubernetes একটি ওপেন সোর্স প্ল্যাটফর্ম এবং এটি উৎপাদনের জন্য প্রস্তুত। +

+

>একটি কুবারনেটিস ক্লাস্টার দুটি ধরণের সংস্থান নিয়ে গঠিত: + +

    +
  • The নিয়ন্ত্রণ প্যানেল (Control Plane) ক্লাস্টার (cluster) সমন্বয় করে
  • +
  • নোড (Nodes) হল কর্মীরা যারা অ্যাপ্লিকেশনটি (applications) চালায়
  • +
+

+
+ +
+
+

সারসংক্ষেপ:

+
    +
  • কুবারনেটিস ক্লাস্টার (Kubernetes cluster)
  • +
  • মিনিকুব (Minikube)
  • +
+
+
+

+ কুবারনেটিস হল একটি প্রোডাকশন-গ্রেড, ওপেন-সোর্স প্ল্যাটফর্ম যা কম্পিউটার ক্লাস্টারের মধ্যে এবং জুড়ে অ্যাপ্লিকেশন কন্টেইনারগুলির প্লেসমেন্ট (শিডিউলিং) এবং এক্সিকিউশনকে অর্কেস্ট্রেট করে। + +

+
+
+
+
+ +
+
+

ক্লাস্টার ডায়াগ্রাম (Cluster Diagram)

+
+
+ +
+
+

+
+
+
+ +
+
+

কন্ট্রোল প্লেন ক্লাস্টার পরিচালনার জন্য দায়ী.কন্ট্রোল প্লেন আপনার ক্লাস্টারে সমস্ত ক্রিয়াকলাপ সমন্বয় করে, যেমন অ্যাপ্লিকেশনের সময় নির্ধারণ, অ্যাপ্লিকেশনগুলির পছন্দসই অবস্থা বজায় রাখা, অ্যাপ্লিকেশনগুলিকে স্কেলিং করা এবং নতুন আপডেটগুলি রোল আউট করা.

+

একটি নোড হল একটি VM বা একটি শারীরিক কম্পিউটার যা একটি কুবারনেটিস ক্লাস্টারে কর্মী মেশিন হিসাবে কাজ করে। প্রতিটি নোডের একটি কুবেলেট থাকে, যা নোড পরিচালনা এবং কুবারনেটিস কন্ট্রোল প্লেনের সাথে যোগাযোগের জন্য একটি এজেন্ট। নোডের কনটেইনার ক্রিয়াকলাপ পরিচালনা করার জন্য সরঞ্জাম থাকা উচিত, যেমন কন্টেইনার বা ডকার। একটি Kubernetes ক্লাস্টার যা উৎপাদন ট্র্যাফিক পরিচালনা করে তার ন্যূনতম তিনটি নোড থাকা উচিত কারণ যদি একটি নোড নিচে চলে যায়, তাহলে একটি etcd সদস্য এবং একটি কন্ট্রোল প্লেন ইনস্ট্যান্স উভয়ই হারিয়ে যায় এবং রিডানডেন্সি আপস করা হয়। আপনি আরো কন্ট্রোল প্লেন নোড যোগ করে এই ঝুঁকি কমাতে পারেন।

+ +
+
+
+

কন্ট্রোল প্লেনগুলি (Control Planes) চলমান অ্যাপ্লিকেশনগুলি হোস্ট করতে ব্যবহৃত ক্লাস্টার এবং নোডগুলি পরিচালনা করে।

+
+
+
+ +
+
+

আপনি যখন কুবারনেটিসএ অ্যাপ্লিকেশনগুলি স্থাপন করেন, তখন আপনি কন্ট্রোল প্লেনকে অ্যাপ্লিকেশন কন্টেইনারগুলি শুরু করতে বলেন৷ কন্ট্রোল প্লেন ক্লাস্টারের নোডগুলিতে চালানোর জন্য কন্টেইনারগুলি নির্ধারণ করে। নোডগুলি কুবারনেটিস API ব্যবহার করে কন্ট্রোল প্লেনের সাথে যোগাযোগ করে, যা কন্ট্রোল প্লেন প্রকাশ করে৷ শেষ ব্যবহারকারীরাও ক্লাস্টারের সাথে ইন্টারঅ্যাক্ট করতে সরাসরি কুবারনেটিস API ব্যবহার করতে পারেন।

+ +

একটি কুবারনেটিস ক্লাস্টার শারীরিক বা ভার্চুয়াল মেশিনে স্থাপন করা যেতে পারে। কুবারনেটিসের বিকাশ শুরু করতে সাথে, আপনি মিনিকুব ব্যবহার করতে পারেন। মিনিকুব হল একটি হালকা ওজনের কুবারনেটিস বাস্তবায়ন যা আপনার স্থানীয় মেশিনে একটি VM তৈরি করে এবং শুধুমাত্র একটি নোড ধারণকারী একটি সাধারণ ক্লাস্টার স্থাপন করে। মিনিকুব লিনাক্স (Minikube Linux), macOS এবং Windows সিস্টেমের জন্য উপলব্ধ। মিনিকুব CLI আপনার ক্লাস্টারের সাথে কাজ করার জন্য প্রাথমিক বুটস্ট্র্যাপিং ক্রিয়াকলাপগুলি প্রদান করে, যার মধ্যে শুরু, থামানো, স্থিতি এবং মুছে ফেলা হয়। এই টিউটোরিয়ালের জন্য, তবে, আপনি মিনিকুবের আগে থেকে ইনস্টল করা একটি প্রদত্ত অনলাইন টার্মিনাল ব্যবহার করবেন৷

+

এখন যেহেতু আপনি জানেন কুবারনেটিস কী, আসুন অনলাইন টিউটোরিয়ালটিতে যাই এবং আমাদের প্রথম ক্লাস্টার শুরু করি!

+ +
+
+
+ + + +
+ +
+ + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/deploy-app/_index.md b/content/bn/docs/tutorials/kubernetes-basics/deploy-app/_index.md new file mode 100644 index 0000000000000..104441aec9121 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/deploy-app/_index.md @@ -0,0 +1,4 @@ +--- +title: একটি অ্যাপ স্থাপন করা +weight: 20 +--- diff --git a/content/bn/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html b/content/bn/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html new file mode 100644 index 0000000000000..267f489b5e951 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/deploy-app/deploy-interactive.html @@ -0,0 +1,45 @@ +--- +title: ইন্টারেক্টিভ টিউটোরিয়াল - একটি অ্যাপ ডিপ্লোয় করা +weight: 20 +--- + + + + + + + +{{< katacoda-tutorial >}} + +
+ +
+ +
+
+

+ একটি পড হল একটি কুবারনেটিসে অ্যাপ্লিকেশনের মৌলিক এক্সিকিউশন ইউনিট। প্রতিটি পড আপনার ক্লাস্টারে চলমান কাজের চাপের একটি অংশ উপস্থাপন করে। পড সম্পর্কে আরও জানুন. +

+
+
+ +
+
+
+
+ +
+ + +
+ +
+ + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/bn/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html new file mode 100644 index 0000000000000..dead5cc7893b1 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -0,0 +1,110 @@ +--- +title: kubectl ব্যবহার করে একটি ডিপ্লয়মেন্ট তৈরি করা + +weight: 10 +--- + + + + + + + + +
+ +
+ +
+ +
+

উদ্দেশ্য

+
    +
  • অ্যাপ্লিকেশন ডিপ্লয়মেন্ট সম্পর্কে জানুন ।
  • +
  • kubectl দিয়ে কুবারনেটিসে আপনার প্রথম অ্যাপ স্থাপন করুন।
  • +
+
+ +
+

কুবারনেটিস ডিপ্লয়মেন্ট

+

একবার আপনার চলমান কুবারনেটস ক্লাস্টার হয়ে গেলে, আপনি এটির উপরে আপনার কন্টেইনারাইজড অ্যাপ্লিকেশনগুলি ডিপ্লোয় করতে পারেন। + এটি করার জন্য, আপনি একটি কুবারনেটিসে ডিপ্লয়মেন্ট কনফিগারেশন তৈরি করুন। ডিপ্লয়মেন্ট কুবারনেটসকে নির্দেশ দেয় কিভাবে আপনার আবেদনের দৃষ্টান্ত তৈরি এবং আপডেট করতে হয়। একবার আপনি একটি ডিপ্লোয় তৈরি করে ফেললে, কুবারনেটস কন্ট্রোল প্লেন ক্লাস্টারের পৃথক নোডগুলিতে চালানোর জন্য সেই ডিপ্লয়মেন্ট অন্তর্ভুক্ত অ্যাপ্লিকেশন উদাহরণগুলি নির্ধারণ করে।

+ +

একবার অ্যাপ্লিকেশন দৃষ্টান্ত তৈরি হয়ে গেলে, একটি কুবারনেটস ডিপ্লয়মেন্ট কন্ট্রোলার ক্রমাগত সেই দৃষ্টান্তগুলি পর্যবেক্ষণ করে। +যদি কোনো দৃষ্টান্ত (instance) হোস্টিং নোড নিচে চলে যায় বা মুছে ফেলা হয়, তাহলে ডিপ্লয়মেন্ট কন্ট্রোলার ক্লাস্টারের অন্য নোডের দৃষ্টান্তের (instance) সাথে প্রতিস্থাপন করে। এটি মেশিনের ব্যর্থতা বা রক্ষণাবেক্ষণের জন্য একটি স্ব-নিরাময় প্রক্রিয়া সরবরাহ করে।

+ +

একটি প্রাক-অর্কেস্ট্রেশন (pre-orchestration) বিশ্বে, ইনস্টলেশন স্ক্রিপ্টগুলি প্রায়শই অ্যাপ্লিকেশন শুরু করার জন্য ব্যবহার করা হত, কিন্তু তারা মেশিনের ব্যর্থতা থেকে পুনরুদ্ধারের অনুমতি দেয়নি। আপনার অ্যাপ্লিকেশন দৃষ্টান্ত তৈরি করে এবং সেগুলিকে নোড জুড়ে চালিয়ে রেখে, কুবারনেটস ডিপ্লয়মেন্টগুলি অ্যাপ্লিকেশন পরিচালনার জন্য একটি মৌলিকভাবে ভিন্ন পদ্ধতি প্রদান করে।

+ +
+ +
+
+

সারসংক্ষেপ :

+
    +
  • ডিপ্লয়মেন্ট
  • +
  • Kubectl
  • +
+
+
+

+ আপনার আবেদনের দৃষ্টান্ত তৈরি এবং আপডেট করার জন্য একটি ডিপ্লয়মেন্ট দায়ী | +

+
+
+
+
+ +
+
+

কুবারনেটিসে আপনার প্রথম অ্যাপ ডিপ্লোয় করুন

+
+
+ +
+
+

+
+
+
+ +
+
+ +

আপনি কুবারনেটিস কমান্ড লাইন ইন্টারফেস,
Kubectl
ব্যবহার করে একটি ডিপ্লোয় তৈরি এবং পরিচালনা করতে পারেন। Kubectl ক্লাস্টারের সাথে ইন্টারঅ্যাক্ট করতে কুবারনেটিস এপিআই (API) ব্যবহার করে। এই মডিউলে, আপনি কুবারনেটস ক্লাস্টারে আপনার অ্যাপ্লিকেশনগুলি চালানোর জন্য ডিপ্লয়মেন্ট তৈরি করার জন্য প্রয়োজনীয় সবচেয়ে সাধারণ Kubectl কমান্ডগুলি শিখবেন ।

+ +

আপনি যখন একটি ডিপ্লয়মেন্ট তৈরি করেন, আপনাকে আপনার অ্যাপ্লিকেশনের জন্য কন্টেইনার চিত্র এবং আপনি যে প্রতিলিপিগুলি চালাতে চান তার সংখ্যা নির্দিষ্ট করতে হবে ৷ আপনি পরে আপনার ডিপ্লয়মেন্ট আপডেট করে সেই তথ্য পরিবর্তন করতে পারেন; বুটক্যাম্পের মডিউল 5 এবং 6 আলোচনা করে যে আপনি কীভাবে আপনার ডিপ্লয়মেন্টের স্কেল এবং আপডেট করতে পারেন।

+ + + + +
+
+
+

কুবারনেটিসে মোতায়েন করার জন্য অ্যাপ্লিকেশনগুলিকে সমর্থিত কন্টেইনার পদ্ধতি প্যাকেজ করা দরকার

+
+
+
+ +
+
+

+ আপনার প্রথম ডিপ্লয়মেন্টর জন্য, আপনি একটি ডকার কন্টেনারে প্যাকেজ করা একটি হ্যালো-নোড অ্যাপ্লিকেশন ব্যবহার করবেন যা সমস্ত অনুরোধগুলিকে প্রতিধ্বনিত করতে NGINX ব্যবহার করে। (যদি আপনি ইতিমধ্যে একটি হ্যালো-নোড অ্যাপ্লিকেশন তৈরি করার এবং একটি কন্টেইনার ব্যবহার করে এটি স্থাপন করার চেষ্টা না করে থাকেন তবে আপনি হ্যালো মিনিকুব টিউটোরিয়াল (Hello Minikube tutorial) থেকে নির্দেশাবলী অনুসরণ করে প্রথমে এটি করতে পারেন)। +

+ +

এখন যেহেতু আপনি জানেন যে ডিপ্লয়মেন্টগুলি কী, আসুন অনলাইন টিউটোরিয়ালটিতে যাই এবং আমাদের প্রথম অ্যাপটি স্থাপন করি!

+
+
+
+ + + +
+ +
+ + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/explore/_index.md b/content/bn/docs/tutorials/kubernetes-basics/explore/_index.md new file mode 100644 index 0000000000000..e8c65e55f6a8b --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/explore/_index.md @@ -0,0 +1,4 @@ +--- +title: আপনার অ্যাপ্লিকেশানকে অন্বেষণ করুন +weight: 30 +--- diff --git a/content/bn/docs/tutorials/kubernetes-basics/explore/explore-interactive.html b/content/bn/docs/tutorials/kubernetes-basics/explore/explore-interactive.html new file mode 100644 index 0000000000000..6b705a5933b56 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/explore/explore-interactive.html @@ -0,0 +1,37 @@ +--- +title: ইন্টারেক্টিভ প্রশিক্ষণ - আপনার অ্যাপ্লিকেশানকে অন্বেষণ করুন +weight: 20 +--- + + + + + + + +{{< katacoda-tutorial >}} + + + + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/explore/explore-intro.html b/content/bn/docs/tutorials/kubernetes-basics/explore/explore-intro.html new file mode 100644 index 0000000000000..cdc115e308d49 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/explore/explore-intro.html @@ -0,0 +1,138 @@ +--- +title: পড এবং নোড দেখা +weight: 10 +--- + + + + + + + +
+ +
+ +
+ +
+

উদ্দেশ্য

+
    +
  • কুবারনেটিসে পডস সম্পর্কে জানুন।
  • +
  • কুবারনেটিসে নোডস সম্পর্কে জানুন।
  • +
  • ডিপ্লোয় করা অ্যাপ্লিকেশনের সমস্যা সমাধান(Troubleshoot) করুন।
  • +
+
+ +
+

কুবারনেটিসে পডস

+

আপনি যখন মডিউল 2 -এ একটি ডিপ্লোয়মেন্ট তৈরি করেন, তখন কুবারনেটস আপনার অ্যাপ্লিকেশন দৃষ্টান্ত হোস্ট করার জন্য একটি পড তৈরি করে। একটি পড হল একটি কুবারনেটস প্রত্যাহরণ যা এক বা একাধিক অ্যাপ্লিকেশন কন্টেইনারগুলির একটি গ্রুপকে প্রতিনিধিত্ব করে (যেমন ডকার), এবং সেই পাত্রগুলির জন্য কিছু শেয়ার করা সংস্থান। এই সম্পদ অন্তর্ভুক্ত:

+
    +
  • ভাগ করা স্টোরেজ, পরিমাণ হিসাবে
  • +
  • নেটওয়ার্কিং, একটি অনন্য ক্লাস্টার আইপি ঠিকানা হিসাবে
  • +
  • প্রতিটি কন্টেইনার কিভাবে চালাতে হয় সে সম্পর্কে তথ্য, যেমন কন্টেইনার ইমেজ সংস্করণ বা নির্দিষ্ট পোর্ট ব্যবহার করতে হবে
  • +
+

একটি পড একটি অ্যাপ্লিকেশন-নির্দিষ্ট "লজিক্যাল হোস্ট" মডেল করে এবং এতে বিভিন্ন অ্যাপ্লিকেশন পাত্র থাকতে পারে যা তুলনামূলকভাবে শক্তভাবে সংযুক্ত থাকে। উদাহরণ স্বরূপ, একটি পড আপনার Node.js অ্যাপের পাশাপাশি একটি ভিন্ন ধারক যা Node.js ওয়েব সার্ভার দ্বারা প্রকাশিত ডেটা ফিড করে উভয় ধারক অন্তর্ভুক্ত করতে পারে। একটি পডের কন্টেইনারগুলি একটি আইপি ঠিকানা এবং পোর্ট স্পেস ভাগ করে, সর্বদা সহ-অবস্থিত এবং সহ-নির্ধারিত, এবং একই নোডে একটি ভাগ করা প্রসঙ্গে চালিত হয়।

+ +

কুবারনেটস প্ল্যাটফর্মের পারমাণবিক একক হল পড | যখন আমরা কুবারনেটিসে একটি ডিপ্লোয়মেন্ট করি, সেই ডিপ্লোয়মেন্টটি তাদের ভিতরে কন্টেইনার সহ পড তৈরি করে (সরাসরি কন্টেইনার তৈরির বিপরীতে)। প্রতিটি পড নোডের সাথে আবদ্ধ থাকে যেখানে এটি নির্ধারিত হয়, এবং সমাপ্তি (পুনঃসূচনা নীতি অনুসারে) বা মুছে ফেলা পর্যন্ত সেখানে থাকে। নোড ব্যর্থতার ক্ষেত্রে, ক্লাস্টারের অন্যান্য উপলব্ধ নোডগুলিতে অভিন্ন পডগুলি নির্ধারিত হয়।

+ +
+
+
+

সারসংক্ষেপ:

+
    +
  • পডস
  • +
  • নোডস
  • +
  • Kubectl প্রধান কমান্ড
  • +
+
+
+

একটি পড হল এক বা একাধিক অ্যাপ্লিকেশন কন্টেইনারের একটি গোষ্ঠী (যেমন ডকার) এবং এতে ভাগ করা স্টোরেজ (ভলিউম), আইপি ঠিকানা এবং সেগুলি চালানোর বিষয়ে তথ্য অন্তর্ভুক্ত থাকে।

+
+
+
+
+ +
+
+

পডসের পরিদর্শন

+
+
+ +
+
+

+
+
+
+ +
+
+

নোডস

+

একটি পড সর্বদা একটি নোডে চলে | একটি নোড হল কুবারনেটিসে একটি কর্মী মেশিন এবং ক্লাস্টারের উপর নির্ভর করে একটি ভার্চুয়াল বা ফিজিক্যাল মেশিন হতে পারে। প্রতিটি নোড নিয়ন্ত্রণ সমতল দ্বারা পরিচালিত হয়। একটি নোডে একাধিক পড থাকতে পারে এবং কুবারনেটস কন্ট্রোল প্লেন স্বয়ংক্রিয়ভাবে ক্লাস্টারের নোড জুড়ে পডগুলির সময়সূচী পরিচালনা করে। কন্ট্রোল প্লেনের স্বয়ংক্রিয় সময়সূচী প্রতিটি নোডে উপলব্ধ সংস্থানগুলিকে বিবেচনা করে।

+ +

প্রতিটি কুবারনেটস নোড কমপক্ষে রান করে:

+
    +
  • কুবেলেট, কুবারনেটিস কন্ট্রোল প্লেন এবং নোডের মধ্যে যোগাযোগের জন্য দায়ী একটি প্রক্রিয়া; এটি একটি মেশিনে চলমান পড এবং পাত্রগুলি পরিচালনা করে।
  • +
  • একটি কন্টেইনার রানটাইম (ডকারের মতো) একটি রেজিস্ট্রি থেকে কন্টেইনার ইমেজ টেনে আনা, কন্টেইনার আনপ্যাক করা এবং অ্যাপ্লিকেশন চালানোর জন্য দায়ী।
  • +
+ +
+
+
+

কন্টেইনারগুলি শুধুমাত্র একটি একক পডে একসাথে নির্ধারিত হওয়া উচিত যদি সেগুলি শক্তভাবে সংযুক্ত থাকে এবং ডিস্কের মতো সংস্থানগুলি ভাগ করতে হয়৷

+
+
+
+ +
+ +
+
+

নোড পরিদর্শন

+
+
+ +
+
+

+
+
+
+ +
+
+

kubectl এর সাথে সমস্যা সমাধান(Troubleshooting)

+

মডিউল 2 - এ, আপনি Kubectl কমান্ড-লাইন ইন্টারফেস ব্যবহার করেছেন। আপনি ডিপ্লোয় করা অ্যাপ্লিকেশন এবং তাদের পরিবেশ সম্পর্কে তথ্য পেতে মডিউল 3 এ এটি ব্যবহার করা চালিয়ে যাবেন। সবচেয়ে সাধারণ ক্রিয়াকলাপগুলি নিম্নলিখিত kubectl কমান্ড দিয়ে করা যেতে পারে:

+
    +
  • kubectl get - সম্পদ(resource) তালিকা
  • +
  • kubectl describe - একটি সম্পদ সম্পর্কে বিস্তারিত তথ্য দেখান
  • +
  • kubectl logs - একটি পডে কন্টেইনার থেকে লগ দেখানো
  • +
  • kubectl exec - একটি পডের একটি কন্টেইনার একটি কমান্ড চালান
  • +
+ +

অ্যাপ্লিকেশনগুলি কখন ডিপ্লোয় করা হয়েছিল, তাদের বর্তমান অবস্থা কী, তারা কোথায় চলছে এবং তাদের কনফিগারেশনগুলি কী তা দেখতে আপনি এই কমান্ডগুলি ব্যবহার করতে পারেন।

+ +

এখন যেহেতু আমরা আমাদের ক্লাস্টার উপাদান এবং কমান্ড লাইন সম্পর্কে আরও জানি, আসুন আমাদের অ্যাপ্লিকেশনটি অন্বেষণ করি।

+ +
+
+
+

একটি নোড হল কুবারনেটিসে একটি কর্মী মেশিন এবং ক্লাস্টারের উপর নির্ভর করে একটি ভার্চুয়াল বা শারীরিক মেশিন হতে পারে। একাধিক পড এক নোডে চলতে পারে।

+
+
+
+
+ + + +
+ +
+ + + \ No newline at end of file diff --git a/content/bn/docs/tutorials/kubernetes-basics/expose/_index.md b/content/bn/docs/tutorials/kubernetes-basics/expose/_index.md new file mode 100644 index 0000000000000..8af22bf1b3ac3 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/expose/_index.md @@ -0,0 +1,4 @@ +--- +title: আপনার অ্যাপটি প্রকাশ্যে প্রকাশ করুন +weight: 4০ +--- diff --git a/content/bn/docs/tutorials/kubernetes-basics/expose/expose-interactive.html b/content/bn/docs/tutorials/kubernetes-basics/expose/expose-interactive.html new file mode 100644 index 0000000000000..d4ce55d805315 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/expose/expose-interactive.html @@ -0,0 +1,35 @@ +--- +title: ইন্টারেক্টিভ টিউটোরিয়াল - আপনার অ্যাপ প্রকাশ করা +weight: 20 +--- + + + + + + + +{{< katacoda-tutorial >}} + +
+ +
+ +
+
+
+
+ + +
+ +
+ + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/expose/expose-intro.html b/content/bn/docs/tutorials/kubernetes-basics/expose/expose-intro.html new file mode 100644 index 0000000000000..910c80f192928 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/expose/expose-intro.html @@ -0,0 +1,100 @@ +--- +title: আপনার অ্যাপ প্রকাশ করতে একটি পরিষেবা ব্যবহার করা +weight: 10 +--- + + + + + + + +
+ +
+ +
+
+

উদ্দেশ্য

+
    +
  • কুবারনের্টিস-এ একটি পরিষেবা সম্পর্কে জানুন
  • +
  • label এবং labelSelector বস্তু একটি পরিষেবার সাথে কিভাবে সম্পর্কিত তা বুঝুন
  • +
  • একটি পরিষেবা ব্যবহার করে কুবারনেটিস ক্লাস্টারের বাইরে একটি অ্যাপ্লিকেশন প্রকাশ করুন
  • +
+
+ +
+

কুবারনেটিস পরিষেবার ওভারভিউ

+ +

কুবারনেটিস পডগুলো মরণশীল। পডগুলোর একটি জীবনচক্র আছে। যখন একজন কর্মী নোড মারা যায়, তখন নোডে চলমান পডগুলিও হারিয়ে যায়। একটি ReplicaSet আপনার অ্যাপ্লিকেশন চালু রাখতে নতুন পড তৈরির মাধ্যমে গতিশীলভাবে ক্লাস্টারটিকে পছন্দসই অবস্থায় ফিরিয়ে আনতে পারে৷ আরেকটি উদাহরণ হিসাবে, ৩টি প্রতিলিপি সহ একটি চিত্র-প্রসেসিং ব্যাকএন্ড বিবেচনা করুন। সেই প্রতিলিপিগুলি বিনিময়যোগ্য; ফ্রন্ট-এন্ড সিস্টেমের ব্যাকএন্ড প্রতিলিপি বা পড হারিয়ে গেলেও আবার তৈরি করা উচিত নয়। তাতে বলা হয়েছে, কুবারনেটিস ক্লাস্টারের প্রতিটি পডের একটি অনন্য আইপি ঠিকানা রয়েছে, এমনকি একই নোডে থাকা পডস, তাই পডগুলির মধ্যে স্বয়ংক্রিয়ভাবে পরিবর্তনগুলি সমন্বয় করার একটি উপায় থাকা দরকার যাতে আপনার অ্যাপ্লিকেশনগুলি কাজ করতে থাকে ৷

+ +

কুবারনেটিস-এ একটি পরিষেবা হল একটি বিমূর্ততা যা পডগুলির একটি যৌক্তিক সেট এবং একটি নীতি যার দ্বারা সেগুলি অ্যাক্সেস করা যায় তা সংজ্ঞায়িত করে৷ পরিষেবাগুলি নির্ভরশীল পডগুলির মধ্যে একটি আলগা সংযোগ সক্ষম করে৷ একটি পরিষেবা YAML (পছন্দের) বা JSON ব্যবহার করে সংজ্ঞায়িত করা হয়, যেমন সমস্ত কুবারনেটিস অবজেক্ট। একটি পরিষেবা দ্বারা লক্ষ্য করা পডের সেট সাধারণত একটি লেবেলনির্বাচক দ্বারা নির্ধারিত হয় (বিশেষে একটি নির্বাচক অন্তর্ভুক্ত না করে কেন আপনি একটি পরিষেবা পেতে পারেন তা নীচে দেখুন)।

+ +

যদিও প্রতিটি পডের একটি অনন্য আইপি ঠিকানা রয়েছে, তবে সেই আইপিগুলি পরিষেবা ছাড়া ক্লাস্টারের বাইরে প্রকাশ করা হয় না। পরিষেবাগুলি আপনার অ্যাপ্লিকেশনগুলিকে ট্রাফিক পেতে অনুমতি দেয়৷ ServiceSpec-এ টাইপ উল্লেখ করে পরিষেবাগুলিকে বিভিন্ন উপায়ে প্রকাশ করা যেতে পারে:

+
    +
  • ClusterIP (ডিফল্ট) - ক্লাস্টারে একটি অভ্যন্তরীণ আইপিতে পরিষেবাটি প্রকাশ করে৷ এই ধরনের পরিষেবা শুধুমাত্র ক্লাস্টারের মধ্যে থেকে পৌঁছানো যায়।
  • +
  • NodePort - NAT ব্যবহার করে ক্লাস্টারে প্রতিটি নির্বাচিত নোডের একই পোর্টে পরিষেবাটি প্রকাশ করে। <NodeIP>:<NodePort> ব্যবহার করে ক্লাস্টারের বাইরে থেকে একটি পরিষেবা অ্যাক্সেসযোগ্য করে তোলে। ClusterIP এর সুপারসেট।
  • +
  • লোডব্যালেন্সার - বর্তমান ক্লাউডে একটি বাহ্যিক লোড ব্যালেন্সার তৈরি করে (যদি সমর্থিত হয়) এবং পরিষেবাতে একটি নির্দিষ্ট, বাহ্যিক আইপি বরাদ্দ করে৷ নোডপোর্টের সুপারসেট।
  • +
  • ExternalName - পরিষেবাটিকে externalName ক্ষেত্রের বিষয়বস্তুতে (যেমন foo.bar.example.com) ম্যাপ করে, একটি CNAME এর মান সহ রেকর্ড করুন। কোন ধরনের প্রক্সি সেট আপ করা হয় না. এই ধরনের v1.7 বা উচ্চতর kube-dns, অথবা CoreDNS সংস্করণ 0.0.8 বা উচ্চতর প্রয়োজন।
  • +
+

বিভিন্ন ধরনের পরিষেবা সম্পর্কে আরও তথ্য উৎস আইপি ব্যবহার করা টিউটোরিয়ালে পাওয়া যাবে। এছাড়াও পরিষেবার সাথে অ্যাপ্লিকেশন সংযোগ করা দেখুন৷

+

অতিরিক্ত, নোট করুন যে পরিষেবাগুলির সাথে কিছু ব্যবহারের ক্ষেত্রে রয়েছে যেগুলির মধ্যে একটি নির্বাচক সংজ্ঞায়িত করা নেই৷ নির্বাচক ছাড়া তৈরি করা একটি পরিষেবা সংশ্লিষ্ট এন্ডপয়েন্ট অবজেক্ট তৈরি করবে না। এটি ব্যবহারকারীদের ম্যানুয়ালি নির্দিষ্ট শেষ পয়েন্টে একটি পরিষেবা ম্যাপ করতে দেয়। কোন নির্বাচক না থাকার আরেকটি সম্ভাবনা হল আপনি কঠোরভাবে type: ExternalName ব্যবহার করছেন।

+
+
+
+

সারাংশ

+
    +
  • বাহ্যিক ট্র্যাফিকের কাছে পডগুলিকে প্রকাশ করা
  • +
  • একাধিক পড জুড়ে ভারসাম্যপূর্ণ ট্রাফিক লোড করুন
  • +
  • লেবেল ব্যবহার করা
  • +
+
+
+

একটি কুবারনেটিস পরিষেবা হল একটি বিমূর্ত স্তর যা পডগুলির একটি যৌক্তিক সেটকে সংজ্ঞায়িত করে এবং সেই পডগুলির জন্য বহিরাগত ট্রাফিক এক্সপোজার, লোড ব্যালেন্সিং এবং পরিষেবা আবিষ্কার সক্ষম করে৷

+
+
+
+
+ +
+
+

পরিষেবা এবং লেবেল

+
+
+ +
+
+

একটি পরিষেবা পডের একটি সেট জুড়ে ট্রাফিককে রুট করে। পরিষেবাগুলি হল বিমূর্ততা যা আপনার আবেদনকে প্রভাবিত না করেই কুবারনেটে পডগুলিকে মরতে এবং প্রতিলিপি তৈরি করতে দেয়৷ নির্ভরশীল পডগুলির মধ্যে আবিষ্কার এবং রাউটিং (যেমন একটি অ্যাপ্লিকেশনে ফ্রন্টএন্ড এবং ব্যাকএন্ড উপাদান) Kubernetes পরিষেবা দ্বারা পরিচালিত হয়৷

+

পরিষেবাগুলি লেবেল এবং নির্বাচকদের ব্যবহার করে পডের একটি সেটের সাথে মেলে, একটি আদিম গ্রুপিং যা কুবারনেটসের বস্তুতে লজিক্যাল অপারেশনের অনুমতি দেয় . লেবেল হল কী/মান জোড়া বস্তুর সাথে সংযুক্ত এবং যেকোনো উপায়ে ব্যবহার করা যেতে পারে:

+
    +
  • উন্নয়ন, পরীক্ষা এবং উৎপাদনের জন্য মনোনীত বস্তু
  • +
  • এম্বেড সংস্করণ ট্যাগ
  • +
  • ট্যাগ ব্যবহার করে একটি বস্তুকে শ্রেণীবদ্ধ করুন
  • +
+
+
+ +
+ +
+
+

+
+
+
+
+
+

লেবেলগুলি তৈরির সময় বা পরে বস্তুর সাথে সংযুক্ত করা যেতে পারে। এগুলি যে কোনও সময় পরিবর্তন করা যেতে পারে। আসুন এখন একটি পরিষেবা ব্যবহার করে আমাদের অ্যাপ্লিকেশনটি প্রকাশ করি এবং কিছু লেবেল প্রয়োগ করি৷

৷ +
+
+
+ +
+
+ + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/scale/_index.md b/content/bn/docs/tutorials/kubernetes-basics/scale/_index.md new file mode 100644 index 0000000000000..fb6f9901f56f0 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/scale/_index.md @@ -0,0 +1,4 @@ +--- +title: আপনার অ্যাপ পরিসর করুন +weight: 50 +--- diff --git a/content/bn/docs/tutorials/kubernetes-basics/scale/scale-interactive.html b/content/bn/docs/tutorials/kubernetes-basics/scale/scale-interactive.html new file mode 100644 index 0000000000000..b2f794a19b8b8 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/scale/scale-interactive.html @@ -0,0 +1,37 @@ +--- +title: ইন্টারেক্টিভ টিউটোরিয়াল - আপনার অ্যাপ পরিসর করুন +weight: 20 +--- + + + + + + + +{{< katacoda-tutorial >}} + + + + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/scale/scale-intro.html b/content/bn/docs/tutorials/kubernetes-basics/scale/scale-intro.html new file mode 100644 index 0000000000000..d4613e224b6c0 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/scale/scale-intro.html @@ -0,0 +1,120 @@ +--- +title: আপনার অ্যাপের একাধিক উদাহরণ (instance) চালান +weight: 10 +--- + + + + + + + +
+ +
+ +
+ +
+

উদ্দেশ্য

+
    +
  • kubectl ব্যবহার করে একটি অ্যাপ স্কেল করুন।
  • +
+
+ +
+

একটি অ্যাপ্লিকেশন স্কেলিং

+ +

পূর্ববর্তী মডিউলগুলিতে আমরা তৈরি করেছি একটি ডিপ্লয়মেন্ট এবং তারপর সর্বজনীনভাবে এটি প্রকাশ করে একটি পরিষেবার মাধ্যমে ৷ ডিপ্লয়মেন্টটি আমাদের অ্যাপ্লিকেশন চালানোর জন্য শুধুমাত্র একটি পড তৈরি করেছে। যখন ট্র্যাফিক বাড়বে, তখন ব্যবহারকারীর চাহিদার সাথে তাল মিলিয়ে চলতে আমাদের অ্যাপ্লিকেশন স্কেল করতে হবে।

+ +

স্কেলিং একটি ডিপ্লয়মেন্টের প্রতিলিপি সংখ্যা পরিবর্তন করে সম্পন্ন করা হয়

+ +
+
+
+

সারসংক্ষেপ:

+
    +
  • একটি ডিপ্লয়মেন্টের স্কেলিং
  • +
+
+
+

+ আপনি শুরু থেকে kubectl create deployment কমান্ডের --replicas প্যারামিটার ব্যবহার করে একাধিক উদাহরণ সহ একটি ডিপ্লয়মেন্ট তৈরি করতে পারেন

+
+
+
+
+ +
+
+

স্কেলিং এর ধারণা

+
+
+ +
+
+
+ +
+
+ +
+ +
+
+ +

একটি ডিপ্লয়মেন্টের স্কেল করা নিশ্চিত করবে যে নতুন পড তৈরি করা হয়েছে এবং উপলব্ধ সংস্থান সহ নোডগুলিতে নির্ধারিত হয়েছে। স্কেলিং নতুন পছন্দসই অবস্থায় পডের সংখ্যা বৃদ্ধি করবে। কুবারনেটস পডের অটোস্কেলিং সমর্থন করে, কিন্তু এটি এই টিউটোরিয়ালের সুযোগের বাইরে। শূন্যে স্কেলিং করাও সম্ভব, এবং এটি নির্দিষ্ট ডিপ্লয়মেন্টের সমস্ত পড বন্ধ করবে।

+ +

একটি অ্যাপ্লিকেশনের একাধিক দৃষ্টান্ত চালানোর জন্য তাদের সকলে ট্রাফিক বিতরণ করার একটি উপায় প্রয়োজন। পরিষেবাগুলিতে একটি সমন্বিত লোড-ব্যালেন্সার রয়েছে যা একটি উন্মুক্ত ডিপ্লয়মেন্টের সমস্ত পডগুলিতে নেটওয়ার্ক ট্র্যাফিক বিতরণ করবে। পরিষেবাগুলি কেবলমাত্র উপলভ্য পডগুলিতে ট্র্যাফিক পাঠানো হয়েছে তা নিশ্চিত করার জন্য এন্ডপয়েন্টগুলি ব্যবহার করে ক্রমাগত চলমান পডগুলি পর্যবেক্ষণ করবে।

+ +
+
+
+

একটি ডিপ্লয়মেন্টেয় প্রতিলিপির সংখ্যা পরিবর্তন করে স্কেলিং সম্পন্ন করা হয়।

+
+
+
+ +
+ +
+
+

একবার আপনার কাছে একটি অ্যাপ্লিকেশন চালানোর একাধিক উদাহরণ হয়ে গেলে, আপনি ডাউনটাইম ছাড়াই রোলিং আপডেট করতে সক্ষম হবেন। আমরা পরবর্তী মডিউলে এটি কভার করব। এখন, আসুন অনলাইন টার্মিনালে যাই এবং আমাদের আবেদন স্কেল করি।

+
+
+
+ + + +
+ +
+ + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/update/_index.md b/content/bn/docs/tutorials/kubernetes-basics/update/_index.md new file mode 100644 index 0000000000000..241137e639264 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/update/_index.md @@ -0,0 +1,4 @@ +--- +title: আপনার অ্যাপ্লিকেশন আপডেট করুন +weight: 60 +--- diff --git a/content/bn/docs/tutorials/kubernetes-basics/update/update-interactive.html b/content/bn/docs/tutorials/kubernetes-basics/update/update-interactive.html new file mode 100644 index 0000000000000..79894a7ec7660 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/update/update-interactive.html @@ -0,0 +1,33 @@ +--- +title: মিথস্ক্রিয় প্রশিক্ষণ - অ্যাপটিকে আধুনিক রূপ দেওয়া +weight: 20 +--- + + + + + + + +{{< katacoda-tutorial >}} + + + + + diff --git a/content/bn/docs/tutorials/kubernetes-basics/update/update-intro.html b/content/bn/docs/tutorials/kubernetes-basics/update/update-intro.html new file mode 100644 index 0000000000000..c0a079002f8a1 --- /dev/null +++ b/content/bn/docs/tutorials/kubernetes-basics/update/update-intro.html @@ -0,0 +1,133 @@ +--- +title: একটি রোলিং আপডেট সম্পাদন করা +weight: 10 +--- + + + + + + + +
+ +
+ +
+ +
+

উদ্দেশ্য

+
    +
  • kubectl ব্যবহার করে একটি রোলিং আপডেট সম্পাদন করুন।
  • +
+
+ +
+

একটি অ্যাপ্লিকেশন আপডেট করা হচ্ছে

+ +

ব্যবহারকারীরা আশা করে যে অ্যাপ্লিকেশনগুলি সর্বদা উপলব্ধ থাকবে এবং ডেভেলপাররা দিনে কয়েকবার তাদের নতুন সংস্করণ স্থাপন করবে বলে আশা করা হচ্ছে। কুবারনেটিসে এটি রোলিং আপডেটের সাথে করা হয়। ঘূর্ণায়মান আপডেটগুলি নতুনগুলির সাথে পড দৃষ্টান্তগুলিকে ক্রমবর্ধমানভাবে আপডেট করে শূন্য ডাউনটাইম সহ ডিপ্লয়মেন্টের আপডেটগুলি ঘটতে দেয়৷ নতুন পডগুলি উপলব্ধ সংস্থান সহ নোডগুলিতে নির্ধারিত হবে৷

+ +

আগের মডিউলে আমরা একাধিক দৃষ্টান্ত চালানোর জন্য আমাদের অ্যাপ্লিকেশনটিকে স্কেল করেছি। এটি অ্যাপ্লিকেশন প্রাপ্যতা প্রভাবিত না করে আপডেট সম্পাদন করার জন্য একটি প্রয়োজনীয়তা। ডিফল্টভাবে, আপডেটের সময় সর্বাধিক সংখ্যক পড অনুপলব্ধ হতে পারে এবং সর্বাধিক নতুন পড তৈরি করা যেতে পারে, একটি। উভয় বিকল্পই সংখ্যা বা শতাংশে (পডের) কনফিগার করা যেতে পারে। + কুবারনেটিসে, আপডেটগুলি ভার্সন করা হয় এবং যেকোনো ডিপ্লয়মেন্ট আপডেটকে পূর্ববর্তী (স্থিতিশীল) সংস্করণে ফিরিয়ে আনা যায়।

+ +
+
+
+

সারাংশঃ

+
    +
  • একটি অ্যাপ আপডেট করা হচ্ছে
  • +
+
+
+

রোলিং আপডেটগুলি নতুনগুলির সাথে পডস দৃষ্টান্তগুলিকে ক্রমবর্ধমানভাবে আপডেট করার মাধ্যমে শূন্য ডাউনটাইম সহ ডিপ্লয়মেন্টের আপডেটগুলি ঘটতে দেয়৷

+
+
+
+
+ +
+
+

রোলিং আপডেট ওভারভিউ

+
+
+
+
+
+ +
+
+
+ +
+
+ +

অ্যাপ্লিকেশন স্কেলিং-এর অনুরূপ, যদি একটি ডিপ্লয়মেন্ট সর্বজনীনভাবে প্রকাশ করা হয়, পরিষেবাটি আপডেটের সময় শুধুমাত্র উপলব্ধ পডগুলিতে ট্র্যাফিক লোড-ব্যালেন্স করবে। একটি উপলব্ধ পড একটি উদাহরণ যা অ্যাপ্লিকেশন ব্যবহারকারীদের জন্য উপলব্ধ৷

+

রোলিং আপডেটগুলি নিম্নলিখিত ক্রিয়াগুলিকে অনুমতি দেয়ঃ

+
    +
  • একটি পরিবেশ থেকে অন্য পরিবেশে একটি অ্যাপ্লিকেশন প্রচার করুন (কন্টেইনার ইমেজ আপডেটের মাধ্যমে)
  • +
  • পূর্ববর্তী সংস্করণে রোলব্যাক করুন
  • +
  • ক্রমাগত একীকরণ এবং শূন্য ডাউনটাইম সহ অ্যাপ্লিকেশনগুলির ক্রমাগত বিতরণ
  • + +
+ +
+
+
+

যদি একটি ডিপ্লয়মেন্ট পাবলিকলি প্রকাশ করা হয়, পরিষেবাটি আপডেটের সময় শুধুমাত্র উপলব্ধ পডগুলিতে ট্র্যাফিক লোড-ব্যালেন্স করবে।

+
+
+
+ +
+ +
+
+

নিম্নলিখিত ইন্টারেক্টিভ টিউটোরিয়ালে, আমরা আমাদের অ্যাপ্লিকেশনটিকে একটি নতুন সংস্করণে আপডেট করব এবং একটি রোলব্যাকও করব৷

+
+
+
+ + + +
+ +
+ + + diff --git a/content/bn/docs/tutorials/security/_index.md b/content/bn/docs/tutorials/security/_index.md new file mode 100644 index 0000000000000..ac33aa20dccf6 --- /dev/null +++ b/content/bn/docs/tutorials/security/_index.md @@ -0,0 +1,11 @@ +--- +title: "নিরাপত্তা" +weight: 40 +--- + +কুবারনেটিস ক্লাস্টার পরিচালনাকারী বেশিরভাগ সংস্থা এবং লোকেদের জন্য নিরাপত্তা একটি গুরুত্বপূর্ণ উদ্বেগ। +আপনি একটি মৌলিক [নিরাপত্তা চেকলিস্ট](/bn/docs/concepts/security/security-checklist/) খুঁজে পেতে পারেন +কুবারনেটিস ডকুমেন্টেশনের অন্যত্র । + +কুবারনেটিসের নিরাপত্তার দিকগুএও কীভাবে স্থাপন এবং পরিচালনা করতে হয় তা শিখতে, +আপনি এই বিভাগে টিউটোরিয়ালগুলো অনুসরণ করতে পারেন। diff --git a/content/bn/docs/tutorials/services/_index.md b/content/bn/docs/tutorials/services/_index.md new file mode 100644 index 0000000000000..4bb0b023bce33 --- /dev/null +++ b/content/bn/docs/tutorials/services/_index.md @@ -0,0 +1,4 @@ +--- +title: "সেবা" +weight: 70 +--- diff --git a/content/bn/docs/tutorials/stateful-application/_index.md b/content/bn/docs/tutorials/stateful-application/_index.md new file mode 100644 index 0000000000000..3a4dcdd7c5d8e --- /dev/null +++ b/content/bn/docs/tutorials/stateful-application/_index.md @@ -0,0 +1,4 @@ +--- +title: "স্টেটফুল অ্যাপ্লিকেশন" +weight: 50 +--- diff --git a/content/bn/docs/tutorials/stateless-application/_index.md b/content/bn/docs/tutorials/stateless-application/_index.md new file mode 100644 index 0000000000000..848fe537935bf --- /dev/null +++ b/content/bn/docs/tutorials/stateless-application/_index.md @@ -0,0 +1,4 @@ +--- +title: "স্টেটলেস অ্যাপ্লিকেশন" +weight: 40 +--- diff --git a/content/bn/docs/update-user-guide-links.py b/content/bn/docs/update-user-guide-links.py new file mode 100644 index 0000000000000..7c449d73efb08 --- /dev/null +++ b/content/bn/docs/update-user-guide-links.py @@ -0,0 +1,85 @@ +import subprocess +import re + +# Finds the documents to rewrite for files that include user-guide-content-moved.md. +# Then opens these files and processes the stuff after those lines to figure out where +# the line should move to. +# Returns a list of ('old/path', 'new/path') tuples. +def find_documents_to_rewrite(): + cmd = "ag --markdown -Q -l \"{% include user-guide-content-moved.md %}\"" + moved_docs = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().splitlines() + + rewrites = [] + for doc in moved_docs: + location = doc_location(doc) + destinations = get_destinations_for_doc(doc) + + if len(destinations) == 0: + print("Unable to get possible destinations for %s" % doc) + elif len(destinations) > 1: + print("%s has multiple potential destinations. Not rewriting links." % doc) + else: + # print("%s --> %s" % (location, destinations[0])) + rewrites.append((location, destinations[0])) + + return rewrites + +# Returns the location of the documentation as we will refer to it in the markdown. 
+# /docs/path/to/foo/index.md is available at /docs/path/to/foo/
+# /docs/path/to/foo/bar.md is available at /docs/path/to/foo/bar/
+def doc_location(filename):
+    if filename.endswith('/index.md'):
+        return "/docs/" + filename[:-9] + "/"
+    else:
+        return "/docs/" + filename[:-3] + "/"
+
+REDIRECT_REGEX = re.compile(r"^.*\[(.*)\]\((.*)\)$")
+
+def get_destinations_for_doc(filename):
+    destination_paths = []
+    with open(filename) as f:
+        lines = [line.rstrip('\n').rstrip('\r') for line in f.readlines()]
+
+    # Remove empty lines
+    lines = list(filter(bool, lines))
+
+    content_moved_index = lines.index("{% include user-guide-content-moved.md %}")
+
+    # Get everything after that line.
+    destinations = lines[content_moved_index + 1:]
+    for destination in destinations:
+        result = REDIRECT_REGEX.match(destination)
+        if not result:
+            return []
+        doc_title = result.group(1)  # Unused; can be printed for more info.
+        new_path = result.group(2)
+        destination_paths.append(new_path)
+
+    return destination_paths
+
+# Given a list of (old/path, new/path) tuples, executes a sed command across all
+# markdown files to replace (/docs/path/to/old/doc/) with (/docs/path/to/new/doc/).
+def rewrite_documents(rewrites):
+    cmd = r"find . -name '*.md' -type f -exec sed -i.bak 's@(%s)@(%s)@g' '{}' \;"
+    for original, new in rewrites:
+        print("%s --> %s" % (original, new))
+        original = original.replace('-', r'\-')
+        new = new.replace('-', r'\-')
+
+        # print(cmd % (original, new))
+        subprocess.call(cmd % (original, new), shell=True)
+
+# sed -i.bak leaves a .bak backup for every file it rewrites, so clean the
+# backups up once all the links have been replaced.
+def remove_sed_backups():
+    cmd = "find . -name '*.bak' -delete"
+    subprocess.call(cmd, shell=True)
+
+def main():
+    rewrites = find_documents_to_rewrite()
+    rewrite_documents(rewrites)
+    remove_sed_backups()
+
+if __name__ == "__main__":
+    main()
diff --git a/content/bn/examples/README.md b/content/bn/examples/README.md
new file mode 100644
index 0000000000000..0ac9922b5c8ca
--- /dev/null
+++ b/content/bn/examples/README.md
@@ -0,0 +1,11 @@
+স্থানীয়করণের জন্য পরীক্ষা চালানোর জন্য, নিম্নলিখিত কমান্ডটি ব্যবহার করুন:
+
+```
+go test k8s.io/website/content/<LANG>/examples
+```
+
+যেখানে `<LANG>` একটি ভাষার দুই-অক্ষরের উপস্থাপনা। উদাহরণ স্বরূপ:
+
+```
+go test k8s.io/website/content/en/examples
+```
diff --git a/content/bn/examples/access/certificate-signing-request/clusterrole-approve.yaml b/content/bn/examples/access/certificate-signing-request/clusterrole-approve.yaml
new file mode 100644
index 0000000000000..2c854c95c60c8
--- /dev/null
+++ b/content/bn/examples/access/certificate-signing-request/clusterrole-approve.yaml
@@ -0,0 +1,27 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: csr-approver
+rules:
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests/approval
+  verbs:
+  - update
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - signers
+  resourceNames:
+  - example.com/my-signer-name # example.com/* can be used to authorize for all signers in the 'example.com' domain
+  verbs:
+  - approve
diff --git a/content/bn/examples/access/certificate-signing-request/clusterrole-create.yaml b/content/bn/examples/access/certificate-signing-request/clusterrole-create.yaml
new file mode 100644
index 0000000000000..def1b879d8e88
--- /dev/null
+++
b/content/bn/examples/access/certificate-signing-request/clusterrole-create.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-creator +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - list + - watch diff --git a/content/bn/examples/access/certificate-signing-request/clusterrole-sign.yaml b/content/bn/examples/access/certificate-signing-request/clusterrole-sign.yaml new file mode 100644 index 0000000000000..6d1a2f7882cd1 --- /dev/null +++ b/content/bn/examples/access/certificate-signing-request/clusterrole-sign.yaml @@ -0,0 +1,27 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: csr-signer +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - update +- apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - example.com/my-signer-name # example.com/* can be used to authorize for all signers in the 'example.com' domain + verbs: + - sign diff --git a/content/bn/examples/access/deployment-replicas-policy.yaml b/content/bn/examples/access/deployment-replicas-policy.yaml new file mode 100644 index 0000000000000..466a6ccbd6b18 --- /dev/null +++ b/content/bn/examples/access/deployment-replicas-policy.yaml @@ -0,0 +1,18 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "deploy-replica-policy.example.com" +spec: + paramKind: + apiVersion: rules.example.com/v1 + kind: ReplicaLimit + matchConstraints: + resourceRules: + - apiGroups: ["apps"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: ["deployments"] + validations: + - expression: "object.spec.replicas <= params.maxReplicas" + messageExpression: "'object.spec.replicas must be no greater than ' + string(params.maxReplicas)" + reason: Invalid diff --git a/content/bn/examples/access/endpoints-aggregated.yaml b/content/bn/examples/access/endpoints-aggregated.yaml new file mode 100644 index 0000000000000..d238820056afb --- /dev/null +++ b/content/bn/examples/access/endpoints-aggregated.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + kubernetes.io/description: |- + Add endpoints write permissions to the edit and admin roles. This was + removed by default in 1.22 because of CVE-2021-25740. See + https://issue.k8s.io/103675. This can allow writers to direct LoadBalancer + or Ingress implementations to expose backend IPs that would not otherwise + be accessible, and can circumvent network policies or security controls + intended to prevent/isolate access to those backends. + EndpointSlices were never included in the edit or admin roles, so there + is nothing to restore for the EndpointSlice API. 
+ labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: custom:aggregate-to-edit:endpoints # you can change this if you wish +rules: + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] diff --git a/content/bn/examples/access/image-matches-namespace-environment.policy.yaml b/content/bn/examples/access/image-matches-namespace-environment.policy.yaml new file mode 100644 index 0000000000000..6482991316805 --- /dev/null +++ b/content/bn/examples/access/image-matches-namespace-environment.policy.yaml @@ -0,0 +1,28 @@ +# This policy enforces that all containers of a deployment has the image repo match the environment label of its namespace. +# Except for "exempt" deployments, or any containers that do not belong to the "example.com" organization (e.g. common sidecars). +# For example, if the namespace has a label of {"environment": "staging"}, all container images must be either staging.example.com/* +# or do not contain "example.com" at all, unless the deployment has {"exempt": "true"} label. +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "image-matches-namespace-environment.policy.example.com" +spec: + failurePolicy: Fail + matchConstraints: + resourceRules: + - apiGroups: ["apps"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: ["deployments"] + variables: + - name: environment + expression: "'environment' in namespaceObject.metadata.labels ? namespaceObject.metadata.labels['environment'] : 'prod'" + - name: exempt + expression: "'exempt' in object.metadata.labels && object.metadata.labels['exempt'] == 'true'" + - name: containers + expression: "object.spec.template.spec.containers" + - name: containersToCheck + expression: "variables.containers.filter(c, c.image.contains('example.com/'))" + validations: + - expression: "variables.exempt || variables.containersToCheck.all(c, c.image.startsWith(variables.environment + '.'))" + messageExpression: "'only ' + variables.environment + ' images are allowed in namespace ' + namespaceObject.metadata.name" diff --git a/content/bn/examples/access/validating-admission-policy-audit-annotation.yaml b/content/bn/examples/access/validating-admission-policy-audit-annotation.yaml new file mode 100644 index 0000000000000..1c422a825447f --- /dev/null +++ b/content/bn/examples/access/validating-admission-policy-audit-annotation.yaml @@ -0,0 +1,18 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "demo-policy.example.com" +spec: + failurePolicy: Fail + matchConstraints: + resourceRules: + - apiGroups: ["apps"] + apiVersions: ["v1"] + operations: ["CREATE", "UPDATE"] + resources: ["deployments"] + validations: + - expression: "object.spec.replicas > 50" + messageExpression: "'Deployment spec.replicas set to ' + string(object.spec.replicas)" + auditAnnotations: + - key: "high-replica-count" + valueExpression: "'Deployment spec.replicas set to ' + string(object.spec.replicas)" diff --git a/content/bn/examples/access/validating-admission-policy-match-conditions.yaml b/content/bn/examples/access/validating-admission-policy-match-conditions.yaml new file mode 100644 index 0000000000000..e0a1522667a38 --- /dev/null +++ b/content/bn/examples/access/validating-admission-policy-match-conditions.yaml @@ -0,0 +1,21 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingAdmissionPolicy +metadata: + name: "demo-policy.example.com" +spec: + failurePolicy: Fail + 
matchConstraints: + resourceRules: + - apiGroups: ["*"] + apiVersions: ["*"] + operations: ["CREATE", "UPDATE"] + resources: ["*"] + matchConditions: + - name: 'exclude-leases' # Each match condition must have a unique name + expression: '!(request.resource.group == "coordination.k8s.io" && request.resource.resource == "leases")' # Match non-lease resources. + - name: 'exclude-kubelet-requests' + expression: '!("system:nodes" in request.userInfo.groups)' # Match requests made by non-node users. + - name: 'rbac' # Skip RBAC requests. + expression: 'request.resource.group != "rbac.authorization.k8s.io"' + validations: + - expression: "!object.metadata.name.contains('demo') || object.metadata.namespace == 'demo'" diff --git a/content/bn/examples/admin/cloud/ccm-example.yaml b/content/bn/examples/admin/cloud/ccm-example.yaml new file mode 100644 index 0000000000000..91b7ef2b8944f --- /dev/null +++ b/content/bn/examples/admin/cloud/ccm-example.yaml @@ -0,0 +1,73 @@ +# This is an example of how to set up cloud-controller-manager as a Daemonset in your cluster. +# It assumes that your masters can run pods and has the role node-role.kubernetes.io/master +# Note that this Daemonset will not work straight out of the box for your cloud, this is +# meant to be a guideline. + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloud-controller-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + k8s-app: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cloud-controller-manager + template: + metadata: + labels: + k8s-app: cloud-controller-manager + spec: + serviceAccountName: cloud-controller-manager + containers: + - name: cloud-controller-manager + # for in-tree providers we use registry.k8s.io/cloud-controller-manager + # this can be replaced with any other image for out-of-tree providers + image: registry.k8s.io/cloud-controller-manager:v1.8.0 + command: + - /usr/local/bin/cloud-controller-manager + - --cloud-provider=[YOUR_CLOUD_PROVIDER] # Add your own cloud provider here! 
+ - --leader-elect=true + - --use-service-account-credentials + # these flags will vary for every cloud provider + - --allocate-node-cidrs=true + - --configure-cloud-routes=true + - --cluster-cidr=172.17.0.0/16 + tolerations: + # this is required so CCM can bootstrap itself + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + # these tolerations are to have the daemonset runnable on control plane nodes + # remove them if your control plane nodes should not run pods + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + # this is to restrict CCM to only run on master nodes + # the node selector may vary depending on your cluster setup + nodeSelector: + node-role.kubernetes.io/master: "" diff --git a/content/bn/examples/admin/dns/busybox.yaml b/content/bn/examples/admin/dns/busybox.yaml new file mode 100644 index 0000000000000..31f009d307291 --- /dev/null +++ b/content/bn/examples/admin/dns/busybox.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox + namespace: default +spec: + containers: + - name: busybox + image: busybox:1.28 + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + restartPolicy: Always diff --git a/content/bn/examples/admin/dns/dns-horizontal-autoscaler.yaml b/content/bn/examples/admin/dns/dns-horizontal-autoscaler.yaml new file mode 100644 index 0000000000000..3182fed3c8052 --- /dev/null +++ b/content/bn/examples/admin/dns/dns-horizontal-autoscaler.yaml @@ -0,0 +1,87 @@ +kind: ServiceAccount +apiVersion: v1 +metadata: + name: kube-dns-autoscaler + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-dns-autoscaler +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:kube-dns-autoscaler +subjects: + - kind: ServiceAccount + name: kube-dns-autoscaler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:kube-dns-autoscaler + apiGroup: rbac.authorization.k8s.io + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-dns-autoscaler + namespace: kube-system + labels: + k8s-app: kube-dns-autoscaler + kubernetes.io/cluster-service: "true" +spec: + selector: + matchLabels: + k8s-app: kube-dns-autoscaler + template: + metadata: + labels: + k8s-app: kube-dns-autoscaler + spec: + priorityClassName: system-cluster-critical + securityContext: + seccompProfile: + type: RuntimeDefault + supplementalGroups: [ 65534 ] + fsGroup: 65534 + nodeSelector: + kubernetes.io/os: linux + containers: + - name: autoscaler + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.4 + resources: + requests: + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=kube-dns-autoscaler + # Should keep target in sync with cluster/addons/dns/kube-dns.yaml.base + - --target= + # When cluster is using large nodes(with more cores), "coresPerReplica" 
should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true,"includeUnschedulableNodes":true}} + - --logtostderr=true + - --v=2 + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + serviceAccountName: kube-dns-autoscaler diff --git a/content/bn/examples/admin/dns/dnsutils.yaml b/content/bn/examples/admin/dns/dnsutils.yaml new file mode 100644 index 0000000000000..e1b3ace336f99 --- /dev/null +++ b/content/bn/examples/admin/dns/dnsutils.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: dnsutils + namespace: default +spec: + containers: + - name: dnsutils + image: registry.k8s.io/e2e-test-images/jessie-dnsutils:1.3 + command: + - sleep + - "infinity" + imagePullPolicy: IfNotPresent + restartPolicy: Always diff --git a/content/bn/examples/admin/konnectivity/egress-selector-configuration.yaml b/content/bn/examples/admin/konnectivity/egress-selector-configuration.yaml new file mode 100644 index 0000000000000..631e6cc26862e --- /dev/null +++ b/content/bn/examples/admin/konnectivity/egress-selector-configuration.yaml @@ -0,0 +1,21 @@ +apiVersion: apiserver.k8s.io/v1beta1 +kind: EgressSelectorConfiguration +egressSelections: +# Since we want to control the egress traffic to the cluster, we use the +# "cluster" as the name. Other supported values are "etcd", and "controlplane". +- name: cluster + connection: + # This controls the protocol between the API Server and the Konnectivity + # server. Supported values are "GRPC" and "HTTPConnect". There is no + # end user visible difference between the two modes. You need to set the + # Konnectivity server to work in the same mode. + proxyProtocol: GRPC + transport: + # This controls what transport the API Server uses to communicate with the + # Konnectivity server. UDS is recommended if the Konnectivity server + # locates on the same machine as the API Server. You need to configure the + # Konnectivity server to listen on the same UDS socket. + # The other supported transport is "tcp". You will need to set up TLS + # config to secure the TCP transport. + uds: + udsName: /etc/kubernetes/konnectivity-server/konnectivity-server.socket diff --git a/content/bn/examples/admin/konnectivity/konnectivity-agent.yaml b/content/bn/examples/admin/konnectivity/konnectivity-agent.yaml new file mode 100644 index 0000000000000..cbcbf89114a94 --- /dev/null +++ b/content/bn/examples/admin/konnectivity/konnectivity-agent.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +# Alternatively, you can deploy the agents as Deployments. It is not necessary +# to have an agent on each node. +kind: DaemonSet +metadata: + labels: + addonmanager.kubernetes.io/mode: Reconcile + k8s-app: konnectivity-agent + namespace: kube-system + name: konnectivity-agent +spec: + selector: + matchLabels: + k8s-app: konnectivity-agent + template: + metadata: + labels: + k8s-app: konnectivity-agent + spec: + priorityClassName: system-cluster-critical + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + - image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.37 + name: konnectivity-agent + command: ["/proxy-agent"] + args: [ + "--logtostderr=true", + "--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + # Since the konnectivity server runs with hostNetwork=true, + # this is the IP address of the master machine. 
+ "--proxy-server-host=35.225.206.7", + "--proxy-server-port=8132", + "--admin-server-port=8133", + "--health-server-port=8134", + "--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token" + ] + volumeMounts: + - mountPath: /var/run/secrets/tokens + name: konnectivity-agent-token + livenessProbe: + httpGet: + port: 8134 + path: /healthz + initialDelaySeconds: 15 + timeoutSeconds: 15 + serviceAccountName: konnectivity-agent + volumes: + - name: konnectivity-agent-token + projected: + sources: + - serviceAccountToken: + path: konnectivity-agent-token + audience: system:konnectivity-server diff --git a/content/bn/examples/admin/konnectivity/konnectivity-rbac.yaml b/content/bn/examples/admin/konnectivity/konnectivity-rbac.yaml new file mode 100644 index 0000000000000..7687f49b77e82 --- /dev/null +++ b/content/bn/examples/admin/konnectivity/konnectivity-rbac.yaml @@ -0,0 +1,24 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:konnectivity-server + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: system:konnectivity-server +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: konnectivity-agent + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile diff --git a/content/bn/examples/admin/konnectivity/konnectivity-server.yaml b/content/bn/examples/admin/konnectivity/konnectivity-server.yaml new file mode 100644 index 0000000000000..4dfbf5db9d11a --- /dev/null +++ b/content/bn/examples/admin/konnectivity/konnectivity-server.yaml @@ -0,0 +1,73 @@ +apiVersion: v1 +kind: Pod +metadata: + name: konnectivity-server + namespace: kube-system +spec: + priorityClassName: system-cluster-critical + hostNetwork: true + containers: + - name: konnectivity-server-container + image: registry.k8s.io/kas-network-proxy/proxy-server:v0.0.37 + command: ["/proxy-server"] + args: [ + "--logtostderr=true", + # This needs to be consistent with the value set in egressSelectorConfiguration. + "--uds-name=/etc/kubernetes/konnectivity-server/konnectivity-server.socket", + "--delete-existing-uds-file", + # The following two lines assume the Konnectivity server is + # deployed on the same machine as the apiserver, and the certs and + # key of the API Server are at the specified location. + "--cluster-cert=/etc/kubernetes/pki/apiserver.crt", + "--cluster-key=/etc/kubernetes/pki/apiserver.key", + # This needs to be consistent with the value set in egressSelectorConfiguration. 
+ "--mode=grpc", + "--server-port=0", + "--agent-port=8132", + "--admin-port=8133", + "--health-port=8134", + "--agent-namespace=kube-system", + "--agent-service-account=konnectivity-agent", + "--kubeconfig=/etc/kubernetes/konnectivity-server.conf", + "--authentication-audience=system:konnectivity-server" + ] + livenessProbe: + httpGet: + scheme: HTTP + host: 127.0.0.1 + port: 8134 + path: /healthz + initialDelaySeconds: 30 + timeoutSeconds: 60 + ports: + - name: agentport + containerPort: 8132 + hostPort: 8132 + - name: adminport + containerPort: 8133 + hostPort: 8133 + - name: healthport + containerPort: 8134 + hostPort: 8134 + volumeMounts: + - name: k8s-certs + mountPath: /etc/kubernetes/pki + readOnly: true + - name: kubeconfig + mountPath: /etc/kubernetes/konnectivity-server.conf + readOnly: true + - name: konnectivity-uds + mountPath: /etc/kubernetes/konnectivity-server + readOnly: false + volumes: + - name: k8s-certs + hostPath: + path: /etc/kubernetes/pki + - name: kubeconfig + hostPath: + path: /etc/kubernetes/konnectivity-server.conf + type: FileOrCreate + - name: konnectivity-uds + hostPath: + path: /etc/kubernetes/konnectivity-server + type: DirectoryOrCreate diff --git a/content/bn/examples/admin/logging/fluentd-sidecar-config.yaml b/content/bn/examples/admin/logging/fluentd-sidecar-config.yaml new file mode 100644 index 0000000000000..eea1849b033fa --- /dev/null +++ b/content/bn/examples/admin/logging/fluentd-sidecar-config.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: fluentd-config +data: + fluentd.conf: | + + type tail + format none + path /var/log/1.log + pos_file /var/log/1.log.pos + tag count.format1 + + + + type tail + format none + path /var/log/2.log + pos_file /var/log/2.log.pos + tag count.format2 + + + + type google_cloud + diff --git a/content/bn/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml b/content/bn/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml new file mode 100644 index 0000000000000..a621a9fb2acce --- /dev/null +++ b/content/bn/examples/admin/logging/two-files-counter-pod-agent-sidecar.yaml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox:1.28 + args: + - /bin/sh + - -c + - > + i=0; + while true; + do + echo "$i: $(date)" >> /var/log/1.log; + echo "$(date) INFO $i" >> /var/log/2.log; + i=$((i+1)); + sleep 1; + done + volumeMounts: + - name: varlog + mountPath: /var/log + - name: count-agent + image: registry.k8s.io/fluentd-gcp:1.30 + env: + - name: FLUENTD_ARGS + value: -c /etc/fluentd-config/fluentd.conf + volumeMounts: + - name: varlog + mountPath: /var/log + - name: config-volume + mountPath: /etc/fluentd-config + volumes: + - name: varlog + emptyDir: {} + - name: config-volume + configMap: + name: fluentd-config diff --git a/content/bn/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml b/content/bn/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml new file mode 100644 index 0000000000000..ac19efe4a2350 --- /dev/null +++ b/content/bn/examples/admin/logging/two-files-counter-pod-streaming-sidecar.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox:1.28 + args: + - /bin/sh + - -c + - > + i=0; + while true; + do + echo "$i: $(date)" >> /var/log/1.log; + echo "$(date) INFO $i" >> /var/log/2.log; + i=$((i+1)); + sleep 1; + done + volumeMounts: + - name: varlog + mountPath: /var/log + - name: count-log-1 + 
image: busybox:1.28 + args: [/bin/sh, -c, 'tail -n+1 -F /var/log/1.log'] + volumeMounts: + - name: varlog + mountPath: /var/log + - name: count-log-2 + image: busybox:1.28 + args: [/bin/sh, -c, 'tail -n+1 -F /var/log/2.log'] + volumeMounts: + - name: varlog + mountPath: /var/log + volumes: + - name: varlog + emptyDir: {} diff --git a/content/bn/examples/admin/logging/two-files-counter-pod.yaml b/content/bn/examples/admin/logging/two-files-counter-pod.yaml new file mode 100644 index 0000000000000..31bbed3cf8683 --- /dev/null +++ b/content/bn/examples/admin/logging/two-files-counter-pod.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox:1.28 + args: + - /bin/sh + - -c + - > + i=0; + while true; + do + echo "$i: $(date)" >> /var/log/1.log; + echo "$(date) INFO $i" >> /var/log/2.log; + i=$((i+1)); + sleep 1; + done + volumeMounts: + - name: varlog + mountPath: /var/log + volumes: + - name: varlog + emptyDir: {} diff --git a/content/bn/examples/admin/namespace-dev.json b/content/bn/examples/admin/namespace-dev.json new file mode 100644 index 0000000000000..cb3ed7cdc1efa --- /dev/null +++ b/content/bn/examples/admin/namespace-dev.json @@ -0,0 +1,10 @@ +{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "development", + "labels": { + "name": "development" + } + } +} diff --git a/content/bn/examples/admin/namespace-dev.yaml b/content/bn/examples/admin/namespace-dev.yaml new file mode 100644 index 0000000000000..5e753b693f03b --- /dev/null +++ b/content/bn/examples/admin/namespace-dev.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: development + labels: + name: development diff --git a/content/bn/examples/admin/namespace-prod.yaml b/content/bn/examples/admin/namespace-prod.yaml new file mode 100644 index 0000000000000..761d6325cb404 --- /dev/null +++ b/content/bn/examples/admin/namespace-prod.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: production + labels: + name: production diff --git a/content/bn/examples/admin/resource/cpu-constraints-pod-2.yaml b/content/bn/examples/admin/resource/cpu-constraints-pod-2.yaml new file mode 100644 index 0000000000000..b5c7348f26ee0 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-constraints-pod-2.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo-2 +spec: + containers: + - name: constraints-cpu-demo-2-ctr + image: nginx + resources: + limits: + cpu: "1.5" + requests: + cpu: "500m" diff --git a/content/bn/examples/admin/resource/cpu-constraints-pod-3.yaml b/content/bn/examples/admin/resource/cpu-constraints-pod-3.yaml new file mode 100644 index 0000000000000..0a2083acd8ec6 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-constraints-pod-3.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo-3 +spec: + containers: + - name: constraints-cpu-demo-3-ctr + image: nginx + resources: + limits: + cpu: "800m" + requests: + cpu: "100m" diff --git a/content/bn/examples/admin/resource/cpu-constraints-pod-4.yaml b/content/bn/examples/admin/resource/cpu-constraints-pod-4.yaml new file mode 100644 index 0000000000000..3c102158db509 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-constraints-pod-4.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo-4 +spec: + containers: + - name: constraints-cpu-demo-4-ctr + image: vish/stress diff --git 
a/content/bn/examples/admin/resource/cpu-constraints-pod.yaml b/content/bn/examples/admin/resource/cpu-constraints-pod.yaml new file mode 100644 index 0000000000000..7db23f26c8842 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-constraints-pod.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-cpu-demo +spec: + containers: + - name: constraints-cpu-demo-ctr + image: nginx + resources: + limits: + cpu: "800m" + requests: + cpu: "500m" diff --git a/content/bn/examples/admin/resource/cpu-constraints.yaml b/content/bn/examples/admin/resource/cpu-constraints.yaml new file mode 100644 index 0000000000000..6fc4239027c86 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-constraints.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-min-max-demo-lr +spec: + limits: + - max: + cpu: "800m" + min: + cpu: "200m" + type: Container diff --git a/content/bn/examples/admin/resource/cpu-defaults-pod-2.yaml b/content/bn/examples/admin/resource/cpu-defaults-pod-2.yaml new file mode 100644 index 0000000000000..9ca216dee1557 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-defaults-pod-2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-demo-2 +spec: + containers: + - name: default-cpu-demo-2-ctr + image: nginx + resources: + limits: + cpu: "1" diff --git a/content/bn/examples/admin/resource/cpu-defaults-pod-3.yaml b/content/bn/examples/admin/resource/cpu-defaults-pod-3.yaml new file mode 100644 index 0000000000000..214cdee34bfff --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-defaults-pod-3.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-demo-3 +spec: + containers: + - name: default-cpu-demo-3-ctr + image: nginx + resources: + requests: + cpu: "0.75" diff --git a/content/bn/examples/admin/resource/cpu-defaults-pod.yaml b/content/bn/examples/admin/resource/cpu-defaults-pod.yaml new file mode 100644 index 0000000000000..56b06d9a690e5 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-defaults-pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-cpu-demo +spec: + containers: + - name: default-cpu-demo-ctr + image: nginx diff --git a/content/bn/examples/admin/resource/cpu-defaults.yaml b/content/bn/examples/admin/resource/cpu-defaults.yaml new file mode 100644 index 0000000000000..b53d297181683 --- /dev/null +++ b/content/bn/examples/admin/resource/cpu-defaults.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: cpu-limit-range +spec: + limits: + - default: + cpu: 1 + defaultRequest: + cpu: 0.5 + type: Container diff --git a/content/bn/examples/admin/resource/limit-mem-cpu-container.yaml b/content/bn/examples/admin/resource/limit-mem-cpu-container.yaml new file mode 100644 index 0000000000000..3c2b30f29ccef --- /dev/null +++ b/content/bn/examples/admin/resource/limit-mem-cpu-container.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limit-mem-cpu-per-container +spec: + limits: + - max: + cpu: "800m" + memory: "1Gi" + min: + cpu: "100m" + memory: "99Mi" + default: + cpu: "700m" + memory: "900Mi" + defaultRequest: + cpu: "110m" + memory: "111Mi" + type: Container diff --git a/content/bn/examples/admin/resource/limit-mem-cpu-pod.yaml b/content/bn/examples/admin/resource/limit-mem-cpu-pod.yaml new file mode 100644 index 0000000000000..0ce0f69ac8130 --- /dev/null +++ b/content/bn/examples/admin/resource/limit-mem-cpu-pod.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: LimitRange 
+metadata: + name: limit-mem-cpu-per-pod +spec: + limits: + - max: + cpu: "2" + memory: "2Gi" + type: Pod diff --git a/content/bn/examples/admin/resource/limit-memory-ratio-pod.yaml b/content/bn/examples/admin/resource/limit-memory-ratio-pod.yaml new file mode 100644 index 0000000000000..859fc20ecec38 --- /dev/null +++ b/content/bn/examples/admin/resource/limit-memory-ratio-pod.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: limit-memory-ratio-pod +spec: + limits: + - maxLimitRequestRatio: + memory: 2 + type: Pod diff --git a/content/bn/examples/admin/resource/limit-range-pod-1.yaml b/content/bn/examples/admin/resource/limit-range-pod-1.yaml new file mode 100644 index 0000000000000..b9bd20d06a2c7 --- /dev/null +++ b/content/bn/examples/admin/resource/limit-range-pod-1.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox1 +spec: + containers: + - name: busybox-cnt01 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt01; sleep 10;done"] + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt02 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt02; sleep 10;done"] + resources: + requests: + memory: "100Mi" + cpu: "100m" + - name: busybox-cnt03 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt03; sleep 10;done"] + resources: + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt04 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt04; sleep 10;done"] diff --git a/content/bn/examples/admin/resource/limit-range-pod-2.yaml b/content/bn/examples/admin/resource/limit-range-pod-2.yaml new file mode 100644 index 0000000000000..40da19c1aee05 --- /dev/null +++ b/content/bn/examples/admin/resource/limit-range-pod-2.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox2 +spec: + containers: + - name: busybox-cnt01 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt01; sleep 10;done"] + resources: + requests: + memory: "100Mi" + cpu: "100m" + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt02 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt02; sleep 10;done"] + resources: + requests: + memory: "100Mi" + cpu: "100m" + - name: busybox-cnt03 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt03; sleep 10;done"] + resources: + limits: + memory: "200Mi" + cpu: "500m" + - name: busybox-cnt04 + image: busybox:1.28 + command: ["/bin/sh"] + args: ["-c", "while true; do echo hello from cnt04; sleep 10;done"] diff --git a/content/bn/examples/admin/resource/limit-range-pod-3.yaml b/content/bn/examples/admin/resource/limit-range-pod-3.yaml new file mode 100644 index 0000000000000..5b6b835e38b62 --- /dev/null +++ b/content/bn/examples/admin/resource/limit-range-pod-3.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox3 +spec: + containers: + - name: busybox-cnt01 + image: busybox:1.28 + command: ["sleep", "3600"] + resources: + limits: + memory: "300Mi" + requests: + memory: "100Mi" diff --git a/content/bn/examples/admin/resource/memory-available-cgroupv2.sh b/content/bn/examples/admin/resource/memory-available-cgroupv2.sh new file mode 100644 index 0000000000000..47b9f6802bdfd --- /dev/null +++ 
b/content/bn/examples/admin/resource/memory-available-cgroupv2.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# This script reproduces what the kubelet does
+# to calculate memory.available relative to the kubepods cgroup.
+
+# current memory usage
+memory_capacity_in_kb=$(cat /proc/meminfo | grep MemTotal | awk '{print $2}')
+memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024))
+memory_usage_in_bytes=$(cat /sys/fs/cgroup/kubepods.slice/memory.current)
+memory_total_inactive_file=$(cat /sys/fs/cgroup/kubepods.slice/memory.stat | grep inactive_file | awk '{print $2}')
+
+memory_working_set=${memory_usage_in_bytes}
+if [ "$memory_working_set" -lt "$memory_total_inactive_file" ];
+then
+    memory_working_set=0
+else
+    memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file))
+fi
+
+memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set))
+memory_available_in_kb=$((memory_available_in_bytes / 1024))
+memory_available_in_mb=$((memory_available_in_kb / 1024))
+
+echo "memory.capacity_in_bytes $memory_capacity_in_bytes"
+echo "memory.usage_in_bytes $memory_usage_in_bytes"
+echo "memory.total_inactive_file $memory_total_inactive_file"
+echo "memory.working_set $memory_working_set"
+echo "memory.available_in_bytes $memory_available_in_bytes"
+echo "memory.available_in_kb $memory_available_in_kb"
+echo "memory.available_in_mb $memory_available_in_mb"
diff --git a/content/bn/examples/admin/resource/memory-available.sh b/content/bn/examples/admin/resource/memory-available.sh
new file mode 100644
index 0000000000000..a699b1d2e2046
--- /dev/null
+++ b/content/bn/examples/admin/resource/memory-available.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# This script reproduces what the kubelet does
+# to calculate memory.available relative to the root cgroup.
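+# In other words: memory_working_set = max(0, memory_usage - total_inactive_file),
+# and memory.available = memory_capacity - memory_working_set; the results are
+# printed in bytes, kilobytes, and megabytes (1024-based).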
+ +# current memory usage +memory_capacity_in_kb=$(cat /proc/meminfo | grep MemTotal | awk '{print $2}') +memory_capacity_in_bytes=$((memory_capacity_in_kb * 1024)) +memory_usage_in_bytes=$(cat /sys/fs/cgroup/memory/memory.usage_in_bytes) +memory_total_inactive_file=$(cat /sys/fs/cgroup/memory/memory.stat | grep total_inactive_file | awk '{print $2}') + +memory_working_set=${memory_usage_in_bytes} +if [ "$memory_working_set" -lt "$memory_total_inactive_file" ]; +then + memory_working_set=0 +else + memory_working_set=$((memory_usage_in_bytes - memory_total_inactive_file)) +fi + +memory_available_in_bytes=$((memory_capacity_in_bytes - memory_working_set)) +memory_available_in_kb=$((memory_available_in_bytes / 1024)) +memory_available_in_mb=$((memory_available_in_kb / 1024)) + +echo "memory.capacity_in_bytes $memory_capacity_in_bytes" +echo "memory.usage_in_bytes $memory_usage_in_bytes" +echo "memory.total_inactive_file $memory_total_inactive_file" +echo "memory.working_set $memory_working_set" +echo "memory.available_in_bytes $memory_available_in_bytes" +echo "memory.available_in_kb $memory_available_in_kb" +echo "memory.available_in_mb $memory_available_in_mb" diff --git a/content/bn/examples/admin/resource/memory-constraints-pod-2.yaml b/content/bn/examples/admin/resource/memory-constraints-pod-2.yaml new file mode 100644 index 0000000000000..0b1ae569c4962 --- /dev/null +++ b/content/bn/examples/admin/resource/memory-constraints-pod-2.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo-2 +spec: + containers: + - name: constraints-mem-demo-2-ctr + image: nginx + resources: + limits: + memory: "1.5Gi" + requests: + memory: "800Mi" diff --git a/content/bn/examples/admin/resource/memory-constraints-pod-3.yaml b/content/bn/examples/admin/resource/memory-constraints-pod-3.yaml new file mode 100644 index 0000000000000..f97cd4a8ac07f --- /dev/null +++ b/content/bn/examples/admin/resource/memory-constraints-pod-3.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo-3 +spec: + containers: + - name: constraints-mem-demo-3-ctr + image: nginx + resources: + limits: + memory: "800Mi" + requests: + memory: "100Mi" diff --git a/content/bn/examples/admin/resource/memory-constraints-pod-4.yaml b/content/bn/examples/admin/resource/memory-constraints-pod-4.yaml new file mode 100644 index 0000000000000..03e2b6d75e437 --- /dev/null +++ b/content/bn/examples/admin/resource/memory-constraints-pod-4.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo-4 +spec: + containers: + - name: constraints-mem-demo-4-ctr + image: nginx diff --git a/content/bn/examples/admin/resource/memory-constraints-pod.yaml b/content/bn/examples/admin/resource/memory-constraints-pod.yaml new file mode 100644 index 0000000000000..06954d10d65ad --- /dev/null +++ b/content/bn/examples/admin/resource/memory-constraints-pod.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Pod +metadata: + name: constraints-mem-demo +spec: + containers: + - name: constraints-mem-demo-ctr + image: nginx + resources: + limits: + memory: "800Mi" + requests: + memory: "600Mi" diff --git a/content/bn/examples/admin/resource/memory-constraints.yaml b/content/bn/examples/admin/resource/memory-constraints.yaml new file mode 100644 index 0000000000000..3a2924c032e50 --- /dev/null +++ b/content/bn/examples/admin/resource/memory-constraints.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: mem-min-max-demo-lr +spec: + limits: + - max: + memory: 
1Gi + min: + memory: 500Mi + type: Container diff --git a/content/bn/examples/admin/resource/memory-defaults-pod-2.yaml b/content/bn/examples/admin/resource/memory-defaults-pod-2.yaml new file mode 100644 index 0000000000000..aa80610d84492 --- /dev/null +++ b/content/bn/examples/admin/resource/memory-defaults-pod-2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-mem-demo-2 +spec: + containers: + - name: default-mem-demo-2-ctr + image: nginx + resources: + limits: + memory: "1Gi" diff --git a/content/bn/examples/admin/resource/memory-defaults-pod-3.yaml b/content/bn/examples/admin/resource/memory-defaults-pod-3.yaml new file mode 100644 index 0000000000000..09ee8b39a9b42 --- /dev/null +++ b/content/bn/examples/admin/resource/memory-defaults-pod-3.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-mem-demo-3 +spec: + containers: + - name: default-mem-demo-3-ctr + image: nginx + resources: + requests: + memory: "128Mi" diff --git a/content/bn/examples/admin/resource/memory-defaults-pod.yaml b/content/bn/examples/admin/resource/memory-defaults-pod.yaml new file mode 100644 index 0000000000000..ce7a50fb555e0 --- /dev/null +++ b/content/bn/examples/admin/resource/memory-defaults-pod.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Pod +metadata: + name: default-mem-demo +spec: + containers: + - name: default-mem-demo-ctr + image: nginx diff --git a/content/bn/examples/admin/resource/memory-defaults.yaml b/content/bn/examples/admin/resource/memory-defaults.yaml new file mode 100644 index 0000000000000..b98a5ae262561 --- /dev/null +++ b/content/bn/examples/admin/resource/memory-defaults.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: mem-limit-range +spec: + limits: + - default: + memory: 512Mi + defaultRequest: + memory: 256Mi + type: Container diff --git a/content/bn/examples/admin/resource/pvc-limit-greater.yaml b/content/bn/examples/admin/resource/pvc-limit-greater.yaml new file mode 100644 index 0000000000000..2d92bf92b3121 --- /dev/null +++ b/content/bn/examples/admin/resource/pvc-limit-greater.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-limit-greater +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 5Gi diff --git a/content/bn/examples/admin/resource/pvc-limit-lower.yaml b/content/bn/examples/admin/resource/pvc-limit-lower.yaml new file mode 100644 index 0000000000000..ef819b6292049 --- /dev/null +++ b/content/bn/examples/admin/resource/pvc-limit-lower.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-limit-lower +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 500Mi diff --git a/content/bn/examples/admin/resource/quota-mem-cpu-pod-2.yaml b/content/bn/examples/admin/resource/quota-mem-cpu-pod-2.yaml new file mode 100644 index 0000000000000..380e900fda52f --- /dev/null +++ b/content/bn/examples/admin/resource/quota-mem-cpu-pod-2.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: quota-mem-cpu-demo-2 +spec: + containers: + - name: quota-mem-cpu-demo-2-ctr + image: redis + resources: + limits: + memory: "1Gi" + cpu: "800m" + requests: + memory: "700Mi" + cpu: "400m" diff --git a/content/bn/examples/admin/resource/quota-mem-cpu-pod.yaml b/content/bn/examples/admin/resource/quota-mem-cpu-pod.yaml new file mode 100644 index 0000000000000..b0fd0a9451bf2 --- /dev/null +++ b/content/bn/examples/admin/resource/quota-mem-cpu-pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: 
Pod +metadata: + name: quota-mem-cpu-demo +spec: + containers: + - name: quota-mem-cpu-demo-ctr + image: nginx + resources: + limits: + memory: "800Mi" + cpu: "800m" + requests: + memory: "600Mi" + cpu: "400m" diff --git a/content/bn/examples/admin/resource/quota-mem-cpu.yaml b/content/bn/examples/admin/resource/quota-mem-cpu.yaml new file mode 100644 index 0000000000000..5c4bcd81b8b35 --- /dev/null +++ b/content/bn/examples/admin/resource/quota-mem-cpu.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: mem-cpu-demo +spec: + hard: + requests.cpu: "1" + requests.memory: 1Gi + limits.cpu: "2" + limits.memory: 2Gi diff --git a/content/bn/examples/admin/resource/quota-objects-pvc-2.yaml b/content/bn/examples/admin/resource/quota-objects-pvc-2.yaml new file mode 100644 index 0000000000000..2539c2d3093a8 --- /dev/null +++ b/content/bn/examples/admin/resource/quota-objects-pvc-2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-quota-demo-2 +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 4Gi diff --git a/content/bn/examples/admin/resource/quota-objects-pvc.yaml b/content/bn/examples/admin/resource/quota-objects-pvc.yaml new file mode 100644 index 0000000000000..728bb4d708c27 --- /dev/null +++ b/content/bn/examples/admin/resource/quota-objects-pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: pvc-quota-demo +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi diff --git a/content/bn/examples/admin/resource/quota-objects.yaml b/content/bn/examples/admin/resource/quota-objects.yaml new file mode 100644 index 0000000000000..e97748decd53a --- /dev/null +++ b/content/bn/examples/admin/resource/quota-objects.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: object-quota-demo +spec: + hard: + persistentvolumeclaims: "1" + services.loadbalancers: "2" + services.nodeports: "0" diff --git a/content/bn/examples/admin/resource/quota-pod-deployment.yaml b/content/bn/examples/admin/resource/quota-pod-deployment.yaml new file mode 100644 index 0000000000000..86e85aa468e13 --- /dev/null +++ b/content/bn/examples/admin/resource/quota-pod-deployment.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pod-quota-demo +spec: + selector: + matchLabels: + purpose: quota-demo + replicas: 3 + template: + metadata: + labels: + purpose: quota-demo + spec: + containers: + - name: pod-quota-demo + image: nginx diff --git a/content/bn/examples/admin/resource/quota-pod.yaml b/content/bn/examples/admin/resource/quota-pod.yaml new file mode 100644 index 0000000000000..0a07f055ca853 --- /dev/null +++ b/content/bn/examples/admin/resource/quota-pod.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: pod-demo +spec: + hard: + pods: "2" diff --git a/content/bn/examples/admin/resource/storagelimits.yaml b/content/bn/examples/admin/resource/storagelimits.yaml new file mode 100644 index 0000000000000..7f597e4dfe9b1 --- /dev/null +++ b/content/bn/examples/admin/resource/storagelimits.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: storagelimits +spec: + limits: + - type: PersistentVolumeClaim + max: + storage: 2Gi + min: + storage: 1Gi diff --git a/content/bn/examples/admin/sched/clusterrole.yaml b/content/bn/examples/admin/sched/clusterrole.yaml new file mode 100644 index 0000000000000..554b8659db5b0 --- /dev/null +++ 
b/content/bn/examples/admin/sched/clusterrole.yaml @@ -0,0 +1,37 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:kube-scheduler +rules: + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - coordination.k8s.io + resourceNames: + - kube-scheduler + - my-scheduler + resources: + - leases + verbs: + - get + - update + - apiGroups: + - "" + resourceNames: + - kube-scheduler + - my-scheduler + resources: + - endpoints + verbs: + - delete + - get + - patch + - update diff --git a/content/bn/examples/admin/sched/my-scheduler.yaml b/content/bn/examples/admin/sched/my-scheduler.yaml new file mode 100644 index 0000000000000..fa1c65bf9a462 --- /dev/null +++ b/content/bn/examples/admin/sched/my-scheduler.yaml @@ -0,0 +1,113 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: my-scheduler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: my-scheduler-as-kube-scheduler +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:kube-scheduler + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: my-scheduler-as-volume-scheduler +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:volume-scheduler + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: my-scheduler-extension-apiserver-authentication-reader + namespace: kube-system +roleRef: + kind: Role + name: extension-apiserver-authentication-reader + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: my-scheduler + namespace: kube-system +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: my-scheduler-config + namespace: kube-system +data: + my-scheduler-config.yaml: | + apiVersion: kubescheduler.config.k8s.io/v1beta2 + kind: KubeSchedulerConfiguration + profiles: + - schedulerName: my-scheduler + leaderElection: + leaderElect: false +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + component: scheduler + tier: control-plane + name: my-scheduler + namespace: kube-system +spec: + selector: + matchLabels: + component: scheduler + tier: control-plane + replicas: 1 + template: + metadata: + labels: + component: scheduler + tier: control-plane + version: second + spec: + serviceAccountName: my-scheduler + containers: + - command: + - /usr/local/bin/kube-scheduler + - --config=/etc/kubernetes/my-scheduler/my-scheduler-config.yaml + image: gcr.io/my-gcp-project/my-kube-scheduler:1.0 + livenessProbe: + httpGet: + path: /healthz + port: 10259 + scheme: HTTPS + initialDelaySeconds: 15 + name: kube-second-scheduler + readinessProbe: + httpGet: + path: /healthz + port: 10259 + scheme: HTTPS + resources: + requests: + cpu: '0.1' + securityContext: + privileged: false + volumeMounts: + - name: config-volume + mountPath: /etc/kubernetes/my-scheduler + hostNetwork: false + hostPID: false + volumes: + - name: config-volume + configMap: + name: my-scheduler-config diff --git a/content/bn/examples/admin/sched/pod1.yaml b/content/bn/examples/admin/sched/pod1.yaml new file mode 100644 index 0000000000000..4755e445a5123 --- /dev/null +++ 
b/content/bn/examples/admin/sched/pod1.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Pod +metadata: + name: no-annotation + labels: + name: multischeduler-example +spec: + containers: + - name: pod-with-no-annotation-container + image: registry.k8s.io/pause:2.0 diff --git a/content/bn/examples/admin/sched/pod2.yaml b/content/bn/examples/admin/sched/pod2.yaml new file mode 100644 index 0000000000000..b78ab64a4bc4a --- /dev/null +++ b/content/bn/examples/admin/sched/pod2.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: annotation-default-scheduler + labels: + name: multischeduler-example +spec: + schedulerName: default-scheduler + containers: + - name: pod-with-default-annotation-container + image: registry.k8s.io/pause:2.0 diff --git a/content/bn/examples/admin/sched/pod3.yaml b/content/bn/examples/admin/sched/pod3.yaml new file mode 100644 index 0000000000000..661414382913f --- /dev/null +++ b/content/bn/examples/admin/sched/pod3.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: annotation-second-scheduler + labels: + name: multischeduler-example +spec: + schedulerName: my-scheduler + containers: + - name: pod-with-second-annotation-container + image: registry.k8s.io/pause:2.0 diff --git a/content/bn/examples/admin/snowflake-deployment.yaml b/content/bn/examples/admin/snowflake-deployment.yaml new file mode 100644 index 0000000000000..21b6738ba4e6d --- /dev/null +++ b/content/bn/examples/admin/snowflake-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: snowflake + name: snowflake +spec: + replicas: 2 + selector: + matchLabels: + app: snowflake + template: + metadata: + labels: + app: snowflake + spec: + containers: + - image: registry.k8s.io/serve_hostname + imagePullPolicy: Always + name: snowflake diff --git a/content/bn/examples/application/cassandra/cassandra-service.yaml b/content/bn/examples/application/cassandra/cassandra-service.yaml new file mode 100644 index 0000000000000..31bee74b58732 --- /dev/null +++ b/content/bn/examples/application/cassandra/cassandra-service.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: cassandra + name: cassandra +spec: + clusterIP: None + ports: + - port: 9042 + selector: + app: cassandra diff --git a/content/bn/examples/application/cassandra/cassandra-statefulset.yaml b/content/bn/examples/application/cassandra/cassandra-statefulset.yaml new file mode 100644 index 0000000000000..a7bdbedc9c5aa --- /dev/null +++ b/content/bn/examples/application/cassandra/cassandra-statefulset.yaml @@ -0,0 +1,100 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cassandra + labels: + app: cassandra +spec: + serviceName: cassandra + replicas: 3 + selector: + matchLabels: + app: cassandra + template: + metadata: + labels: + app: cassandra + spec: + terminationGracePeriodSeconds: 1800 + containers: + - name: cassandra + image: gcr.io/google-samples/cassandra:v13 + imagePullPolicy: Always + ports: + - containerPort: 7000 + name: intra-node + - containerPort: 7001 + name: tls-intra-node + - containerPort: 7199 + name: jmx + - containerPort: 9042 + name: cql + resources: + limits: + cpu: "500m" + memory: 1Gi + requests: + cpu: "500m" + memory: 1Gi + securityContext: + capabilities: + add: + - IPC_LOCK + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - nodetool drain + env: + - name: MAX_HEAP_SIZE + value: 512M + - name: HEAP_NEWSIZE + value: 100M + - name: CASSANDRA_SEEDS + value: "cassandra-0.cassandra.default.svc.cluster.local" + 
- name: CASSANDRA_CLUSTER_NAME + value: "K8Demo" + - name: CASSANDRA_DC + value: "DC1-K8Demo" + - name: CASSANDRA_RACK + value: "Rack1-K8Demo" + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + readinessProbe: + exec: + command: + - /bin/bash + - -c + - /ready-probe.sh + initialDelaySeconds: 15 + timeoutSeconds: 5 + # These volume mounts are persistent. They are like inline claims, + # but not exactly because the names need to match exactly one of + # the stateful pod volumes. + volumeMounts: + - name: cassandra-data + mountPath: /cassandra_data + # These are converted to volume claims by the controller + # and mounted at the paths mentioned above. + # do not use these in production until ssd GCEPersistentDisk or other ssd pd + volumeClaimTemplates: + - metadata: + name: cassandra-data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: fast + resources: + requests: + storage: 1Gi +--- +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: fast +provisioner: k8s.io/minikube-hostpath +parameters: + type: pd-ssd diff --git a/content/bn/examples/application/deployment-patch.yaml b/content/bn/examples/application/deployment-patch.yaml new file mode 100644 index 0000000000000..af12f4cb0c4ec --- /dev/null +++ b/content/bn/examples/application/deployment-patch.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: patch-demo +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: patch-demo-ctr + image: nginx + tolerations: + - effect: NoSchedule + key: dedicated + value: test-team diff --git a/content/bn/examples/application/deployment-retainkeys.yaml b/content/bn/examples/application/deployment-retainkeys.yaml new file mode 100644 index 0000000000000..af63f46d37294 --- /dev/null +++ b/content/bn/examples/application/deployment-retainkeys.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: retainkeys-demo +spec: + selector: + matchLabels: + app: nginx + strategy: + rollingUpdate: + maxSurge: 30% + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: retainkeys-demo-ctr + image: nginx diff --git a/content/bn/examples/application/deployment-scale.yaml b/content/bn/examples/application/deployment-scale.yaml new file mode 100644 index 0000000000000..838576375ef6f --- /dev/null +++ b/content/bn/examples/application/deployment-scale.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 4 # Update the replicas from 2 to 4 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.16.1 + ports: + - containerPort: 80 diff --git a/content/bn/examples/application/deployment-sidecar.yaml b/content/bn/examples/application/deployment-sidecar.yaml new file mode 100644 index 0000000000000..80bacbd15bc9d --- /dev/null +++ b/content/bn/examples/application/deployment-sidecar.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: myapp + labels: + app: myapp +spec: + replicas: 1 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: myapp + image: alpine:latest + command: ['sh', '-c', 'while true; do echo "logging" >> /opt/logs.txt; sleep 1; done'] + volumeMounts: + - name: data + mountPath: /opt + initContainers: + - name: logshipper + image: alpine:latest + restartPolicy: Always + 
command: ['sh', '-c', 'tail -F /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + volumes: + - name: data + emptyDir: {} diff --git a/content/bn/examples/application/deployment-update.yaml b/content/bn/examples/application/deployment-update.yaml new file mode 100644 index 0000000000000..1c0b9d1ab8a4f --- /dev/null +++ b/content/bn/examples/application/deployment-update.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.16.1 # Update the version of nginx from 1.14.2 to 1.16.1 + ports: + - containerPort: 80 diff --git a/content/bn/examples/application/deployment.yaml b/content/bn/examples/application/deployment.yaml new file mode 100644 index 0000000000000..6ff818be62d36 --- /dev/null +++ b/content/bn/examples/application/deployment.yaml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-deployment +spec: + selector: + matchLabels: + app: nginx + replicas: 2 # ডিপ্লয়মেন্টকে টেমপ্লেটের সাথে মিলে যাওয়া 2টি পড চালাতে বলে + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 diff --git a/content/bn/examples/application/guestbook/frontend-deployment.yaml b/content/bn/examples/application/guestbook/frontend-deployment.yaml new file mode 100644 index 0000000000000..b4639929ad424 --- /dev/null +++ b/content/bn/examples/application/guestbook/frontend-deployment.yaml @@ -0,0 +1,29 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: frontend +spec: + replicas: 3 + selector: + matchLabels: + app: guestbook + tier: frontend + template: + metadata: + labels: + app: guestbook + tier: frontend + spec: + containers: + - name: php-redis + image: us-docker.pkg.dev/google-samples/containers/gke/gb-frontend:v5 + env: + - name: GET_HOSTS_FROM + value: "dns" + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 80 diff --git a/content/bn/examples/application/guestbook/frontend-service.yaml b/content/bn/examples/application/guestbook/frontend-service.yaml new file mode 100644 index 0000000000000..14a8ded8abe88 --- /dev/null +++ b/content/bn/examples/application/guestbook/frontend-service.yaml @@ -0,0 +1,19 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: frontend + labels: + app: guestbook + tier: frontend +spec: + # if your cluster supports it, uncomment the following to automatically create + # an external load-balanced IP for the frontend service. 
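+  # (Left commented, the type defaults to ClusterIP, so the frontend is
+  # reachable only from within the cluster.)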
+ # type: LoadBalancer + #type: LoadBalancer + ports: + # the port that this service should serve on + - port: 80 + selector: + app: guestbook + tier: frontend diff --git a/content/bn/examples/application/guestbook/redis-follower-deployment.yaml b/content/bn/examples/application/guestbook/redis-follower-deployment.yaml new file mode 100644 index 0000000000000..a221e47e6fdbe --- /dev/null +++ b/content/bn/examples/application/guestbook/redis-follower-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + replicas: 2 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: follower + tier: backend + spec: + containers: + - name: follower + image: us-docker.pkg.dev/google-samples/containers/gke/gb-redis-follower:v2 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 diff --git a/content/bn/examples/application/guestbook/redis-follower-service.yaml b/content/bn/examples/application/guestbook/redis-follower-service.yaml new file mode 100644 index 0000000000000..e40607e7192ff --- /dev/null +++ b/content/bn/examples/application/guestbook/redis-follower-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-follower + labels: + app: redis + role: follower + tier: backend +spec: + ports: + # the port that this service should serve on + - port: 6379 + selector: + app: redis + role: follower + tier: backend diff --git a/content/bn/examples/application/guestbook/redis-leader-deployment.yaml b/content/bn/examples/application/guestbook/redis-leader-deployment.yaml new file mode 100644 index 0000000000000..70122fba816e8 --- /dev/null +++ b/content/bn/examples/application/guestbook/redis-leader-deployment.yaml @@ -0,0 +1,30 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + role: leader + tier: backend + spec: + containers: + - name: leader + image: "docker.io/redis:6.0.5" + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 6379 diff --git a/content/bn/examples/application/guestbook/redis-leader-service.yaml b/content/bn/examples/application/guestbook/redis-leader-service.yaml new file mode 100644 index 0000000000000..439334f99b64b --- /dev/null +++ b/content/bn/examples/application/guestbook/redis-leader-service.yaml @@ -0,0 +1,17 @@ +# SOURCE: https://cloud.google.com/kubernetes-engine/docs/tutorials/guestbook +apiVersion: v1 +kind: Service +metadata: + name: redis-leader + labels: + app: redis + role: leader + tier: backend +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis + role: leader + tier: backend diff --git a/content/bn/examples/application/hpa/php-apache.yaml b/content/bn/examples/application/hpa/php-apache.yaml new file mode 100644 index 0000000000000..1c49aca6a1ff5 --- /dev/null +++ b/content/bn/examples/application/hpa/php-apache.yaml @@ -0,0 +1,18 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: php-apache +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + 
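+    # scaleTargetRef tells the autoscaler which workload to scale;
+    # minReplicas/maxReplicas below bound the replica range it may choose.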
name: php-apache + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 diff --git a/content/bn/examples/application/job/cronjob.yaml b/content/bn/examples/application/job/cronjob.yaml new file mode 100644 index 0000000000000..78d0e2d314792 --- /dev/null +++ b/content/bn/examples/application/job/cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: hello + image: busybox:1.28 + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + restartPolicy: OnFailure diff --git a/content/bn/examples/application/job/indexed-job-vol.yaml b/content/bn/examples/application/job/indexed-job-vol.yaml new file mode 100644 index 0000000000000..cecc2b7e65ab6 --- /dev/null +++ b/content/bn/examples/application/job/indexed-job-vol.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: 'indexed-job' +spec: + completions: 5 + parallelism: 3 + completionMode: Indexed + template: + spec: + restartPolicy: Never + containers: + - name: 'worker' + image: 'docker.io/library/busybox' + command: + - "rev" + - "/input/data.txt" + volumeMounts: + - mountPath: /input + name: input + volumes: + - name: input + downwardAPI: + items: + - path: "data.txt" + fieldRef: + fieldPath: metadata.annotations['batch.kubernetes.io/job-completion-index'] diff --git a/content/bn/examples/application/job/indexed-job.yaml b/content/bn/examples/application/job/indexed-job.yaml new file mode 100644 index 0000000000000..5b80d3526491f --- /dev/null +++ b/content/bn/examples/application/job/indexed-job.yaml @@ -0,0 +1,35 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: 'indexed-job' +spec: + completions: 5 + parallelism: 3 + completionMode: Indexed + template: + spec: + restartPolicy: Never + initContainers: + - name: 'input' + image: 'docker.io/library/bash' + command: + - "bash" + - "-c" + - | + items=(foo bar baz qux xyz) + echo ${items[$JOB_COMPLETION_INDEX]} > /input/data.txt + volumeMounts: + - mountPath: /input + name: input + containers: + - name: 'worker' + image: 'docker.io/library/busybox' + command: + - "rev" + - "/input/data.txt" + volumeMounts: + - mountPath: /input + name: input + volumes: + - name: input + emptyDir: {} diff --git a/content/bn/examples/application/job/job-sidecar.yaml b/content/bn/examples/application/job/job-sidecar.yaml new file mode 100644 index 0000000000000..ce483b872ef6f --- /dev/null +++ b/content/bn/examples/application/job/job-sidecar.yaml @@ -0,0 +1,26 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: myjob +spec: + template: + spec: + containers: + - name: myjob + image: alpine:latest + command: ['sh', '-c', 'echo "logging" > /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + initContainers: + - name: logshipper + image: alpine:latest + restartPolicy: Always + command: ['sh', '-c', 'tail -F /opt/logs.txt'] + volumeMounts: + - name: data + mountPath: /opt + restartPolicy: Never + volumes: + - name: data + emptyDir: {} diff --git a/content/bn/examples/application/job/job-tmpl.yaml b/content/bn/examples/application/job/job-tmpl.yaml new file mode 100644 index 0000000000000..d7dbbafd62bc5 --- /dev/null +++ b/content/bn/examples/application/job/job-tmpl.yaml @@ -0,0 +1,18 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: process-item-$ITEM + labels: + jobgroup: jobexample +spec: + 
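+  # $ITEM is not interpreted by Kubernetes: this manifest is a template that a
+  # templating step (for example, sed) expands into one Job manifest per work item.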
+  template:
+    metadata:
+      name: jobexample
+      labels:
+        jobgroup: jobexample
+    spec:
+      containers:
+      - name: c
+        image: busybox:1.28
+        command: ["sh", "-c", "echo Processing item $ITEM && sleep 5"]
+      restartPolicy: Never
diff --git a/content/bn/examples/application/job/rabbitmq/Dockerfile b/content/bn/examples/application/job/rabbitmq/Dockerfile
new file mode 100644
index 0000000000000..50faab23f439c
--- /dev/null
+++ b/content/bn/examples/application/job/rabbitmq/Dockerfile
@@ -0,0 +1,10 @@
+# Specify BROKER_URL and QUEUE when running
+FROM ubuntu:18.04
+
+RUN apt-get update && \
+    apt-get install -y curl ca-certificates amqp-tools python \
+    --no-install-recommends \
+    && rm -rf /var/lib/apt/lists/*
+COPY ./worker.py /worker.py
+
+CMD /usr/bin/amqp-consume --url=$BROKER_URL -q $QUEUE -c 1 /worker.py
diff --git a/content/bn/examples/application/job/rabbitmq/job.yaml b/content/bn/examples/application/job/rabbitmq/job.yaml
new file mode 100644
index 0000000000000..4e1a61892be6b
--- /dev/null
+++ b/content/bn/examples/application/job/rabbitmq/job.yaml
@@ -0,0 +1,20 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: job-wq-1
+spec:
+  completions: 8
+  parallelism: 2
+  template:
+    metadata:
+      name: job-wq-1
+    spec:
+      containers:
+      - name: c
+        image: gcr.io/<project>/job-wq-1
+        env:
+        - name: BROKER_URL
+          value: amqp://guest:guest@rabbitmq-service:5672
+        - name: QUEUE
+          value: job1
+      restartPolicy: OnFailure
diff --git a/content/bn/examples/application/job/rabbitmq/rabbitmq-service.yaml b/content/bn/examples/application/job/rabbitmq/rabbitmq-service.yaml
new file mode 100644
index 0000000000000..2f7fb06dcfed6
--- /dev/null
+++ b/content/bn/examples/application/job/rabbitmq/rabbitmq-service.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    component: rabbitmq
+  name: rabbitmq-service
+spec:
+  ports:
+  - port: 5672
+  selector:
+    app.kubernetes.io/name: task-queue
+    app.kubernetes.io/component: rabbitmq
diff --git a/content/bn/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml b/content/bn/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml
new file mode 100644
index 0000000000000..502598ddf947e
--- /dev/null
+++ b/content/bn/examples/application/job/rabbitmq/rabbitmq-statefulset.yaml
@@ -0,0 +1,36 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  labels:
+    component: rabbitmq
+  name: rabbitmq
+spec:
+  replicas: 1
+  serviceName: rabbitmq-service
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: task-queue
+      app.kubernetes.io/component: rabbitmq
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: task-queue
+        app.kubernetes.io/component: rabbitmq
+    spec:
+      containers:
+      - image: rabbitmq
+        name: rabbitmq
+        ports:
+        - containerPort: 5672
+        resources:
+          requests:
+            memory: 16M
+          limits:
+            cpu: 250m
+            memory: 512M
+        volumeMounts:
+        - mountPath: /var/lib/rabbitmq
+          name: rabbitmq-data
+      volumes:
+      - name: rabbitmq-data
+        emptyDir: {}
diff --git a/content/bn/examples/application/job/rabbitmq/worker.py b/content/bn/examples/application/job/rabbitmq/worker.py
new file mode 100644
index 0000000000000..88a7fcf96d91a
--- /dev/null
+++ b/content/bn/examples/application/job/rabbitmq/worker.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+# Just prints standard out and sleeps for 10 seconds.
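+# (The Dockerfile's CMD runs this script through amqp-consume, which invokes it
+# once per message and pipes the message body to standard input.)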
+import sys +import time +print("Processing " + sys.stdin.readlines()[0]) +time.sleep(10) diff --git a/content/bn/examples/application/job/redis/Dockerfile b/content/bn/examples/application/job/redis/Dockerfile new file mode 100644 index 0000000000000..2de23b3c98340 --- /dev/null +++ b/content/bn/examples/application/job/redis/Dockerfile @@ -0,0 +1,6 @@ +FROM python +RUN pip install redis +COPY ./worker.py /worker.py +COPY ./rediswq.py /rediswq.py + +CMD python worker.py diff --git a/content/bn/examples/application/job/redis/job.yaml b/content/bn/examples/application/job/redis/job.yaml new file mode 100644 index 0000000000000..ee7a06c732986 --- /dev/null +++ b/content/bn/examples/application/job/redis/job.yaml @@ -0,0 +1,14 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: job-wq-2 +spec: + parallelism: 2 + template: + metadata: + name: job-wq-2 + spec: + containers: + - name: c + image: gcr.io/myproject/job-wq-2 + restartPolicy: OnFailure diff --git a/content/bn/examples/application/job/redis/redis-pod.yaml b/content/bn/examples/application/job/redis/redis-pod.yaml new file mode 100644 index 0000000000000..ae0c43a793570 --- /dev/null +++ b/content/bn/examples/application/job/redis/redis-pod.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: redis-master + labels: + app: redis +spec: + containers: + - name: master + image: redis + env: + - name: MASTER + value: "true" + ports: + - containerPort: 6379 diff --git a/content/bn/examples/application/job/redis/redis-service.yaml b/content/bn/examples/application/job/redis/redis-service.yaml new file mode 100644 index 0000000000000..85f2ca2271d0b --- /dev/null +++ b/content/bn/examples/application/job/redis/redis-service.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Service +metadata: + name: redis +spec: + ports: + - port: 6379 + targetPort: 6379 + selector: + app: redis diff --git a/content/bn/examples/application/job/redis/rediswq.py b/content/bn/examples/application/job/redis/rediswq.py new file mode 100644 index 0000000000000..c868682e908e3 --- /dev/null +++ b/content/bn/examples/application/job/redis/rediswq.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python + +# Based on http://peter-hoffmann.com/2012/python-simple-queue-redis-queue.html +# and the suggestion in the redis documentation for RPOPLPUSH, at +# http://redis.io/commands/rpoplpush, which suggests how to implement a work-queue. + + +import redis +import uuid +import hashlib + +class RedisWQ(object): + """Simple Finite Work Queue with Redis Backend + + This work queue is finite: as long as no more work is added + after workers start, the workers can detect when the queue + is completely empty. + + The items in the work queue are assumed to have unique values. + + This object is not intended to be used by multiple threads + concurrently. + """ + def __init__(self, name, **redis_kwargs): + """The default connection parameters are: host='localhost', port=6379, db=0 + + The work queue is identified by "name". The library may create other + keys with "name" as a prefix. + """ + self._db = redis.StrictRedis(**redis_kwargs) + # The session ID will uniquely identify this "worker". + self._session = str(uuid.uuid4()) + # Work queue is implemented as two queues: main, and processing. + # Work is initially in main, and moved to processing when a client picks it up. 
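+        # A lease is a separate, expiring key per item (see _lease_key_prefix),
+        # so an item whose lease lapsed can be detected and returned to main.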
+        self._main_q_key = name
+        self._processing_q_key = name + ":processing"
+        self._lease_key_prefix = name + ":leased_by_session:"
+
+    def sessionID(self):
+        """Return the ID for this session."""
+        return self._session
+
+    def _main_qsize(self):
+        """Return the size of the main queue."""
+        return self._db.llen(self._main_q_key)
+
+    def _processing_qsize(self):
+        """Return the size of the processing queue."""
+        return self._db.llen(self._processing_q_key)
+
+    def empty(self):
+        """Return True if the queue is empty, including work being done, False otherwise.
+
+        False does not necessarily mean that there is work available to work on right now.
+        """
+        return self._main_qsize() == 0 and self._processing_qsize() == 0
+
+# TODO: implement this
+#    def check_expired_leases(self):
+#        """Return items with expired leases to the main queue."""
+#        # Processing list should not be _too_ long since it is approximately as long
+#        # as the number of active and recently active workers.
+#        processing = self._db.lrange(self._processing_q_key, 0, -1)
+#        for item in processing:
+#            # If the lease key is not present for an item (it expired or was
+#            # never created because the client crashed before creating it)
+#            # then move the item back to the main queue so others can work on it.
+#            if not self._lease_exists(item):
+#                TODO: transactionally move the key from the processing queue to
+#                the main queue, while detecting if a new lease is created
+#                or if either queue is modified.
+
+    def _itemkey(self, item):
+        """Returns a string that uniquely identifies an item (bytes)."""
+        return hashlib.sha224(item).hexdigest()
+
+    def _lease_exists(self, item):
+        """True if a lease on 'item' exists."""
+        return self._db.exists(self._lease_key_prefix + self._itemkey(item))
+
+    def lease(self, lease_secs=60, block=True, timeout=None):
+        """Begin working on an item from the work queue.
+
+        Lease the item for lease_secs. After that time, other
+        workers may consider this client to have crashed or stalled
+        and pick up the item instead.
+
+        If optional args block is true and timeout is None (the default), block
+        if necessary until an item is available."""
+        if block:
+            item = self._db.brpoplpush(self._main_q_key, self._processing_q_key, timeout=timeout)
+        else:
+            item = self._db.rpoplpush(self._main_q_key, self._processing_q_key)
+        if item:
+            # Record that we (this session id) are working on a key. Expire that
+            # note after the lease timeout.
+            # Note: if we crash at this line of the program, then GC will see no lease
+            # for this item and will later return it to the main queue.
+            itemkey = self._itemkey(item)
+            self._db.setex(self._lease_key_prefix + itemkey, lease_secs, self._session)
+        return item
+
+    def complete(self, value):
+        """Complete working on the item with 'value'.
+
+        If the lease expired, the item may not have completed, and some
+        other worker may have picked it up. There is no indication
+        of what happened.
+        """
+        self._db.lrem(self._processing_q_key, 0, value)
+        # If we crash here, then the GC code will try to move the value, but it will
+        # not be here, which is fine. So this does not need to be a transaction.
+        itemkey = self._itemkey(value)
+        self._db.delete(self._lease_key_prefix + itemkey)
+
+# TODO: add functions to clean up all keys associated with "name" when
+# processing is complete.
+
+# TODO: add a function to add an item to the queue. Atomically
+# check if the queue is empty and if so fail to add the item
+# since other workers might think work is done and be in the process
+# of exiting.
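+# Typical client loop (see worker.py in this directory):
+#   q = rediswq.RedisWQ(name="job2", host="redis")
+#   while not q.empty():
+#       item = q.lease(lease_secs=10, block=True, timeout=2)
+#       if item is not None:
+#           ...            # process the item
+#           q.complete(item)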
+ +# TODO(etune): move to my own github for hosting, e.g. github.com/erictune/rediswq-py and +# make it so it can be pip installed by anyone (see +# http://stackoverflow.com/questions/8247605/configuring-so-that-pip-install-can-work-from-github) + +# TODO(etune): finish code to GC expired leases, and call periodically +# e.g. each time lease times out. diff --git a/content/bn/examples/application/job/redis/worker.py b/content/bn/examples/application/job/redis/worker.py new file mode 100644 index 0000000000000..c3523a4e21a48 --- /dev/null +++ b/content/bn/examples/application/job/redis/worker.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python + +import time +import rediswq + +host="redis" +# Uncomment next two lines if you do not have Kube-DNS working. +# import os +# host = os.getenv("REDIS_SERVICE_HOST") + +q = rediswq.RedisWQ(name="job2", host=host) +print("Worker with sessionID: " + q.sessionID()) +print("Initial queue state: empty=" + str(q.empty())) +while not q.empty(): + item = q.lease(lease_secs=10, block=True, timeout=2) + if item is not None: + itemstr = item.decode("utf-8") + print("Working on " + itemstr) + time.sleep(10) # Put your actual work here instead of sleep. + q.complete(item) + else: + print("Waiting for work") +print("Queue empty, exiting") diff --git a/content/bn/examples/application/mongodb/mongo-deployment.yaml b/content/bn/examples/application/mongodb/mongo-deployment.yaml new file mode 100644 index 0000000000000..04908ce25b1dc --- /dev/null +++ b/content/bn/examples/application/mongodb/mongo-deployment.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mongo + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend +spec: + selector: + matchLabels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend + replicas: 1 + template: + metadata: + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend + spec: + containers: + - name: mongo + image: mongo:4.2 + args: + - --bind_ip + - 0.0.0.0 + resources: + requests: + cpu: 100m + memory: 100Mi + ports: + - containerPort: 27017 diff --git a/content/bn/examples/application/mongodb/mongo-service.yaml b/content/bn/examples/application/mongodb/mongo-service.yaml new file mode 100644 index 0000000000000..b9cef607bcf79 --- /dev/null +++ b/content/bn/examples/application/mongodb/mongo-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: mongo + labels: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend +spec: + ports: + - port: 27017 + targetPort: 27017 + selector: + app.kubernetes.io/name: mongo + app.kubernetes.io/component: backend diff --git a/content/bn/examples/application/mysql/mysql-configmap.yaml b/content/bn/examples/application/mysql/mysql-configmap.yaml new file mode 100644 index 0000000000000..715d2f572486a --- /dev/null +++ b/content/bn/examples/application/mysql/mysql-configmap.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: mysql + labels: + app: mysql + app.kubernetes.io/name: mysql +data: + primary.cnf: | + # Apply this config only on the primary. + [mysqld] + log-bin + replica.cnf: | + # Apply this config only on replicas. 
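+    # (super-read-only below rejects writes on replicas, even from SUPER users;
+    # log-bin above enables the binary logging that replication requires on the primary.)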
+ [mysqld] + super-read-only diff --git a/content/bn/examples/application/mysql/mysql-deployment.yaml b/content/bn/examples/application/mysql/mysql-deployment.yaml new file mode 100644 index 0000000000000..419fbe03d3ff0 --- /dev/null +++ b/content/bn/examples/application/mysql/mysql-deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Service +metadata: + name: mysql +spec: + ports: + - port: 3306 + selector: + app: mysql + clusterIP: None +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mysql +spec: + selector: + matchLabels: + app: mysql + strategy: + type: Recreate + template: + metadata: + labels: + app: mysql + spec: + containers: + - image: mysql:5.6 + name: mysql + env: + # Use secret in real usage + - name: MYSQL_ROOT_PASSWORD + value: password + ports: + - containerPort: 3306 + name: mysql + volumeMounts: + - name: mysql-persistent-storage + mountPath: /var/lib/mysql + volumes: + - name: mysql-persistent-storage + persistentVolumeClaim: + claimName: mysql-pv-claim diff --git a/content/bn/examples/application/mysql/mysql-pv.yaml b/content/bn/examples/application/mysql/mysql-pv.yaml new file mode 100644 index 0000000000000..c89779a83fd23 --- /dev/null +++ b/content/bn/examples/application/mysql/mysql-pv.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: mysql-pv-volume + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 20Gi + accessModes: + - ReadWriteOnce + hostPath: + path: "/mnt/data" +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: mysql-pv-claim +spec: + storageClassName: manual + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 20Gi diff --git a/content/bn/examples/application/mysql/mysql-services.yaml b/content/bn/examples/application/mysql/mysql-services.yaml new file mode 100644 index 0000000000000..bc015066780c3 --- /dev/null +++ b/content/bn/examples/application/mysql/mysql-services.yaml @@ -0,0 +1,32 @@ +# Headless service for stable DNS entries of StatefulSet members. +apiVersion: v1 +kind: Service +metadata: + name: mysql + labels: + app: mysql + app.kubernetes.io/name: mysql +spec: + ports: + - name: mysql + port: 3306 + clusterIP: None + selector: + app: mysql +--- +# Client service for connecting to any MySQL instance for reads. +# For writes, you must instead connect to the primary: mysql-0.mysql. +apiVersion: v1 +kind: Service +metadata: + name: mysql-read + labels: + app: mysql + app.kubernetes.io/name: mysql + readonly: "true" +spec: + ports: + - name: mysql + port: 3306 + selector: + app: mysql diff --git a/content/bn/examples/application/mysql/mysql-statefulset.yaml b/content/bn/examples/application/mysql/mysql-statefulset.yaml new file mode 100644 index 0000000000000..67755dbb9e830 --- /dev/null +++ b/content/bn/examples/application/mysql/mysql-statefulset.yaml @@ -0,0 +1,168 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: mysql +spec: + selector: + matchLabels: + app: mysql + app.kubernetes.io/name: mysql + serviceName: mysql + replicas: 3 + template: + metadata: + labels: + app: mysql + app.kubernetes.io/name: mysql + spec: + initContainers: + - name: init-mysql + image: mysql:5.7 + command: + - bash + - "-c" + - | + set -ex + # Generate mysql server-id from pod ordinal index. + [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1 + ordinal=${BASH_REMATCH[1]} + echo [mysqld] > /mnt/conf.d/server-id.cnf + # Add an offset to avoid reserved server-id=0 value. 
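+          # (Every server in a MySQL replication topology needs a unique,
+          # non-zero server-id; pod ordinals 0,1,2 become 100,101,102.)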
+          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
+          # Copy appropriate conf.d files from config-map to emptyDir.
+          if [[ $ordinal -eq 0 ]]; then
+            cp /mnt/config-map/primary.cnf /mnt/conf.d/
+          else
+            cp /mnt/config-map/replica.cnf /mnt/conf.d/
+          fi
+        volumeMounts:
+        - name: conf
+          mountPath: /mnt/conf.d
+        - name: config-map
+          mountPath: /mnt/config-map
+      - name: clone-mysql
+        image: gcr.io/google-samples/xtrabackup:1.0
+        command:
+        - bash
+        - "-c"
+        - |
+          set -ex
+          # Skip the clone if data already exists.
+          [[ -d /var/lib/mysql/mysql ]] && exit 0
+          # Skip the clone on primary (ordinal index 0).
+          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
+          ordinal=${BASH_REMATCH[1]}
+          [[ $ordinal -eq 0 ]] && exit 0
+          # Clone data from previous peer.
+          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
+          # Prepare the backup.
+          xtrabackup --prepare --target-dir=/var/lib/mysql
+        volumeMounts:
+        - name: data
+          mountPath: /var/lib/mysql
+          subPath: mysql
+        - name: conf
+          mountPath: /etc/mysql/conf.d
+      containers:
+      - name: mysql
+        image: mysql:5.7
+        env:
+        - name: MYSQL_ALLOW_EMPTY_PASSWORD
+          value: "1"
+        ports:
+        - name: mysql
+          containerPort: 3306
+        volumeMounts:
+        - name: data
+          mountPath: /var/lib/mysql
+          subPath: mysql
+        - name: conf
+          mountPath: /etc/mysql/conf.d
+        resources:
+          requests:
+            cpu: 500m
+            memory: 1Gi
+        livenessProbe:
+          exec:
+            command: ["mysqladmin", "ping"]
+          initialDelaySeconds: 30
+          periodSeconds: 10
+          timeoutSeconds: 5
+        readinessProbe:
+          exec:
+            # Check we can execute queries over TCP (skip-networking is off).
+            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
+          initialDelaySeconds: 5
+          periodSeconds: 2
+          timeoutSeconds: 1
+      - name: xtrabackup
+        image: gcr.io/google-samples/xtrabackup:1.0
+        ports:
+        - name: xtrabackup
+          containerPort: 3307
+        command:
+        - bash
+        - "-c"
+        - |
+          set -ex
+          cd /var/lib/mysql
+
+          # Determine binlog position of cloned data, if any.
+          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
+            # XtraBackup already generated a partial "CHANGE MASTER TO" query
+            # because we're cloning from an existing replica. (Need to remove the trailing semicolon!)
+            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
+            # Ignore xtrabackup_binlog_info in this case (it's useless).
+            rm -f xtrabackup_slave_info xtrabackup_binlog_info
+          elif [[ -f xtrabackup_binlog_info ]]; then
+            # We're cloning directly from primary. Parse binlog position.
+            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
+            rm -f xtrabackup_binlog_info xtrabackup_slave_info
+            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
+                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
+          fi
+
+          # Check if we need to complete a clone by starting replication.
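+          # (change_master_to.sql.in exists only when data was cloned from a
+          # peer and replication is not yet configured; it is renamed below so
+          # this block runs at most once.)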
+          if [[ -f change_master_to.sql.in ]]; then
+            echo "Waiting for mysqld to be ready (accepting connections)"
+            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
+
+            echo "Initializing replication from clone position"
+            mysql -h 127.0.0.1 \
+                  -e "$(<change_master_to.sql.in), \
+                          MASTER_HOST='mysql-0.mysql', \
+                          MASTER_USER='root', \
+                          MASTER_PASSWORD='', \
+                          MASTER_CONNECT_RETRY=10; \
+                        START SLAVE;" || exit 1
+            # In case of container restart, attempt this at-most-once.
+            mv change_master_to.sql.in change_master_to.sql.orig
+          fi
+
+          # Start a server to send backups when requested by peers.
+          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
+            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
+        volumeMounts:
+        - name: data
+          mountPath: /var/lib/mysql
+          subPath: mysql
+        - name: conf
+          mountPath: /etc/mysql/conf.d
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+      volumes:
+      - name: conf
+        emptyDir: {}
+      - name: config-map
+        configMap:
+          name: mysql
+  volumeClaimTemplates:
+  - metadata:
+      name: data
+    spec:
+      accessModes: ["ReadWriteOnce"]
+      resources:
+        requests:
+          storage: 10Gi
[diff header and opening lines of the next new file (a fluentd-gcp ConfigMap example) lost in extraction]
+    # /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
+    # The /var/log directory on the host is mapped to the /var/log directory in the container
+    # running this instance of Fluentd and we end up collecting the file:
+    # /var/log/containers/synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    # This results in the tag:
+    # var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    # The record reformer is used to discard the var.log.containers prefix and
+    # the Docker container ID suffix and "kubernetes." is pre-pended giving the tag:
+    # kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr
+    # Tag is then parsed by google_cloud plugin and translated to the metadata,
+    # visible in the log viewer
+
+    # Example:
+    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+    <source>
+      type tail
+      format json
+      time_key time
+      path /var/log/containers/*.log
+      pos_file /var/log/gcp-containers.log.pos
+      time_format %Y-%m-%dT%H:%M:%S.%N%Z
+      tag reform.*
+      read_from_head true
+    </source>
+
+    <filter reform.**>
+      type parser
+      format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      reserve_data true
+      suppress_parse_error_log true
+      key_name log
+    </filter>
+
+    <match reform.**>
+      type record_reformer
+      enable_ruby true
+      tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
+    </match>
+
+    # Detect exceptions in the log output and forward them as one log entry.
+    <match raw.kubernetes.**>
+      @type copy
+
+      <store>
+        @type prometheus
+
+        <metric>
+          type counter
+          name logging_line_count
+          desc Total number of lines generated by application containers
+          <labels>
+            tag ${tag}
+          </labels>
+        </metric>
+      </store>
+      <store>
+        @type detect_exceptions
+
+        remove_tag_prefix raw
+        message log
+        stream stream
+        multiline_flush_interval 5
+        max_bytes 500000
+        max_lines 1000
+      </store>
+    </match>
+  system.input.conf: |-
+    # Example:
+    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
+    <source>
+      type tail
+      format syslog
+      path /var/log/startupscript.log
+      pos_file /var/log/gcp-startupscript.log.pos
+      tag startupscript
+    </source>
+
+    # Examples:
+    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
+    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+    <source>
+      type tail
+      format /^time="(?