diff --git a/404.html b/404.html
index 34fc637f9..81722e25b 100644
--- a/404.html
+++ b/404.html
@@ -32,7 +32,7 @@
@@ -40,7 +40,7 @@
@@ -51,12 +51,12 @@
@@ -117,7 +117,7 @@
@@ -148,7 +148,7 @@
@@ -1130,9 +1130,9 @@
@@ -1142,7 +1142,7 @@
diff --git a/assets/javascripts/application.ac79c3b0.js b/assets/javascripts/application.c33a9706.js
similarity index 75%
rename from assets/javascripts/application.ac79c3b0.js
rename to assets/javascripts/application.c33a9706.js
index a92563a35..3da6d0caf 100644
--- a/assets/javascripts/application.ac79c3b0.js
+++ b/assets/javascripts/application.c33a9706.js
@@ -1,4 +1,4 @@
-!function(e,t){for(var n in t)e[n]=t[n]}(window,function(n){var r={};function i(e){if(r[e])return r[e].exports;var t=r[e]={i:e,l:!1,exports:{}};return n[e].call(t.exports,t,t.exports,i),t.l=!0,t.exports}return i.m=n,i.c=r,i.d=function(e,t,n){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(t,e){if(1&e&&(t=i(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var n=Object.create(null);if(i.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var r in t)i.d(n,r,function(e){return t[e]}.bind(null,r));return n},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="",i(i.s=13)}([function(e,t,n){"use strict";var r={Listener:function(){function e(e,t,n){var r=this;this.els_=Array.prototype.slice.call("string"==typeof e?document.querySelectorAll(e):[].concat(e)),this.handler_="function"==typeof n?{update:n}:n,this.events_=[].concat(t),this.update_=function(e){return r.handler_.update(e)}}var t=e.prototype;return t.listen=function(){var n=this;this.els_.forEach(function(t){n.events_.forEach(function(e){t.addEventListener(e,n.update_,!1)})}),"function"==typeof this.handler_.setup&&this.handler_.setup()},t.unlisten=function(){var n=this;this.els_.forEach(function(t){n.events_.forEach(function(e){t.removeEventListener(e,n.update_)})}),"function"==typeof this.handler_.reset&&this.handler_.reset()},e}(),MatchMedia:function(e,t){this.handler_=function(e){e.matches?t.listen():t.unlisten()};var n=window.matchMedia(e);n.addListener(this.handler_),this.handler_(n)}},i={Shadow:function(){function e(e,t){var n="string"==typeof e?document.querySelector(e):e;if(!(n instanceof HTMLElement&&n.parentNode instanceof HTMLElement))throw new ReferenceError;if(this.el_=n.parentNode,!((n="string"==typeof t?document.querySelector(t):t)instanceof HTMLElement))throw new ReferenceError;this.header_=n,this.height_=0,this.active_=!1}var t=e.prototype;return t.setup=function(){for(var e=this.el_;e=e.previousElementSibling;){if(!(e instanceof HTMLElement))throw new ReferenceError;this.height_+=e.offsetHeight}this.update()},t.update=function(e){if(!e||"resize"!==e.type&&"orientationchange"!==e.type){var t=window.pageYOffset>=this.height_;t!==this.active_&&(this.header_.dataset.mdState=(this.active_=t)?"shadow":"")}else this.height_=0,this.setup()},t.reset=function(){this.header_.dataset.mdState="",this.height_=0,this.active_=!1},e}(),Title:function(){function e(e,t){var n="string"==typeof e?document.querySelector(e):e;if(!(n instanceof HTMLElement))throw new ReferenceError;if(this.el_=n,!((n="string"==typeof
t?document.querySelector(t):t)instanceof HTMLHeadingElement))throw new ReferenceError;this.header_=n,this.active_=!1}var t=e.prototype;return t.setup=function(){var t=this;Array.prototype.forEach.call(this.el_.children,function(e){e.style.width=t.el_.offsetWidth-20+"px"})},t.update=function(e){var t=this,n=window.pageYOffset>=this.header_.offsetTop;n!==this.active_&&(this.el_.dataset.mdState=(this.active_=n)?"active":""),"resize"!==e.type&&"orientationchange"!==e.type||Array.prototype.forEach.call(this.el_.children,function(e){e.style.width=t.el_.offsetWidth-20+"px"})},t.reset=function(){this.el_.dataset.mdState="",this.el_.style.width="",this.active_=!1},e}()},o={Blur:function(){function e(e){this.els_="string"==typeof e?document.querySelectorAll(e):e,this.index_=0,this.offset_=window.pageYOffset,this.dir_=!1,this.anchors_=[].reduce.call(this.els_,function(e,t){var n=decodeURIComponent(t.hash);return e.concat(document.getElementById(n.substring(1))||[])},[])}var t=e.prototype;return t.setup=function(){this.update()},t.update=function(){var e=window.pageYOffset,t=this.offset_-e<0;if(this.dir_!==t&&(this.index_=this.index_=t?0:this.els_.length-1),0!==this.anchors_.length){if(this.offset_<=e)for(var n=this.index_+1;ne)){this.index_=r;break}0=this.offset_?"lock"!==this.el_.dataset.mdState&&(this.el_.dataset.mdState="lock"):"lock"===this.el_.dataset.mdState&&(this.el_.dataset.mdState="")},t.reset=function(){this.el_.dataset.mdState="",this.el_.style.height="",this.height_=0},e}()},c=n(6),l=n.n(c);var u={Adapter:{GitHub:function(o){var e,t;function n(e){var t;t=o.call(this,e)||this;var n=/^.+github\.com\/([^/]+)\/?([^/]+)?.*$/.exec(t.base_);if(n&&3===n.length){var r=n[1],i=n[2];t.base_="https://api.github.com/users/"+r+"/repos",t.name_=i}return t}return t=o,(e=n).prototype=Object.create(t.prototype),(e.prototype.constructor=e).__proto__=t,n.prototype.fetch_=function(){var i=this;return function n(r){return void 0===r&&(r=0),fetch(i.base_+"?per_page=30&page="+r).then(function(e){return e.json()}).then(function(e){if(!(e instanceof Array))throw new TypeError;if(i.name_){var t=e.find(function(e){return e.name===i.name_});return t||30!==e.length?t?[i.format_(t.stargazers_count)+" Stars",i.format_(t.forks_count)+" Forks"]:[]:n(r+1)}return[e.length+" Repositories"]})}()},n}(function(){function e(e){var t="string"==typeof e?document.querySelector(e):e;if(!(t instanceof HTMLAnchorElement))throw new ReferenceError;this.el_=t,this.base_=this.el_.href,this.salt_=this.hash_(this.base_)}var t=e.prototype;return t.fetch=function(){var n=this;return new Promise(function(t){var e=l.a.getJSON(n.salt_+".cache-source");void 0!==e?t(e):n.fetch_().then(function(e){l.a.set(n.salt_+".cache-source",e,{expires:1/96}),t(e)})})},t.fetch_=function(){throw new Error("fetch_(): Not implemented")},t.format_=function(e){return 1e4=this.el_.children[0].offsetTop+(5-this.height_);e!==this.active_&&(this.el_.dataset.mdState=(this.active_=e)?"hidden":"")},t.reset=function(){this.el_.dataset.mdState="",this.active_=!1},e}()};t.a={Event:r,Header:i,Nav:o,Search:a,Sidebar:s,Source:u,Tabs:f}},function(t,e,n){(function(e){t.exports=e.lunr=n(24)}).call(this,n(4))},function(e,d,h){"use strict";(function(t){var e=h(8),n=setTimeout;function c(e){return Boolean(e&&void 0!==e.length)}function r(){}function o(e){if(!(this instanceof o))throw new TypeError("Promises must be constructed via new");if("function"!=typeof e)throw new TypeError("not a function");this._state=0,this._handled=!1,this._value=void 0,this._deferreds=[],f(e,this)}function 
i(n,r){for(;3===n._state;)n=n._value;0!==n._state?(n._handled=!0,o._immediateFn(function(){var e=1===n._state?r.onFulfilled:r.onRejected;if(null!==e){var t;try{t=e(n._value)}catch(e){return void s(r.promise,e)}a(r.promise,t)}else(1===n._state?a:s)(r.promise,n._value)})):n._deferreds.push(r)}function a(t,e){try{if(e===t)throw new TypeError("A promise cannot be resolved with itself.");if(e&&("object"==typeof e||"function"==typeof e)){var n=e.then;if(e instanceof o)return t._state=3,t._value=e,void l(t);if("function"==typeof n)return void f((r=n,i=e,function(){r.apply(i,arguments)}),t)}t._state=1,t._value=e,l(t)}catch(e){s(t,e)}var r,i}function s(e,t){e._state=2,e._value=t,l(e)}function l(e){2===e._state&&0===e._deferreds.length&&o._immediateFn(function(){e._handled||o._unhandledRejectionFn(e._value)});for(var t=0,n=e._deferreds.length;t=this.height_;t!==this.active_&&(this.header_.dataset.mdState=(this.active_=t)?"shadow":"")}else this.height_=0,this.setup()},t.reset=function(){this.header_.dataset.mdState="",this.height_=0,this.active_=!1},e}(),Title:function(){function e(e,t){var n="string"==typeof e?document.querySelector(e):e;if(!(n instanceof HTMLElement))throw new ReferenceError;if(this.el_=n,!((n="string"==typeof t?document.querySelector(t):t)instanceof HTMLHeadingElement))throw new ReferenceError;this.header_=n,this.active_=!1}var t=e.prototype;return t.setup=function(){var t=this;Array.prototype.forEach.call(this.el_.children,function(e){e.style.width=t.el_.offsetWidth-20+"px"})},t.update=function(e){var t=this,n=window.pageYOffset>=this.header_.offsetTop;n!==this.active_&&(this.el_.dataset.mdState=(this.active_=n)?"active":""),"resize"!==e.type&&"orientationchange"!==e.type||Array.prototype.forEach.call(this.el_.children,function(e){e.style.width=t.el_.offsetWidth-20+"px"})},t.reset=function(){this.el_.dataset.mdState="",this.el_.style.width="",this.active_=!1},e}()},o={Blur:function(){function e(e){this.els_="string"==typeof e?document.querySelectorAll(e):e,this.index_=0,this.offset_=window.pageYOffset,this.dir_=!1,this.anchors_=[].reduce.call(this.els_,function(e,t){var n=decodeURIComponent(t.hash);return e.concat(document.getElementById(n.substring(1))||[])},[])}var t=e.prototype;return t.setup=function(){this.update()},t.update=function(){var e=window.pageYOffset,t=this.offset_-e<0;if(this.dir_!==t&&(this.index_=this.index_=t?0:this.els_.length-1),0!==this.anchors_.length){if(this.offset_<=e)for(var n=this.index_+1;ne)){this.index_=r;break}0=this.offset_?"lock"!==this.el_.dataset.mdState&&(this.el_.dataset.mdState="lock"):"lock"===this.el_.dataset.mdState&&(this.el_.dataset.mdState="")},t.reset=function(){this.el_.dataset.mdState="",this.el_.style.height="",this.height_=0},e}()},c=n(6),l=n.n(c);var u={Adapter:{GitHub:function(o){var e,t;function n(e){var t;t=o.call(this,e)||this;var n=/^.+github\.com\/([^/]+)\/?([^/]+)?.*$/.exec(t.base_);if(n&&3===n.length){var r=n[1],i=n[2];t.base_="https://api.github.com/users/"+r+"/repos",t.name_=i}return t}return t=o,(e=n).prototype=Object.create(t.prototype),(e.prototype.constructor=e).__proto__=t,n.prototype.fetch_=function(){var i=this;return function n(r){return void 0===r&&(r=0),fetch(i.base_+"?per_page=100&sort=updated&page="+r).then(function(e){return e.json()}).then(function(e){if(!(e instanceof Array))return[];if(i.name_){var t=e.find(function(e){return e.name===i.name_});return t||30!==e.length?t?[i.format_(t.stargazers_count)+" Stars",i.format_(t.forks_count)+" Forks"]:[]:n(r+1)}return[e.length+" 
Repositories"]})}()},n}(function(){function e(e){var t="string"==typeof e?document.querySelector(e):e;if(!(t instanceof HTMLAnchorElement))throw new ReferenceError;this.el_=t,this.base_=this.el_.href,this.salt_=this.hash_(this.base_)}var t=e.prototype;return t.fetch=function(){var n=this;return new Promise(function(t){var e=l.a.getJSON(n.salt_+".cache-source");void 0!==e?t(e):n.fetch_().then(function(e){l.a.set(n.salt_+".cache-source",e,{expires:1/96}),t(e)})})},t.fetch_=function(){throw new Error("fetch_(): Not implemented")},t.format_=function(e){return 1e4=this.el_.children[0].offsetTop+(5-this.height_);e!==this.active_&&(this.el_.dataset.mdState=(this.active_=e)?"hidden":"")},t.reset=function(){this.el_.dataset.mdState="",this.active_=!1},e}()};t.a={Event:r,Header:i,Nav:o,Search:a,Sidebar:s,Source:u,Tabs:f}},function(t,e,n){(function(e){t.exports=e.lunr=n(24)}).call(this,n(4))},function(e,d,h){"use strict";(function(t){var e=h(8),n=setTimeout;function c(e){return Boolean(e&&void 0!==e.length)}function r(){}function o(e){if(!(this instanceof o))throw new TypeError("Promises must be constructed via new");if("function"!=typeof e)throw new TypeError("not a function");this._state=0,this._handled=!1,this._value=void 0,this._deferreds=[],f(e,this)}function i(n,r){for(;3===n._state;)n=n._value;0!==n._state?(n._handled=!0,o._immediateFn(function(){var e=1===n._state?r.onFulfilled:r.onRejected;if(null!==e){var t;try{t=e(n._value)}catch(e){return void s(r.promise,e)}a(r.promise,t)}else(1===n._state?a:s)(r.promise,n._value)})):n._deferreds.push(r)}function a(t,e){try{if(e===t)throw new TypeError("A promise cannot be resolved with itself.");if(e&&("object"==typeof e||"function"==typeof e)){var n=e.then;if(e instanceof o)return t._state=3,t._value=e,void l(t);if("function"==typeof n)return void f((r=n,i=e,function(){r.apply(i,arguments)}),t)}t._state=1,t._value=e,l(t)}catch(e){s(t,e)}var r,i}function s(e,t){e._state=2,e._value=t,l(e)}function l(e){2===e._state&&0===e._deferreds.length&&o._immediateFn(function(){e._handled||o._unhandledRejectionFn(e._value)});for(var t=0,n=e._deferreds.length;t"+n+""};this.stack_=[],r.forEach(function(e,t){var n,r=a.docs_.get(t),i=f.createElement("li",{class:"md-search-result__item"},f.createElement("a",{href:r.location,title:r.title,class:"md-search-result__link",tabindex:"-1"},f.createElement("article",{class:"md-search-result__article md-search-result__article--document"},f.createElement("h1",{class:"md-search-result__title"},{__html:r.title.replace(s,c)}),r.text.length?f.createElement("p",{class:"md-search-result__teaser"},{__html:r.text.replace(s,c)}):{}))),o=e.map(function(t){return function(){var e=a.docs_.get(t.ref);i.appendChild(f.createElement("a",{href:e.location,title:e.title,class:"md-search-result__link","data-md-rel":"anchor",tabindex:"-1"},f.createElement("article",{class:"md-search-result__article"},f.createElement("h1",{class:"md-search-result__title"},{__html:e.title.replace(s,c)}),e.text.length?f.createElement("p",{class:"md-search-result__teaser"},{__html:function(e,t){var n=t;if(e.length>n){for(;" "!==e[n]&&0<--n;);return e.substring(0,n)+"..."}return e}(e.text.replace(s,c),400)}):{})))}});(n=a.stack_).push.apply(n,[function(){return a.list_.appendChild(i)}].concat(o))});var o=this.el_.parentNode;if(!(o instanceof HTMLElement))throw new ReferenceError;for(;this.stack_.length&&o.offsetHeight>=o.scrollHeight-16;)this.stack_.shift()();var 
l=this.list_.querySelectorAll("[data-md-rel=anchor]");switch(Array.prototype.forEach.call(l,function(r){["click","keydown"].forEach(function(n){r.addEventListener(n,function(e){if("keydown"!==n||13===e.keyCode){var t=document.querySelector("[data-md-toggle=search]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t.checked&&(t.checked=!1,t.dispatchEvent(new CustomEvent("change"))),e.preventDefault(),setTimeout(function(){document.location.href=r.href},100)}})})}),r.size){case 0:this.meta_.textContent=this.message_.none;break;case 1:this.meta_.textContent=this.message_.one;break;default:this.meta_.textContent=this.message_.other.replace("#",r.size)}}}else{var u=function(e){a.docs_=e.reduce(function(e,t){var n,r,i,o=t.location.split("#"),a=o[0],s=o[1];return t.text=(n=t.text,r=document.createTextNode(n),(i=document.createElement("p")).appendChild(r),i.innerHTML),s&&(t.parent=e.get(a),t.parent&&!t.parent.done&&(t.parent.title=t.title,t.parent.text=t.text,t.parent.done=!0)),t.text=t.text.replace(/\n/g," ").replace(/\s+/g," ").replace(/\s+([,.:;!?])/g,function(e,t){return t}),t.parent&&t.parent.title===t.title||e.set(t.location,t),e},new Map);var i=a.docs_,o=a.lang_;a.stack_=[],a.index_=d()(function(){var e,t=this,n={"search.pipeline.trimmer":d.a.trimmer,"search.pipeline.stopwords":d.a.stopWordFilter},r=Object.keys(n).reduce(function(e,t){return h(t).match(/^false$/i)||e.push(n[t]),e},[]);this.pipeline.reset(),r&&(e=this.pipeline).add.apply(e,r),1===o.length&&"en"!==o[0]&&d.a[o[0]]?this.use(d.a[o[0]]):1=t.scrollHeight-16;)a.stack_.splice(0,10).forEach(function(e){return e()})})};setTimeout(function(){return"function"==typeof a.data_?a.data_().then(u):u(a.data_)},250)}},e}()}).call(this,r(3))},function(e,n,r){"use strict";(function(t){r.d(n,"a",function(){return e});var e=function(){function e(e){var t="string"==typeof e?document.querySelector(e):e;if(!(t instanceof HTMLElement))throw new ReferenceError;this.el_=t}return e.prototype.initialize=function(e){e.length&&this.el_.children.length&&this.el_.children[this.el_.children.length-1].appendChild(t.createElement("ul",{class:"md-source__facts"},e.map(function(e){return t.createElement("li",{class:"md-source__fact"},e)}))),this.el_.dataset.mdState="done"},e}()}).call(this,r(3))},,,function(e,n,c){"use strict";c.r(n),function(o){c.d(n,"app",function(){return t});c(14),c(15),c(16),c(17),c(18),c(19),c(20);var r=c(2),e=c(5),a=c.n(e),i=c(0);window.Promise=window.Promise||r.a;var s=function(e){var t=document.getElementsByName("lang:"+e)[0];if(!(t instanceof HTMLMetaElement))throw new ReferenceError;return t.content};var t={initialize:function(t){new i.a.Event.Listener(document,"DOMContentLoaded",function(){if(!(document.body instanceof HTMLElement))throw new ReferenceError;Modernizr.addTest("ios",function(){return!!navigator.userAgent.match(/(iPad|iPhone|iPod)/g)});var e=document.querySelectorAll("table:not([class])");if(Array.prototype.forEach.call(e,function(e){var t=o.createElement("div",{class:"md-typeset__scrollwrap"},o.createElement("div",{class:"md-typeset__table"}));e.nextSibling?e.parentNode.insertBefore(t,e.nextSibling):e.parentNode.appendChild(t),t.children[0].appendChild(e)}),a.a.isSupported()){var t=document.querySelectorAll(".codehilite > pre, pre > code");Array.prototype.forEach.call(t,function(e,t){var n="__code_"+t,r=o.createElement("button",{class:"md-clipboard",title:s("clipboard.copy"),"data-clipboard-target":"#"+n+" pre, #"+n+" 
code"},o.createElement("span",{class:"md-clipboard__message"})),i=e.parentNode;i.id=n,i.insertBefore(r,e)}),new a.a(".md-clipboard").on("success",function(e){var t=e.trigger.querySelector(".md-clipboard__message");if(!(t instanceof HTMLElement))throw new ReferenceError;e.clearSelection(),t.dataset.mdTimer&&clearTimeout(parseInt(t.dataset.mdTimer,10)),t.classList.add("md-clipboard__message--active"),t.innerHTML=s("clipboard.copied"),t.dataset.mdTimer=setTimeout(function(){t.classList.remove("md-clipboard__message--active"),t.dataset.mdTimer=""},2e3).toString()})}if(!Modernizr.details){var n=document.querySelectorAll("details > summary");Array.prototype.forEach.call(n,function(e){e.addEventListener("click",function(e){var t=e.target.parentNode;t.hasAttribute("open")?t.removeAttribute("open"):t.setAttribute("open","")})})}var r=function(){if(document.location.hash){var e=document.getElementById(document.location.hash.substring(1));if(!e)return;for(var t=e.parentNode;t&&!(t instanceof HTMLDetailsElement);)t=t.parentNode;if(t&&!t.open){t.open=!0;var n=location.hash;location.hash=" ",location.hash=n}}};if(window.addEventListener("hashchange",r),r(),Modernizr.ios){var i=document.querySelectorAll("[data-md-scrollfix]");Array.prototype.forEach.call(i,function(t){t.addEventListener("touchstart",function(){var e=t.scrollTop;0===e?t.scrollTop=1:e+t.offsetHeight===t.scrollHeight&&(t.scrollTop=e-1)})})}}).listen(),new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Header.Shadow("[data-md-component=container]","[data-md-component=header]")).listen(),new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Header.Title("[data-md-component=title]",".md-typeset h1")).listen(),document.querySelector("[data-md-component=hero]")&&new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Tabs.Toggle("[data-md-component=hero]")).listen(),document.querySelector("[data-md-component=tabs]")&&new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Tabs.Toggle("[data-md-component=tabs]")).listen(),new i.a.Event.MatchMedia("(min-width: 1220px)",new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Sidebar.Position("[data-md-component=navigation]","[data-md-component=header]"))),document.querySelector("[data-md-component=toc]")&&new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Sidebar.Position("[data-md-component=toc]","[data-md-component=header]"))),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(window,"scroll",new i.a.Nav.Blur("[data-md-component=toc] .md-nav__link")));var e=document.querySelectorAll("[data-md-component=collapsible]");Array.prototype.forEach.call(e,function(e){new i.a.Event.MatchMedia("(min-width: 1220px)",new i.a.Event.Listener(e.previousElementSibling,"click",new i.a.Nav.Collapse(e)))}),new i.a.Event.MatchMedia("(max-width: 1219px)",new i.a.Event.Listener("[data-md-component=navigation] [data-md-toggle]","change",new i.a.Nav.Scrolling("[data-md-component=navigation] nav"))),document.querySelector("[data-md-component=search]")&&(new i.a.Event.MatchMedia("(max-width: 959px)",new i.a.Event.Listener("[data-md-toggle=search]","change",new i.a.Search.Lock("[data-md-toggle=search]"))),new i.a.Event.Listener("[data-md-component=query]",["focus","keyup","change"],new i.a.Search.Result("[data-md-component=result]",function(){return 
fetch(t.url.base+"/search/search_index.json",{credentials:"same-origin"}).then(function(e){return e.json()}).then(function(e){return e.docs.map(function(e){return e.location=t.url.base+"/"+e.location,e})})})).listen(),new i.a.Event.Listener("[data-md-component=reset]","click",function(){setTimeout(function(){var e=document.querySelector("[data-md-component=query]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.focus()},10)}).listen(),new i.a.Event.Listener("[data-md-toggle=search]","change",function(e){setTimeout(function(e){if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=document.querySelector("[data-md-component=query]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t.focus()}},400,e.target)}).listen(),new i.a.Event.Listener("[data-md-component=query]","focus",function(){var e=document.querySelector("[data-md-toggle=search]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.checked||(e.checked=!0,e.dispatchEvent(new CustomEvent("change")))}).listen(),new i.a.Event.Listener(window,"keydown",function(e){var t=document.querySelector("[data-md-toggle=search]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;var n=document.querySelector("[data-md-component=query]");if(!(n instanceof HTMLInputElement))throw new ReferenceError;if(!(document.activeElement instanceof HTMLElement&&document.activeElement.isContentEditable||e.metaKey||e.ctrlKey))if(t.checked){if(13===e.keyCode){if(n===document.activeElement){e.preventDefault();var r=document.querySelector("[data-md-component=search] [href][data-md-state=active]");r instanceof HTMLLinkElement&&(window.location=r.getAttribute("href"),t.checked=!1,t.dispatchEvent(new CustomEvent("change")),n.blur())}}else if(9===e.keyCode||27===e.keyCode)t.checked=!1,t.dispatchEvent(new CustomEvent("change")),n.blur();else if(-1!==[8,37,39].indexOf(e.keyCode))n!==document.activeElement&&n.focus();else if(-1!==[38,40].indexOf(e.keyCode)){var i=e.keyCode,o=Array.prototype.slice.call(document.querySelectorAll("[data-md-component=query], [data-md-component=search] [href]")),a=o.find(function(e){if(!(e instanceof HTMLElement))throw new ReferenceError;return"active"===e.dataset.mdState});a&&(a.dataset.mdState="");var s=Math.max(0,(o.indexOf(a)+o.length+(38===i?-1:1))%o.length);return o[s]&&(o[s].dataset.mdState="active",o[s].focus()),e.preventDefault(),e.stopPropagation(),!1}}else if(document.activeElement&&!document.activeElement.form){if("TEXTAREA"===document.activeElement.tagName||"INPUT"===document.activeElement.tagName)return;70!==e.keyCode&&83!==e.keyCode||(n.focus(),e.preventDefault())}}).listen(),new i.a.Event.Listener(window,"keypress",function(){var e=document.querySelector("[data-md-toggle=search]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=document.querySelector("[data-md-component=query]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t!==document.activeElement&&t.focus()}}).listen()),new i.a.Event.Listener(document.body,"keydown",function(e){if(9===e.keyCode){var t=document.querySelectorAll("[data-md-component=navigation] .md-nav__link[for]:not([tabindex])");Array.prototype.forEach.call(t,function(e){e.offsetHeight&&(e.tabIndex=0)})}}).listen(),new i.a.Event.Listener(document.body,"mousedown",function(){var e=document.querySelectorAll("[data-md-component=navigation] 
.md-nav__link[tabindex]");Array.prototype.forEach.call(e,function(e){e.removeAttribute("tabIndex")})}).listen(),document.body.addEventListener("click",function(){"tabbing"===document.body.dataset.mdState&&(document.body.dataset.mdState="")}),new i.a.Event.MatchMedia("(max-width: 959px)",new i.a.Event.Listener("[data-md-component=navigation] [href^='#']","click",function(){var e=document.querySelector("[data-md-toggle=drawer]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.checked&&(e.checked=!1,e.dispatchEvent(new CustomEvent("change")))})),function(){var e=document.querySelector("[data-md-source]");if(!e)return r.a.resolve([]);if(!(e instanceof HTMLAnchorElement))throw new ReferenceError;switch(e.dataset.mdSource){case"github":return new i.a.Source.Adapter.GitHub(e).fetch();default:return r.a.resolve([])}}().then(function(t){var e=document.querySelectorAll("[data-md-source]");Array.prototype.forEach.call(e,function(e){new i.a.Source.Repository(e).initialize(t)})});var n=function(){var e=document.querySelectorAll("details");Array.prototype.forEach.call(e,function(e){e.setAttribute("open","")})};new i.a.Event.MatchMedia("print",{listen:n,unlisten:function(){}}),window.onbeforeprint=n}}}.call(this,c(3))},function(e,t,n){e.exports=n.p+"assets/images/icons/bitbucket.1b09e088.svg"},function(e,t,n){e.exports=n.p+"assets/images/icons/github.f0b8504a.svg"},function(e,t,n){e.exports=n.p+"assets/images/icons/gitlab.6dd19c00.svg"},function(e,t){e.exports="/home/travis/build/squidfunk/mkdocs-material/material/application.30686662.css"},function(e,t){e.exports="/home/travis/build/squidfunk/mkdocs-material/material/application-palette.a8b3c06d.css"},function(e,t){!function(){if("undefined"!=typeof window)try{var e=new window.CustomEvent("test",{cancelable:!0});if(e.preventDefault(),!0!==e.defaultPrevented)throw new Error("Could not prevent default")}catch(e){var t=function(e,t){var n,r;return(t=t||{}).bubbles=!!t.bubbles,t.cancelable=!!t.cancelable,(n=document.createEvent("CustomEvent")).initCustomEvent(e,t.bubbles,t.cancelable,t.detail),r=n.preventDefault,n.preventDefault=function(){r.call(this);try{Object.defineProperty(this,"defaultPrevented",{get:function(){return!0}})}catch(e){this.defaultPrevented=!0}},n};t.prototype=window.Event.prototype,window.CustomEvent=t}}()},function(e,t,n){window.fetch||(window.fetch=n(7).default||n(7))},function(e,i,o){(function(e){var t=void 0!==e&&e||"undefined"!=typeof self&&self||window,n=Function.prototype.apply;function r(e,t){this._id=e,this._clearFn=t}i.setTimeout=function(){return new r(n.call(setTimeout,t,arguments),clearTimeout)},i.setInterval=function(){return new r(n.call(setInterval,t,arguments),clearInterval)},i.clearTimeout=i.clearInterval=function(e){e&&e.close()},r.prototype.unref=r.prototype.ref=function(){},r.prototype.close=function(){this._clearFn.call(t,this._id)},i.enroll=function(e,t){clearTimeout(e._idleTimeoutId),e._idleTimeout=t},i.unenroll=function(e){clearTimeout(e._idleTimeoutId),e._idleTimeout=-1},i._unrefActive=i.active=function(e){clearTimeout(e._idleTimeoutId);var t=e._idleTimeout;0<=t&&(e._idleTimeoutId=setTimeout(function(){e._onTimeout&&e._onTimeout()},t))},o(22),i.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,i.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,o(4))},function(e,t,n){(function(e,p){!function(n,r){"use strict";if(!n.setImmediate){var 
i,o,t,a,e,s=1,c={},l=!1,u=n.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(n);f=f&&f.setTimeout?f:n,i="[object process]"==={}.toString.call(n.process)?function(e){p.nextTick(function(){h(e)})}:function(){if(n.postMessage&&!n.importScripts){var e=!0,t=n.onmessage;return n.onmessage=function(){e=!1},n.postMessage("","*"),n.onmessage=t,e}}()?(a="setImmediate$"+Math.random()+"$",e=function(e){e.source===n&&"string"==typeof e.data&&0===e.data.indexOf(a)&&h(+e.data.slice(a.length))},n.addEventListener?n.addEventListener("message",e,!1):n.attachEvent("onmessage",e),function(e){n.postMessage(a+e,"*")}):n.MessageChannel?((t=new MessageChannel).port1.onmessage=function(e){h(e.data)},function(e){t.port2.postMessage(e)}):u&&"onreadystatechange"in u.createElement("script")?(o=u.documentElement,function(e){var t=u.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,o.removeChild(t),t=null},o.appendChild(t)}):function(e){setTimeout(h,0,e)},f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n"+n+""};this.stack_=[],r.forEach(function(e,t){var n,r=a.docs_.get(t),i=f.createElement("li",{class:"md-search-result__item"},f.createElement("a",{href:r.location,title:r.title,class:"md-search-result__link",tabindex:"-1"},f.createElement("article",{class:"md-search-result__article md-search-result__article--document"},f.createElement("h1",{class:"md-search-result__title"},{__html:r.title.replace(s,c)}),r.text.length?f.createElement("p",{class:"md-search-result__teaser"},{__html:r.text.replace(s,c)}):{}))),o=e.map(function(t){return function(){var e=a.docs_.get(t.ref);i.appendChild(f.createElement("a",{href:e.location,title:e.title,class:"md-search-result__link","data-md-rel":"anchor",tabindex:"-1"},f.createElement("article",{class:"md-search-result__article"},f.createElement("h1",{class:"md-search-result__title"},{__html:e.title.replace(s,c)}),e.text.length?f.createElement("p",{class:"md-search-result__teaser"},{__html:function(e,t){var n=t;if(e.length>n){for(;" "!==e[n]&&0<--n;);return e.substring(0,n)+"..."}return e}(e.text.replace(s,c),400)}):{})))}});(n=a.stack_).push.apply(n,[function(){return a.list_.appendChild(i)}].concat(o))});var o=this.el_.parentNode;if(!(o instanceof HTMLElement))throw new ReferenceError;for(;this.stack_.length&&o.offsetHeight>=o.scrollHeight-16;)this.stack_.shift()();var l=this.list_.querySelectorAll("[data-md-rel=anchor]");switch(Array.prototype.forEach.call(l,function(r){["click","keydown"].forEach(function(n){r.addEventListener(n,function(e){if("keydown"!==n||13===e.keyCode){var t=document.querySelector("[data-md-toggle=search]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t.checked&&(t.checked=!1,t.dispatchEvent(new CustomEvent("change"))),e.preventDefault(),setTimeout(function(){document.location.href=r.href},100)}})})}),r.size){case 0:this.meta_.textContent=this.message_.none;break;case 1:this.meta_.textContent=this.message_.one;break;default:this.meta_.textContent=this.message_.other.replace("#",r.size)}}}else{var u=function(e){a.docs_=e.reduce(function(e,t){var n,r,i,o=t.location.split("#"),a=o[0],s=o[1];return t.text=(n=t.text,r=document.createTextNode(n),(i=document.createElement("p")).appendChild(r),i.innerHTML),s&&(t.parent=e.get(a),t.parent&&!t.parent.done&&(t.parent.title=t.title,t.parent.text=t.text,t.parent.done=!0)),t.text=t.text.replace(/\n/g," ").replace(/\s+/g," ").replace(/\s+([,.:;!?])/g,function(e,t){return 
t}),t.parent&&t.parent.title===t.title||e.set(t.location,t),e},new Map);var i=a.docs_,o=a.lang_;a.stack_=[],a.index_=d()(function(){var e,t=this,n={"search.pipeline.trimmer":d.a.trimmer,"search.pipeline.stopwords":d.a.stopWordFilter},r=Object.keys(n).reduce(function(e,t){return h(t).match(/^false$/i)||e.push(n[t]),e},[]);this.pipeline.reset(),r&&(e=this.pipeline).add.apply(e,r),1===o.length&&"en"!==o[0]&&d.a[o[0]]?this.use(d.a[o[0]]):1=t.scrollHeight-16;)a.stack_.splice(0,10).forEach(function(e){return e()})})};setTimeout(function(){return"function"==typeof a.data_?a.data_().then(u):u(a.data_)},250)}},e}()}).call(this,r(3))},function(e,n,r){"use strict";(function(t){r.d(n,"a",function(){return e});var e=function(){function e(e){var t="string"==typeof e?document.querySelector(e):e;if(!(t instanceof HTMLElement))throw new ReferenceError;this.el_=t}return e.prototype.initialize=function(e){e.length&&this.el_.children.length&&this.el_.children[this.el_.children.length-1].appendChild(t.createElement("ul",{class:"md-source__facts"},e.map(function(e){return t.createElement("li",{class:"md-source__fact"},e)}))),this.el_.dataset.mdState="done"},e}()}).call(this,r(3))},,,function(e,n,c){"use strict";c.r(n),function(o){c.d(n,"app",function(){return t});c(14),c(15),c(16),c(17),c(18),c(19),c(20);var r=c(2),e=c(5),a=c.n(e),i=c(0);window.Promise=window.Promise||r.a;var s=function(e){var t=document.getElementsByName("lang:"+e)[0];if(!(t instanceof HTMLMetaElement))throw new ReferenceError;return t.content};var t={initialize:function(t){new i.a.Event.Listener(document,"DOMContentLoaded",function(){if(!(document.body instanceof HTMLElement))throw new ReferenceError;Modernizr.addTest("ios",function(){return!!navigator.userAgent.match(/(iPad|iPhone|iPod)/g)});var e=document.querySelectorAll("table:not([class])");if(Array.prototype.forEach.call(e,function(e){var t=o.createElement("div",{class:"md-typeset__scrollwrap"},o.createElement("div",{class:"md-typeset__table"}));e.nextSibling?e.parentNode.insertBefore(t,e.nextSibling):e.parentNode.appendChild(t),t.children[0].appendChild(e)}),a.a.isSupported()){var t=document.querySelectorAll("pre > code");Array.prototype.forEach.call(t,function(e,t){var n="__code_"+t,r=o.createElement("button",{class:"md-clipboard",title:s("clipboard.copy"),"data-clipboard-target":"#"+n+" pre, #"+n+" code"},o.createElement("span",{class:"md-clipboard__message"})),i=e.parentNode;i.id=n,i.insertBefore(r,e)}),new a.a(".md-clipboard").on("success",function(e){var t=e.trigger.querySelector(".md-clipboard__message");if(!(t instanceof HTMLElement))throw new ReferenceError;e.clearSelection(),t.dataset.mdTimer&&clearTimeout(parseInt(t.dataset.mdTimer,10)),t.classList.add("md-clipboard__message--active"),t.innerHTML=s("clipboard.copied"),t.dataset.mdTimer=setTimeout(function(){t.classList.remove("md-clipboard__message--active"),t.dataset.mdTimer=""},2e3).toString()})}if(!Modernizr.details){var n=document.querySelectorAll("details > summary");Array.prototype.forEach.call(n,function(e){e.addEventListener("click",function(e){var t=e.target.parentNode;t.hasAttribute("open")?t.removeAttribute("open"):t.setAttribute("open","")})})}var r=function(){if(document.location.hash){var e=document.getElementById(document.location.hash.substring(1));if(!e)return;for(var t=e.parentNode;t&&!(t instanceof HTMLDetailsElement);)t=t.parentNode;if(t&&!t.open){t.open=!0;var n=location.hash;location.hash=" ",location.hash=n}}};if(window.addEventListener("hashchange",r),r(),Modernizr.ios){var 
i=document.querySelectorAll("[data-md-scrollfix]");Array.prototype.forEach.call(i,function(t){t.addEventListener("touchstart",function(){var e=t.scrollTop;0===e?t.scrollTop=1:e+t.offsetHeight===t.scrollHeight&&(t.scrollTop=e-1)})})}}).listen(),new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Header.Shadow("[data-md-component=container]","[data-md-component=header]")).listen(),new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Header.Title("[data-md-component=title]",".md-typeset h1")).listen(),document.querySelector("[data-md-component=hero]")&&new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Tabs.Toggle("[data-md-component=hero]")).listen(),document.querySelector("[data-md-component=tabs]")&&new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Tabs.Toggle("[data-md-component=tabs]")).listen(),new i.a.Event.MatchMedia("(min-width: 1220px)",new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Sidebar.Position("[data-md-component=navigation]","[data-md-component=header]"))),document.querySelector("[data-md-component=toc]")&&new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(window,["scroll","resize","orientationchange"],new i.a.Sidebar.Position("[data-md-component=toc]","[data-md-component=header]"))),new i.a.Event.MatchMedia("(min-width: 960px)",new i.a.Event.Listener(window,"scroll",new i.a.Nav.Blur("[data-md-component=toc] .md-nav__link")));var e=document.querySelectorAll("[data-md-component=collapsible]");Array.prototype.forEach.call(e,function(e){new i.a.Event.MatchMedia("(min-width: 1220px)",new i.a.Event.Listener(e.previousElementSibling,"click",new i.a.Nav.Collapse(e)))}),new i.a.Event.MatchMedia("(max-width: 1219px)",new i.a.Event.Listener("[data-md-component=navigation] [data-md-toggle]","change",new i.a.Nav.Scrolling("[data-md-component=navigation] nav"))),document.querySelector("[data-md-component=search]")&&(new i.a.Event.MatchMedia("(max-width: 959px)",new i.a.Event.Listener("[data-md-toggle=search]","change",new i.a.Search.Lock("[data-md-toggle=search]"))),new i.a.Event.Listener("[data-md-component=query]",["focus","keyup","change"],new i.a.Search.Result("[data-md-component=result]",function(){return fetch(t.url.base+"/search/search_index.json",{credentials:"same-origin"}).then(function(e){return e.json()}).then(function(e){return e.docs.map(function(e){return e.location=t.url.base+"/"+e.location,e})})})).listen(),new i.a.Event.Listener("[data-md-component=reset]","click",function(){setTimeout(function(){var e=document.querySelector("[data-md-component=query]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.focus()},10)}).listen(),new i.a.Event.Listener("[data-md-toggle=search]","change",function(e){setTimeout(function(e){if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=document.querySelector("[data-md-component=query]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t.focus()}},400,e.target)}).listen(),new i.a.Event.Listener("[data-md-component=query]","focus",function(){var e=document.querySelector("[data-md-toggle=search]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.checked||(e.checked=!0,e.dispatchEvent(new CustomEvent("change")))}).listen(),new i.a.Event.Listener(window,"keydown",function(e){var t=document.querySelector("[data-md-toggle=search]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;var 
n=document.querySelector("[data-md-component=query]");if(!(n instanceof HTMLInputElement))throw new ReferenceError;if(!(document.activeElement instanceof HTMLElement&&document.activeElement.isContentEditable||e.metaKey||e.ctrlKey))if(t.checked){if(13===e.keyCode){if(n===document.activeElement){e.preventDefault();var r=document.querySelector("[data-md-component=search] [href][data-md-state=active]");r instanceof HTMLLinkElement&&(window.location=r.getAttribute("href"),t.checked=!1,t.dispatchEvent(new CustomEvent("change")),n.blur())}}else if(9===e.keyCode||27===e.keyCode)t.checked=!1,t.dispatchEvent(new CustomEvent("change")),n.blur();else if(-1!==[8,37,39].indexOf(e.keyCode))n!==document.activeElement&&n.focus();else if(-1!==[38,40].indexOf(e.keyCode)){var i=e.keyCode,o=Array.prototype.slice.call(document.querySelectorAll("[data-md-component=query], [data-md-component=search] [href]")),a=o.find(function(e){if(!(e instanceof HTMLElement))throw new ReferenceError;return"active"===e.dataset.mdState});a&&(a.dataset.mdState="");var s=Math.max(0,(o.indexOf(a)+o.length+(38===i?-1:1))%o.length);return o[s]&&(o[s].dataset.mdState="active",o[s].focus()),e.preventDefault(),e.stopPropagation(),!1}}else if(document.activeElement&&!document.activeElement.form){if("TEXTAREA"===document.activeElement.tagName||"INPUT"===document.activeElement.tagName)return;70!==e.keyCode&&83!==e.keyCode||(n.focus(),e.preventDefault())}}).listen(),new i.a.Event.Listener(window,"keypress",function(){var e=document.querySelector("[data-md-toggle=search]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;if(e.checked){var t=document.querySelector("[data-md-component=query]");if(!(t instanceof HTMLInputElement))throw new ReferenceError;t!==document.activeElement&&t.focus()}}).listen()),new i.a.Event.Listener(document.body,"keydown",function(e){if(9===e.keyCode){var t=document.querySelectorAll("[data-md-component=navigation] .md-nav__link[for]:not([tabindex])");Array.prototype.forEach.call(t,function(e){e.offsetHeight&&(e.tabIndex=0)})}}).listen(),new i.a.Event.Listener(document.body,"mousedown",function(){var e=document.querySelectorAll("[data-md-component=navigation] .md-nav__link[tabindex]");Array.prototype.forEach.call(e,function(e){e.removeAttribute("tabIndex")})}).listen(),document.body.addEventListener("click",function(){"tabbing"===document.body.dataset.mdState&&(document.body.dataset.mdState="")}),new i.a.Event.MatchMedia("(max-width: 959px)",new i.a.Event.Listener("[data-md-component=navigation] [href^='#']","click",function(){var e=document.querySelector("[data-md-toggle=drawer]");if(!(e instanceof HTMLInputElement))throw new ReferenceError;e.checked&&(e.checked=!1,e.dispatchEvent(new CustomEvent("change")))})),function(){var e=document.querySelector("[data-md-source]");if(!e)return r.a.resolve([]);if(!(e instanceof HTMLAnchorElement))throw new ReferenceError;switch(e.dataset.mdSource){case"github":return new i.a.Source.Adapter.GitHub(e).fetch();default:return r.a.resolve([])}}().then(function(t){var e=document.querySelectorAll("[data-md-source]");Array.prototype.forEach.call(e,function(e){new i.a.Source.Repository(e).initialize(t)})});var n=function(){var e=document.querySelectorAll("details");Array.prototype.forEach.call(e,function(e){e.setAttribute("open","")})};new i.a.Event.MatchMedia("print",{listen:n,unlisten:function(){}}),window.onbeforeprint=n}}}.call(this,c(3))},function(e,t,n){"use strict";n.p},function(e,t,n){"use strict";n.p},function(e,t,n){"use strict";n.p},function(e,t,n){"use 
strict"},function(e,t,n){"use strict"},function(e,t){!function(){if("undefined"!=typeof window)try{var e=new window.CustomEvent("test",{cancelable:!0});if(e.preventDefault(),!0!==e.defaultPrevented)throw new Error("Could not prevent default")}catch(e){var t=function(e,t){var n,r;return(t=t||{}).bubbles=!!t.bubbles,t.cancelable=!!t.cancelable,(n=document.createEvent("CustomEvent")).initCustomEvent(e,t.bubbles,t.cancelable,t.detail),r=n.preventDefault,n.preventDefault=function(){r.call(this);try{Object.defineProperty(this,"defaultPrevented",{get:function(){return!0}})}catch(e){this.defaultPrevented=!0}},n};t.prototype=window.Event.prototype,window.CustomEvent=t}}()},function(e,t,n){window.fetch||(window.fetch=n(7).default||n(7))},function(e,i,o){(function(e){var t=void 0!==e&&e||"undefined"!=typeof self&&self||window,n=Function.prototype.apply;function r(e,t){this._id=e,this._clearFn=t}i.setTimeout=function(){return new r(n.call(setTimeout,t,arguments),clearTimeout)},i.setInterval=function(){return new r(n.call(setInterval,t,arguments),clearInterval)},i.clearTimeout=i.clearInterval=function(e){e&&e.close()},r.prototype.unref=r.prototype.ref=function(){},r.prototype.close=function(){this._clearFn.call(t,this._id)},i.enroll=function(e,t){clearTimeout(e._idleTimeoutId),e._idleTimeout=t},i.unenroll=function(e){clearTimeout(e._idleTimeoutId),e._idleTimeout=-1},i._unrefActive=i.active=function(e){clearTimeout(e._idleTimeoutId);var t=e._idleTimeout;0<=t&&(e._idleTimeoutId=setTimeout(function(){e._onTimeout&&e._onTimeout()},t))},o(22),i.setImmediate="undefined"!=typeof self&&self.setImmediate||void 0!==e&&e.setImmediate||this&&this.setImmediate,i.clearImmediate="undefined"!=typeof self&&self.clearImmediate||void 0!==e&&e.clearImmediate||this&&this.clearImmediate}).call(this,o(4))},function(e,t,n){(function(e,p){!function(n,r){"use strict";if(!n.setImmediate){var i,o,t,a,e,s=1,c={},l=!1,u=n.document,f=Object.getPrototypeOf&&Object.getPrototypeOf(n);f=f&&f.setTimeout?f:n,i="[object process]"==={}.toString.call(n.process)?function(e){p.nextTick(function(){h(e)})}:function(){if(n.postMessage&&!n.importScripts){var e=!0,t=n.onmessage;return n.onmessage=function(){e=!1},n.postMessage("","*"),n.onmessage=t,e}}()?(a="setImmediate$"+Math.random()+"$",e=function(e){e.source===n&&"string"==typeof e.data&&0===e.data.indexOf(a)&&h(+e.data.slice(a.length))},n.addEventListener?n.addEventListener("message",e,!1):n.attachEvent("onmessage",e),function(e){n.postMessage(a+e,"*")}):n.MessageChannel?((t=new MessageChannel).port1.onmessage=function(e){h(e.data)},function(e){t.port2.postMessage(e)}):u&&"onreadystatechange"in u.createElement("script")?(o=u.documentElement,function(e){var t=u.createElement("script");t.onreadystatechange=function(){h(e),t.onreadystatechange=null,o.removeChild(t),t=null},o.appendChild(t)}):function(e){setTimeout(h,0,e)},f.setImmediate=function(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;nab",t=n.offsetHeight,n.open=!0,t=t!==n.offsetHeight}),t)}),c.addTest("fetch","fetch"in i);var _="Moz O ms Webkit",w=t._config.usePrefixes?_.split(" "):[];t._cssomPrefixes=w;var x={elem:m("modernizr")};c._q.push(function(){delete x.elem});var T={style:x.elem.style};c._q.unshift(function(){delete T.style});var P=t._config.usePrefixes?_.toLowerCase().split(" "):[];t._domPrefixes=P,t.testAllProps=r,t.testAllProps=e;var j="CSS"in i&&"supports"in i.CSS,O="supportsCSS"in 
i;c.addTest("supports",j||O),c.addTest("csstransforms3d",function(){return!!e("perspective","1px",!0)}),function(){var e,t,n,r,o,i;for(var s in f)if(f.hasOwnProperty(s)){if(e=[],(t=f[s]).name&&(e.push(t.name.toLowerCase()),t.options&&t.options.aliases&&t.options.aliases.length))for(n=0;nab",t=n.offsetHeight,n.open=!0,t=t!==n.offsetHeight}),t)}),c.addTest("fetch","fetch"in i);var _="Moz O ms Webkit",w=t._config.usePrefixes?_.split(" "):[];t._cssomPrefixes=w;var x={elem:m("modernizr")};c._q.push(function(){delete x.elem});var T={style:x.elem.style};c._q.unshift(function(){delete T.style});var P=t._config.usePrefixes?_.toLowerCase().split(" "):[];t._domPrefixes=P,t.testAllProps=r,t.testAllProps=e;var j="CSS"in i&&"supports"in i.CSS,O="supportsCSS"in i;c.addTest("supports",j||O),c.addTest("csstransforms3d",function(){return!!e("perspective","1px",!0)}),function(){var e,t,n,r,o,i;for(var s in f)if(f.hasOwnProperty(s)){if(e=[],(t=f[s]).name&&(e.push(t.name.toLowerCase()),t.options&&t.options.aliases&&t.options.aliases.length))for(n=0;n.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset .critic.comment:before,.md-typeset .footnote-backref,.md-typeset .task-list-control .task-list-indicator:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before,.md-typeset summary:after{font-family:Material Icons;font-style:normal;font-variant:normal;font-weight:400;line-height:1;text-transform:none;white-space:nowrap;speak:none;word-wrap:normal;direction:ltr}.md-content__icon,.md-footer-nav__button,.md-header-nav__button,.md-nav__button,.md-nav__title:before,.md-search-result__article--document:before{display:inline-block;margin:.2rem;padding:.4rem;font-size:1.2rem;cursor:pointer}.md-icon--arrow-back:before{content:""}.md-icon--arrow-forward:before{content:""}.md-icon--menu:before{content:""}.md-icon--search:before{content:""}[dir=rtl] .md-icon--arrow-back:before{content:""}[dir=rtl] .md-icon--arrow-forward:before{content:""}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern","liga";font-feature-settings:"kern","liga";font-family:Helvetica Neue,Helvetica,Arial,sans-serif}code,kbd,pre{color:rgba(0,0,0,.87);-webkit-font-feature-settings:"kern";font-feature-settings:"kern";font-family:Courier New,Courier,monospace}.md-typeset{font-size:.8rem;line-height:1.6;-webkit-print-color-adjust:exact}.md-typeset blockquote,.md-typeset ol,.md-typeset p,.md-typeset ul{margin:1em 0}.md-typeset h1{margin:0 0 2rem;color:rgba(0,0,0,.54);font-size:1.5625rem;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:2rem 0 .8rem;font-size:1.25rem;line-height:1.4}.md-typeset h3{margin:1.6rem 0 .8rem;font-size:1rem;font-weight:400;letter-spacing:-.01em;line-height:1.5}.md-typeset h2+h3{margin-top:.8rem}.md-typeset h4{font-size:.8rem}.md-typeset h4,.md-typeset h5,.md-typeset h6{margin:.8rem 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{color:rgba(0,0,0,.54);font-size:.64rem}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.05rem dotted rgba(0,0,0,.26)}.md-typeset a{color:#3f51b5;word-break:break-word}.md-typeset a,.md-typeset a:before{-webkit-transition:color .125s;transition:color .125s}.md-typeset a:active,.md-typeset a:hover{color:#536dfe}.md-typeset code,.md-typeset pre{background-color:hsla(0,0%,92.5%,.5);color:#37474f;font-size:85%;direction:ltr}.md-typeset code{margin:0 
.29412em;padding:.07353em 0;border-radius:.1rem;box-shadow:.29412em 0 0 hsla(0,0%,92.5%,.5),-.29412em 0 0 hsla(0,0%,92.5%,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:0;background-color:transparent;box-shadow:none}.md-typeset a>code{margin:inherit;padding:inherit;border-radius:initial;background-color:inherit;color:inherit;box-shadow:none}.md-typeset pre{position:relative;margin:1em 0;border-radius:.1rem;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset pre>code{display:block;margin:0;padding:.525rem .6rem;background-color:transparent;font-size:inherit;box-shadow:none;-webkit-box-decoration-break:slice;box-decoration-break:slice;overflow:auto}.md-typeset pre>code::-webkit-scrollbar{width:.2rem;height:.2rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset kbd{padding:0 .29412em;border-radius:.15rem;border:.05rem solid #c9c9c9;border-bottom-color:#bcbcbc;background-color:#fcfcfc;color:#555;font-size:85%;box-shadow:0 .05rem 0 #b0b0b0;word-break:break-word}.md-typeset mark{margin:0 .25em;padding:.0625em 0;border-radius:.1rem;background-color:rgba(255,235,59,.5);box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{border-bottom:.05rem dotted rgba(0,0,0,.54);text-decoration:none;cursor:help}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.07812em}[dir=rtl] .md-typeset sub,[dir=rtl] .md-typeset sup{margin-right:.07812em;margin-left:0}.md-typeset blockquote{padding-left:.6rem;border-left:.2rem solid rgba(0,0,0,.26);color:rgba(0,0,0,.54)}[dir=rtl] .md-typeset blockquote{padding-right:.6rem;padding-left:0;border-right:.2rem solid rgba(0,0,0,.26);border-left:initial}.md-typeset ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}[dir=rtl] .md-typeset ol,[dir=rtl] .md-typeset ul{margin-right:.625em;margin-left:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}[dir=rtl] .md-typeset ol li,[dir=rtl] .md-typeset ul li{margin-right:1.25em;margin-left:0}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}[dir=rtl] .md-typeset ol li ol,[dir=rtl] .md-typeset ol li ul,[dir=rtl] .md-typeset ul li ol,[dir=rtl] .md-typeset ul li ul{margin-right:.625em;margin-left:0}.md-typeset dd{margin:1em 0 1em 1.875em}[dir=rtl] .md-typeset dd{margin-right:1.875em;margin-left:0}.md-typeset iframe,.md-typeset img,.md-typeset svg{max-width:100%}.md-typeset table:not([class]){box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);display:inline-block;max-width:100%;border-radius:.1rem;font-size:.64rem;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}[dir=rtl] .md-typeset 
table:not([class]) td:not([align]),[dir=rtl] .md-typeset table:not([class]) th:not([align]){text-align:right}.md-typeset table:not([class]) th{min-width:5rem;padding:.6rem .8rem;background-color:rgba(0,0,0,.54);color:#fff;vertical-align:top}.md-typeset table:not([class]) td{padding:.6rem .8rem;border-top:.05rem solid rgba(0,0,0,.07);vertical-align:top}.md-typeset table:not([class]) tr{-webkit-transition:background-color .125s;transition:background-color .125s}.md-typeset table:not([class]) tr:hover{background-color:rgba(0,0,0,.035);box-shadow:inset 0 .05rem 0 #fff}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset__scrollwrap{margin:1em -.8rem;overflow-x:auto;-webkit-overflow-scrolling:touch}.md-typeset .md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 .8rem}.md-typeset .md-typeset__table table{display:table;width:100%;margin:0;overflow:hidden}html{font-size:125%;overflow-x:hidden}body,html{height:100%}body{position:relative;font-size:.5rem}hr{display:block;height:.05rem;padding:0;border:0}.md-svg{display:none}.md-grid{max-width:61rem;margin-right:auto;margin-left:auto}.md-container,.md-main{overflow:auto}.md-container{display:table;width:100%;height:100%;padding-top:2.4rem;table-layout:fixed}.md-main{display:table-row;height:100%}.md-main__inner{height:100%;padding-top:1.5rem;padding-bottom:.05rem}.md-toggle{display:none}.md-overlay{position:fixed;top:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);opacity:0;z-index:3}.md-flex{display:table}.md-flex__cell{display:table-cell;position:relative;vertical-align:top}.md-flex__cell--shrink{width:0}.md-flex__cell--stretch{display:table;width:100%;table-layout:fixed}.md-flex__ellipsis{display:table-cell;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.md-skip{position:fixed;width:.05rem;height:.05rem;margin:.5rem;padding:.3rem .5rem;-webkit-transform:translateY(.4rem);transform:translateY(.4rem);border-radius:.1rem;background-color:rgba(0,0,0,.87);color:#fff;font-size:.64rem;opacity:0;overflow:hidden}.md-skip:focus{width:auto;height:auto;clip:auto;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1;z-index:10}@page{margin:25mm}.md-clipboard{position:absolute;top:.3rem;right:.3rem;width:1.4rem;height:1.4rem;border-radius:.1rem;font-size:.8rem;cursor:pointer;z-index:1;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-clipboard:before{-webkit-transition:color .25s,opacity .25s;transition:color .25s,opacity .25s;color:rgba(0,0,0,.07);content:"\E14D"}.codehilite:hover .md-clipboard:before,.md-typeset .highlight:hover .md-clipboard:before,pre:hover .md-clipboard:before{color:rgba(0,0,0,.54)}.md-clipboard:focus:before,.md-clipboard:hover:before{color:#536dfe}.md-clipboard__message{display:block;position:absolute;top:0;right:1.7rem;padding:.3rem .5rem;-webkit-transform:translateX(.4rem);transform:translateX(.4rem);-webkit-transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:opacity .175s,-webkit-transform .25s 
cubic-bezier(.9,.1,.9,0);transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s;transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);border-radius:.1rem;background-color:rgba(0,0,0,.54);color:#fff;font-size:.64rem;white-space:nowrap;opacity:0;pointer-events:none}.md-clipboard__message--active{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1;pointer-events:auto}.md-clipboard__message:before{content:attr(aria-label)}.md-clipboard__message:after{display:block;position:absolute;top:50%;right:-.2rem;width:0;margin-top:-.2rem;border-color:transparent rgba(0,0,0,.54);border-style:solid;border-width:.2rem 0 .2rem .2rem;content:""}.md-content__inner{margin:0 .8rem 1.2rem;padding-top:.6rem}.md-content__inner:before{display:block;height:.4rem;content:""}.md-content__inner>:last-child{margin-bottom:0}.md-content__icon{position:relative;margin:.4rem 0;padding:0;float:right}.md-typeset .md-content__icon{color:rgba(0,0,0,.26)}.md-header{position:fixed;top:0;right:0;left:0;height:2.4rem;-webkit-transition:background-color .25s,color .25s;transition:background-color .25s,color .25s;background-color:#3f51b5;color:#fff;box-shadow:none;z-index:2;-webkit-backface-visibility:hidden;backface-visibility:hidden}.no-js .md-header{-webkit-transition:none;transition:none;box-shadow:none}.md-header[data-md-state=shadow]{-webkit-transition:background-color .25s,color .25s,box-shadow .25s;transition:background-color .25s,color .25s,box-shadow .25s;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.md-header-nav{padding:0 .2rem}.md-header-nav__button{position:relative;-webkit-transition:opacity .25s;transition:opacity .25s;z-index:1}.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo *{display:block}.no-js .md-header-nav__button.md-icon--search{display:none}.md-header-nav__topic{display:block;position:absolute;-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(1.25rem);transform:translateX(1.25rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}[dir=rtl] .md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(-1.25rem);transform:translateX(-1.25rem)}.no-js .md-header-nav__topic{position:static}.no-js .md-header-nav__topic+.md-header-nav__topic{display:none}.md-header-nav__title{padding:0 1rem;font-size:.9rem;line-height:2.4rem}.md-header-nav__title[data-md-state=active] 
.md-header-nav__topic{-webkit-transform:translateX(-1.25rem);transform:translateX(-1.25rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}[dir=rtl] .md-header-nav__title[data-md-state=active] .md-header-nav__topic{-webkit-transform:translateX(1.25rem);transform:translateX(1.25rem)}.md-header-nav__title[data-md-state=active] .md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);opacity:1;z-index:0;pointer-events:auto}.md-header-nav__source{display:none}.md-hero{-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;font-size:1rem;overflow:hidden}.md-hero__inner{margin-top:1rem;padding:.8rem .8rem .4rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);-webkit-transition-delay:.1s;transition-delay:.1s}[data-md-state=hidden] .md-hero__inner{pointer-events:none;-webkit-transform:translateY(.625rem);transform:translateY(.625rem);-webkit-transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:transform 0s .4s,opacity .1s 0s;transition:transform 0s .4s,opacity .1s 0s,-webkit-transform 0s .4s;opacity:0}.md-hero--expand .md-hero__inner{margin-bottom:1.2rem}.md-footer-nav{background-color:rgba(0,0,0,.87);color:#fff}.md-footer-nav__inner{padding:.2rem;overflow:auto}.md-footer-nav__link{padding-top:1.4rem;padding-bottom:.4rem;-webkit-transition:opacity .25s;transition:opacity .25s}.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{width:25%;float:left}[dir=rtl] .md-footer-nav__link--prev{float:right}.md-footer-nav__link--next{width:75%;float:right;text-align:right}[dir=rtl] .md-footer-nav__link--next{float:left;text-align:left}.md-footer-nav__button{-webkit-transition:background .25s;transition:background .25s}.md-footer-nav__title{position:relative;padding:0 1rem;font-size:.9rem;line-height:2.4rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-1rem;padding:0 1rem;color:hsla(0,0%,100%,.7);font-size:.75rem}.md-footer-meta{background-color:rgba(0,0,0,.895)}.md-footer-meta__inner{padding:.2rem;overflow:auto}html .md-footer-meta.md-typeset a{color:hsla(0,0%,100%,.7)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:#fff}.md-footer-copyright{margin:0 .6rem;padding:.4rem 0;color:hsla(0,0%,100%,.3);font-size:.64rem}.md-footer-copyright__highlight{color:hsla(0,0%,100%,.7)}.md-footer-social{margin:0 .4rem;padding:.2rem 0 
.6rem}.md-footer-social__link{display:inline-block;width:1.6rem;height:1.6rem;font-size:.8rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{display:block;padding:0 .6rem;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"\E5C4"}[dir=rtl] .md-nav__title:before{content:"\E5C8"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:0 .6rem}.md-nav__item:last-child{padding-bottom:.6rem}.md-nav__item .md-nav__item{padding-right:0}[dir=rtl] .md-nav__item .md-nav__item{padding-right:.6rem;padding-left:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;margin-top:.625em;-webkit-transition:color .125s;transition:color .125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"\E313"}html .md-nav__link[for=__toc],html .md-nav__link[for=__toc]+.md-nav__link:after,html .md-nav__link[for=__toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link--active,.md-nav__link:active{color:#3f51b5}.md-nav__item--nested>.md-nav__link{color:inherit}.md-nav__link:focus,.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 2.2rem 0 3.6rem;text-overflow:ellipsis;z-index:2}[dir=rtl] .md-search__input{padding:0 3.6rem 0 2.2rem}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-moz-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input:-ms-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-ms-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-webkit-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-moz-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input:-ms-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-input-placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;font-size:1.2rem;cursor:pointer;z-index:2}.md-search__icon:hover{opacity:.7}.md-search__icon[for=__search]{top:.3rem;left:.5rem}[dir=rtl] .md-search__icon[for=__search]{right:.5rem;left:auto}.md-search__icon[for=__search]:before{content:"\E8B6"}.md-search__icon[type=reset]{top:.3rem;right:.5rem;-webkit-transform:scale(.125);transform:scale(.125);-webkit-transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:transform .15s 
cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);opacity:0}[dir=rtl] .md-search__icon[type=reset]{right:auto;left:.5rem}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]{-webkit-transform:scale(1);transform:scale(1);opacity:1}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]:hover{opacity:.7}.md-search__output{position:absolute;width:100%;border-radius:0 0 .1rem .1rem;overflow:hidden;z-index:1}.md-search__scrollwrap{height:100%;background-color:#fff;box-shadow:inset 0 .05rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result{color:rgba(0,0,0,.87);word-break:break-word}.md-search-result__meta{padding:0 .8rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:.64rem;line-height:1.8rem}.md-search-result__list{margin:0;padding:0;border-top:.05rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{box-shadow:0 -.05rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;-webkit-transition:background .25s;transition:background .25s;outline:0;overflow:hidden}.md-search-result__link:hover,.md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}.md-search-result__link:hover .md-search-result__article:before,.md-search-result__link[data-md-state=active] .md-search-result__article:before{opacity:.7}.md-search-result__link:last-child .md-search-result__teaser{margin-bottom:.6rem}.md-search-result__article{position:relative;padding:0 .8rem;overflow:auto}.md-search-result__article--document:before{position:absolute;left:0;margin:.1rem;-webkit-transition:opacity .25s;transition:opacity .25s;color:rgba(0,0,0,.54);content:"\E880"}[dir=rtl] .md-search-result__article--document:before{right:0;left:auto}.md-search-result__article--document .md-search-result__title{margin:.55rem 0;font-size:.8rem;font-weight:400;line-height:1.4}.md-search-result__title{margin:.5em 0;font-size:.64rem;font-weight:700;line-height:1.4}.md-search-result__teaser{display:-webkit-box;max-height:1.65rem;margin:.5em 0;color:rgba(0,0,0,.54);font-size:.64rem;line-height:1.4;text-overflow:ellipsis;overflow:hidden;-webkit-box-orient:vertical;-webkit-line-clamp:2}.md-search-result em{font-style:normal;font-weight:700;text-decoration:underline}.md-sidebar{position:absolute;width:12.1rem;padding:1.2rem 0;overflow:hidden}.md-sidebar[data-md-state=lock]{position:fixed;top:2.4rem}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .2rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@-webkit-keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;padding-right:.6rem;-webkit-transition:opacity 
.25s;transition:opacity .25s;font-size:.65rem;line-height:1.2;white-space:nowrap}[dir=rtl] .md-source{padding-right:0;padding-left:.6rem}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:2.4rem;content:"";vertical-align:middle}.md-source__icon{width:2.4rem}.md-source__icon svg{width:1.2rem;height:1.2rem;margin-top:.6rem;margin-left:.6rem}[dir=rtl] .md-source__icon svg{margin-right:.6rem;margin-left:0}.md-source__icon+.md-source__repository{margin-left:-2rem;padding-left:2rem}[dir=rtl] .md-source__icon+.md-source__repository{margin-right:-2rem;margin-left:0;padding-right:2rem;padding-left:0}.md-source__repository{display:inline-block;max-width:100%;margin-left:.6rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:.55rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done .25s ease-in;animation:md-source__facts--done .25s ease-in}.md-source__fact{float:left}[dir=rtl] .md-source__fact{float:right}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done .4s ease-out;animation:md-source__fact--done .4s ease-out}.md-source__fact:before{margin:0 .1rem;content:"\00B7"}.md-source__fact:first-child:before{display:none}.md-source-file{display:inline-block;margin:1em .5em 1em 0;padding-right:.25rem;border-radius:.1rem;background-color:rgba(0,0,0,.07);font-size:.64rem;list-style-type:none;cursor:pointer;overflow:hidden}.md-source-file:before{display:inline-block;margin-right:.25rem;padding:.25rem;background-color:rgba(0,0,0,.26);color:#fff;font-size:.8rem;content:"\E86F";vertical-align:middle}html .md-source-file{-webkit-transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1)}html .md-source-file:before{-webkit-transition:inherit;transition:inherit}html body .md-typeset .md-source-file{color:rgba(0,0,0,.54)}.md-source-file:hover{box-shadow:0 0 8px rgba(0,0,0,.18),0 8px 16px rgba(0,0,0,.36)}.md-source-file:hover:before{background-color:#536dfe}.md-tabs{width:100%;-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;overflow:auto}.md-tabs__list{margin:0 0 0 .2rem;padding:0;list-style:none;white-space:nowrap}.md-tabs__item{display:inline-block;height:2.4rem;padding-right:.6rem;padding-left:.6rem}.md-tabs__link{display:block;margin-top:.8rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);font-size:.7rem;opacity:.7}.md-tabs__link--active,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{-webkit-transition-delay:.02s;transition-delay:.02s}.md-tabs__item:nth-child(3) .md-tabs__link{-webkit-transition-delay:.04s;transition-delay:.04s}.md-tabs__item:nth-child(4) .md-tabs__link{-webkit-transition-delay:.06s;transition-delay:.06s}.md-tabs__item:nth-child(5) .md-tabs__link{-webkit-transition-delay:.08s;transition-delay:.08s}.md-tabs__item:nth-child(6) .md-tabs__link{-webkit-transition-delay:.1s;transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{-webkit-transition-delay:.12s;transition-delay:.12s}.md-tabs__item:nth-child(8) 
.md-tabs__link{-webkit-transition-delay:.14s;transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{-webkit-transition-delay:.16s;transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{-webkit-transition-delay:.18s;transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{-webkit-transition-delay:.2s;transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{-webkit-transition-delay:.22s;transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{-webkit-transition-delay:.24s;transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{-webkit-transition-delay:.26s;transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{-webkit-transition-delay:.28s;transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{-webkit-transition-delay:.3s;transition-delay:.3s}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs[data-md-state=hidden] .md-tabs__link{-webkit-transform:translateY(50%);transform:translateY(50%);-webkit-transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,transform 0s .4s,opacity .1s;transition:color .25s,transform 0s .4s,opacity .1s,-webkit-transform 0s .4s;opacity:0}.md-typeset .admonition,.md-typeset details{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:relative;margin:1.5625em 0;padding:0 .6rem;border-left:.2rem solid #448aff;border-radius:.1rem;font-size:.64rem;overflow:auto}[dir=rtl] .md-typeset .admonition,[dir=rtl] .md-typeset details{border-right:.2rem solid #448aff;border-left:none}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin:1em 0}.md-typeset .admonition>.admonition-title,.md-typeset .admonition>summary,.md-typeset details>.admonition-title,.md-typeset details>summary{margin:0 -.6rem;padding:.4rem .6rem .4rem 2rem;border-bottom:.05rem solid rgba(68,138,255,.1);background-color:rgba(68,138,255,.1);font-weight:700}[dir=rtl] .md-typeset .admonition>.admonition-title,[dir=rtl] .md-typeset .admonition>summary,[dir=rtl] .md-typeset details>.admonition-title,[dir=rtl] .md-typeset details>summary{padding:.4rem 2rem .4rem .6rem}.md-typeset .admonition>.admonition-title:last-child,.md-typeset .admonition>summary:last-child,.md-typeset details>.admonition-title:last-child,.md-typeset details>summary:last-child{margin-bottom:0}.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before{position:absolute;left:.6rem;color:#448aff;font-size:1rem;content:"\E3C9"}[dir=rtl] .md-typeset .admonition>.admonition-title:before,[dir=rtl] .md-typeset .admonition>summary:before,[dir=rtl] .md-typeset details>.admonition-title:before,[dir=rtl] .md-typeset details>summary:before{right:.6rem;left:auto}.md-typeset .admonition.abstract,.md-typeset .admonition.summary,.md-typeset .admonition.tldr,.md-typeset details.abstract,.md-typeset details.summary,.md-typeset details.tldr{border-left-color:#00b0ff}[dir=rtl] .md-typeset .admonition.abstract,[dir=rtl] .md-typeset .admonition.summary,[dir=rtl] .md-typeset .admonition.tldr,[dir=rtl] .md-typeset details.abstract,[dir=rtl] .md-typeset details.summary,[dir=rtl] .md-typeset details.tldr{border-right-color:#00b0ff}.md-typeset 
.admonition.abstract>.admonition-title,.md-typeset .admonition.abstract>summary,.md-typeset .admonition.summary>.admonition-title,.md-typeset .admonition.summary>summary,.md-typeset .admonition.tldr>.admonition-title,.md-typeset .admonition.tldr>summary,.md-typeset details.abstract>.admonition-title,.md-typeset details.abstract>summary,.md-typeset details.summary>.admonition-title,.md-typeset details.summary>summary,.md-typeset details.tldr>.admonition-title,.md-typeset details.tldr>summary{border-bottom-color:rgba(0,176,255,.1);background-color:rgba(0,176,255,.1)}.md-typeset .admonition.abstract>.admonition-title:before,.md-typeset .admonition.abstract>summary:before,.md-typeset .admonition.summary>.admonition-title:before,.md-typeset .admonition.summary>summary:before,.md-typeset .admonition.tldr>.admonition-title:before,.md-typeset .admonition.tldr>summary:before,.md-typeset details.abstract>.admonition-title:before,.md-typeset details.abstract>summary:before,.md-typeset details.summary>.admonition-title:before,.md-typeset details.summary>summary:before,.md-typeset details.tldr>.admonition-title:before,.md-typeset details.tldr>summary:before{color:#00b0ff;content:""}.md-typeset .admonition.info,.md-typeset .admonition.todo,.md-typeset details.info,.md-typeset details.todo{border-left-color:#00b8d4}[dir=rtl] .md-typeset .admonition.info,[dir=rtl] .md-typeset .admonition.todo,[dir=rtl] .md-typeset details.info,[dir=rtl] .md-typeset details.todo{border-right-color:#00b8d4}.md-typeset .admonition.info>.admonition-title,.md-typeset .admonition.info>summary,.md-typeset .admonition.todo>.admonition-title,.md-typeset .admonition.todo>summary,.md-typeset details.info>.admonition-title,.md-typeset details.info>summary,.md-typeset details.todo>.admonition-title,.md-typeset details.todo>summary{border-bottom-color:rgba(0,184,212,.1);background-color:rgba(0,184,212,.1)}.md-typeset .admonition.info>.admonition-title:before,.md-typeset .admonition.info>summary:before,.md-typeset .admonition.todo>.admonition-title:before,.md-typeset .admonition.todo>summary:before,.md-typeset details.info>.admonition-title:before,.md-typeset details.info>summary:before,.md-typeset details.todo>.admonition-title:before,.md-typeset details.todo>summary:before{color:#00b8d4;content:""}.md-typeset .admonition.hint,.md-typeset .admonition.important,.md-typeset .admonition.tip,.md-typeset details.hint,.md-typeset details.important,.md-typeset details.tip{border-left-color:#00bfa5}[dir=rtl] .md-typeset .admonition.hint,[dir=rtl] .md-typeset .admonition.important,[dir=rtl] .md-typeset .admonition.tip,[dir=rtl] .md-typeset details.hint,[dir=rtl] .md-typeset details.important,[dir=rtl] .md-typeset details.tip{border-right-color:#00bfa5}.md-typeset .admonition.hint>.admonition-title,.md-typeset .admonition.hint>summary,.md-typeset .admonition.important>.admonition-title,.md-typeset .admonition.important>summary,.md-typeset .admonition.tip>.admonition-title,.md-typeset .admonition.tip>summary,.md-typeset details.hint>.admonition-title,.md-typeset details.hint>summary,.md-typeset details.important>.admonition-title,.md-typeset details.important>summary,.md-typeset details.tip>.admonition-title,.md-typeset details.tip>summary{border-bottom-color:rgba(0,191,165,.1);background-color:rgba(0,191,165,.1)}.md-typeset .admonition.hint>.admonition-title:before,.md-typeset .admonition.hint>summary:before,.md-typeset .admonition.important>.admonition-title:before,.md-typeset .admonition.important>summary:before,.md-typeset 
.admonition.tip>.admonition-title:before,.md-typeset .admonition.tip>summary:before,.md-typeset details.hint>.admonition-title:before,.md-typeset details.hint>summary:before,.md-typeset details.important>.admonition-title:before,.md-typeset details.important>summary:before,.md-typeset details.tip>.admonition-title:before,.md-typeset details.tip>summary:before{color:#00bfa5;content:""}.md-typeset .admonition.check,.md-typeset .admonition.done,.md-typeset .admonition.success,.md-typeset details.check,.md-typeset details.done,.md-typeset details.success{border-left-color:#00c853}[dir=rtl] .md-typeset .admonition.check,[dir=rtl] .md-typeset .admonition.done,[dir=rtl] .md-typeset .admonition.success,[dir=rtl] .md-typeset details.check,[dir=rtl] .md-typeset details.done,[dir=rtl] .md-typeset details.success{border-right-color:#00c853}.md-typeset .admonition.check>.admonition-title,.md-typeset .admonition.check>summary,.md-typeset .admonition.done>.admonition-title,.md-typeset .admonition.done>summary,.md-typeset .admonition.success>.admonition-title,.md-typeset .admonition.success>summary,.md-typeset details.check>.admonition-title,.md-typeset details.check>summary,.md-typeset details.done>.admonition-title,.md-typeset details.done>summary,.md-typeset details.success>.admonition-title,.md-typeset details.success>summary{border-bottom-color:rgba(0,200,83,.1);background-color:rgba(0,200,83,.1)}.md-typeset .admonition.check>.admonition-title:before,.md-typeset .admonition.check>summary:before,.md-typeset .admonition.done>.admonition-title:before,.md-typeset .admonition.done>summary:before,.md-typeset .admonition.success>.admonition-title:before,.md-typeset .admonition.success>summary:before,.md-typeset details.check>.admonition-title:before,.md-typeset details.check>summary:before,.md-typeset details.done>.admonition-title:before,.md-typeset details.done>summary:before,.md-typeset details.success>.admonition-title:before,.md-typeset details.success>summary:before{color:#00c853;content:""}.md-typeset .admonition.faq,.md-typeset .admonition.help,.md-typeset .admonition.question,.md-typeset details.faq,.md-typeset details.help,.md-typeset details.question{border-left-color:#64dd17}[dir=rtl] .md-typeset .admonition.faq,[dir=rtl] .md-typeset .admonition.help,[dir=rtl] .md-typeset .admonition.question,[dir=rtl] .md-typeset details.faq,[dir=rtl] .md-typeset details.help,[dir=rtl] .md-typeset details.question{border-right-color:#64dd17}.md-typeset .admonition.faq>.admonition-title,.md-typeset .admonition.faq>summary,.md-typeset .admonition.help>.admonition-title,.md-typeset .admonition.help>summary,.md-typeset .admonition.question>.admonition-title,.md-typeset .admonition.question>summary,.md-typeset details.faq>.admonition-title,.md-typeset details.faq>summary,.md-typeset details.help>.admonition-title,.md-typeset details.help>summary,.md-typeset details.question>.admonition-title,.md-typeset details.question>summary{border-bottom-color:rgba(100,221,23,.1);background-color:rgba(100,221,23,.1)}.md-typeset .admonition.faq>.admonition-title:before,.md-typeset .admonition.faq>summary:before,.md-typeset .admonition.help>.admonition-title:before,.md-typeset .admonition.help>summary:before,.md-typeset .admonition.question>.admonition-title:before,.md-typeset .admonition.question>summary:before,.md-typeset details.faq>.admonition-title:before,.md-typeset details.faq>summary:before,.md-typeset details.help>.admonition-title:before,.md-typeset details.help>summary:before,.md-typeset 
details.question>.admonition-title:before,.md-typeset details.question>summary:before{color:#64dd17;content:""}.md-typeset .admonition.attention,.md-typeset .admonition.caution,.md-typeset .admonition.warning,.md-typeset details.attention,.md-typeset details.caution,.md-typeset details.warning{border-left-color:#ff9100}[dir=rtl] .md-typeset .admonition.attention,[dir=rtl] .md-typeset .admonition.caution,[dir=rtl] .md-typeset .admonition.warning,[dir=rtl] .md-typeset details.attention,[dir=rtl] .md-typeset details.caution,[dir=rtl] .md-typeset details.warning{border-right-color:#ff9100}.md-typeset .admonition.attention>.admonition-title,.md-typeset .admonition.attention>summary,.md-typeset .admonition.caution>.admonition-title,.md-typeset .admonition.caution>summary,.md-typeset .admonition.warning>.admonition-title,.md-typeset .admonition.warning>summary,.md-typeset details.attention>.admonition-title,.md-typeset details.attention>summary,.md-typeset details.caution>.admonition-title,.md-typeset details.caution>summary,.md-typeset details.warning>.admonition-title,.md-typeset details.warning>summary{border-bottom-color:rgba(255,145,0,.1);background-color:rgba(255,145,0,.1)}.md-typeset .admonition.attention>.admonition-title:before,.md-typeset .admonition.attention>summary:before,.md-typeset .admonition.caution>.admonition-title:before,.md-typeset .admonition.caution>summary:before,.md-typeset .admonition.warning>.admonition-title:before,.md-typeset .admonition.warning>summary:before,.md-typeset details.attention>.admonition-title:before,.md-typeset details.attention>summary:before,.md-typeset details.caution>.admonition-title:before,.md-typeset details.caution>summary:before,.md-typeset details.warning>.admonition-title:before,.md-typeset details.warning>summary:before{color:#ff9100;content:""}.md-typeset .admonition.fail,.md-typeset .admonition.failure,.md-typeset .admonition.missing,.md-typeset details.fail,.md-typeset details.failure,.md-typeset details.missing{border-left-color:#ff5252}[dir=rtl] .md-typeset .admonition.fail,[dir=rtl] .md-typeset .admonition.failure,[dir=rtl] .md-typeset .admonition.missing,[dir=rtl] .md-typeset details.fail,[dir=rtl] .md-typeset details.failure,[dir=rtl] .md-typeset details.missing{border-right-color:#ff5252}.md-typeset .admonition.fail>.admonition-title,.md-typeset .admonition.fail>summary,.md-typeset .admonition.failure>.admonition-title,.md-typeset .admonition.failure>summary,.md-typeset .admonition.missing>.admonition-title,.md-typeset .admonition.missing>summary,.md-typeset details.fail>.admonition-title,.md-typeset details.fail>summary,.md-typeset details.failure>.admonition-title,.md-typeset details.failure>summary,.md-typeset details.missing>.admonition-title,.md-typeset details.missing>summary{border-bottom-color:rgba(255,82,82,.1);background-color:rgba(255,82,82,.1)}.md-typeset .admonition.fail>.admonition-title:before,.md-typeset .admonition.fail>summary:before,.md-typeset .admonition.failure>.admonition-title:before,.md-typeset .admonition.failure>summary:before,.md-typeset .admonition.missing>.admonition-title:before,.md-typeset .admonition.missing>summary:before,.md-typeset details.fail>.admonition-title:before,.md-typeset details.fail>summary:before,.md-typeset details.failure>.admonition-title:before,.md-typeset details.failure>summary:before,.md-typeset details.missing>.admonition-title:before,.md-typeset details.missing>summary:before{color:#ff5252;content:""}.md-typeset .admonition.danger,.md-typeset .admonition.error,.md-typeset 
details.danger,.md-typeset details.error{border-left-color:#ff1744}[dir=rtl] .md-typeset .admonition.danger,[dir=rtl] .md-typeset .admonition.error,[dir=rtl] .md-typeset details.danger,[dir=rtl] .md-typeset details.error{border-right-color:#ff1744}.md-typeset .admonition.danger>.admonition-title,.md-typeset .admonition.danger>summary,.md-typeset .admonition.error>.admonition-title,.md-typeset .admonition.error>summary,.md-typeset details.danger>.admonition-title,.md-typeset details.danger>summary,.md-typeset details.error>.admonition-title,.md-typeset details.error>summary{border-bottom-color:rgba(255,23,68,.1);background-color:rgba(255,23,68,.1)}.md-typeset .admonition.danger>.admonition-title:before,.md-typeset .admonition.danger>summary:before,.md-typeset .admonition.error>.admonition-title:before,.md-typeset .admonition.error>summary:before,.md-typeset details.danger>.admonition-title:before,.md-typeset details.danger>summary:before,.md-typeset details.error>.admonition-title:before,.md-typeset details.error>summary:before{color:#ff1744;content:""}.md-typeset .admonition.bug,.md-typeset details.bug{border-left-color:#f50057}[dir=rtl] .md-typeset .admonition.bug,[dir=rtl] .md-typeset details.bug{border-right-color:#f50057}.md-typeset .admonition.bug>.admonition-title,.md-typeset .admonition.bug>summary,.md-typeset details.bug>.admonition-title,.md-typeset details.bug>summary{border-bottom-color:rgba(245,0,87,.1);background-color:rgba(245,0,87,.1)}.md-typeset .admonition.bug>.admonition-title:before,.md-typeset .admonition.bug>summary:before,.md-typeset details.bug>.admonition-title:before,.md-typeset details.bug>summary:before{color:#f50057;content:""}.md-typeset .admonition.example,.md-typeset details.example{border-left-color:#651fff}[dir=rtl] .md-typeset .admonition.example,[dir=rtl] .md-typeset details.example{border-right-color:#651fff}.md-typeset .admonition.example>.admonition-title,.md-typeset .admonition.example>summary,.md-typeset details.example>.admonition-title,.md-typeset details.example>summary{border-bottom-color:rgba(101,31,255,.1);background-color:rgba(101,31,255,.1)}.md-typeset .admonition.example>.admonition-title:before,.md-typeset .admonition.example>summary:before,.md-typeset details.example>.admonition-title:before,.md-typeset details.example>summary:before{color:#651fff;content:""}.md-typeset .admonition.cite,.md-typeset .admonition.quote,.md-typeset details.cite,.md-typeset details.quote{border-left-color:#9e9e9e}[dir=rtl] .md-typeset .admonition.cite,[dir=rtl] .md-typeset .admonition.quote,[dir=rtl] .md-typeset details.cite,[dir=rtl] .md-typeset details.quote{border-right-color:#9e9e9e}.md-typeset .admonition.cite>.admonition-title,.md-typeset .admonition.cite>summary,.md-typeset .admonition.quote>.admonition-title,.md-typeset .admonition.quote>summary,.md-typeset details.cite>.admonition-title,.md-typeset details.cite>summary,.md-typeset details.quote>.admonition-title,.md-typeset details.quote>summary{border-bottom-color:hsla(0,0%,62%,.1);background-color:hsla(0,0%,62%,.1)}.md-typeset .admonition.cite>.admonition-title:before,.md-typeset .admonition.cite>summary:before,.md-typeset .admonition.quote>.admonition-title:before,.md-typeset .admonition.quote>summary:before,.md-typeset details.cite>.admonition-title:before,.md-typeset details.cite>summary:before,.md-typeset details.quote>.admonition-title:before,.md-typeset details.quote>summary:before{color:#9e9e9e;content:""}.codehilite .o,.codehilite .ow,.md-typeset .highlight .o,.md-typeset .highlight 
.ow{color:inherit}.codehilite .ge,.md-typeset .highlight .ge{color:#000}.codehilite .gr,.md-typeset .highlight .gr{color:#a00}.codehilite .gh,.md-typeset .highlight .gh{color:#999}.codehilite .go,.md-typeset .highlight .go{color:#888}.codehilite .gp,.md-typeset .highlight .gp{color:#555}.codehilite .gs,.md-typeset .highlight .gs{color:inherit}.codehilite .gu,.md-typeset .highlight .gu{color:#aaa}.codehilite .gt,.md-typeset .highlight .gt{color:#a00}.codehilite .gd,.md-typeset .highlight .gd{background-color:#fdd}.codehilite .gi,.md-typeset .highlight .gi{background-color:#dfd}.codehilite .k,.md-typeset .highlight .k{color:#3b78e7}.codehilite .kc,.md-typeset .highlight .kc{color:#a71d5d}.codehilite .kd,.codehilite .kn,.md-typeset .highlight .kd,.md-typeset .highlight .kn{color:#3b78e7}.codehilite .kp,.md-typeset .highlight .kp{color:#a71d5d}.codehilite .kr,.codehilite .kt,.md-typeset .highlight .kr,.md-typeset .highlight .kt{color:#3e61a2}.codehilite .c,.codehilite .cm,.md-typeset .highlight .c,.md-typeset .highlight .cm{color:#999}.codehilite .cp,.md-typeset .highlight .cp{color:#666}.codehilite .c1,.codehilite .ch,.codehilite .cs,.md-typeset .highlight .c1,.md-typeset .highlight .ch,.md-typeset .highlight .cs{color:#999}.codehilite .na,.codehilite .nb,.md-typeset .highlight .na,.md-typeset .highlight .nb{color:#c2185b}.codehilite .bp,.md-typeset .highlight .bp{color:#3e61a2}.codehilite .nc,.md-typeset .highlight .nc{color:#c2185b}.codehilite .no,.md-typeset .highlight .no{color:#3e61a2}.codehilite .nd,.codehilite .ni,.md-typeset .highlight .nd,.md-typeset .highlight .ni{color:#666}.codehilite .ne,.codehilite .nf,.md-typeset .highlight .ne,.md-typeset .highlight .nf{color:#c2185b}.codehilite .nl,.md-typeset .highlight .nl{color:#3b5179}.codehilite .nn,.md-typeset .highlight .nn{color:#ec407a}.codehilite .nt,.md-typeset .highlight .nt{color:#3b78e7}.codehilite .nv,.codehilite .vc,.codehilite .vg,.codehilite .vi,.md-typeset .highlight .nv,.md-typeset .highlight .vc,.md-typeset .highlight .vg,.md-typeset .highlight .vi{color:#3e61a2}.codehilite .nx,.md-typeset .highlight .nx{color:#ec407a}.codehilite .il,.codehilite .m,.codehilite .mf,.codehilite .mh,.codehilite .mi,.codehilite .mo,.md-typeset .highlight .il,.md-typeset .highlight .m,.md-typeset .highlight .mf,.md-typeset .highlight .mh,.md-typeset .highlight .mi,.md-typeset .highlight .mo{color:#e74c3c}.codehilite .s,.codehilite .sb,.codehilite .sc,.md-typeset .highlight .s,.md-typeset .highlight .sb,.md-typeset .highlight .sc{color:#0d904f}.codehilite .sd,.md-typeset .highlight .sd{color:#999}.codehilite .s2,.md-typeset .highlight .s2{color:#0d904f}.codehilite .se,.codehilite .sh,.codehilite .si,.codehilite .sx,.md-typeset .highlight .se,.md-typeset .highlight .sh,.md-typeset .highlight .si,.md-typeset .highlight .sx{color:#183691}.codehilite .sr,.md-typeset .highlight .sr{color:#009926}.codehilite .s1,.codehilite .ss,.md-typeset .highlight .s1,.md-typeset .highlight .ss{color:#0d904f}.codehilite .err,.md-typeset .highlight .err{color:#a61717}.codehilite .w,.md-typeset .highlight .w{color:transparent}.codehilite .hll,.md-typeset .highlight .hll{display:block;margin:0 -.6rem;padding:0 .6rem;background-color:rgba(255,235,59,.5)}.md-typeset .codehilite,.md-typeset .highlight{position:relative;margin:1em 0;padding:0;border-radius:.1rem;background-color:hsla(0,0%,92.5%,.5);color:#37474f;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset .codehilite code,.md-typeset .codehilite pre,.md-typeset .highlight code,.md-typeset .highlight 
pre{display:block;margin:0;padding:.525rem .6rem;background-color:transparent;overflow:auto;vertical-align:top}.md-typeset .codehilite code::-webkit-scrollbar,.md-typeset .codehilite pre::-webkit-scrollbar,.md-typeset .highlight code::-webkit-scrollbar,.md-typeset .highlight pre::-webkit-scrollbar{width:.2rem;height:.2rem}.md-typeset .codehilite code::-webkit-scrollbar-thumb,.md-typeset .codehilite pre::-webkit-scrollbar-thumb,.md-typeset .highlight code::-webkit-scrollbar-thumb,.md-typeset .highlight pre::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset .codehilite code::-webkit-scrollbar-thumb:hover,.md-typeset .codehilite pre::-webkit-scrollbar-thumb:hover,.md-typeset .highlight code::-webkit-scrollbar-thumb:hover,.md-typeset .highlight pre::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset pre.codehilite,.md-typeset pre.highlight{overflow:visible}.md-typeset pre.codehilite code,.md-typeset pre.highlight code{display:block;padding:.525rem .6rem;overflow:auto}.md-typeset .codehilitetable,.md-typeset .highlighttable{display:block;margin:1em 0;border-radius:.2em;font-size:.8rem;overflow:hidden}.md-typeset .codehilitetable tbody,.md-typeset .codehilitetable td,.md-typeset .highlighttable tbody,.md-typeset .highlighttable td{display:block;padding:0}.md-typeset .codehilitetable tr,.md-typeset .highlighttable tr{display:-webkit-box;display:flex}.md-typeset .codehilitetable .codehilite,.md-typeset .codehilitetable .highlight,.md-typeset .codehilitetable .linenodiv,.md-typeset .highlighttable .codehilite,.md-typeset .highlighttable .highlight,.md-typeset .highlighttable .linenodiv{margin:0;border-radius:0}.md-typeset .codehilitetable .linenodiv,.md-typeset .highlighttable .linenodiv{padding:.525rem .6rem}.md-typeset .codehilitetable .linenos,.md-typeset .highlighttable .linenos{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.26);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .codehilitetable .linenos pre,.md-typeset .highlighttable .linenos pre{margin:0;padding:0;background-color:transparent;color:inherit;text-align:right}.md-typeset .codehilitetable .code,.md-typeset .highlighttable .code{-webkit-box-flex:1;flex:1;overflow:hidden}.md-typeset>.codehilitetable,.md-typeset>.highlighttable{box-shadow:none}.md-typeset [id^="fnref:"]{display:inline-block}.md-typeset [id^="fnref:"]:target{margin-top:-3.8rem;padding-top:3.8rem;pointer-events:none}.md-typeset [id^="fn:"]:before{display:none;height:0;content:""}.md-typeset [id^="fn:"]:target:before{display:block;margin-top:-3.5rem;padding-top:3.5rem;pointer-events:none}.md-typeset .footnote{color:rgba(0,0,0,.54);font-size:.64rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{-webkit-transition:color .25s;transition:color .25s}.md-typeset .footnote li:target{color:rgba(0,0,0,.87)}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{-webkit-transform:translateX(0);transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover,.md-typeset .footnote li:target .footnote-backref{color:#536dfe}.md-typeset .footnote-ref{display:inline-block;pointer-events:auto}.md-typeset .footnote-ref:before{display:inline;margin:0 .2em;border-left:.05rem solid rgba(0,0,0,.26);font-size:1.25em;content:"";vertical-align:-.25rem}.md-typeset 
.footnote-backref{display:inline-block;-webkit-transform:translateX(.25rem);transform:translateX(.25rem);-webkit-transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s,-webkit-transform .25s .125s;color:rgba(0,0,0,.26);font-size:0;opacity:0;vertical-align:text-bottom}[dir=rtl] .md-typeset .footnote-backref{-webkit-transform:translateX(-.25rem);transform:translateX(-.25rem)}.md-typeset .footnote-backref:before{display:inline-block;font-size:.8rem;content:"\E31B"}[dir=rtl] .md-typeset .footnote-backref:before{-webkit-transform:scaleX(-1);transform:scaleX(-1)}.md-typeset .headerlink{display:inline-block;margin-left:.5rem;-webkit-transform:translateY(.25rem);transform:translateY(.25rem);-webkit-transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s,-webkit-transform .25s .25s;opacity:0}[dir=rtl] .md-typeset .headerlink{margin-right:.5rem;margin-left:0}html body .md-typeset .headerlink{color:rgba(0,0,0,.26)}.md-typeset h1[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h1[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h1[id] .headerlink:focus,.md-typeset h1[id]:hover .headerlink,.md-typeset h1[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h1[id] .headerlink:focus,.md-typeset h1[id]:hover .headerlink:hover,.md-typeset h1[id]:target .headerlink{color:#536dfe}.md-typeset h2[id]:before{display:block;margin-top:-8px;padding-top:8px;content:""}.md-typeset h2[id]:target:before{margin-top:-3.4rem;padding-top:3.4rem}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink,.md-typeset h2[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink:hover,.md-typeset h2[id]:target .headerlink{color:#536dfe}.md-typeset h3[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h3[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink,.md-typeset h3[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink:hover,.md-typeset h3[id]:target .headerlink{color:#536dfe}.md-typeset h4[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h4[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink,.md-typeset h4[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink:hover,.md-typeset h4[id]:target .headerlink{color:#536dfe}.md-typeset h5[id]:before{display:block;margin-top:-11px;padding-top:11px;content:""}.md-typeset h5[id]:target:before{margin-top:-3.55rem;padding-top:3.55rem}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink,.md-typeset h5[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h5[id] 
.headerlink:focus,.md-typeset h5[id]:hover .headerlink:hover,.md-typeset h5[id]:target .headerlink{color:#536dfe}.md-typeset h6[id]:before{display:block;margin-top:-11px;padding-top:11px;content:""}.md-typeset h6[id]:target:before{margin-top:-3.55rem;padding-top:3.55rem}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink,.md-typeset h6[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink:hover,.md-typeset h6[id]:target .headerlink{color:#536dfe}.md-typeset .MJXc-display{margin:.75em 0;padding:.75em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .MathJax_CHTML{outline:0}.md-typeset .critic.comment,.md-typeset del.critic,.md-typeset ins.critic{margin:0 .25em;padding:.0625em 0;border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:#fdd;box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd}.md-typeset ins.critic{background-color:#dfd;box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd}.md-typeset .critic.comment{background-color:hsla(0,0%,92.5%,.5);color:#37474f;box-shadow:.25em 0 0 hsla(0,0%,92.5%,.5),-.25em 0 0 hsla(0,0%,92.5%,.5)}.md-typeset .critic.comment:before{padding-right:.125em;color:rgba(0,0,0,.26);content:"\E0B7";vertical-align:-.125em}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:.8rem;padding-left:.8rem;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}.md-typeset details{display:block;padding-top:0}.md-typeset details[open]>summary:after{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.md-typeset details:not([open]){padding-bottom:0}.md-typeset details:not([open])>summary{border-bottom:none}.md-typeset details summary{padding-right:2rem}[dir=rtl] .md-typeset details summary{padding-left:2rem}.no-details .md-typeset details:not([open])>*{display:none}.no-details .md-typeset details:not([open]) summary{display:block}.md-typeset summary{display:block;outline:none;cursor:pointer}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset summary:after{position:absolute;top:.4rem;right:.6rem;color:rgba(0,0,0,.26);font-size:1rem;content:"\E313"}[dir=rtl] .md-typeset summary:after{right:auto;left:.6rem}.md-typeset .emojione{width:1rem;vertical-align:text-top}.md-typeset code.codehilite,.md-typeset code.highlight{margin:0 .29412em;padding:.07353em 0}.md-typeset .superfences-content{display:none;-webkit-box-ordinal-group:100;order:99;width:100%;background-color:#fff}.md-typeset .superfences-content>*{margin:0;border-radius:0}.md-typeset .superfences-tabs{display:-webkit-box;display:flex;position:relative;flex-wrap:wrap;margin:1em 0;border:.05rem solid rgba(0,0,0,.07);border-radius:.2em}.md-typeset .superfences-tabs>input{display:none}.md-typeset .superfences-tabs>input:checked+label{font-weight:700}.md-typeset .superfences-tabs>input:checked+label+.superfences-content{display:block}.md-typeset .superfences-tabs>label{width:auto;padding:.6rem;-webkit-transition:color .125s;transition:color .125s;font-size:.64rem;cursor:pointer}html .md-typeset .superfences-tabs>label:hover{color:#536dfe}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}[dir=rtl] .md-typeset .task-list-item [type=checkbox]{right:-2em;left:auto}.md-typeset .task-list-control 
.task-list-indicator:before{position:absolute;top:.15em;left:-1.25em;color:rgba(0,0,0,.26);font-size:1.25em;content:"\E835";vertical-align:-.25em}[dir=rtl] .md-typeset .task-list-control .task-list-indicator:before{right:-1.25em;left:auto}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator:before{content:"\E834"}.md-typeset .task-list-control [type=checkbox]{opacity:0;z-index:-1}@media print{.md-typeset a:after{color:rgba(0,0,0,.54);content:" [" attr(href) "]"}.md-typeset code,.md-typeset pre{white-space:pre-wrap}.md-typeset code{box-shadow:none;-webkit-box-decoration-break:initial;box-decoration-break:slice}.md-clipboard,.md-content__icon,.md-footer,.md-header,.md-sidebar,.md-tabs,.md-typeset .headerlink{display:none}}@media only screen and (max-width:44.9375em){.md-typeset pre{margin:1em -.8rem;border-radius:0}.md-typeset pre>code{padding:.525rem .8rem}.md-footer-nav__link--prev .md-footer-nav__title{display:none}.md-search-result__teaser{max-height:2.5rem;-webkit-line-clamp:3}.codehilite .hll,.md-typeset .highlight .hll{margin:0 -.8rem;padding:0 .8rem}.md-typeset>.codehilite,.md-typeset>.highlight{margin:1em -.8rem;border-radius:0}.md-typeset>.codehilite code,.md-typeset>.codehilite pre,.md-typeset>.highlight code,.md-typeset>.highlight pre{padding:.525rem .8rem}.md-typeset>.codehilitetable,.md-typeset>.highlighttable{margin:1em -.8rem;border-radius:0}.md-typeset>.codehilitetable .codehilite>code,.md-typeset>.codehilitetable .codehilite>pre,.md-typeset>.codehilitetable .highlight>code,.md-typeset>.codehilitetable .highlight>pre,.md-typeset>.codehilitetable .linenodiv,.md-typeset>.highlighttable .codehilite>code,.md-typeset>.highlighttable .codehilite>pre,.md-typeset>.highlighttable .highlight>code,.md-typeset>.highlighttable .highlight>pre,.md-typeset>.highlighttable .linenodiv{padding:.5rem .8rem}.md-typeset>p>.MJXc-display{margin:.75em -.8rem;padding:.25em .8rem}.md-typeset>.superfences-tabs{margin:1em -.8rem;border:0;border-top:.05rem solid rgba(0,0,0,.07);border-radius:0}.md-typeset>.superfences-tabs code,.md-typeset>.superfences-tabs pre{padding:.525rem .8rem}}@media only screen and (min-width:100em){html{font-size:137.5%}}@media only screen and (min-width:125em){html{font-size:150%}}@media only screen and (max-width:59.9375em){body[data-md-state=lock]{overflow:hidden}.ios body[data-md-state=lock] .md-container{display:none}html .md-nav__link[for=__toc]{display:block;padding-right:2.4rem}html .md-nav__link[for=__toc]:after{color:inherit;content:"\E8DE"}html .md-nav__link[for=__toc]+.md-nav__link{display:none}html .md-nav__link[for=__toc]~.md-nav{display:-webkit-box;display:flex}html [dir=rtl] .md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav__source{display:block;padding:0 .2rem;background-color:rgba(50,64,144,.9675);color:#fff}.md-search__overlay{position:absolute;top:.2rem;left:.2rem;width:1.8rem;height:1.8rem;-webkit-transform-origin:center;transform-origin:center;-webkit-transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:transform .3s .1s,opacity .2s .2s;transition:transform .3s .1s,opacity .2s .2s,-webkit-transform .3s .1s;border-radius:1rem;background-color:#fff;overflow:hidden;pointer-events:none}[dir=rtl] .md-search__overlay{right:.2rem;left:auto}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transition:opacity .1s,-webkit-transform .4s;transition:opacity .1s,-webkit-transform .4s;transition:transform .4s,opacity .1s;transition:transform .4s,opacity 
.1s,-webkit-transform .4s;opacity:1}.md-search__inner{position:fixed;top:0;left:100%;width:100%;height:100%;-webkit-transform:translateX(5%);transform:translateX(5%);-webkit-transition:right 0s .3s,left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:right 0s .3s,left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:right 0s .3s,left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;transition:right 0s .3s,left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;opacity:0;z-index:2}[data-md-toggle=search]:checked~.md-header .md-search__inner{left:0;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:right 0s 0s,left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:right 0s 0s,left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:right 0s 0s,left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;transition:right 0s 0s,left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;opacity:1}[dir=rtl] [data-md-toggle=search]:checked~.md-header .md-search__inner{right:0;left:auto}html [dir=rtl] .md-search__inner{right:100%;left:auto;-webkit-transform:translateX(-5%);transform:translateX(-5%)}.md-search__input{width:100%;height:2.4rem;font-size:.9rem}.md-search__icon[for=__search]{top:.6rem;left:.8rem}.md-search__icon[for=__search][for=__search]:before{content:"\E5C4"}[dir=rtl] .md-search__icon[for=__search][for=__search]:before{content:"\E5C8"}.md-search__icon[type=reset]{top:.6rem;right:.8rem}.md-search__output{top:2.4rem;bottom:0}.md-search-result__article--document:before{display:none}}@media only screen and (max-width:76.1875em){[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-header-nav__button.md-icon--home,.md-header-nav__button.md-logo{display:none}.md-hero__inner{margin-top:2.4rem;margin-bottom:1.2rem}.md-nav{background-color:#fff}.md-nav--primary,.md-nav--primary .md-nav{display:-webkit-box;display:flex;position:absolute;top:0;right:0;left:0;-webkit-box-orient:vertical;-webkit-box-direction:normal;flex-direction:column;height:100%;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:.8rem;line-height:1.5}html .md-nav--primary .md-nav__title{position:relative;height:5.6rem;padding:3rem .8rem .2rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-weight:400;line-height:2.4rem;white-space:nowrap;cursor:pointer}html .md-nav--primary .md-nav__title:before{display:block;position:absolute;top:.2rem;left:.2rem;width:2rem;height:2rem;color:rgba(0,0,0,.54)}html .md-nav--primary .md-nav__title~.md-nav__list{background-color:#fff;box-shadow:inset 0 .05rem 0 rgba(0,0,0,.07)}html .md-nav--primary .md-nav__title~.md-nav__list>.md-nav__item:first-child{border-top:0}html .md-nav--primary .md-nav__title--site{position:relative;background-color:#3f51b5;color:#fff}html .md-nav--primary .md-nav__title--site .md-nav__button{display:block;position:absolute;top:.2rem;left:.2rem;width:3.2rem;height:3.2rem;font-size:2.4rem}html .md-nav--primary .md-nav__title--site:before{display:none}html [dir=rtl] .md-nav--primary .md-nav__title--site .md-nav__button,html [dir=rtl] .md-nav--primary 
.md-nav__title:before{right:.2rem;left:auto}.md-nav--primary .md-nav__list{-webkit-box-flex:1;flex:1;overflow-y:auto}.md-nav--primary .md-nav__item{padding:0;border-top:.05rem solid rgba(0,0,0,.07)}[dir=rtl] .md-nav--primary .md-nav__item{padding:0}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:2.4rem}[dir=rtl] .md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"\E315"}[dir=rtl] .md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"\E314"}.md-nav--primary .md-nav__link{position:relative;margin-top:0;padding:.6rem .8rem}.md-nav--primary .md-nav__link:after{position:absolute;top:50%;right:.6rem;margin-top:-.6rem;color:inherit;font-size:1.2rem}[dir=rtl] .md-nav--primary .md-nav__link:after{right:auto;left:.6rem}.md-nav--primary .md-nav--secondary .md-nav__link{position:static}.md-nav--primary .md-nav--secondary .md-nav{position:static;background-color:transparent}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:1.4rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-right:1.4rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-right:2rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:2.6rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-right:2.6rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:3.2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-right:3.2rem;padding-left:0}.md-nav__toggle~.md-nav{display:-webkit-box;display:flex;-webkit-transform:translateX(100%);transform:translateX(100%);-webkit-transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s;transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);opacity:0}[dir=rtl] .md-nav__toggle~.md-nav{-webkit-transform:translateX(-100%);transform:translateX(-100%)}.no-csstransforms3d .md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1}.no-csstransforms3d .md-nav__toggle:checked~.md-nav{display:-webkit-box;display:flex}.md-sidebar--primary{position:fixed;top:0;left:-12.1rem;width:12.1rem;height:100%;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);background-color:#fff;z-index:3}[dir=rtl] 
.md-sidebar--primary{right:-12.1rem;left:auto}.no-csstransforms3d .md-sidebar--primary{display:none}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);-webkit-transform:translateX(12.1rem);transform:translateX(12.1rem)}[dir=rtl] [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{-webkit-transform:translateX(-12.1rem);transform:translateX(-12.1rem)}.no-csstransforms3d [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{display:block}.md-sidebar--primary .md-sidebar__scrollwrap{overflow:hidden;position:absolute;top:0;right:0;bottom:0;left:0;margin:0}.md-tabs{display:none}}@media only screen and (min-width:60em){.md-content{margin-right:12.1rem}[dir=rtl] .md-content{margin-right:0;margin-left:12.1rem}.md-header-nav__button.md-icon--search{display:none}.md-header-nav__source{display:block;width:11.7rem;max-width:11.7rem;padding-right:.6rem}[dir=rtl] .md-header-nav__source{padding-right:0;padding-left:.6rem}.md-search{padding:.2rem}.md-search__overlay{position:fixed;top:0;left:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);cursor:pointer}[dir=rtl] .md-search__overlay{right:0;left:auto}[data-md-toggle=search]:checked~.md-header .md-search__overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-search__inner{position:relative;width:11.5rem;margin-right:.8rem;padding:.1rem 0;float:right;-webkit-transition:width .25s cubic-bezier(.1,.7,.1,1);transition:width .25s cubic-bezier(.1,.7,.1,1)}[dir=rtl] .md-search__inner{margin-right:0;margin-left:.8rem;float:left}.md-search__form,.md-search__input{border-radius:.1rem}.md-search__input{width:100%;height:1.8rem;padding-left:2.2rem;-webkit-transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);background-color:rgba(0,0,0,.26);color:inherit;font-size:.8rem}[dir=rtl] .md-search__input{padding-right:2.2rem}.md-search__input+.md-search__icon{color:inherit}.md-search__input::-webkit-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::-moz-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:-ms-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::-ms-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:hover{background-color:hsla(0,0%,100%,.12)}[data-md-toggle=search]:checked~.md-header .md-search__input{border-radius:.1rem .1rem 0 0;background-color:#fff;color:rgba(0,0,0,.87);text-overflow:clip}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header 
.md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__output{top:1.9rem;-webkit-transition:opacity .4s;transition:opacity .4s;opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}.md-search__scrollwrap{max-height:0}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-search-result__meta{padding-left:2.2rem}[dir=rtl] .md-search-result__meta{padding-right:2.2rem;padding-left:0}.md-search-result__article{padding-left:2.2rem}[dir=rtl] .md-search-result__article{padding-right:2.2rem;padding-left:.8rem}.md-sidebar--secondary{display:block;margin-left:100%;-webkit-transform:translate(-100%);transform:translate(-100%)}[dir=rtl] .md-sidebar--secondary{margin-right:100%;margin-left:0;-webkit-transform:translate(100%);transform:translate(100%)}}@media only screen and (min-width:76.25em){.md-content{margin-left:12.1rem}[dir=rtl] .md-content{margin-right:12.1rem}.md-content__inner{margin-right:1.2rem;margin-left:1.2rem}.md-header-nav__button.md-icon--menu{display:none}.md-nav[data-md-state=animate]{-webkit-transition:max-height .25s cubic-bezier(.86,0,.07,1);transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav__toggle~.md-nav{max-height:0;overflow:hidden}.no-js .md-nav__toggle~.md-nav{display:none}.md-nav[data-md-state=expand],.md-nav__toggle:checked~.md-nav{max-height:100%}.no-js .md-nav[data-md-state=expand],.no-js .md-nav__toggle:checked~.md-nav{display:block}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--nested>.md-nav__link:after{display:inline-block;-webkit-transform-origin:.45em .45em;transform-origin:.45em .45em;-webkit-transform-style:preserve-3d;transform-style:preserve-3d;vertical-align:-.125em}.js .md-nav__item--nested>.md-nav__link:after{-webkit-transition:-webkit-transform .4s;transition:-webkit-transform .4s;transition:transform .4s;transition:transform .4s,-webkit-transform .4s}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link:after{-webkit-transform:rotateX(180deg);transform:rotateX(180deg)}.md-search__inner{margin-right:1.2rem}[dir=rtl] .md-search__inner{margin-left:1.2rem}.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:34.4rem}.md-sidebar--secondary{margin-left:61rem}[dir=rtl] .md-sidebar--secondary{margin-right:61rem;margin-left:0}.md-tabs~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{font-size:0;visibility:hidden}.md-tabs--active~.md-main .md-nav--primary .md-nav__title{display:block;padding:0}.md-tabs--active~.md-main .md-nav--primary .md-nav__title--site{display:none}.no-js .md-tabs--active~.md-main .md-nav--primary .md-nav{display:block}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item{font-size:0;visibility:hidden}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{display:none;font-size:.7rem;overflow:auto;visibility:visible}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested>.md-nav__link{display:none}.md-tabs--active~.md-main 
.md-nav--primary>.md-nav__list>.md-nav__item--active{display:block}.md-tabs--active~.md-main .md-nav[data-md-level="1"]{max-height:none;overflow:visible}.md-tabs--active~.md-main .md-nav[data-md-level="1"]>.md-nav__list>.md-nav__item{padding-left:0}.md-tabs--active~.md-main .md-nav[data-md-level="1"] .md-nav .md-nav__title{display:none}}@media only screen and (min-width:45em){.md-footer-nav__link{width:50%}.md-footer-copyright{max-width:75%;float:left}[dir=rtl] .md-footer-copyright{float:right}.md-footer-social{padding:.6rem 0;float:right}[dir=rtl] .md-footer-social{float:left}}@media only screen and (max-width:29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(45);transform:scale(45)}}@media only screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(60);transform:scale(60)}}@media only screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(75);transform:scale(75)}}@media only screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:23.4rem}.md-search-result__teaser{max-height:2.5rem;-webkit-line-clamp:3}}
\ No newline at end of file
diff --git a/assets/stylesheets/application.adb8469c.css b/assets/stylesheets/application.adb8469c.css
new file mode 100644
index 000000000..93b3dabad
--- /dev/null
+++ b/assets/stylesheets/application.adb8469c.css
@@ -0,0 +1 @@
+html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}html{-webkit-text-size-adjust:none;-moz-text-size-adjust:none;-ms-text-size-adjust:none;text-size-adjust:none}body{margin:0}hr{overflow:visible;box-sizing:content-box}a{-webkit-text-decoration-skip:objects}a,button,input,label{-webkit-tap-highlight-color:transparent}a{color:inherit;text-decoration:none}small,sub,sup{font-size:80%}sub,sup{position:relative;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}img{border-style:none}table{border-collapse:separate;border-spacing:0}td,th{font-weight:400;vertical-align:top}button{margin:0;padding:0;border:0;outline-style:none;background:transparent;font-size:inherit}input{border:0;outline:0}.md-clipboard:before,.md-icon,.md-nav__button,.md-nav__link:after,.md-nav__title:before,.md-search-result__article--document:before,.md-source-file:before,.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset .critic.comment:before,.md-typeset .footnote-backref,.md-typeset .task-list-control .task-list-indicator:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before,.md-typeset summary:after{font-family:Material Icons;font-style:normal;font-variant:normal;font-weight:400;line-height:1;text-transform:none;white-space:nowrap;word-wrap:normal;direction:ltr}.md-content__icon,.md-footer-nav__button,.md-header-nav__button,.md-nav__button,.md-nav__title:before,.md-search-result__article--document:before{display:inline-block;margin:.2rem;padding:.4rem;font-size:1.2rem;cursor:pointer}.md-icon--arrow-back:before{content:""}.md-icon--arrow-forward:before{content:""}.md-icon--menu:before{content:""}.md-icon--search:before{content:""}[dir=rtl] .md-icon--arrow-back:before{content:""}[dir=rtl]
.md-icon--arrow-forward:before{content:""}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}body,input{color:rgba(0,0,0,.87);font-feature-settings:"kern","liga";font-family:Helvetica Neue,Helvetica,Arial,sans-serif}code,kbd,pre{color:rgba(0,0,0,.87);font-feature-settings:"kern";font-family:Courier New,Courier,monospace}.md-typeset{font-size:.8rem;line-height:1.6;-webkit-print-color-adjust:exact}.md-typeset blockquote,.md-typeset ol,.md-typeset p,.md-typeset ul{margin:1em 0}.md-typeset h1{margin:0 0 2rem;color:rgba(0,0,0,.54);font-size:1.5625rem;line-height:1.3}.md-typeset h1,.md-typeset h2{font-weight:300;letter-spacing:-.01em}.md-typeset h2{margin:2rem 0 .8rem;font-size:1.25rem;line-height:1.4}.md-typeset h3{margin:1.6rem 0 .8rem;font-size:1rem;font-weight:400;letter-spacing:-.01em;line-height:1.5}.md-typeset h2+h3{margin-top:.8rem}.md-typeset h4{font-size:.8rem}.md-typeset h4,.md-typeset h5,.md-typeset h6{margin:.8rem 0;font-weight:700;letter-spacing:-.01em}.md-typeset h5,.md-typeset h6{color:rgba(0,0,0,.54);font-size:.64rem}.md-typeset h5{text-transform:uppercase}.md-typeset hr{margin:1.5em 0;border-bottom:.05rem dotted rgba(0,0,0,.26)}.md-typeset a{color:#3f51b5;word-break:break-word}.md-typeset a,.md-typeset a:before{-webkit-transition:color .125s;transition:color .125s}.md-typeset a:active,.md-typeset a:hover{color:#536dfe}.md-typeset code,.md-typeset pre{background-color:hsla(0,0%,92.5%,.5);color:#37474f;font-size:85%;direction:ltr}.md-typeset code{margin:0 .29412em;padding:.07353em 0;border-radius:.1rem;box-shadow:.29412em 0 0 hsla(0,0%,92.5%,.5),-.29412em 0 0 hsla(0,0%,92.5%,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset h1 code,.md-typeset h2 code,.md-typeset h3 code,.md-typeset h4 code,.md-typeset h5 code,.md-typeset h6 code{margin:0;background-color:transparent;box-shadow:none}.md-typeset a>code{margin:inherit;padding:inherit;border-radius:initial;background-color:inherit;color:inherit;box-shadow:none}.md-typeset pre{position:relative;margin:1em 0;border-radius:.1rem;line-height:1.4;-webkit-overflow-scrolling:touch}.md-typeset pre>code{display:block;margin:0;padding:.525rem .6rem;background-color:transparent;font-size:inherit;box-shadow:none;-webkit-box-decoration-break:slice;box-decoration-break:slice;overflow:auto}.md-typeset pre>code::-webkit-scrollbar{width:.2rem;height:.2rem}.md-typeset pre>code::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-typeset pre>code::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-typeset kbd{padding:0 .29412em;border-radius:.15rem;border:.05rem solid #c9c9c9;border-bottom-color:#bcbcbc;background-color:#fcfcfc;color:#555;font-size:85%;box-shadow:0 .05rem 0 #b0b0b0;word-break:break-word}.md-typeset mark{margin:0 .25em;padding:.0625em 0;border-radius:.1rem;background-color:rgba(255,235,59,.5);box-shadow:.25em 0 0 rgba(255,235,59,.5),-.25em 0 0 rgba(255,235,59,.5);word-break:break-word;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset abbr{border-bottom:.05rem dotted rgba(0,0,0,.54);text-decoration:none;cursor:help}.md-typeset small{opacity:.75}.md-typeset sub,.md-typeset sup{margin-left:.07812em}[dir=rtl] .md-typeset sub,[dir=rtl] .md-typeset sup{margin-right:.07812em;margin-left:0}.md-typeset blockquote{padding-left:.6rem;border-left:.2rem solid rgba(0,0,0,.26);color:rgba(0,0,0,.54)}[dir=rtl] .md-typeset blockquote{padding-right:.6rem;padding-left:0;border-right:.2rem solid rgba(0,0,0,.26);border-left:initial}.md-typeset 
ul{list-style-type:disc}.md-typeset ol,.md-typeset ul{margin-left:.625em;padding:0}[dir=rtl] .md-typeset ol,[dir=rtl] .md-typeset ul{margin-right:.625em;margin-left:0}.md-typeset ol ol,.md-typeset ul ol{list-style-type:lower-alpha}.md-typeset ol ol ol,.md-typeset ul ol ol{list-style-type:lower-roman}.md-typeset ol li,.md-typeset ul li{margin-bottom:.5em;margin-left:1.25em}[dir=rtl] .md-typeset ol li,[dir=rtl] .md-typeset ul li{margin-right:1.25em;margin-left:0}.md-typeset ol li blockquote,.md-typeset ol li p,.md-typeset ul li blockquote,.md-typeset ul li p{margin:.5em 0}.md-typeset ol li:last-child,.md-typeset ul li:last-child{margin-bottom:0}.md-typeset ol li ol,.md-typeset ol li ul,.md-typeset ul li ol,.md-typeset ul li ul{margin:.5em 0 .5em .625em}[dir=rtl] .md-typeset ol li ol,[dir=rtl] .md-typeset ol li ul,[dir=rtl] .md-typeset ul li ol,[dir=rtl] .md-typeset ul li ul{margin-right:.625em;margin-left:0}.md-typeset dd{margin:1em 0 1em 1.875em}[dir=rtl] .md-typeset dd{margin-right:1.875em;margin-left:0}.md-typeset iframe,.md-typeset img,.md-typeset svg{max-width:100%}.md-typeset table:not([class]){box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);display:inline-block;max-width:100%;border-radius:.1rem;font-size:.64rem;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset table:not([class])+*{margin-top:1.5em}.md-typeset table:not([class]) td:not([align]),.md-typeset table:not([class]) th:not([align]){text-align:left}[dir=rtl] .md-typeset table:not([class]) td:not([align]),[dir=rtl] .md-typeset table:not([class]) th:not([align]){text-align:right}.md-typeset table:not([class]) th{min-width:5rem;padding:.6rem .8rem;background-color:rgba(0,0,0,.54);color:#fff;vertical-align:top}.md-typeset table:not([class]) td{padding:.6rem .8rem;border-top:.05rem solid rgba(0,0,0,.07);vertical-align:top}.md-typeset table:not([class]) tr{-webkit-transition:background-color .125s;transition:background-color .125s}.md-typeset table:not([class]) tr:hover{background-color:rgba(0,0,0,.035);box-shadow:inset 0 .05rem 0 #fff}.md-typeset table:not([class]) tr:first-child td{border-top:0}.md-typeset table:not([class]) a{word-break:normal}.md-typeset__scrollwrap{margin:1em -.8rem;overflow-x:auto;-webkit-overflow-scrolling:touch}.md-typeset .md-typeset__table{display:inline-block;margin-bottom:.5em;padding:0 .8rem}.md-typeset .md-typeset__table table{display:table;width:100%;margin:0;overflow:hidden}html{font-size:125%;overflow-x:hidden}body,html{height:100%}body{position:relative;font-size:.5rem}hr{display:block;height:.05rem;padding:0;border:0}.md-svg{display:none}.md-grid{max-width:61rem;margin-right:auto;margin-left:auto}.md-container,.md-main{overflow:auto}.md-container{display:table;width:100%;height:100%;padding-top:2.4rem;table-layout:fixed}.md-main{display:table-row;height:100%}.md-main__inner{height:100%;padding-top:1.5rem;padding-bottom:.05rem}.md-toggle{display:none}.md-overlay{position:fixed;top:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);opacity:0;z-index:3}.md-flex{display:table}.md-flex__cell{display:table-cell;position:relative;vertical-align:top}.md-flex__cell--shrink{width:0}.md-flex__cell--stretch{display:table;width:100%;table-layout:fixed}.md-flex__ellipsis{display:table-cell;text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.md-skip{position:fixed;width:.05rem;height:.05rem;margin:.5rem;padding:.3rem 
.5rem;-webkit-transform:translateY(.4rem);transform:translateY(.4rem);border-radius:.1rem;background-color:rgba(0,0,0,.87);color:#fff;font-size:.64rem;opacity:0;overflow:hidden}.md-skip:focus{width:auto;height:auto;clip:auto;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1;z-index:10}@page{margin:25mm}.md-clipboard{position:absolute;top:.3rem;right:.3rem;width:1.4rem;height:1.4rem;border-radius:.1rem;font-size:.8rem;cursor:pointer;z-index:1;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-clipboard:before{-webkit-transition:color .25s,opacity .25s;transition:color .25s,opacity .25s;color:rgba(0,0,0,.07);content:"\E14D"}.codehilite:hover .md-clipboard:before,.md-typeset .highlight:hover .md-clipboard:before,pre:hover .md-clipboard:before{color:rgba(0,0,0,.54)}.md-clipboard:focus:before,.md-clipboard:hover:before{color:#536dfe}.md-clipboard__message{display:block;position:absolute;top:0;right:1.7rem;padding:.3rem .5rem;-webkit-transform:translateX(.4rem);transform:translateX(.4rem);-webkit-transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s;transition:transform .25s cubic-bezier(.9,.1,.9,0),opacity .175s,-webkit-transform .25s cubic-bezier(.9,.1,.9,0);border-radius:.1rem;background-color:rgba(0,0,0,.54);color:#fff;font-size:.64rem;white-space:nowrap;opacity:0;pointer-events:none}.md-clipboard__message--active{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .175s 75ms,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1;pointer-events:auto}.md-clipboard__message:before{content:attr(aria-label)}.md-clipboard__message:after{display:block;position:absolute;top:50%;right:-.2rem;width:0;margin-top:-.2rem;border-color:transparent rgba(0,0,0,.54);border-style:solid;border-width:.2rem 0 .2rem .2rem;content:""}.md-content__inner{margin:0 .8rem 1.2rem;padding-top:.6rem}.md-content__inner:before{display:block;height:.4rem;content:""}.md-content__inner>:last-child{margin-bottom:0}.md-content__icon{position:relative;margin:.4rem 0;padding:0;float:right}.md-typeset .md-content__icon{color:rgba(0,0,0,.26)}.md-header{position:fixed;top:0;right:0;left:0;height:2.4rem;-webkit-transition:background-color .25s,color .25s;transition:background-color .25s,color .25s;background-color:#3f51b5;color:#fff;box-shadow:none;z-index:2;-webkit-backface-visibility:hidden;backface-visibility:hidden}.no-js .md-header{-webkit-transition:none;transition:none;box-shadow:none}.md-header[data-md-state=shadow]{-webkit-transition:background-color .25s,color .25s,box-shadow .25s;transition:background-color .25s,color .25s,box-shadow .25s;box-shadow:0 0 .2rem rgba(0,0,0,.1),0 .2rem .4rem rgba(0,0,0,.2)}.md-header-nav{padding:0 .2rem}.md-header-nav__button{position:relative;-webkit-transition:opacity 
.25s;transition:opacity .25s;z-index:1}.md-header-nav__button:hover{opacity:.7}.md-header-nav__button.md-logo *{display:block}.no-js .md-header-nav__button.md-icon--search{display:none}.md-header-nav__topic{display:block;position:absolute;-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);text-overflow:ellipsis;white-space:nowrap;overflow:hidden}.md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(1.25rem);transform:translateX(1.25rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}[dir=rtl] .md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(-1.25rem);transform:translateX(-1.25rem)}.no-js .md-header-nav__topic{position:static}.no-js .md-header-nav__topic+.md-header-nav__topic{display:none}.md-header-nav__title{padding:0 1rem;font-size:.9rem;line-height:2.4rem}.md-header-nav__title[data-md-state=active] .md-header-nav__topic{-webkit-transform:translateX(-1.25rem);transform:translateX(-1.25rem);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s;transition:transform .4s cubic-bezier(1,.7,.1,.1),opacity .15s,-webkit-transform .4s cubic-bezier(1,.7,.1,.1);opacity:0;z-index:-1;pointer-events:none}[dir=rtl] .md-header-nav__title[data-md-state=active] .md-header-nav__topic{-webkit-transform:translateX(1.25rem);transform:translateX(1.25rem)}.md-header-nav__title[data-md-state=active] .md-header-nav__topic+.md-header-nav__topic{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);opacity:1;z-index:0;pointer-events:auto}.md-header-nav__source{display:none}.md-hero{-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;font-size:1rem;overflow:hidden}.md-hero__inner{margin-top:1rem;padding:.8rem .8rem .4rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);-webkit-transition-delay:.1s;transition-delay:.1s}[data-md-state=hidden] .md-hero__inner{pointer-events:none;-webkit-transform:translateY(.625rem);transform:translateY(.625rem);-webkit-transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:opacity .1s 0s,-webkit-transform 0s .4s;transition:transform 0s .4s,opacity .1s 0s;transition:transform 0s .4s,opacity .1s 0s,-webkit-transform 0s .4s;opacity:0}.md-hero--expand 
.md-hero__inner{margin-bottom:1.2rem}.md-footer-nav{background-color:rgba(0,0,0,.87);color:#fff}.md-footer-nav__inner{padding:.2rem;overflow:auto}.md-footer-nav__link{padding-top:1.4rem;padding-bottom:.4rem;-webkit-transition:opacity .25s;transition:opacity .25s}.md-footer-nav__link:hover{opacity:.7}.md-footer-nav__link--prev{width:25%;float:left}[dir=rtl] .md-footer-nav__link--prev{float:right}.md-footer-nav__link--next{width:75%;float:right;text-align:right}[dir=rtl] .md-footer-nav__link--next{float:left;text-align:left}.md-footer-nav__button{-webkit-transition:background .25s;transition:background .25s}.md-footer-nav__title{position:relative;padding:0 1rem;font-size:.9rem;line-height:2.4rem}.md-footer-nav__direction{position:absolute;right:0;left:0;margin-top:-1rem;padding:0 1rem;color:hsla(0,0%,100%,.7);font-size:.75rem}.md-footer-meta{background-color:rgba(0,0,0,.895)}.md-footer-meta__inner{padding:.2rem;overflow:auto}html .md-footer-meta.md-typeset a{color:hsla(0,0%,100%,.7)}html .md-footer-meta.md-typeset a:focus,html .md-footer-meta.md-typeset a:hover{color:#fff}.md-footer-copyright{margin:0 .6rem;padding:.4rem 0;color:hsla(0,0%,100%,.3);font-size:.64rem}.md-footer-copyright__highlight{color:hsla(0,0%,100%,.7)}.md-footer-social{margin:0 .4rem;padding:.2rem 0 .6rem}.md-footer-social__link{display:inline-block;width:1.6rem;height:1.6rem;font-size:.8rem;text-align:center}.md-footer-social__link:before{line-height:1.9}.md-nav{font-size:.7rem;line-height:1.3}.md-nav__title{display:block;padding:0 .6rem;font-weight:700;text-overflow:ellipsis;overflow:hidden}.md-nav__title:before{display:none;content:"\E5C4"}[dir=rtl] .md-nav__title:before{content:"\E5C8"}.md-nav__title .md-nav__button{display:none}.md-nav__list{margin:0;padding:0;list-style:none}.md-nav__item{padding:0 .6rem}.md-nav__item:last-child{padding-bottom:.6rem}.md-nav__item .md-nav__item{padding-right:0}[dir=rtl] .md-nav__item .md-nav__item{padding-right:.6rem;padding-left:0}.md-nav__item .md-nav__item:last-child{padding-bottom:0}.md-nav__button img{width:100%;height:auto}.md-nav__link{display:block;margin-top:.625em;-webkit-transition:color .125s;transition:color .125s;text-overflow:ellipsis;cursor:pointer;overflow:hidden}.md-nav__item--nested>.md-nav__link:after{content:"\E313"}html .md-nav__link[for=__toc],html .md-nav__link[for=__toc]+.md-nav__link:after,html .md-nav__link[for=__toc]~.md-nav{display:none}.md-nav__link[data-md-state=blur]{color:rgba(0,0,0,.54)}.md-nav__link--active,.md-nav__link:active{color:#3f51b5}.md-nav__item--nested>.md-nav__link{color:inherit}.md-nav__link:focus,.md-nav__link:hover{color:#536dfe}.md-nav__source,.no-js .md-search{display:none}.md-search__overlay{opacity:0;z-index:1}.md-search__form{position:relative}.md-search__input{position:relative;padding:0 2.2rem 0 3.6rem;text-overflow:ellipsis;z-index:2}[dir=rtl] .md-search__input{padding:0 3.6rem 0 2.2rem}.md-search__input::-webkit-input-placeholder{-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-moz-placeholder{-moz-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input:-ms-input-placeholder{-ms-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-ms-input-placeholder{-ms-transition:color .25s cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::placeholder{-webkit-transition:color .25s 
cubic-bezier(.1,.7,.1,1);transition:color .25s cubic-bezier(.1,.7,.1,1)}.md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}.md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}.md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}.md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}.md-search__input::placeholder,.md-search__input~.md-search__icon{color:rgba(0,0,0,.54)}.md-search__input::-ms-clear{display:none}.md-search__icon{position:absolute;-webkit-transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:color .25s cubic-bezier(.1,.7,.1,1),opacity .25s;font-size:1.2rem;cursor:pointer;z-index:2}.md-search__icon:hover{opacity:.7}.md-search__icon[for=__search]{top:.3rem;left:.5rem}[dir=rtl] .md-search__icon[for=__search]{right:.5rem;left:auto}.md-search__icon[for=__search]:before{content:"\E8B6"}.md-search__icon[type=reset]{top:.3rem;right:.5rem;-webkit-transform:scale(.125);transform:scale(.125);-webkit-transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s;transition:transform .15s cubic-bezier(.1,.7,.1,1),opacity .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1);opacity:0}[dir=rtl] .md-search__icon[type=reset]{right:auto;left:.5rem}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]{-webkit-transform:scale(1);transform:scale(1);opacity:1}[data-md-toggle=search]:checked~.md-header .md-search__input:valid~.md-search__icon[type=reset]:hover{opacity:.7}.md-search__output{position:absolute;width:100%;border-radius:0 0 .1rem .1rem;overflow:hidden;z-index:1}.md-search__scrollwrap{height:100%;background-color:#fff;box-shadow:inset 0 .05rem 0 rgba(0,0,0,.07);overflow-y:auto;-webkit-overflow-scrolling:touch}.md-search-result{color:rgba(0,0,0,.87);word-break:break-word}.md-search-result__meta{padding:0 .8rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-size:.64rem;line-height:1.8rem}.md-search-result__list{margin:0;padding:0;border-top:.05rem solid rgba(0,0,0,.07);list-style:none}.md-search-result__item{box-shadow:0 -.05rem 0 rgba(0,0,0,.07)}.md-search-result__link{display:block;-webkit-transition:background .25s;transition:background .25s;outline:0;overflow:hidden}.md-search-result__link:hover,.md-search-result__link[data-md-state=active]{background-color:rgba(83,109,254,.1)}.md-search-result__link:hover .md-search-result__article:before,.md-search-result__link[data-md-state=active] .md-search-result__article:before{opacity:.7}.md-search-result__link:last-child .md-search-result__teaser{margin-bottom:.6rem}.md-search-result__article{position:relative;padding:0 .8rem;overflow:auto}.md-search-result__article--document:before{position:absolute;left:0;margin:.1rem;-webkit-transition:opacity .25s;transition:opacity .25s;color:rgba(0,0,0,.54);content:"\E880"}[dir=rtl] .md-search-result__article--document:before{right:0;left:auto}.md-search-result__article--document .md-search-result__title{margin:.55rem 0;font-size:.8rem;font-weight:400;line-height:1.4}.md-search-result__title{margin:.5em 0;font-size:.64rem;font-weight:700;line-height:1.4}.md-search-result__teaser{display:-webkit-box;max-height:1.65rem;margin:.5em 0;color:rgba(0,0,0,.54);font-size:.64rem;line-height:1.4;text-overflow:ellipsis;overflow:hidden;-webkit-box-orient:vertical;-webkit-line-clamp:2}.md-search-result 
em{font-style:normal;font-weight:700;text-decoration:underline}.md-sidebar{position:absolute;width:12.1rem;padding:1.2rem 0;overflow:hidden}.md-sidebar[data-md-state=lock]{position:fixed;top:2.4rem}.md-sidebar--secondary{display:none}.md-sidebar__scrollwrap{max-height:100%;margin:0 .2rem;overflow-y:auto;-webkit-backface-visibility:hidden;backface-visibility:hidden}.md-sidebar__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-sidebar__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}@-webkit-keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@keyframes md-source__facts--done{0%{height:0}to{height:.65rem}}@-webkit-keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}@keyframes md-source__fact--done{0%{-webkit-transform:translateY(100%);transform:translateY(100%);opacity:0}50%{opacity:0}to{-webkit-transform:translateY(0);transform:translateY(0);opacity:1}}.md-source{display:block;padding-right:.6rem;-webkit-transition:opacity .25s;transition:opacity .25s;font-size:.65rem;line-height:1.2;white-space:nowrap}[dir=rtl] .md-source{padding-right:0;padding-left:.6rem}.md-source:hover{opacity:.7}.md-source:after,.md-source__icon{display:inline-block;height:2.4rem;content:"";vertical-align:middle}.md-source__icon{width:2.4rem}.md-source__icon svg{width:1.2rem;height:1.2rem;margin-top:.6rem;margin-left:.6rem}[dir=rtl] .md-source__icon svg{margin-right:.6rem;margin-left:0}.md-source__icon+.md-source__repository{margin-left:-2rem;padding-left:2rem}[dir=rtl] .md-source__icon+.md-source__repository{margin-right:-2rem;margin-left:0;padding-right:2rem;padding-left:0}.md-source__repository{display:inline-block;max-width:100%;margin-left:.6rem;font-weight:700;text-overflow:ellipsis;overflow:hidden;vertical-align:middle}.md-source__facts{margin:0;padding:0;font-size:.55rem;font-weight:700;list-style-type:none;opacity:.75;overflow:hidden}[data-md-state=done] .md-source__facts{-webkit-animation:md-source__facts--done .25s ease-in;animation:md-source__facts--done .25s ease-in}.md-source__fact{float:left}[dir=rtl] .md-source__fact{float:right}[data-md-state=done] .md-source__fact{-webkit-animation:md-source__fact--done .4s ease-out;animation:md-source__fact--done .4s ease-out}.md-source__fact:before{margin:0 .1rem;content:"\00B7"}.md-source__fact:first-child:before{display:none}.md-source-file{display:inline-block;margin:1em .5em 1em 0;padding-right:.25rem;border-radius:.1rem;background-color:rgba(0,0,0,.07);font-size:.64rem;list-style-type:none;cursor:pointer;overflow:hidden}.md-source-file:before{display:inline-block;margin-right:.25rem;padding:.25rem;background-color:rgba(0,0,0,.26);color:#fff;font-size:.8rem;content:"\E86F";vertical-align:middle}html .md-source-file{-webkit-transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1);transition:background .4s,color .4s,box-shadow .4s cubic-bezier(.4,0,.2,1)}html .md-source-file:before{-webkit-transition:inherit;transition:inherit}html body .md-typeset .md-source-file{color:rgba(0,0,0,.54)}.md-source-file:hover{box-shadow:0 0 8px rgba(0,0,0,.18),0 8px 16px rgba(0,0,0,.36)}.md-source-file:hover:before{background-color:#536dfe}.md-tabs{width:100%;-webkit-transition:background .25s;transition:background .25s;background-color:#3f51b5;color:#fff;overflow:auto}.md-tabs__list{margin:0 0 0 
.2rem;padding:0;list-style:none;white-space:nowrap}.md-tabs__item{display:inline-block;height:2.4rem;padding-right:.6rem;padding-left:.6rem}.md-tabs__link{display:block;margin-top:.8rem;-webkit-transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s;transition:transform .4s cubic-bezier(.1,.7,.1,1),opacity .25s,-webkit-transform .4s cubic-bezier(.1,.7,.1,1);font-size:.7rem;opacity:.7}.md-tabs__link--active,.md-tabs__link:hover{color:inherit;opacity:1}.md-tabs__item:nth-child(2) .md-tabs__link{-webkit-transition-delay:.02s;transition-delay:.02s}.md-tabs__item:nth-child(3) .md-tabs__link{-webkit-transition-delay:.04s;transition-delay:.04s}.md-tabs__item:nth-child(4) .md-tabs__link{-webkit-transition-delay:.06s;transition-delay:.06s}.md-tabs__item:nth-child(5) .md-tabs__link{-webkit-transition-delay:.08s;transition-delay:.08s}.md-tabs__item:nth-child(6) .md-tabs__link{-webkit-transition-delay:.1s;transition-delay:.1s}.md-tabs__item:nth-child(7) .md-tabs__link{-webkit-transition-delay:.12s;transition-delay:.12s}.md-tabs__item:nth-child(8) .md-tabs__link{-webkit-transition-delay:.14s;transition-delay:.14s}.md-tabs__item:nth-child(9) .md-tabs__link{-webkit-transition-delay:.16s;transition-delay:.16s}.md-tabs__item:nth-child(10) .md-tabs__link{-webkit-transition-delay:.18s;transition-delay:.18s}.md-tabs__item:nth-child(11) .md-tabs__link{-webkit-transition-delay:.2s;transition-delay:.2s}.md-tabs__item:nth-child(12) .md-tabs__link{-webkit-transition-delay:.22s;transition-delay:.22s}.md-tabs__item:nth-child(13) .md-tabs__link{-webkit-transition-delay:.24s;transition-delay:.24s}.md-tabs__item:nth-child(14) .md-tabs__link{-webkit-transition-delay:.26s;transition-delay:.26s}.md-tabs__item:nth-child(15) .md-tabs__link{-webkit-transition-delay:.28s;transition-delay:.28s}.md-tabs__item:nth-child(16) .md-tabs__link{-webkit-transition-delay:.3s;transition-delay:.3s}.md-tabs[data-md-state=hidden]{pointer-events:none}.md-tabs[data-md-state=hidden] .md-tabs__link{-webkit-transform:translateY(50%);transform:translateY(50%);-webkit-transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,opacity .1s,-webkit-transform 0s .4s;transition:color .25s,transform 0s .4s,opacity .1s;transition:color .25s,transform 0s .4s,opacity .1s,-webkit-transform 0s .4s;opacity:0}.md-typeset .admonition,.md-typeset details{box-shadow:0 2px 2px 0 rgba(0,0,0,.14),0 1px 5px 0 rgba(0,0,0,.12),0 3px 1px -2px rgba(0,0,0,.2);position:relative;margin:1.5625em 0;padding:0 .6rem;border-left:.2rem solid #448aff;border-radius:.1rem;font-size:.64rem;overflow:auto}[dir=rtl] .md-typeset .admonition,[dir=rtl] .md-typeset details{border-right:.2rem solid #448aff;border-left:none}html .md-typeset .admonition>:last-child,html .md-typeset details>:last-child{margin-bottom:.6rem}.md-typeset .admonition .admonition,.md-typeset .admonition details,.md-typeset details .admonition,.md-typeset details details{margin:1em 0}.md-typeset .admonition>.admonition-title,.md-typeset .admonition>summary,.md-typeset details>.admonition-title,.md-typeset details>summary{margin:0 -.6rem;padding:.4rem .6rem .4rem 2rem;border-bottom:.05rem solid rgba(68,138,255,.1);background-color:rgba(68,138,255,.1);font-weight:700}[dir=rtl] .md-typeset .admonition>.admonition-title,[dir=rtl] .md-typeset .admonition>summary,[dir=rtl] .md-typeset details>.admonition-title,[dir=rtl] .md-typeset 
details>summary{padding:.4rem 2rem .4rem .6rem}.md-typeset .admonition>.admonition-title:last-child,.md-typeset .admonition>summary:last-child,.md-typeset details>.admonition-title:last-child,.md-typeset details>summary:last-child{margin-bottom:0}.md-typeset .admonition>.admonition-title:before,.md-typeset .admonition>summary:before,.md-typeset details>.admonition-title:before,.md-typeset details>summary:before{position:absolute;left:.6rem;color:#448aff;font-size:1rem;content:"\E3C9"}[dir=rtl] .md-typeset .admonition>.admonition-title:before,[dir=rtl] .md-typeset .admonition>summary:before,[dir=rtl] .md-typeset details>.admonition-title:before,[dir=rtl] .md-typeset details>summary:before{right:.6rem;left:auto}.md-typeset .admonition.abstract,.md-typeset .admonition.summary,.md-typeset .admonition.tldr,.md-typeset details.abstract,.md-typeset details.summary,.md-typeset details.tldr{border-left-color:#00b0ff}[dir=rtl] .md-typeset .admonition.abstract,[dir=rtl] .md-typeset .admonition.summary,[dir=rtl] .md-typeset .admonition.tldr,[dir=rtl] .md-typeset details.abstract,[dir=rtl] .md-typeset details.summary,[dir=rtl] .md-typeset details.tldr{border-right-color:#00b0ff}.md-typeset .admonition.abstract>.admonition-title,.md-typeset .admonition.abstract>summary,.md-typeset .admonition.summary>.admonition-title,.md-typeset .admonition.summary>summary,.md-typeset .admonition.tldr>.admonition-title,.md-typeset .admonition.tldr>summary,.md-typeset details.abstract>.admonition-title,.md-typeset details.abstract>summary,.md-typeset details.summary>.admonition-title,.md-typeset details.summary>summary,.md-typeset details.tldr>.admonition-title,.md-typeset details.tldr>summary{border-bottom-color:rgba(0,176,255,.1);background-color:rgba(0,176,255,.1)}.md-typeset .admonition.abstract>.admonition-title:before,.md-typeset .admonition.abstract>summary:before,.md-typeset .admonition.summary>.admonition-title:before,.md-typeset .admonition.summary>summary:before,.md-typeset .admonition.tldr>.admonition-title:before,.md-typeset .admonition.tldr>summary:before,.md-typeset details.abstract>.admonition-title:before,.md-typeset details.abstract>summary:before,.md-typeset details.summary>.admonition-title:before,.md-typeset details.summary>summary:before,.md-typeset details.tldr>.admonition-title:before,.md-typeset details.tldr>summary:before{color:#00b0ff;content:""}.md-typeset .admonition.info,.md-typeset .admonition.todo,.md-typeset details.info,.md-typeset details.todo{border-left-color:#00b8d4}[dir=rtl] .md-typeset .admonition.info,[dir=rtl] .md-typeset .admonition.todo,[dir=rtl] .md-typeset details.info,[dir=rtl] .md-typeset details.todo{border-right-color:#00b8d4}.md-typeset .admonition.info>.admonition-title,.md-typeset .admonition.info>summary,.md-typeset .admonition.todo>.admonition-title,.md-typeset .admonition.todo>summary,.md-typeset details.info>.admonition-title,.md-typeset details.info>summary,.md-typeset details.todo>.admonition-title,.md-typeset details.todo>summary{border-bottom-color:rgba(0,184,212,.1);background-color:rgba(0,184,212,.1)}.md-typeset .admonition.info>.admonition-title:before,.md-typeset .admonition.info>summary:before,.md-typeset .admonition.todo>.admonition-title:before,.md-typeset .admonition.todo>summary:before,.md-typeset details.info>.admonition-title:before,.md-typeset details.info>summary:before,.md-typeset details.todo>.admonition-title:before,.md-typeset details.todo>summary:before{color:#00b8d4;content:""}.md-typeset .admonition.hint,.md-typeset 
.admonition.important,.md-typeset .admonition.tip,.md-typeset details.hint,.md-typeset details.important,.md-typeset details.tip{border-left-color:#00bfa5}[dir=rtl] .md-typeset .admonition.hint,[dir=rtl] .md-typeset .admonition.important,[dir=rtl] .md-typeset .admonition.tip,[dir=rtl] .md-typeset details.hint,[dir=rtl] .md-typeset details.important,[dir=rtl] .md-typeset details.tip{border-right-color:#00bfa5}.md-typeset .admonition.hint>.admonition-title,.md-typeset .admonition.hint>summary,.md-typeset .admonition.important>.admonition-title,.md-typeset .admonition.important>summary,.md-typeset .admonition.tip>.admonition-title,.md-typeset .admonition.tip>summary,.md-typeset details.hint>.admonition-title,.md-typeset details.hint>summary,.md-typeset details.important>.admonition-title,.md-typeset details.important>summary,.md-typeset details.tip>.admonition-title,.md-typeset details.tip>summary{border-bottom-color:rgba(0,191,165,.1);background-color:rgba(0,191,165,.1)}.md-typeset .admonition.hint>.admonition-title:before,.md-typeset .admonition.hint>summary:before,.md-typeset .admonition.important>.admonition-title:before,.md-typeset .admonition.important>summary:before,.md-typeset .admonition.tip>.admonition-title:before,.md-typeset .admonition.tip>summary:before,.md-typeset details.hint>.admonition-title:before,.md-typeset details.hint>summary:before,.md-typeset details.important>.admonition-title:before,.md-typeset details.important>summary:before,.md-typeset details.tip>.admonition-title:before,.md-typeset details.tip>summary:before{color:#00bfa5;content:""}.md-typeset .admonition.check,.md-typeset .admonition.done,.md-typeset .admonition.success,.md-typeset details.check,.md-typeset details.done,.md-typeset details.success{border-left-color:#00c853}[dir=rtl] .md-typeset .admonition.check,[dir=rtl] .md-typeset .admonition.done,[dir=rtl] .md-typeset .admonition.success,[dir=rtl] .md-typeset details.check,[dir=rtl] .md-typeset details.done,[dir=rtl] .md-typeset details.success{border-right-color:#00c853}.md-typeset .admonition.check>.admonition-title,.md-typeset .admonition.check>summary,.md-typeset .admonition.done>.admonition-title,.md-typeset .admonition.done>summary,.md-typeset .admonition.success>.admonition-title,.md-typeset .admonition.success>summary,.md-typeset details.check>.admonition-title,.md-typeset details.check>summary,.md-typeset details.done>.admonition-title,.md-typeset details.done>summary,.md-typeset details.success>.admonition-title,.md-typeset details.success>summary{border-bottom-color:rgba(0,200,83,.1);background-color:rgba(0,200,83,.1)}.md-typeset .admonition.check>.admonition-title:before,.md-typeset .admonition.check>summary:before,.md-typeset .admonition.done>.admonition-title:before,.md-typeset .admonition.done>summary:before,.md-typeset .admonition.success>.admonition-title:before,.md-typeset .admonition.success>summary:before,.md-typeset details.check>.admonition-title:before,.md-typeset details.check>summary:before,.md-typeset details.done>.admonition-title:before,.md-typeset details.done>summary:before,.md-typeset details.success>.admonition-title:before,.md-typeset details.success>summary:before{color:#00c853;content:""}.md-typeset .admonition.faq,.md-typeset .admonition.help,.md-typeset .admonition.question,.md-typeset details.faq,.md-typeset details.help,.md-typeset details.question{border-left-color:#64dd17}[dir=rtl] .md-typeset .admonition.faq,[dir=rtl] .md-typeset .admonition.help,[dir=rtl] .md-typeset .admonition.question,[dir=rtl] .md-typeset 
details.faq,[dir=rtl] .md-typeset details.help,[dir=rtl] .md-typeset details.question{border-right-color:#64dd17}.md-typeset .admonition.faq>.admonition-title,.md-typeset .admonition.faq>summary,.md-typeset .admonition.help>.admonition-title,.md-typeset .admonition.help>summary,.md-typeset .admonition.question>.admonition-title,.md-typeset .admonition.question>summary,.md-typeset details.faq>.admonition-title,.md-typeset details.faq>summary,.md-typeset details.help>.admonition-title,.md-typeset details.help>summary,.md-typeset details.question>.admonition-title,.md-typeset details.question>summary{border-bottom-color:rgba(100,221,23,.1);background-color:rgba(100,221,23,.1)}.md-typeset .admonition.faq>.admonition-title:before,.md-typeset .admonition.faq>summary:before,.md-typeset .admonition.help>.admonition-title:before,.md-typeset .admonition.help>summary:before,.md-typeset .admonition.question>.admonition-title:before,.md-typeset .admonition.question>summary:before,.md-typeset details.faq>.admonition-title:before,.md-typeset details.faq>summary:before,.md-typeset details.help>.admonition-title:before,.md-typeset details.help>summary:before,.md-typeset details.question>.admonition-title:before,.md-typeset details.question>summary:before{color:#64dd17;content:""}.md-typeset .admonition.attention,.md-typeset .admonition.caution,.md-typeset .admonition.warning,.md-typeset details.attention,.md-typeset details.caution,.md-typeset details.warning{border-left-color:#ff9100}[dir=rtl] .md-typeset .admonition.attention,[dir=rtl] .md-typeset .admonition.caution,[dir=rtl] .md-typeset .admonition.warning,[dir=rtl] .md-typeset details.attention,[dir=rtl] .md-typeset details.caution,[dir=rtl] .md-typeset details.warning{border-right-color:#ff9100}.md-typeset .admonition.attention>.admonition-title,.md-typeset .admonition.attention>summary,.md-typeset .admonition.caution>.admonition-title,.md-typeset .admonition.caution>summary,.md-typeset .admonition.warning>.admonition-title,.md-typeset .admonition.warning>summary,.md-typeset details.attention>.admonition-title,.md-typeset details.attention>summary,.md-typeset details.caution>.admonition-title,.md-typeset details.caution>summary,.md-typeset details.warning>.admonition-title,.md-typeset details.warning>summary{border-bottom-color:rgba(255,145,0,.1);background-color:rgba(255,145,0,.1)}.md-typeset .admonition.attention>.admonition-title:before,.md-typeset .admonition.attention>summary:before,.md-typeset .admonition.caution>.admonition-title:before,.md-typeset .admonition.caution>summary:before,.md-typeset .admonition.warning>.admonition-title:before,.md-typeset .admonition.warning>summary:before,.md-typeset details.attention>.admonition-title:before,.md-typeset details.attention>summary:before,.md-typeset details.caution>.admonition-title:before,.md-typeset details.caution>summary:before,.md-typeset details.warning>.admonition-title:before,.md-typeset details.warning>summary:before{color:#ff9100;content:""}.md-typeset .admonition.fail,.md-typeset .admonition.failure,.md-typeset .admonition.missing,.md-typeset details.fail,.md-typeset details.failure,.md-typeset details.missing{border-left-color:#ff5252}[dir=rtl] .md-typeset .admonition.fail,[dir=rtl] .md-typeset .admonition.failure,[dir=rtl] .md-typeset .admonition.missing,[dir=rtl] .md-typeset details.fail,[dir=rtl] .md-typeset details.failure,[dir=rtl] .md-typeset details.missing{border-right-color:#ff5252}.md-typeset .admonition.fail>.admonition-title,.md-typeset .admonition.fail>summary,.md-typeset 
.admonition.failure>.admonition-title,.md-typeset .admonition.failure>summary,.md-typeset .admonition.missing>.admonition-title,.md-typeset .admonition.missing>summary,.md-typeset details.fail>.admonition-title,.md-typeset details.fail>summary,.md-typeset details.failure>.admonition-title,.md-typeset details.failure>summary,.md-typeset details.missing>.admonition-title,.md-typeset details.missing>summary{border-bottom-color:rgba(255,82,82,.1);background-color:rgba(255,82,82,.1)}.md-typeset .admonition.fail>.admonition-title:before,.md-typeset .admonition.fail>summary:before,.md-typeset .admonition.failure>.admonition-title:before,.md-typeset .admonition.failure>summary:before,.md-typeset .admonition.missing>.admonition-title:before,.md-typeset .admonition.missing>summary:before,.md-typeset details.fail>.admonition-title:before,.md-typeset details.fail>summary:before,.md-typeset details.failure>.admonition-title:before,.md-typeset details.failure>summary:before,.md-typeset details.missing>.admonition-title:before,.md-typeset details.missing>summary:before{color:#ff5252;content:""}.md-typeset .admonition.danger,.md-typeset .admonition.error,.md-typeset details.danger,.md-typeset details.error{border-left-color:#ff1744}[dir=rtl] .md-typeset .admonition.danger,[dir=rtl] .md-typeset .admonition.error,[dir=rtl] .md-typeset details.danger,[dir=rtl] .md-typeset details.error{border-right-color:#ff1744}.md-typeset .admonition.danger>.admonition-title,.md-typeset .admonition.danger>summary,.md-typeset .admonition.error>.admonition-title,.md-typeset .admonition.error>summary,.md-typeset details.danger>.admonition-title,.md-typeset details.danger>summary,.md-typeset details.error>.admonition-title,.md-typeset details.error>summary{border-bottom-color:rgba(255,23,68,.1);background-color:rgba(255,23,68,.1)}.md-typeset .admonition.danger>.admonition-title:before,.md-typeset .admonition.danger>summary:before,.md-typeset .admonition.error>.admonition-title:before,.md-typeset .admonition.error>summary:before,.md-typeset details.danger>.admonition-title:before,.md-typeset details.danger>summary:before,.md-typeset details.error>.admonition-title:before,.md-typeset details.error>summary:before{color:#ff1744;content:""}.md-typeset .admonition.bug,.md-typeset details.bug{border-left-color:#f50057}[dir=rtl] .md-typeset .admonition.bug,[dir=rtl] .md-typeset details.bug{border-right-color:#f50057}.md-typeset .admonition.bug>.admonition-title,.md-typeset .admonition.bug>summary,.md-typeset details.bug>.admonition-title,.md-typeset details.bug>summary{border-bottom-color:rgba(245,0,87,.1);background-color:rgba(245,0,87,.1)}.md-typeset .admonition.bug>.admonition-title:before,.md-typeset .admonition.bug>summary:before,.md-typeset details.bug>.admonition-title:before,.md-typeset details.bug>summary:before{color:#f50057;content:""}.md-typeset .admonition.example,.md-typeset details.example{border-left-color:#651fff}[dir=rtl] .md-typeset .admonition.example,[dir=rtl] .md-typeset details.example{border-right-color:#651fff}.md-typeset .admonition.example>.admonition-title,.md-typeset .admonition.example>summary,.md-typeset details.example>.admonition-title,.md-typeset details.example>summary{border-bottom-color:rgba(101,31,255,.1);background-color:rgba(101,31,255,.1)}.md-typeset .admonition.example>.admonition-title:before,.md-typeset .admonition.example>summary:before,.md-typeset details.example>.admonition-title:before,.md-typeset details.example>summary:before{color:#651fff;content:""}.md-typeset 
.admonition.cite,.md-typeset .admonition.quote,.md-typeset details.cite,.md-typeset details.quote{border-left-color:#9e9e9e}[dir=rtl] .md-typeset .admonition.cite,[dir=rtl] .md-typeset .admonition.quote,[dir=rtl] .md-typeset details.cite,[dir=rtl] .md-typeset details.quote{border-right-color:#9e9e9e}.md-typeset .admonition.cite>.admonition-title,.md-typeset .admonition.cite>summary,.md-typeset .admonition.quote>.admonition-title,.md-typeset .admonition.quote>summary,.md-typeset details.cite>.admonition-title,.md-typeset details.cite>summary,.md-typeset details.quote>.admonition-title,.md-typeset details.quote>summary{border-bottom-color:hsla(0,0%,62%,.1);background-color:hsla(0,0%,62%,.1)}.md-typeset .admonition.cite>.admonition-title:before,.md-typeset .admonition.cite>summary:before,.md-typeset .admonition.quote>.admonition-title:before,.md-typeset .admonition.quote>summary:before,.md-typeset details.cite>.admonition-title:before,.md-typeset details.cite>summary:before,.md-typeset details.quote>.admonition-title:before,.md-typeset details.quote>summary:before{color:#9e9e9e;content:""}.codehilite .o,.codehilite .ow,.md-typeset .highlight .o,.md-typeset .highlight .ow{color:inherit}.codehilite .ge,.md-typeset .highlight .ge{color:#000}.codehilite .gr,.md-typeset .highlight .gr{color:#a00}.codehilite .gh,.md-typeset .highlight .gh{color:#999}.codehilite .go,.md-typeset .highlight .go{color:#888}.codehilite .gp,.md-typeset .highlight .gp{color:#555}.codehilite .gs,.md-typeset .highlight .gs{color:inherit}.codehilite .gu,.md-typeset .highlight .gu{color:#aaa}.codehilite .gt,.md-typeset .highlight .gt{color:#a00}.codehilite .gd,.md-typeset .highlight .gd{background-color:#fdd}.codehilite .gi,.md-typeset .highlight .gi{background-color:#dfd}.codehilite .k,.md-typeset .highlight .k{color:#3b78e7}.codehilite .kc,.md-typeset .highlight .kc{color:#a71d5d}.codehilite .kd,.codehilite .kn,.md-typeset .highlight .kd,.md-typeset .highlight .kn{color:#3b78e7}.codehilite .kp,.md-typeset .highlight .kp{color:#a71d5d}.codehilite .kr,.codehilite .kt,.md-typeset .highlight .kr,.md-typeset .highlight .kt{color:#3e61a2}.codehilite .c,.codehilite .cm,.md-typeset .highlight .c,.md-typeset .highlight .cm{color:#999}.codehilite .cp,.md-typeset .highlight .cp{color:#666}.codehilite .c1,.codehilite .ch,.codehilite .cs,.md-typeset .highlight .c1,.md-typeset .highlight .ch,.md-typeset .highlight .cs{color:#999}.codehilite .na,.codehilite .nb,.md-typeset .highlight .na,.md-typeset .highlight .nb{color:#c2185b}.codehilite .bp,.md-typeset .highlight .bp{color:#3e61a2}.codehilite .nc,.md-typeset .highlight .nc{color:#c2185b}.codehilite .no,.md-typeset .highlight .no{color:#3e61a2}.codehilite .nd,.codehilite .ni,.md-typeset .highlight .nd,.md-typeset .highlight .ni{color:#666}.codehilite .ne,.codehilite .nf,.md-typeset .highlight .ne,.md-typeset .highlight .nf{color:#c2185b}.codehilite .nl,.md-typeset .highlight .nl{color:#3b5179}.codehilite .nn,.md-typeset .highlight .nn{color:#ec407a}.codehilite .nt,.md-typeset .highlight .nt{color:#3b78e7}.codehilite .nv,.codehilite .vc,.codehilite .vg,.codehilite .vi,.md-typeset .highlight .nv,.md-typeset .highlight .vc,.md-typeset .highlight .vg,.md-typeset .highlight .vi{color:#3e61a2}.codehilite .nx,.md-typeset .highlight .nx{color:#ec407a}.codehilite .il,.codehilite .m,.codehilite .mf,.codehilite .mh,.codehilite .mi,.codehilite .mo,.md-typeset .highlight .il,.md-typeset .highlight .m,.md-typeset .highlight .mf,.md-typeset .highlight .mh,.md-typeset .highlight .mi,.md-typeset 
.highlight .mo{color:#e74c3c}.codehilite .s,.codehilite .sb,.codehilite .sc,.md-typeset .highlight .s,.md-typeset .highlight .sb,.md-typeset .highlight .sc{color:#0d904f}.codehilite .sd,.md-typeset .highlight .sd{color:#999}.codehilite .s2,.md-typeset .highlight .s2{color:#0d904f}.codehilite .se,.codehilite .sh,.codehilite .si,.codehilite .sx,.md-typeset .highlight .se,.md-typeset .highlight .sh,.md-typeset .highlight .si,.md-typeset .highlight .sx{color:#183691}.codehilite .sr,.md-typeset .highlight .sr{color:#009926}.codehilite .s1,.codehilite .ss,.md-typeset .highlight .s1,.md-typeset .highlight .ss{color:#0d904f}.codehilite .err,.md-typeset .highlight .err{color:#a61717}.codehilite .w,.md-typeset .highlight .w{color:transparent}.codehilite .hll,.md-typeset .highlight .hll{display:block;margin:0 -.6rem;padding:0 .6rem;background-color:rgba(255,235,59,.5)}.md-typeset .codehilitetable,.md-typeset .highlighttable{display:block;overflow:hidden}.md-typeset .codehilitetable tbody,.md-typeset .codehilitetable td,.md-typeset .highlighttable tbody,.md-typeset .highlighttable td{display:block;padding:0}.md-typeset .codehilitetable tr,.md-typeset .highlighttable tr{display:-webkit-box;display:flex}.md-typeset .codehilitetable .linenodiv,.md-typeset .codehilitetable pre,.md-typeset .highlighttable .linenodiv,.md-typeset .highlighttable pre{margin:0;border-radius:0}.md-typeset .codehilitetable .linenodiv,.md-typeset .highlighttable .linenodiv{padding:.525rem .6rem}.md-typeset .codehilitetable .linenos,.md-typeset .highlighttable .linenos{background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.26);-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.md-typeset .codehilitetable .linenos pre,.md-typeset .highlighttable .linenos pre{background-color:transparent;color:inherit;text-align:right}.md-typeset .codehilitetable .code,.md-typeset .highlighttable .code{-webkit-box-flex:1;flex:1;overflow:hidden}.md-typeset>.codehilitetable,.md-typeset>.highlighttable{margin:1em 0;border-radius:.2em}.md-typeset [id^="fnref:"]{display:inline-block}.md-typeset [id^="fnref:"]:target{margin-top:-3.8rem;padding-top:3.8rem;pointer-events:none}.md-typeset [id^="fn:"]:before{display:none;height:0;content:""}.md-typeset [id^="fn:"]:target:before{display:block;margin-top:-3.5rem;padding-top:3.5rem;pointer-events:none}.md-typeset .footnote{color:rgba(0,0,0,.54);font-size:.64rem}.md-typeset .footnote ol{margin-left:0}.md-typeset .footnote li{-webkit-transition:color .25s;transition:color .25s}.md-typeset .footnote li:target{color:rgba(0,0,0,.87)}.md-typeset .footnote li :first-child{margin-top:0}.md-typeset .footnote li:hover .footnote-backref,.md-typeset .footnote li:target .footnote-backref{-webkit-transform:translateX(0);transform:translateX(0);opacity:1}.md-typeset .footnote li:hover .footnote-backref:hover,.md-typeset .footnote li:target .footnote-backref{color:#536dfe}.md-typeset .footnote-ref{display:inline-block;pointer-events:auto}.md-typeset .footnote-ref:before{display:inline;margin:0 .2em;border-left:.05rem solid rgba(0,0,0,.26);font-size:1.25em;content:"";vertical-align:-.25rem}.md-typeset .footnote-backref{display:inline-block;-webkit-transform:translateX(.25rem);transform:translateX(.25rem);-webkit-transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:color .25s,opacity .125s .125s,-webkit-transform .25s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s;transition:transform .25s .125s,color .25s,opacity .125s .125s,-webkit-transform .25s 
.125s;color:rgba(0,0,0,.26);font-size:0;opacity:0;vertical-align:text-bottom}[dir=rtl] .md-typeset .footnote-backref{-webkit-transform:translateX(-.25rem);transform:translateX(-.25rem)}.md-typeset .footnote-backref:before{display:inline-block;font-size:.8rem;content:"\E31B"}[dir=rtl] .md-typeset .footnote-backref:before{-webkit-transform:scaleX(-1);transform:scaleX(-1)}.md-typeset .headerlink{display:inline-block;margin-left:.5rem;-webkit-transform:translateY(.25rem);transform:translateY(.25rem);-webkit-transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:color .25s,opacity .125s .25s,-webkit-transform .25s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s;transition:transform .25s .25s,color .25s,opacity .125s .25s,-webkit-transform .25s .25s;opacity:0}[dir=rtl] .md-typeset .headerlink{margin-right:.5rem;margin-left:0}html body .md-typeset .headerlink{color:rgba(0,0,0,.26)}.md-typeset h1[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h1[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h1[id] .headerlink:focus,.md-typeset h1[id]:hover .headerlink,.md-typeset h1[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h1[id] .headerlink:focus,.md-typeset h1[id]:hover .headerlink:hover,.md-typeset h1[id]:target .headerlink{color:#536dfe}.md-typeset h2[id]:before{display:block;margin-top:-8px;padding-top:8px;content:""}.md-typeset h2[id]:target:before{margin-top:-3.4rem;padding-top:3.4rem}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink,.md-typeset h2[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h2[id] .headerlink:focus,.md-typeset h2[id]:hover .headerlink:hover,.md-typeset h2[id]:target .headerlink{color:#536dfe}.md-typeset h3[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h3[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink,.md-typeset h3[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h3[id] .headerlink:focus,.md-typeset h3[id]:hover .headerlink:hover,.md-typeset h3[id]:target .headerlink{color:#536dfe}.md-typeset h4[id]:before{display:block;margin-top:-9px;padding-top:9px;content:""}.md-typeset h4[id]:target:before{margin-top:-3.45rem;padding-top:3.45rem}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink,.md-typeset h4[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h4[id] .headerlink:focus,.md-typeset h4[id]:hover .headerlink:hover,.md-typeset h4[id]:target .headerlink{color:#536dfe}.md-typeset h5[id]:before{display:block;margin-top:-11px;padding-top:11px;content:""}.md-typeset h5[id]:target:before{margin-top:-3.55rem;padding-top:3.55rem}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink,.md-typeset h5[id]:target .headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h5[id] .headerlink:focus,.md-typeset h5[id]:hover .headerlink:hover,.md-typeset h5[id]:target .headerlink{color:#536dfe}.md-typeset h6[id]:before{display:block;margin-top:-11px;padding-top:11px;content:""}.md-typeset h6[id]:target:before{margin-top:-3.55rem;padding-top:3.55rem}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink,.md-typeset h6[id]:target 
.headerlink{-webkit-transform:translate(0);transform:translate(0);opacity:1}.md-typeset h6[id] .headerlink:focus,.md-typeset h6[id]:hover .headerlink:hover,.md-typeset h6[id]:target .headerlink{color:#536dfe}.md-typeset .MJXc-display{margin:.75em 0;padding:.75em 0;overflow:auto;-webkit-overflow-scrolling:touch}.md-typeset .MathJax_CHTML{outline:0}.md-typeset .critic.comment,.md-typeset del.critic,.md-typeset ins.critic{margin:0 .25em;padding:.0625em 0;border-radius:.1rem;-webkit-box-decoration-break:clone;box-decoration-break:clone}.md-typeset del.critic{background-color:#fdd;box-shadow:.25em 0 0 #fdd,-.25em 0 0 #fdd}.md-typeset ins.critic{background-color:#dfd;box-shadow:.25em 0 0 #dfd,-.25em 0 0 #dfd}.md-typeset .critic.comment{background-color:hsla(0,0%,92.5%,.5);color:#37474f;box-shadow:.25em 0 0 hsla(0,0%,92.5%,.5),-.25em 0 0 hsla(0,0%,92.5%,.5)}.md-typeset .critic.comment:before{padding-right:.125em;color:rgba(0,0,0,.26);content:"\E0B7";vertical-align:-.125em}.md-typeset .critic.block{display:block;margin:1em 0;padding-right:.8rem;padding-left:.8rem;box-shadow:none}.md-typeset .critic.block :first-child{margin-top:.5em}.md-typeset .critic.block :last-child{margin-bottom:.5em}.md-typeset details{display:block;padding-top:0}.md-typeset details[open]>summary:after{-webkit-transform:rotate(180deg);transform:rotate(180deg)}.md-typeset details:not([open]){padding-bottom:0}.md-typeset details:not([open])>summary{border-bottom:none}.md-typeset details summary{padding-right:2rem}[dir=rtl] .md-typeset details summary{padding-left:2rem}.no-details .md-typeset details:not([open])>*{display:none}.no-details .md-typeset details:not([open]) summary{display:block}.md-typeset summary{display:block;outline:none;cursor:pointer}.md-typeset summary::-webkit-details-marker{display:none}.md-typeset summary:after{position:absolute;top:.4rem;right:.6rem;color:rgba(0,0,0,.26);font-size:1rem;content:"\E313"}[dir=rtl] .md-typeset summary:after{right:auto;left:.6rem}.md-typeset .emojione,.md-typeset .gemoji,.md-typeset .twemoji{width:1rem;vertical-align:text-top}.md-typeset code.codehilite,.md-typeset code.highlight{margin:0 .29412em;padding:.07353em 0}.md-typeset .superfences-content{display:none;-webkit-box-ordinal-group:100;order:99;width:100%;background-color:#fff}.md-typeset .superfences-content pre{margin:0;border-radius:0}.md-typeset .superfences-tabs{display:-webkit-box;display:flex;position:relative;flex-wrap:wrap;margin:1em 0;border:.05rem solid rgba(0,0,0,.07);border-radius:.2em}.md-typeset .superfences-tabs>input{display:none}.md-typeset .superfences-tabs>input:checked+label{font-weight:700}.md-typeset .superfences-tabs>input:checked+label+.superfences-content{display:block}.md-typeset .superfences-tabs>label{width:auto;padding:.6rem;-webkit-transition:color .125s;transition:color .125s;font-size:.64rem;cursor:pointer}html .md-typeset .superfences-tabs>label:hover{color:#536dfe}.md-typeset .task-list-item{position:relative;list-style-type:none}.md-typeset .task-list-item [type=checkbox]{position:absolute;top:.45em;left:-2em}[dir=rtl] .md-typeset .task-list-item [type=checkbox]{right:-2em;left:auto}.md-typeset .task-list-control .task-list-indicator:before{position:absolute;top:.15em;left:-1.25em;color:rgba(0,0,0,.26);font-size:1.25em;content:"\E835";vertical-align:-.25em}[dir=rtl] .md-typeset .task-list-control .task-list-indicator:before{right:-1.25em;left:auto}.md-typeset .task-list-control [type=checkbox]:checked+.task-list-indicator:before{content:"\E834"}.md-typeset .task-list-control 
[type=checkbox]{opacity:0;z-index:-1}@media print{.md-typeset a:after{color:rgba(0,0,0,.54);content:" [" attr(href) "]"}.md-typeset code,.md-typeset pre{white-space:pre-wrap}.md-typeset code{box-shadow:none;-webkit-box-decoration-break:initial;box-decoration-break:slice}.md-clipboard,.md-content__icon,.md-footer,.md-header,.md-sidebar,.md-tabs,.md-typeset .headerlink{display:none}}@media only screen and (max-width:44.9375em){.md-typeset>pre{margin:1em -.8rem;border-radius:0}.md-typeset>pre>code{padding:.525rem .8rem}.md-footer-nav__link--prev .md-footer-nav__title{display:none}.md-search-result__teaser{max-height:2.5rem;-webkit-line-clamp:3}.codehilite .hll,.md-typeset .highlight .hll{margin:0 -.8rem;padding:0 .8rem}.md-typeset>.codehilite,.md-typeset>.highlight{margin:1em -.8rem}.md-typeset>.codehilite code,.md-typeset>.highlight code{padding:.525rem .8rem}.md-typeset>.codehilitetable,.md-typeset>.highlighttable{margin:1em -.8rem;border-radius:0}.md-typeset>.codehilitetable .linenodiv,.md-typeset>.highlighttable .linenodiv{padding:.5rem .8rem}.md-typeset>p>.MJXc-display{margin:.75em -.8rem;padding:.25em .8rem}.md-typeset>.superfences-tabs{margin:1em -.8rem;border:0;border-top:.05rem solid rgba(0,0,0,.07);border-radius:0}.md-typeset>.superfences-tabs code{padding:.525rem .8rem}.md-typeset>.superfences-tabs input:first-child+label{margin-left:.2rem}}@media only screen and (min-width:100em){html{font-size:137.5%}}@media only screen and (min-width:125em){html{font-size:150%}}@media only screen and (max-width:59.9375em){body[data-md-state=lock]{overflow:hidden}.ios body[data-md-state=lock] .md-container{display:none}html .md-nav__link[for=__toc]{display:block;padding-right:2.4rem}html .md-nav__link[for=__toc]:after{color:inherit;content:"\E8DE"}html .md-nav__link[for=__toc]+.md-nav__link{display:none}html .md-nav__link[for=__toc]~.md-nav{display:-webkit-box;display:flex}html [dir=rtl] .md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav__source{display:block;padding:0 .2rem;background-color:rgba(50,64,144,.9675);color:#fff}.md-search__overlay{position:absolute;top:.2rem;left:.2rem;width:1.8rem;height:1.8rem;-webkit-transform-origin:center;transform-origin:center;-webkit-transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:opacity .2s .2s,-webkit-transform .3s .1s;transition:transform .3s .1s,opacity .2s .2s;transition:transform .3s .1s,opacity .2s .2s,-webkit-transform .3s .1s;border-radius:1rem;background-color:#fff;overflow:hidden;pointer-events:none}[dir=rtl] .md-search__overlay{right:.2rem;left:auto}[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transition:opacity .1s,-webkit-transform .4s;transition:opacity .1s,-webkit-transform .4s;transition:transform .4s,opacity .1s;transition:transform .4s,opacity .1s,-webkit-transform .4s;opacity:1}.md-search__inner{position:fixed;top:0;left:100%;width:100%;height:100%;-webkit-transform:translateX(5%);transform:translateX(5%);-webkit-transition:right 0s .3s,left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:right 0s .3s,left 0s .3s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;transition:right 0s .3s,left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s;transition:right 0s .3s,left 0s .3s,transform .15s cubic-bezier(.4,0,.2,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.4,0,.2,1) .15s;opacity:0;z-index:2}[data-md-toggle=search]:checked~.md-header 
.md-search__inner{left:0;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:right 0s 0s,left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:right 0s 0s,left 0s 0s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;transition:right 0s 0s,left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s;transition:right 0s 0s,left 0s 0s,transform .15s cubic-bezier(.1,.7,.1,1) .15s,opacity .15s .15s,-webkit-transform .15s cubic-bezier(.1,.7,.1,1) .15s;opacity:1}[dir=rtl] [data-md-toggle=search]:checked~.md-header .md-search__inner{right:0;left:auto}html [dir=rtl] .md-search__inner{right:100%;left:auto;-webkit-transform:translateX(-5%);transform:translateX(-5%)}.md-search__input{width:100%;height:2.4rem;font-size:.9rem}.md-search__icon[for=__search]{top:.6rem;left:.8rem}.md-search__icon[for=__search][for=__search]:before{content:"\E5C4"}[dir=rtl] .md-search__icon[for=__search][for=__search]:before{content:"\E5C8"}.md-search__icon[type=reset]{top:.6rem;right:.8rem}.md-search__output{top:2.4rem;bottom:0}.md-search-result__article--document:before{display:none}}@media only screen and (max-width:76.1875em){[data-md-toggle=drawer]:checked~.md-overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-header-nav__button.md-icon--home,.md-header-nav__button.md-logo{display:none}.md-hero__inner{margin-top:2.4rem;margin-bottom:1.2rem}.md-nav{background-color:#fff}.md-nav--primary,.md-nav--primary .md-nav{display:-webkit-box;display:flex;position:absolute;top:0;right:0;left:0;-webkit-box-orient:vertical;-webkit-box-direction:normal;flex-direction:column;height:100%;z-index:1}.md-nav--primary .md-nav__item,.md-nav--primary .md-nav__title{font-size:.8rem;line-height:1.5}html .md-nav--primary .md-nav__title{position:relative;height:5.6rem;padding:3rem .8rem .2rem;background-color:rgba(0,0,0,.07);color:rgba(0,0,0,.54);font-weight:400;line-height:2.4rem;white-space:nowrap;cursor:pointer}html .md-nav--primary .md-nav__title:before{display:block;position:absolute;top:.2rem;left:.2rem;width:2rem;height:2rem;color:rgba(0,0,0,.54)}html .md-nav--primary .md-nav__title~.md-nav__list{background-color:#fff;box-shadow:inset 0 .05rem 0 rgba(0,0,0,.07)}html .md-nav--primary .md-nav__title~.md-nav__list>.md-nav__item:first-child{border-top:0}html .md-nav--primary .md-nav__title--site{position:relative;background-color:#3f51b5;color:#fff}html .md-nav--primary .md-nav__title--site .md-nav__button{display:block;position:absolute;top:.2rem;left:.2rem;width:3.2rem;height:3.2rem;font-size:2.4rem}html .md-nav--primary .md-nav__title--site:before{display:none}html [dir=rtl] .md-nav--primary .md-nav__title--site .md-nav__button,html [dir=rtl] .md-nav--primary .md-nav__title:before{right:.2rem;left:auto}.md-nav--primary .md-nav__list{-webkit-box-flex:1;flex:1;overflow-y:auto}.md-nav--primary .md-nav__item{padding:0;border-top:.05rem solid rgba(0,0,0,.07)}[dir=rtl] .md-nav--primary .md-nav__item{padding:0}.md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:2.4rem}[dir=rtl] .md-nav--primary .md-nav__item--nested>.md-nav__link{padding-right:.8rem;padding-left:2.4rem}.md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"\E315"}[dir=rtl] .md-nav--primary .md-nav__item--nested>.md-nav__link:after{content:"\E314"}.md-nav--primary .md-nav__link{position:relative;margin-top:0;padding:.6rem .8rem}.md-nav--primary 
.md-nav__link:after{position:absolute;top:50%;right:.6rem;margin-top:-.6rem;color:inherit;font-size:1.2rem}[dir=rtl] .md-nav--primary .md-nav__link:after{right:auto;left:.6rem}.md-nav--primary .md-nav--secondary .md-nav__link{position:static}.md-nav--primary .md-nav--secondary .md-nav{position:static;background-color:transparent}.md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-left:1.4rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav__link{padding-right:1.4rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-left:2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav__link{padding-right:2rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-left:2.6rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav__link{padding-right:2.6rem;padding-left:0}.md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-left:3.2rem}[dir=rtl] .md-nav--primary .md-nav--secondary .md-nav .md-nav .md-nav .md-nav .md-nav__link{padding-right:3.2rem;padding-left:0}.md-nav__toggle~.md-nav{display:-webkit-box;display:flex;-webkit-transform:translateX(100%);transform:translateX(100%);-webkit-transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s;transition:transform .25s cubic-bezier(.8,0,.6,1),opacity .125s .05s,-webkit-transform .25s cubic-bezier(.8,0,.6,1);opacity:0}[dir=rtl] .md-nav__toggle~.md-nav{-webkit-transform:translateX(-100%);transform:translateX(-100%)}.no-csstransforms3d .md-nav__toggle~.md-nav{display:none}.md-nav__toggle:checked~.md-nav{-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s;transition:transform .25s cubic-bezier(.4,0,.2,1),opacity .125s .125s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);opacity:1}.no-csstransforms3d .md-nav__toggle:checked~.md-nav{display:-webkit-box;display:flex}.md-sidebar--primary{position:fixed;top:0;left:-12.1rem;width:12.1rem;height:100%;-webkit-transform:translateX(0);transform:translateX(0);-webkit-transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s;transition:transform .25s cubic-bezier(.4,0,.2,1),box-shadow .25s,-webkit-transform .25s cubic-bezier(.4,0,.2,1);background-color:#fff;z-index:3}[dir=rtl] .md-sidebar--primary{right:-12.1rem;left:auto}.no-csstransforms3d .md-sidebar--primary{display:none}[data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{box-shadow:0 8px 10px 1px rgba(0,0,0,.14),0 3px 14px 2px rgba(0,0,0,.12),0 5px 5px -3px rgba(0,0,0,.4);-webkit-transform:translateX(12.1rem);transform:translateX(12.1rem)}[dir=rtl] [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{-webkit-transform:translateX(-12.1rem);transform:translateX(-12.1rem)}.no-csstransforms3d [data-md-toggle=drawer]:checked~.md-container .md-sidebar--primary{display:block}.md-sidebar--primary 
.md-sidebar__scrollwrap{overflow:hidden;position:absolute;top:0;right:0;bottom:0;left:0;margin:0}.md-tabs{display:none}}@media only screen and (min-width:60em){.md-content{margin-right:12.1rem}[dir=rtl] .md-content{margin-right:0;margin-left:12.1rem}.md-header-nav__button.md-icon--search{display:none}.md-header-nav__source{display:block;width:11.7rem;max-width:11.7rem;padding-right:.6rem}[dir=rtl] .md-header-nav__source{padding-right:0;padding-left:.6rem}.md-search{padding:.2rem}.md-search__overlay{position:fixed;top:0;left:0;width:0;height:0;-webkit-transition:width 0s .25s,height 0s .25s,opacity .25s;transition:width 0s .25s,height 0s .25s,opacity .25s;background-color:rgba(0,0,0,.54);cursor:pointer}[dir=rtl] .md-search__overlay{right:0;left:auto}[data-md-toggle=search]:checked~.md-header .md-search__overlay{width:100%;height:100%;-webkit-transition:width 0s,height 0s,opacity .25s;transition:width 0s,height 0s,opacity .25s;opacity:1}.md-search__inner{position:relative;width:11.5rem;margin-right:.8rem;padding:.1rem 0;float:right;-webkit-transition:width .25s cubic-bezier(.1,.7,.1,1);transition:width .25s cubic-bezier(.1,.7,.1,1)}[dir=rtl] .md-search__inner{margin-right:0;margin-left:.8rem;float:left}.md-search__form,.md-search__input{border-radius:.1rem}.md-search__input{width:100%;height:1.8rem;padding-left:2.2rem;-webkit-transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);transition:background-color .25s cubic-bezier(.1,.7,.1,1),color .25s cubic-bezier(.1,.7,.1,1);background-color:rgba(0,0,0,.26);color:inherit;font-size:.8rem}[dir=rtl] .md-search__input{padding-right:2.2rem}.md-search__input+.md-search__icon{color:inherit}.md-search__input::-webkit-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::-moz-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:-ms-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::-ms-input-placeholder{color:hsla(0,0%,100%,.7)}.md-search__input::placeholder{color:hsla(0,0%,100%,.7)}.md-search__input:hover{background-color:hsla(0,0%,100%,.12)}[data-md-toggle=search]:checked~.md-header .md-search__input{border-radius:.1rem .1rem 0 0;background-color:#fff;color:rgba(0,0,0,.87);text-overflow:clip}[data-md-toggle=search]:checked~.md-header .md-search__input::-webkit-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input::-moz-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input:-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input::-ms-input-placeholder{color:rgba(0,0,0,.54)}[data-md-toggle=search]:checked~.md-header .md-search__input+.md-search__icon,[data-md-toggle=search]:checked~.md-header .md-search__input::placeholder{color:rgba(0,0,0,.54)}.md-search__output{top:1.9rem;-webkit-transition:opacity .4s;transition:opacity .4s;opacity:0}[data-md-toggle=search]:checked~.md-header .md-search__output{box-shadow:0 6px 10px 0 rgba(0,0,0,.14),0 1px 18px 0 rgba(0,0,0,.12),0 3px 5px -1px rgba(0,0,0,.4);opacity:1}.md-search__scrollwrap{max-height:0}[data-md-toggle=search]:checked~.md-header .md-search__scrollwrap{max-height:75vh}.md-search__scrollwrap::-webkit-scrollbar{width:.2rem;height:.2rem}.md-search__scrollwrap::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.26)}.md-search__scrollwrap::-webkit-scrollbar-thumb:hover{background-color:#536dfe}.md-search-result__meta{padding-left:2.2rem}[dir=rtl] 
.md-search-result__meta{padding-right:2.2rem;padding-left:0}.md-search-result__article{padding-left:2.2rem}[dir=rtl] .md-search-result__article{padding-right:2.2rem;padding-left:.8rem}.md-sidebar--secondary{display:block;margin-left:100%;-webkit-transform:translate(-100%);transform:translate(-100%)}[dir=rtl] .md-sidebar--secondary{margin-right:100%;margin-left:0;-webkit-transform:translate(100%);transform:translate(100%)}}@media only screen and (min-width:76.25em){.md-content{margin-left:12.1rem}[dir=rtl] .md-content{margin-right:12.1rem}.md-content__inner{margin-right:1.2rem;margin-left:1.2rem}.md-header-nav__button.md-icon--menu{display:none}.md-nav[data-md-state=animate]{-webkit-transition:max-height .25s cubic-bezier(.86,0,.07,1);transition:max-height .25s cubic-bezier(.86,0,.07,1)}.md-nav__toggle~.md-nav{max-height:0;overflow:hidden}.no-js .md-nav__toggle~.md-nav{display:none}.md-nav[data-md-state=expand],.md-nav__toggle:checked~.md-nav{max-height:100%}.no-js .md-nav[data-md-state=expand],.no-js .md-nav__toggle:checked~.md-nav{display:block}.md-nav__item--nested>.md-nav>.md-nav__title{display:none}.md-nav__item--nested>.md-nav__link:after{display:inline-block;-webkit-transform-origin:.45em .45em;transform-origin:.45em .45em;-webkit-transform-style:preserve-3d;transform-style:preserve-3d;vertical-align:-.125em}.js .md-nav__item--nested>.md-nav__link:after{-webkit-transition:-webkit-transform .4s;transition:-webkit-transform .4s;transition:transform .4s;transition:transform .4s,-webkit-transform .4s}.md-nav__item--nested .md-nav__toggle:checked~.md-nav__link:after{-webkit-transform:rotateX(180deg);transform:rotateX(180deg)}.md-search__inner{margin-right:1.2rem}[dir=rtl] .md-search__inner{margin-left:1.2rem}.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:34.4rem}.md-sidebar--secondary{margin-left:61rem}[dir=rtl] .md-sidebar--secondary{margin-right:61rem;margin-left:0}.md-tabs~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{font-size:0;visibility:hidden}.md-tabs--active~.md-main .md-nav--primary .md-nav__title{display:block;padding:0}.md-tabs--active~.md-main .md-nav--primary .md-nav__title--site{display:none}.no-js .md-tabs--active~.md-main .md-nav--primary .md-nav{display:block}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item{font-size:0;visibility:hidden}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested{display:none;font-size:.7rem;overflow:auto;visibility:visible}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--nested>.md-nav__link{display:none}.md-tabs--active~.md-main .md-nav--primary>.md-nav__list>.md-nav__item--active{display:block}.md-tabs--active~.md-main .md-nav[data-md-level="1"]{max-height:none;overflow:visible}.md-tabs--active~.md-main .md-nav[data-md-level="1"]>.md-nav__list>.md-nav__item{padding-left:0}.md-tabs--active~.md-main .md-nav[data-md-level="1"] .md-nav .md-nav__title{display:none}}@media only screen and (min-width:45em){.md-footer-nav__link{width:50%}.md-footer-copyright{max-width:75%;float:left}[dir=rtl] .md-footer-copyright{float:right}.md-footer-social{padding:.6rem 0;float:right}[dir=rtl] .md-footer-social{float:left}}@media only screen and (max-width:29.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(45);transform:scale(45)}}@media only screen and (min-width:30em) and (max-width:44.9375em){[data-md-toggle=search]:checked~.md-header 
.md-search__overlay{-webkit-transform:scale(60);transform:scale(60)}}@media only screen and (min-width:45em) and (max-width:59.9375em){[data-md-toggle=search]:checked~.md-header .md-search__overlay{-webkit-transform:scale(75);transform:scale(75)}}@media only screen and (min-width:60em) and (max-width:76.1875em){.md-search__scrollwrap,[data-md-toggle=search]:checked~.md-header .md-search__inner{width:23.4rem}.md-search-result__teaser{max-height:2.5rem;-webkit-line-clamp:3}} \ No newline at end of file diff --git a/deploy/baremetal/index.html b/deploy/baremetal/index.html index 18be98272..47d063873 100644 --- a/deploy/baremetal/index.html +++ b/deploy/baremetal/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1263,7 +1263,7 @@ Kubernetes cluster running on bare-metal. supported cloud provider, effectively allowing the usage of LoadBalancer Services within any cluster. This section demonstrates how to use the Layer 2 configuration mode of MetalLB together with the NGINX Ingress controller in a Kubernetes cluster that has publicly accessible nodes. In this mode, one node attracts all -the traffic for the ingress-nginx Service IP. See Traffic policies for more details. +the traffic for the ingress-nginx Service IP. See Traffic policies for more details. Note @@ -1276,22 +1276,22 @@ yourself by reading the official documentation thoroughly. MetalLB can be deployed either with a simple Kubernetes manifest or with Helm. The rest of this example assumes MetalLB was deployed following the Installation instructions. -MetalLB requires a pool of IP addresses in order to be able to take ownership of the ingress-nginx Service. This pool -can be defined in a ConfigMap named config located in the same namespace as the MetalLB controller. This pool of IPs must be dedicated to MetalLB's use, you can't reuse the Kubernetes node IPs or IPs handed out by a DHCP server. +MetalLB requires a pool of IP addresses in order to be able to take ownership of the ingress-nginx Service. This pool +can be defined in a ConfigMap named config located in the same namespace as the MetalLB controller. This pool of IPs must be dedicated to MetalLB's use, you can't reuse the Kubernetes node IPs or IPs handed out by a DHCP server. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <None>) -$ kubectl get node +$ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 - + After creating the following ConfigMap, MetalLB takes ownership of one of the IP addresses in the pool and updates -the loadBalancer IP field of the ingress-nginx Service accordingly. -apiVersion: v1 +the loadBalancer IP field of the ingress-nginx Service accordingly. 
+apiVersion: v1 kind: ConfigMap metadata: namespace: metallb-system @@ -1303,26 +1303,26 @@ the loadBalancer IP field of the ingress-nginx protocol: layer2 addresses: - 203.0.113.10-203.0.113.15 - + -$ kubectl -n ingress-nginx get svc +$ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 <none> 80/TCP ingress-nginx LoadBalancer 10.0.220.217 203.0.113.10 80:30100/TCP,443:30101/TCP - + -As soon as MetalLB sets the external IP address of the ingress-nginx LoadBalancer Service, the corresponding entries +As soon as MetalLB sets the external IP address of the ingress-nginx LoadBalancer Service, the corresponding entries are created in the iptables NAT table and the node with the selected IP address starts responding to HTTP requests on the ports configured in the LoadBalancer Service: -$ curl -D- http://203.0.113.3 -H 'Host: myapp.example.com' +$ curl -D- http://203.0.113.3 -H 'Host: myapp.example.com' HTTP/1.1 200 OK Server: nginx/1.15.2 - + Tip -In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the Local +In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the Local traffic policy. Traffic policies are described in more details in Traffic policies as well as in the next section. @@ -1331,42 +1331,42 @@ well as in the next section. installation guide. Info -A Service of type NodePort exposes, via the kube-proxy component, the same unprivileged port (default: +A Service of type NodePort exposes, via the kube-proxy component, the same unprivileged port (default: 30000-32767) on every Kubernetes node, masters included. For more information, see Services. In this configuration, the NGINX container remains isolated from the host network. As a result, it can safely bind to any port, including the standard HTTP ports 80 and 443. However, due to the container namespace isolation, a client located outside the cluster network (e.g. on the public internet) is not able to access Ingress hosts directly on ports -80 and 443. Instead, the external client must append the NodePort allocated to the ingress-nginx Service to HTTP +80 and 443. Instead, the external client must append the NodePort allocated to the ingress-nginx Service to HTTP requests. Example -Given the NodePort 30100 allocated to the ingress-nginx Service -$ kubectl -n ingress-nginx get svc +Given the NodePort 30100 allocated to the ingress-nginx Service +$ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP - + -and a Kubernetes node with the public IP address 203.0.113.2 (the external IP is added as an example, in most +and a Kubernetes node with the public IP address 203.0.113.2 (the external IP is added as an example, in most bare-metal environments this value is <None>) -$ kubectl get node +$ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 - + -a client would reach an Ingress with host: myapp.example.com at http://myapp.example.com:30100, where the +a client would reach an Ingress with host: myapp.example.com at http://myapp.example.com:30100, where the myapp.example.com subdomain resolves to the 203.0.113.2 IP address. 
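As a quick illustration, assuming an Ingress and a backend for that host already exist, the same request can be issued from outside the cluster against the node IP directly, with the Host header set by hand (the address and port are the example values above, and the response shown is what one would expect rather than captured output):
$ curl -D- http://203.0.113.2:30100 -H 'Host: myapp.example.com'
HTTP/1.1 200 OK
Server: nginx/1.15.2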
Impact on the host system -While it may sound tempting to reconfigure the NodePort range using the --service-node-port-range API server flag +While it may sound tempting to reconfigure the NodePort range using the --service-node-port-range API server flag to include unprivileged ports and be able to expose ports 80 and 443, doing so may result in unexpected issues including (but not limited to) the use of ports otherwise reserved to system daemons and the necessity to grant -kube-proxy privileges it may otherwise not require. +kube-proxy privileges it may otherwise not require. This practice is therefore discouraged. See the other approaches proposed in this page for alternatives. This approach has a few other limitations one ought to be aware of: @@ -1376,8 +1376,8 @@ including (but not limited to) the use of ports otherwise reserved to system dae Services of type NodePort perform source address translation by default. This means the source IP of a HTTP request is always the IP address of the Kubernetes node that received the request from the perspective of NGINX. -The recommended way to preserve the source IP in a NodePort setup is to set the value of the externalTrafficPolicy -field of the ingress-nginx Service spec to Local (example). +The recommended way to preserve the source IP in a NodePort setup is to set the value of the externalTrafficPolicy +field of the ingress-nginx Service spec to Local (example). Warning This setting effectively drops packets sent to Kubernetes nodes which are not running any instance of the NGINX @@ -1388,40 +1388,40 @@ the NGINX Ingress controller should be scheduled or not scheduled. Example In a Kubernetes cluster composed of 3 nodes (the external IP is added as an example, in most bare-metal environments this value is <None>) -$ kubectl get node +$ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 - + -with a nginx-ingress-controller Deployment composed of 2 replicas -$ kubectl -n ingress-nginx get pod -o wide +with a nginx-ingress-controller Deployment composed of 2 replicas +$ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-cf9ff8c96-8vvf8 1/1 Running 172.17.0.3 host-3 nginx-ingress-controller-cf9ff8c96-pxsds 1/1 Running 172.17.1.4 host-2 - + -Requests sent to host-2 and host-3 would be forwarded to NGINX and original client's IP would be preserved, -while requests to host-1 would get dropped because there is no NGINX replica running on that node. +Requests sent to host-2 and host-3 would be forwarded to NGINX and original client's IP would be preserved, +while requests to host-1 would get dropped because there is no NGINX replica running on that node. Ingress status Because NodePort Services do not get a LoadBalancerIP assigned by definition, the NGINX Ingress controller does not update the status of Ingress objects it manages. -$ kubectl get ingress +$ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 - + Despite the fact there is no load balancer providing a public IP address to the NGINX Ingress controller, it is possible -to force the status update of all managed Ingress objects by setting the externalIPs field of the ingress-nginx +to force the status update of all managed Ingress objects by setting the externalIPs field of the ingress-nginx Service. 
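A minimal way to apply this, sketched here with the example namespace, Service name and node addresses used throughout this page (adjust all three to your environment), is a merge patch on the Service:
$ kubectl -n ingress-nginx patch svc ingress-nginx \
    --type merge \
    -p '{"spec":{"externalIPs":["203.0.113.1","203.0.113.2","203.0.113.3"]}}'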
Warning -There is more to setting externalIPs than just enabling the NGINX Ingress controller to update the status of +There is more to setting externalIPs than just enabling the NGINX Ingress controller to update the status of Ingress objects. Please read about this option in the Services page of official Kubernetes documentation as well as the section about External IPs in this document for more information. @@ -1429,26 +1429,26 @@ documentation as well as the section about External IPs< Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <None>) -$ kubectl get node +$ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 - + -one could edit the ingress-nginx Service and add the following field to the object spec -spec: +one could edit the ingress-nginx Service and add the following field to the object spec +spec: externalIPs: - 203.0.113.1 - 203.0.113.2 - 203.0.113.3 - + which would in turn be reflected on Ingress objects as follows: -$ kubectl get ingress -o wide +$ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.1,203.0.113.2,203.0.113.3 80 - + @@ -1458,30 +1458,30 @@ environments this value is <None>) for generating redirect URLs that take into account the URL used by external clients, including the NodePort. Example -Redirects generated by NGINX, for instance HTTP to HTTPS or domain to www.domain, are generated without +Redirects generated by NGINX, for instance HTTP to HTTPS or domain to www.domain, are generated without NodePort: -$ curl -D- http://myapp.example.com:30100` +$ curl -D- http://myapp.example.com:30100` HTTP/1.1 308 Permanent Redirect Server: nginx/1.15.2 Location: https://myapp.example.com/ #-> missing NodePort in HTTPS redirect - + Via the host network ¶ In a setup where there is no external load balancer available but using NodePorts is not an option, one can configure -ingress-nginx Pods to use the network of the host they run on instead of a dedicated network namespace. The benefit of +ingress-nginx Pods to use the network of the host they run on instead of a dedicated network namespace. The benefit of this approach is that the NGINX Ingress controller can bind ports 80 and 443 directly to Kubernetes nodes' network interfaces, without the extra network translation imposed by NodePort Services. Note -This approach does not leverage any Service object to expose the NGINX Ingress controller. If the ingress-nginx +This approach does not leverage any Service object to expose the NGINX Ingress controller. If the ingress-nginx Service exists in the target cluster, it is recommended to delete it. -This can be achieved by enabling the hostNetwork option in the Pods' spec. -template: +This can be achieved by enabling the hostNetwork option in the Pods' spec. +template: spec: hostNetwork: true - + Security considerations @@ -1490,26 +1490,26 @@ including the host's loopback. Please evaluate the impact this may have on the s Example -Consider this nginx-ingress-controller Deployment composed of 2 replicas, NGINX Pods inherit from the IP address +Consider this nginx-ingress-controller Deployment composed of 2 replicas, NGINX Pods inherit from the IP address of their host instead of an internal Pod IP. 
-$ kubectl -n ingress-nginx get pod -o wide +$ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 - + One major limitation of this deployment approach is that only a single NGINX Ingress controller Pod may be scheduled on each cluster node, because binding the same port multiple times on the same network interface is technically impossible. Pods that are unschedulable due to such situation fail with the following event: -$ kubectl -n ingress-nginx describe pod <unschedulable-nginx-ingress-controller-pod> +$ kubectl -n ingress-nginx describe pod <unschedulable-nginx-ingress-controller-pod> ... Events: Type Reason From Message ---- ------ ---- ------- Warning FailedScheduling default-scheduler 0/3 nodes are available: 3 node(s) didn't have free ports for the requested pod ports. - + One way to ensure only schedulable Pods are created is to deploy the NGINX Ingress controller as a DaemonSet instead of a traditional Deployment. @@ -1526,43 +1526,43 @@ configuration of the corresponding manifest at the user's discretion. DNS resolution Pods configured with hostNetwork: true do not use the internal DNS resolver (i.e. kube-dns or CoreDNS), unless -their dnsPolicy spec field is set to ClusterFirstWithHostNet. Consider using this setting if NGINX is +their dnsPolicy spec field is set to ClusterFirstWithHostNet. Consider using this setting if NGINX is expected to resolve internal names for any reason. Ingress status Because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default ---publish-service flag used in standard cloud setups does not apply and the status of all Ingress objects remains +--publish-service flag used in standard cloud setups does not apply and the status of all Ingress objects remains blank. -$ kubectl get ingress +$ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 - + Instead, and because bare-metal nodes usually don't have an ExternalIP, one has to enable the ---report-node-internal-ip-address flag, which sets the status of all Ingress objects to the internal IP +--report-node-internal-ip-address flag, which sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. Example -Given a nginx-ingress-controller DaemonSet composed of 2 replicas -$ kubectl -n ingress-nginx get pod -o wide +Given a nginx-ingress-controller DaemonSet composed of 2 replicas +$ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 - + the controller sets the status of all Ingress objects it manages to the following value: -$ kubectl get ingress -o wide +$ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.2,203.0.113.3 80 - + Note Alternatively, it is possible to override the address written to Ingress objects using the ---publish-status-address flag. See Command line arguments. +--publish-status-address flag. See Command line arguments. 
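Putting the pieces of this section together, the relevant fragments of such a DaemonSet would look roughly as follows. This is a sketch, not a complete manifest: it shows only the host-network setting, the DNS policy and the status flag discussed above, and assumes the usual controller container from the reference deployment.
kind: DaemonSet
spec:
  template:
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: nginx-ingress-controller
        args:
        - /nginx-ingress-controller
        - --report-node-internal-ip-address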
Using a self-provisioned edge ¶ Similarly to cloud environments, this deployment approach requires an edge network component providing a public @@ -1581,49 +1581,50 @@ on the target nodes as shown in the diagram below: This method does not allow preserving the source IP of HTTP requests in any manner, it is therefore not recommended to use it despite its apparent simplicity. -The externalIPs Service option was previously mentioned in the NodePort section. -As per the Services page of the official Kubernetes documentation, the externalIPs option causes -kube-proxy to route traffic sent to arbitrary IP addresses and on the Service ports to the endpoints of that +The externalIPs Service option was previously mentioned in the NodePort section. +As per the Services page of the official Kubernetes documentation, the externalIPs option causes +kube-proxy to route traffic sent to arbitrary IP addresses and on the Service ports to the endpoints of that Service. These IP addresses must belong to the target node. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <None>) -$ kubectl get node +$ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 - + -and the following ingress-nginx NodePort Service -$ kubectl -n ingress-nginx get svc +and the following ingress-nginx NodePort Service +$ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP - + One could set the following external IPs in the Service spec, and NGINX would become available on both the NodePort and the Service port: -spec: +spec: externalIPs: - 203.0.113.2 - 203.0.113.3 - + -$ curl -D- http://myapp.example.com:30100 +$ curl -D- http://myapp.example.com:30100 HTTP/1.1 200 OK Server: nginx/1.15.2 $ curl -D- http://myapp.example.com HTTP/1.1 200 OK Server: nginx/1.15.2 - + We assume the myapp.example.com subdomain above resolves to both 203.0.113.2 and 203.0.113.3 IP addresses. + @@ -1678,9 +1679,9 @@ and the Service port: @@ -1690,7 +1691,7 @@ and the Service port: - + diff --git a/deploy/index.html b/deploy/index.html index 038c3bc4c..72eee0bea 100644 --- a/deploy/index.html +++ b/deploy/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1446,7 +1446,7 @@ Attention The default configuration watches Ingress object from all the namespaces. -To change this behavior use the flag --watch-namespace to limit the scope to a particular namespace. +To change this behavior use the flag --watch-namespace to limit the scope to a particular namespace. Warning @@ -1455,51 +1455,51 @@ To change this behavior use the flag --watch-namespace< Attention If you're using GKE you need to initialize your user as a cluster-admin with the following command: -kubectl create clusterrolebinding cluster-admin-binding \ +kubectl create clusterrolebinding cluster-admin-binding \ --clusterrole cluster-admin \ --user $(gcloud config get-value account) - + The following Mandatory Command is required for all deployments. 
-kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/mandatory.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/mandatory.yaml + Tip -If you are using a Kubernetes version previous to 1.14, you need to change kubernetes.io/os to beta.kubernetes.io/os at line 217 of mandatory.yaml, see Labels details. +If you are using a Kubernetes version previous to 1.14, you need to change kubernetes.io/os to beta.kubernetes.io/os at line 217 of mandatory.yaml, see Labels details. Provider Specific Steps ¶ There are cloud provider specific yaml files. Docker for Mac ¶ Kubernetes is available in Docker for Mac (from version 18.06.0-ce) Create a service -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml + minikube ¶ For standard usage: -minikube addons enable ingress - +minikube addons enable ingress + For development: Disable the ingress addon: -minikube addons disable ingress - +minikube addons disable ingress + -Execute make dev-env -Confirm the nginx-ingress-controller deployment exists: +Execute make dev-env +Confirm the nginx-ingress-controller deployment exists: -$ kubectl get pods -n ingress-nginx +$ kubectl get pods -n ingress-nginx NAME READY STATUS RESTARTS AGE default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s - + AWS ¶ -In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer. +In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer. Since Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB) Please check the elastic load balancing AWS details page Elastic Load Balancer - ELB ¶ @@ -1509,45 +1509,45 @@ Please check the Layer 7: use HTTP as the listener protocol for port 80 and terminate TLS in the ELB For L4: -Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l4.yaml +Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. 
If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l4.yaml Then execute: -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l4.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l4.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l4.yaml - + For L7: -Change line of the file provider/aws/service-l7.yaml replacing the dummy id with a valid one "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX" -Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l7.yaml +Change line of the file provider/aws/service-l7.yaml replacing the dummy id with a valid one "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX" +Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l7.yaml Then execute: -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l7.yaml +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l7.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l7.yaml - + This example creates an ELB with just two listeners, one in port 80 and another in port 443 ELB Idle Timeouts ¶ -In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the keepalive_timeout that is configured for NGINX. By default NGINX keepalive_timeout is set to 75s. -The default ELB idle timeout will work for most scenarios, unless the NGINX keepalive_timeout has been modified, in which case service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout will need to be modified to ensure it is less than the keepalive_timeout the user has configured. -Please Note: An idle timeout of 3600s is recommended when using WebSockets. +In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the keepalive_timeout that is configured for NGINX. By default NGINX keepalive_timeout is set to 75s. +The default ELB idle timeout will work for most scenarios, unless the NGINX keepalive_timeout has been modified, in which case service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout will need to be modified to ensure it is less than the keepalive_timeout the user has configured. +Please Note: An idle timeout of 3600s is recommended when using WebSockets. 
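For reference, the annotation is set in the metadata of the ingress-nginx Service and takes the timeout in seconds; the fragment below uses the WebSockets recommendation as an illustrative value:
metadata:
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"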
More information with regards to idle timeouts for your Load Balancer can be found in the official AWS documentation. Network Load Balancer (NLB) ¶ This type of load balancer is supported since v1.10.0 as an ALPHA feature. -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-nlb.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-nlb.yaml + GCE-GKE ¶ -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml + Important Note: proxy protocol is not supported in GCE/GKE Azure ¶ -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml + Bare-metal ¶ Using NodePort: -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/baremetal/service-nodeport.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/baremetal/service-nodeport.yaml + Tip @@ -1555,42 +1555,43 @@ Please check the Verify installation ¶ To check if the ingress controller pods have started, run the following command: -kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch - +kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch + -Once the operator pods are running, you can cancel the above command by typing Ctrl+C. +Once the operator pods are running, you can cancel the above command by typing Ctrl+C. Now, you are ready to create your first ingress. Detect installed version ¶ -To detect which version of the ingress controller is running, exec into the pod and run nginx-ingress-controller version command. -POD_NAMESPACE=ingress-nginx +To detect which version of the ingress controller is running, exec into the pod and run nginx-ingress-controller version command. +POD_NAMESPACE=ingress-nginx POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version - + Using Helm ¶ NGINX Ingress controller can be installed via Helm using the chart stable/nginx-ingress from the official charts repository. 
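If the stable repository is not yet configured in the local Helm client, it may need to be added first. The URL below is the historical location of the stable charts at the time of this release; verify the current one for your setup:
$ helm repo add stable https://kubernetes-charts.storage.googleapis.com
$ helm repo update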
-To install the chart with the release name my-nginx: -helm install my-nginx stable/nginx-ingress - +To install the chart with the release name my-nginx: +helm install my-nginx stable/nginx-ingress + If the kubernetes cluster has RBAC enabled, then run: -helm install my-nginx stable/nginx-ingress --set rbac.create=true - +helm install my-nginx stable/nginx-ingress --set rbac.create=true + -If you are using Helm 2 then specify release name using --name flag -helm install stable/nginx-ingress --name my-nginx - +If you are using Helm 2 then specify release name using --name flag +helm install stable/nginx-ingress --name my-nginx + or -helm install stable/nginx-ingress --name my-nginx --set rbac.create=true - +helm install stable/nginx-ingress --name my-nginx --set rbac.create=true + Detect installed version: -POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') +POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version - + + @@ -1645,9 +1646,9 @@ or @@ -1657,7 +1658,7 @@ or - + diff --git a/deploy/rbac/index.html b/deploy/rbac/index.html index 631e125f7..63acfc767 100644 --- a/deploy/rbac/index.html +++ b/deploy/rbac/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1280,40 +1280,40 @@ This example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled. Role Based Access Control is comprised of four layers: -ClusterRole - permissions assigned to a role that apply to an entire cluster -ClusterRoleBinding - binding a ClusterRole to a specific account -Role - permissions assigned to a role that apply to a specific namespace -RoleBinding - binding a Role to a specific account +ClusterRole - permissions assigned to a role that apply to an entire cluster +ClusterRoleBinding - binding a ClusterRole to a specific account +Role - permissions assigned to a role that apply to a specific namespace +RoleBinding - binding a Role to a specific account In order for RBAC to be applied to an nginx-ingress-controller, that controller -should be assigned to a ServiceAccount. That ServiceAccount should be -bound to the Roles and ClusterRoles defined for the nginx-ingress-controller. +should be assigned to a ServiceAccount. That ServiceAccount should be +bound to the Roles and ClusterRoles defined for the nginx-ingress-controller. Service Accounts created in this example ¶ -One ServiceAccount is created in this example, nginx-ingress-serviceaccount. +One ServiceAccount is created in this example, nginx-ingress-serviceaccount. Permissions Granted in this example ¶ There are two sets of permissions defined in this example. Cluster-wide -permissions defined by the ClusterRole named nginx-ingress-clusterrole, and -namespace specific permissions defined by the Role named nginx-ingress-role. +permissions defined by the ClusterRole named nginx-ingress-clusterrole, and +namespace specific permissions defined by the Role named nginx-ingress-role. Cluster Permissions ¶ These permissions are granted in order for the nginx-ingress-controller to be able to function as an ingress across the cluster. 
These permissions are -granted to the ClusterRole named nginx-ingress-clusterrole +granted to the ClusterRole named nginx-ingress-clusterrole -configmaps, endpoints, nodes, pods, secrets: list, watch -nodes: get -services, ingresses: get, list, watch -events: create, patch -ingresses/status: update +configmaps, endpoints, nodes, pods, secrets: list, watch +nodes: get +services, ingresses: get, list, watch +events: create, patch +ingresses/status: update Namespace Permissions ¶ These permissions are granted specific to the nginx-ingress namespace. These -permissions are granted to the Role named nginx-ingress-role +permissions are granted to the Role named nginx-ingress-role -configmaps, pods, secrets: get -endpoints: get +configmaps, pods, secrets: get +endpoints: get Furthermore to support leader-election, the nginx-ingress-controller needs to -have access to a configmap using the resourceName ingress-controller-leader-nginx +have access to a configmap using the resourceName ingress-controller-leader-nginx Note that resourceNames can NOT be used to limit requests using the “create” verb because authorizers only have access to information that can be obtained @@ -1321,27 +1321,28 @@ from the request URL, method, and headers (resource names in a “create” requ are part of the request body). -configmaps: get, update (for resourceName ingress-controller-leader-nginx) -configmaps: create +configmaps: get, update (for resourceName ingress-controller-leader-nginx) +configmaps: create -This resourceName is the concatenation of the election-id and the -ingress-class as defined by the ingress-controller, which defaults to: +This resourceName is the concatenation of the election-id and the +ingress-class as defined by the ingress-controller, which defaults to: -election-id: ingress-controller-leader -ingress-class: nginx -resourceName : <election-id>-<ingress-class> +election-id: ingress-controller-leader +ingress-class: nginx +resourceName : <election-id>-<ingress-class> Please adapt accordingly if you overwrite either parameter when launching the nginx-ingress-controller. Bindings ¶ -The ServiceAccount nginx-ingress-serviceaccount is bound to the Role -nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole. +The ServiceAccount nginx-ingress-serviceaccount is bound to the Role +nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole. The serviceAccountName associated with the containers in the deployment must match the serviceAccount. The namespace references in the Deployment metadata, container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace. + @@ -1396,9 +1397,9 @@ container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace. @@ -1408,7 +1409,7 @@ container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace. - + diff --git a/deploy/upgrade/index.html b/deploy/upgrade/index.html index 33fd38dd0..1fdb517e2 100644 --- a/deploy/upgrade/index.html +++ b/deploy/upgrade/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1217,7 +1217,7 @@ make sure your templates are compatible with the new version of ingress-nginxTo upgrade your ingress-nginx installation, it should be enough to change the version of the image in the controller Deployment. I.e. 
if your deployment resource looks like (partial example): -kind: Deployment +kind: Deployment metadata: name: nginx-ingress-controller namespace: ingress-nginx @@ -1231,23 +1231,24 @@ in the controller Deployment. - name: nginx-ingress-controller image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 args: ... - + -simply change the 0.9.0 tag to the version you wish to upgrade to. +simply change the 0.9.0 tag to the version you wish to upgrade to. The easiest way to do this is e.g. (do note you may need to change the name parameter according to your installation): -kubectl set image deployment/nginx-ingress-controller \ - nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0 - +kubectl set image deployment/nginx-ingress-controller \ + nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0 + -For interactive editing, use kubectl edit deployment nginx-ingress-controller. +For interactive editing, use kubectl edit deployment nginx-ingress-controller. With Helm ¶ -If you installed ingress-nginx using the Helm command in the deployment docs so its name is ngx-ingress, +If you installed ingress-nginx using the Helm command in the deployment docs so its name is ngx-ingress, you should be able to upgrade using -helm upgrade --reuse-values ngx-ingress stable/nginx-ingress - +helm upgrade --reuse-values ngx-ingress stable/nginx-ingress + + @@ -1302,9 +1303,9 @@ you should be able to upgrade using @@ -1314,7 +1315,7 @@ you should be able to upgrade using - + diff --git a/deploy/validating-webhook/index.html b/deploy/validating-webhook/index.html index 409340685..4fae92755 100644 --- a/deploy/validating-webhook/index.html +++ b/deploy/validating-webhook/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1352,16 +1352,16 @@ Validating webhook must be served using TLS, you need to generate a certificate. Note that kube API server is checking the hostname of the certificate, the common name of your certificate will need to match the service name. Example -To run the validating webhook with a service named ingress-validation-webhook in the namespace ingress-nginx, run -openssl req -x509 -newkey rsa:2048 -keyout certificate.pem -out key.pem -days 365 -nodes -subj "/CN=ingress-validation-webhook.ingress-nginx.svc" - +To run the validating webhook with a service named ingress-validation-webhook in the namespace ingress-nginx, run +openssl req -x509 -newkey rsa:2048 -keyout certificate.pem -out key.pem -days 365 -nodes -subj "/CN=ingress-validation-webhook.ingress-nginx.svc" + Using Kubernetes CA ¶ Kubernetes also provides primitives to sign a certificate request. Here is an example on how to use it Example -#!/bin/bash +#!/bin/bash SERVICE_NAME=ingress-nginx NAMESPACE=ingress-nginx @@ -1423,17 +1423,17 @@ kubectl create secret generic ingress-nginx.svc \ --from-file=key.pem=${TEMP_DIRECTORY}/server-key.pem \ --from-file=cert.pem=${TEMP_DIRECTORY}/server-cert.pem \ -n ${NAMESPACE} - + Using helm ¶ To generate the certificate using helm, you can use the following snippet Example -{{- $cn := printf "%s.%s.svc" ( include "nginx-ingress.validatingWebhook.fullname" . ) .Release.Namespace }} +{{- $cn := printf "%s.%s.svc" ( include "nginx-ingress.validatingWebhook.fullname" . ) .Release.Namespace }} {{- $ca := genCA (printf "%s-ca" ( include "nginx-ingress.validatingWebhook.fullname" . 
)) .Values.validatingWebhook.certificateValidity -}}
{{- $cert := genSignedCert $cn nil nil .Values.validatingWebhook.certificateValidity $ca -}}

Ingress controller flags ¶
@@ -1448,19 +1448,19 @@ kubectl create secret generic ingress-nginx.svc \
---validating-webhook
+--validating-webhook
The address to start an admission controller on
-:8080
+:8080
---validating-webhook-certificate
+--validating-webhook-certificate
The certificate the webhook is using for its TLS handling
-/usr/local/certificates/validating-webhook.pem
+/usr/local/certificates/validating-webhook.pem
---validating-webhook-key
+--validating-webhook-key
The key the webhook is using for its TLS handling
-/usr/local/certificates/validating-webhook-key.pem
+/usr/local/certificates/validating-webhook-key.pem
@@ -1469,7 +1469,7 @@ kubectl create secret generic ingress-nginx.svc \
To check that your kube API server runs with the required flags, please refer to the kubernetes documentation.
Additional kubernetes objects ¶
Once both the ingress controller and the kube API server are configured to serve the webhook, you can configure the webhook with the following objects:
-apiVersion: v1
+apiVersion: v1
kind: Service
metadata:
name: ingress-validation-webhook
@@ -1507,10 +1507,11 @@ To check that your kube API server runs with the required flags, please refer to
name: ingress-validation-webhook
path: /networking.k8s.io/v1beta1/ingress
caBundle: <pem encoded ca cert that signs the server cert used by the webhook>

diff --git a/development/index.html b/development/index.html
index 891a80769..2e76315c1 100644
--- a/development/index.html
+++ b/development/index.html
@@ -1363,112 +1363,112 @@ It includes how to build, test, and release ingress controllers.
Quick Start ¶
Getting the code ¶
The code must be checked out as a subdirectory of k8s.io, and not github.com.
-mkdir -p $GOPATH/src/k8s.io
-cd $GOPATH/src/k8s.io
-# Replace "$YOUR_GITHUB_USERNAME" below with your github username
-git clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git
-cd ingress-nginx
+mkdir -p $GOPATH/src/k8s.io
+cd $GOPATH/src/k8s.io
+# Replace "$YOUR_GITHUB_USERNAME" below with your github username
+git clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git
+cd ingress-nginx

Initial developer environment build ¶
Prerequisites: Minikube must be installed. See releases for installation instructions.
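If minikube is not already running, a hedged starting point is the following (the resource sizes are illustrative, not taken from these docs; adjust them to your machine):

$ minikube start --cpus 4 --memory 8192

The Makefile targets below assume kubectl is pointing at this minikube cluster.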
-If you are using MacOS and deploying to minikube, the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx: -$ make dev-env - +If you are using MacOS and deploying to minikube, the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx: +$ make dev-env + Updating the deployment ¶ The nginx controller container image can be rebuilt using: -$ ARCH=amd64 TAG=dev REGISTRY=$USER/ingress-controller make build container - +$ ARCH=amd64 TAG=dev REGISTRY=$USER/ingress-controller make build container + The image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up: -$ kubectl get pods -n ingress-nginx +$ kubectl get pods -n ingress-nginx $ kubectl delete pod -n ingress-nginx nginx-ingress-controller-<unique-pod-id> - + Dependencies ¶ -The build uses dependencies in the vendor directory, which +The build uses dependencies in the vendor directory, which must be installed before building a binary/image. Occasionally, you might need to update the dependencies. This guide requires you to install go 1.13 or newer. -This will automatically save the dependencies to the vendor/ directory. -$ go get +This will automatically save the dependencies to the vendor/ directory. +$ go get $ make dep-ensure - + Building ¶ All ingress controllers are built through a Makefile. Depending on your requirements you can build a raw server binary, a local container image, or push an image to a remote repository. In order to use your local Docker, you may need to set the following environment variables: -# "gcloud docker" (default) or "docker" +# "gcloud docker" (default) or "docker" $ export DOCKER=<docker> # "quay.io/kubernetes-ingress-controller" (default), "index.docker.io", or your own registry $ export REGISTRY=<your-docker-registry> - + -To find the registry simply run: docker system info | grep Registry +To find the registry simply run: docker system info | grep Registry Building the e2e test image ¶ The e2e test image can also be built through the Makefile. -$ make e2e-test-image - +$ make e2e-test-image + You can then make this image available on your minikube host by exporting the image and loading it with the minikube docker context: -$ docker save nginx-ingress-controller:e2e | (eval $(minikube docker-env) && docker load) - +$ docker save nginx-ingress-controller:e2e | (eval $(minikube docker-env) && docker load) + Nginx Controller ¶ Build a raw server binary -$ make build - +$ make build + TODO: add more specific instructions needed for raw server binary. Build a local container image -$ TAG=<tag> REGISTRY=$USER/ingress-controller make container - +$ TAG=<tag> REGISTRY=$USER/ingress-controller make container + Push the container image to a remote repository -$ TAG=<tag> REGISTRY=$USER/ingress-controller make push - +$ TAG=<tag> REGISTRY=$USER/ingress-controller make push + Deploying ¶ There are several ways to deploy the ingress controller onto a cluster. Please check the deployment guide Testing ¶ To run unit-tests, just run -$ cd $GOPATH/src/k8s.io/ingress-nginx +$ cd $GOPATH/src/k8s.io/ingress-nginx $ make test - + If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo. 
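The ginkgo CLI is assumed to be available on your PATH; if it is not, one way to install it with the go toolchain mentioned above is (a sketch; the import path is the upstream Ginkgo repository):

$ go get github.com/onsi/ginkgo/ginkgo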
-$ cd $GOPATH/src/k8s.io/ingress-nginx +$ cd $GOPATH/src/k8s.io/ingress-nginx $ make e2e-test - + NOTE: if your e2e pod keeps hanging in an ImagePullBackoff, make sure you've made your e2e nginx-ingress-controller image available to minikube as explained in the Building the e2e test image section To run unit-tests for lua code locally, run: -$ cd $GOPATH/src/k8s.io/ingress-nginx +$ cd $GOPATH/src/k8s.io/ingress-nginx $ ./rootfs/etc/nginx/lua/test/up.sh $ make lua-test - + -Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test. When creating a new test file it must follow the naming convention <mytest>_test.lua or it will be ignored. +Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test. When creating a new test file it must follow the naming convention <mytest>_test.lua or it will be ignored. Releasing ¶ All Makefiles will produce a release binary, as shown above. To publish this to a wider Kubernetes user base, push the image to a container registry, like -gcr.io. All release images are hosted under gcr.io/google_containers and +gcr.io. All release images are hosted under gcr.io/google_containers and tagged according to a semver scheme. An example release might look like: -$ make release - +$ make release + Please follow these guidelines to cut a release: Update the release page with a short description of the major changes that correspond to a given image tag. Cut a release branch, if appropriate. Release branches follow the format of -controller-release-version. Typically, pre-releases are cut from HEAD. +controller-release-version. Typically, pre-releases are cut from HEAD. All major feature work is done in HEAD. Specific bug fixes are cherry-picked into a release branch. If you're not confident about the stability of the code, @@ -1478,6 +1478,7 @@ Typically, a release branch should have stable code. + @@ -1532,9 +1533,9 @@ Typically, a release branch should have stable code. @@ -1544,7 +1545,7 @@ Typically, a release branch should have stable code. - + diff --git a/enhancements/20190724-only-dynamic-ssl/index.html b/enhancements/20190724-only-dynamic-ssl/index.html index bb9a6b794..c95f33d62 100644 --- a/enhancements/20190724-only-dynamic-ssl/index.html +++ b/enhancements/20190724-only-dynamic-ssl/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1240,7 +1240,7 @@ The static configuration implies reloads, something that affects the majority of the users. Goals ¶ -Deprecation of the flag --enable-dynamic-certificates. +Deprecation of the flag --enable-dynamic-certificates. Cleanup of the codebase. Non-Goals ¶ @@ -1253,8 +1253,8 @@ Implementation Details/Notes/Constraints ¶ -Deprecate the flag Move the directives ssl_certificate and ssl_certificate_key from each server block to the http section. These settings are required to avoid NGINX errors in the logs. -Remove any action of the flag --enable-dynamic-certificates +Deprecate the flag Move the directives ssl_certificate and ssl_certificate_key from each server block to the http section. These settings are required to avoid NGINX errors in the logs. 
+Remove any action of the flag --enable-dynamic-certificates
Drawbacks ¶
Alternatives ¶

diff --git a/enhancements/20190815-zone-aware-routing/index.html b/enhancements/20190815-zone-aware-routing/index.html
index fa854c9b5..69018f779 100644
--- a/enhancements/20190815-zone-aware-routing/index.html
+++ b/enhancements/20190815-zone-aware-routing/index.html
@@ -1250,13 +1250,13 @@ if there is no zone-local endpoint then it will fall back to the current behaviour.
How does the controller know what zone it runs in? We can have the pod spec pass the node name using the downward API as an environment variable. Then on start the controller can get node details from the API based on that node name. Once the node details are obtained
-we can extract the zone from the failure-domain.beta.kubernetes.io/zone annotation. Then we can pass that value to Lua land through Nginx configuration
-when loading the lua_ingress.lua module in the init_by_lua phase.
+we can extract the zone from the failure-domain.beta.kubernetes.io/zone annotation. Then we can pass that value to Lua land through Nginx configuration
+when loading the lua_ingress.lua module in the init_by_lua phase.
How do we extract zones for endpoints? We can have the controller watch create and update events on nodes in the entire cluster and based on that keep a map of nodes to zones in memory.
-And when we generate the endpoints list, we can access the node name using .subsets.addresses[i].nodeName
+And when we generate the endpoints list, we can access the node name using .subsets.addresses[i].nodeName
and based on that fetch the zone from the map in memory and store it as a field on the endpoint.
-This solution assumes the failure-domain.beta.kubernetes.io/zone annotation does not change until the end of the node's life. Otherwise we have to
+This solution assumes the failure-domain.beta.kubernetes.io/zone annotation does not change until the end of the node's life. Otherwise we have to
watch update events as well on the nodes and that will add even more overhead.
Alternatively, we can get the list of nodes only when there is no node in memory for a given node name. This is probably a better solution because then we would avoid watching for API changes on node resources. We can eagerly fetch all the nodes and build the node name to zone mapping on start.
@@ -1279,6 +1279,7 @@ see no endpoints for the backend and therefore we will use the general balancer.

diff --git a/enhancements/YYYYMMDD-kep-template/index.html b/enhancements/YYYYMMDD-kep-template/index.html
index 5bf5fa769..33b180b81 100644
--- a/enhancements/YYYYMMDD-kep-template/index.html
+++ b/enhancements/YYYYMMDD-kep-template/index.html
@@ -1297,11 +1297,11 @@ This is the title of the KEP.
Keep it simple and descriptive.
A good title can help communicate what the KEP is and should be considered as part of any review.
-The title should be lowercased and spaces/punctuation should be replaced with -.
+The title should be lowercased and spaces/punctuation should be replaced with -. To get started with this template: Make a copy of this template. - Create a copy of this template and name it YYYYMMDD-my-title.md, where YYYYMMDD is the date the KEP was first drafted. + Create a copy of this template and name it YYYYMMDD-my-title.md, where YYYYMMDD is the date the KEP was first drafted. Fill out the "overview" sections. This includes the Summary and Motivation sections. These should be easy if you've preflighted the idea of the KEP in an issue. @@ -1312,17 +1312,17 @@ A good title can help communicate what the KEP is and should be considered as pa Merge early. Avoid getting hung up on specific details and instead aim to get the goal of the KEP merged quickly. The best way to do this is to just start with the "Overview" sections and fill out details incrementally in follow on PRs. - View anything marked as a provisional as a working document and subject to change. + View anything marked as a provisional as a working document and subject to change. Aim for single topic PRs to keep discussions focused. If you disagree with what is already in a document, open a new PR with suggested changes. The canonical place for the latest set of instructions (and the likely source of this file) is here. -The Metadata section above is intended to support the creation of tooling around the KEP process. +The Metadata section above is intended to support the creation of tooling around the KEP process. This will be a YAML section that is fenced as a code block. See the KEP process for details on each of these items. Table of Contents ¶ A table of contents is helpful for quickly jumping to sections of a KEP and for highlighting any additional information provided beyond the standard KEP template. -Ensure the TOC is wrapped with <!-- toc --&rt;<!-- /toc --&rt; tags, and then generate with hack/update-toc.sh. +Ensure the TOC is wrapped with <!-- toc --&rt;<!-- /toc --&rt; tags, and then generate with hack/update-toc.sh. @@ -1349,7 +1349,7 @@ See the KEP process for details on each of these items. Summary ¶ -The Summary section is incredibly important for producing high quality user-focused documentation such as release notes or a development roadmap. +The Summary section is incredibly important for producing high quality user-focused documentation such as release notes or a development roadmap. It should be possible to collect this information before implementation begins in order to avoid requiring implementors to split their attention between writing release notes and implementing the feature itself. A good summary is probably at least a paragraph in length. Motivation ¶ @@ -1402,11 +1402,11 @@ Please adhere to the Implementation History ¶ -Major milestones in the life cycle of a KEP should be tracked in Implementation History. +Major milestones in the life cycle of a KEP should be tracked in Implementation History. 
Major milestones might include -the Summary and Motivation sections being merged signaling acceptance -the Proposal section being merged signaling agreement on a proposed design +the Summary and Motivation sections being merged signaling acceptance +the Proposal section being merged signaling agreement on a proposed design the date implementation started the first Kubernetes release where an initial version of the KEP was available the version of Kubernetes where the KEP graduated to general availability @@ -1415,10 +1415,11 @@ Major milestones might include Drawbacks [optional] ¶ Why should this KEP not be implemented. Alternatives [optional] ¶ -Similar to the Drawbacks section the Alternatives section is used to highlight and record other possible approaches to delivering the value proposed by a KEP. +Similar to the Drawbacks section the Alternatives section is used to highlight and record other possible approaches to delivering the value proposed by a KEP. + @@ -1438,9 +1439,9 @@ Major milestones might include @@ -1450,7 +1451,7 @@ Major milestones might include - + diff --git a/enhancements/index.html b/enhancements/index.html index 18ef6a089..b693fc47a 100644 --- a/enhancements/index.html +++ b/enhancements/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1172,7 +1172,7 @@ Kubernetes Enhancement Proposals (KEPs) ¶ -A Kubernetes Enhancement Proposal (KEP) is a way to propose, communicate and coordinate on new efforts for the Kubernetes project. For this reason, the ingress-nginx project is adopting it. +A Kubernetes Enhancement Proposal (KEP) is a way to propose, communicate and coordinate on new efforts for the Kubernetes project. For this reason, the ingress-nginx project is adopting it. Quick start for the KEP process ¶ Follow the process outlined in the KEP template Do I have to use the KEP process? ¶ @@ -1193,6 +1193,7 @@ As such, we want to build a well curated set of clear proposals in a common form + @@ -1212,9 +1213,9 @@ As such, we want to build a well curated set of clear proposals in a common form @@ -1224,7 +1225,7 @@ As such, we want to build a well curated set of clear proposals in a common form - + diff --git a/examples/PREREQUISITES/index.html b/examples/PREREQUISITES/index.html index c99af7038..45cbdccbe 100644 --- a/examples/PREREQUISITES/index.html +++ b/examples/PREREQUISITES/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1226,7 +1226,7 @@ TLS certificates ¶ Unless otherwise mentioned, the TLS secret used in examples is a 2048 bit RSA key/cert pair with an arbitrarily chosen hostname, created as follows -$ openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=nginxsvc/O=nginxsvc" +$ openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=nginxsvc/O=nginxsvc" Generating a 2048 bit RSA private key ................+++ ................+++ @@ -1235,7 +1235,7 @@ key/cert pair with an arbitrarily chosen hostname, created as follows $ kubectl create secret tls tls-secret --key tls.key --cert tls.crt secret "tls-secret" created - + Note: If using CA Authentication, described below, you will need to sign the server certificate with the CA. 
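For reference, later examples consume a secret like this through the tls section of an Ingress; a minimal sketch using the networking.k8s.io/v1beta1 API shown elsewhere in these docs (the host and backend names are illustrative):

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: tls-example
spec:
  tls:
  - hosts:
    - nginxsvc
    secretName: tls-secret
  rules:
  - host: nginxsvc
    http:
      paths:
      - backend:
          serviceName: http-svc
          servicePort: 80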
Client Certificate Authentication ¶ @@ -1246,24 +1246,24 @@ both our server certificate and client certificate. Then every time we want to a pass the client certificate. These instructions are based on the following blog Generate the CA Key and Certificate: -openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 356 -nodes -subj '/CN=My Cert Authority' - +openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 356 -nodes -subj '/CN=My Cert Authority' + Generate the Server Key, and Certificate and Sign with the CA Certificate: -openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=mydomain.com' +openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=mydomain.com' openssl x509 -req -sha256 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt - + Generate the Client Key, and Certificate and Sign with the CA Certificate: -openssl req -new -newkey rsa:4096 -keyout client.key -out client.csr -nodes -subj '/CN=My Client' +openssl req -new -newkey rsa:4096 -keyout client.key -out client.csr -nodes -subj '/CN=My Client' openssl x509 -req -sha256 -days 365 -in client.csr -CA ca.crt -CAkey ca.key -set_serial 02 -out client.crt - + Once this is complete you can continue to follow the instructions here Test HTTP Service ¶ All examples that require a test HTTP Service use the standard http-svc pod, which you can deploy as follows -$ kubectl create -f http-svc.yaml +$ kubectl create -f http-svc.yaml service "http-svc" created replicationcontroller "http-svc" created @@ -1274,10 +1274,10 @@ which you can deploy as follows $ kubectl get svc NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE http-svc 10.0.122.116 <pending> 80:30301/TCP 1d - + You can test that the HTTP Service works by exposing it temporarily -$ kubectl patch svc http-svc -p '{"spec":{"type": "LoadBalancer"}}' +$ kubectl patch svc http-svc -p '{"spec":{"type": "LoadBalancer"}}' "http-svc" patched $ kubectl get svc http-svc @@ -1324,10 +1324,11 @@ which you can deploy as follows $ kubectl patch svc http-svc -p '{"spec":{"type": "NodePort"}}' "http-svc" patched - + + @@ -1382,9 +1383,9 @@ which you can deploy as follows @@ -1394,7 +1395,7 @@ which you can deploy as follows - + diff --git a/examples/affinity/cookie/index.html b/examples/affinity/cookie/index.html index 5f4055788..c41293b92 100644 --- a/examples/affinity/cookie/index.html +++ b/examples/affinity/cookie/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1222,18 +1222,18 @@ nginx.ingress.kubernetes.io/affinity -Type of the affinity, set this to cookie to enable session affinity -string (NGINX only supports cookie) +Type of the affinity, set this to cookie to enable session affinity +string (NGINX only supports cookie) nginx.ingress.kubernetes.io/affinity-mode -The affinity mode defines how sticky a session is. Use balanced to redistribute some sessions when scaling pods or persistent for maximum stickyness. -balanced (default) or persistent +The affinity mode defines how sticky a session is. Use balanced to redistribute some sessions when scaling pods or persistent for maximum stickyness. 
+balanced (default) or persistent nginx.ingress.kubernetes.io/session-cookie-name Name of the cookie that will be created -string (defaults to INGRESSCOOKIE) +string (defaults to INGRESSCOOKIE) nginx.ingress.kubernetes.io/session-cookie-path @@ -1243,37 +1243,37 @@ nginx.ingress.kubernetes.io/session-cookie-samesite SameSite attribute to apply to the cookie -Browser accepted values are None, Lax, and Strict +Browser accepted values are None, Lax, and Strict nginx.ingress.kubernetes.io/session-cookie-conditional-samesite-none -Will omit SameSite=None attribute for older browsers which reject the more-recently defined SameSite=None value -"true" or "false" +Will omit SameSite=None attribute for older browsers which reject the more-recently defined SameSite=None value +"true" or "false" nginx.ingress.kubernetes.io/session-cookie-max-age -Time until the cookie expires, corresponds to the Max-Age cookie directive +Time until the cookie expires, corresponds to the Max-Age cookie directive number of seconds nginx.ingress.kubernetes.io/session-cookie-expires -Legacy version of the previous annotation for compatibility with older browsers, generates an Expires cookie directive by adding the seconds to the current date +Legacy version of the previous annotation for compatibility with older browsers, generates an Expires cookie directive by adding the seconds to the current date number of seconds nginx.ingress.kubernetes.io/session-cookie-change-on-failure -When set to false nginx ingress will send request to upstream pointed by sticky cookie even if previous attempt failed. When set to true and previous attempt failed, sticky cookie will be changed to point to another upstream. -true or false (defaults to false) +When set to false nginx ingress will send request to upstream pointed by sticky cookie even if previous attempt failed. When set to true and previous attempt failed, sticky cookie will be changed to point to another upstream. +true or false (defaults to false) You can create the example Ingress to test this: -kubectl create -f ingress.yaml - +kubectl create -f ingress.yaml + Validation ¶ You can confirm that the Ingress works: -$ kubectl describe ing nginx-test +$ kubectl describe ing nginx-test Name: nginx-test Namespace: default Address: @@ -1305,10 +1305,10 @@ Last-Modified: Tue, 24 Jan 2017 14:02:19 GMT ETag: "58875e6b-264" Accept-Ranges: bytes - + -In the example above, you can see that the response contains a Set-Cookie header with the settings we have defined. -This cookie is created by NGINX, it contains a randomly generated key corresponding to the upstream used for that request (selected using consistent hashing) and has an Expires directive. +In the example above, you can see that the response contains a Set-Cookie header with the settings we have defined. +This cookie is created by NGINX, it contains a randomly generated key corresponding to the upstream used for that request (selected using consistent hashing) and has an Expires directive. If the user changes this cookie, NGINX creates a new one and redirects the user to another upstream. If the backend pool grows NGINX will keep sending the requests through the same server of the first request, even if it's overloaded. When the backend server is removed, the requests are re-routed to another upstream server. This does not require the cookie to be updated because the key's consistent hash will change. 
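The ingress.yaml used earlier in this example is not reproduced in this diff; a minimal sketch combining annotations from the table above might look like this (the host, backend, and cookie max-age are illustrative):

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-test
  annotations:
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE"
    nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
spec:
  rules:
  - host: stickyingress.example.com
    http:
      paths:
      - backend:
          serviceName: http-svc
          servicePort: 80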
@@ -1317,6 +1317,7 @@ This means that you can face the situation that you've configured session affini + @@ -1371,9 +1372,9 @@ This means that you can face the situation that you've configured session affini @@ -1383,7 +1384,7 @@ This means that you can face the situation that you've configured session affini - + diff --git a/examples/auth/basic/index.html b/examples/auth/basic/index.html index 7952da852..a0af6a3c8 100644 --- a/examples/auth/basic/index.html +++ b/examples/auth/basic/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1151,20 +1151,20 @@ Basic Authentication ¶ -This example shows how to add authentication in a Ingress rule using a secret that contains a file generated with htpasswd. -It's important the file generated is named auth (actually - that the secret has a key data.auth), otherwise the ingress-controller returns a 503. -$ htpasswd -c auth foo +This example shows how to add authentication in a Ingress rule using a secret that contains a file generated with htpasswd. +It's important the file generated is named auth (actually - that the secret has a key data.auth), otherwise the ingress-controller returns a 503. +$ htpasswd -c auth foo New password: <bar> New password: Re-type new password: Adding password for user foo - + -$ kubectl create secret generic basic-auth --from-file=auth +$ kubectl create secret generic basic-auth --from-file=auth secret "basic-auth" created - + -$ kubectl get secret basic-auth -o yaml +$ kubectl get secret basic-auth -o yaml apiVersion: v1 data: auth: Zm9vOiRhcHIxJE9GRzNYeWJwJGNrTDBGSERBa29YWUlsSDkuY3lzVDAK @@ -1173,9 +1173,9 @@ It's important the file generated is named auth name: basic-auth namespace: default type: Opaque - + -echo " +echo " apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: @@ -1197,9 +1197,9 @@ It's important the file generated is named auth serviceName: http-svc servicePort: 80 " | kubectl create -f - - + -$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com' +$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com' * Trying 10.2.29.4... * Connected to 10.2.29.4 (10.2.29.4) port 80 (#0) > GET / HTTP/1.1 @@ -1223,9 +1223,9 @@ It's important the file generated is named auth </body> </html> * Connection #0 to host 10.2.29.4 left intact - + -$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com' -u 'foo:bar' +$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com' -u 'foo:bar' * Trying 10.2.29.4... * Connected to 10.2.29.4 (10.2.29.4) port 80 (#0) * Server auth using Basic with user 'foo' @@ -1268,10 +1268,11 @@ x-real-ip=10.2.29.1 BODY: * Connection #0 to host 10.2.29.4 left intact -no body in request- - + + @@ -1326,9 +1327,9 @@ BODY: @@ -1338,7 +1339,7 @@ BODY: - + diff --git a/examples/auth/client-certs/index.html b/examples/auth/client-certs/index.html index 227b663b3..55c524807 100644 --- a/examples/auth/client-certs/index.html +++ b/examples/auth/client-certs/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1219,12 +1219,12 @@ Before getting started you must have the following Certificates Setup: For more details on the generation process, checkout the Prerequisite docs. You can have as many certificates as you want. 
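If you are unsure whether a given certificate is already PEM encoded, a quick hedged check is to try parsing it as PEM, which fails for DER input:

$ openssl x509 -in certificate.crt -noout -subject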
If they're in the binary DER format, you can convert them as the following: -openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem - +openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem + Then, you can concatenate them all in only one file, named 'ca.crt' as the following: -cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt - +cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt + Note: Make sure that the Key Size is greater than 1024 and Hashing Algorithm(Digest) is something better than md5 for each certificate generated. Otherwise you will receive an error. @@ -1235,23 +1235,23 @@ Authentication to work properly. You can create a secret containing just the CA certificate and another Secret containing the Server Certificate which is Signed by the CA. -kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt +kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt kubectl create secret generic tls-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key - + You can create a secret containing CA certificate along with the Server Certificate, that can be used for both TLS and Client Auth. -kubectl create secret generic ca-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key --from-file=ca.crt=ca.crt - +kubectl create secret generic ca-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key --from-file=ca.crt=ca.crt + If you want to also enable Certificate Revocation List verification you can create the secret also containing the CRL file in PEM format: - kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt --from-file=ca.crl=ca.crl - + kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt --from-file=ca.crl=ca.crl + Note: The CA Certificate must contain the trusted certificate authority chain to verify client certificates. @@ -1264,6 +1264,7 @@ kubectl create secret generic tls-secret --from-file=tls. + @@ -1318,9 +1319,9 @@ kubectl create secret generic tls-secret --from-file=tls. @@ -1330,7 +1331,7 @@ kubectl create secret generic tls-secret --from-file=tls. - + diff --git a/examples/auth/external-auth/index.html b/examples/auth/external-auth/index.html index 919e18c53..f021668dd 100644 --- a/examples/auth/external-auth/index.html +++ b/examples/auth/external-auth/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1197,8 +1197,8 @@ External Basic Authentication ¶ Example 1: ¶ -Use an external service (Basic Auth) located in https://httpbin.org -$ kubectl create -f ingress.yaml +Use an external service (Basic Auth) located in https://httpbin.org +$ kubectl create -f ingress.yaml ingress "external-auth" created $ kubectl get ing external-auth @@ -1232,10 +1232,10 @@ status: ingress: - ip: 172.17.4.99 $ - + Test 1: no username/password (expect code 401) -$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' +$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' * Rebuilt URL to: http://172.17.4.99/ * Trying 172.17.4.99... 
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) @@ -1260,10 +1260,10 @@ $ </body> </html> * Connection #0 to host 172.17.4.99 left intact - + Test 2: valid username/password (expect code 200) -$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:passwd' +$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:passwd' * Rebuilt URL to: http://172.17.4.99/ * Trying 172.17.4.99... * Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) @@ -1306,9 +1306,9 @@ x-real-ip=10.2.60.1 BODY: * Connection #0 to host 172.17.4.99 left intact -no body in request- - + Test 3: invalid username/password (expect code 401) -curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:user' +curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:user' * Rebuilt URL to: http://172.17.4.99/ * Trying 172.17.4.99... * Connected to 172.17.4.99 (172.17.4.99) port 80 (#0) @@ -1336,10 +1336,11 @@ BODY: </body> </html> * Connection #0 to host 172.17.4.99 left intact - + + @@ -1394,9 +1395,9 @@ BODY: @@ -1406,7 +1407,7 @@ BODY: - + diff --git a/examples/auth/oauth-external-auth/index.html b/examples/auth/oauth-external-auth/index.html index 1d0bdde53..6a8fd6c59 100644 --- a/examples/auth/oauth-external-auth/index.html +++ b/examples/auth/oauth-external-auth/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1251,45 +1251,45 @@ External OAUTH Authentication ¶ Overview ¶ -The auth-url and auth-signin annotations allow you to use an external +The auth-url and auth-signin annotations allow you to use an external authentication provider to protect your Ingress resources. Important -This annotation requires nginx-ingress-controller v0.9.0 or greater.) +This annotation requires nginx-ingress-controller v0.9.0 or greater.) Key Detail ¶ This functionality is enabled by deploying multiple Ingress objects for a single host. One Ingress object has no special annotations and handles authentication. Other Ingress objects can then be annotated in such a way that require the user to -authenticate against the first Ingress's endpoint, and can redirect 401s to the +authenticate against the first Ingress's endpoint, and can redirect 401s to the same endpoint. Sample: -... +... metadata: name: application annotations: nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" ... 
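The companion Ingress that serves the authentication endpoints themselves carries no such annotations; a hedged sketch of that object (the oauth2-proxy service name and its conventional port 4180 are illustrative):

...
metadata:
  name: oauth2-proxy
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /oauth2
        backend:
          serviceName: oauth2-proxy
          servicePort: 4180
...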
- + Example: OAuth2 Proxy + Kubernetes-Dashboard ¶ -This example will show you how to deploy oauth2_proxy +This example will show you how to deploy oauth2_proxy into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using github as oAuth2 provider Prepare ¶ Install the kubernetes dashboard -kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.10.1.yaml - +kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.10.1.yaml + Create a custom Github OAuth application -Homepage URL is the FQDN in the Ingress rule, like https://foo.bar.com -Authorization callback URL is the same as the base FQDN plus /oauth2, like https://foo.bar.com/oauth2 +Homepage URL is the FQDN in the Ingress rule, like https://foo.bar.com +Authorization callback URL is the same as the base FQDN plus /oauth2, like https://foo.bar.com/oauth2 @@ -1297,9 +1297,9 @@ into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using g Configure oauth2_proxy values in the file oauth2-proxy.yaml with the values: -OAUTH2_PROXY_CLIENT_ID with the github <Client ID> +OAUTH2_PROXY_CLIENT_ID with the github <Client ID> -OAUTH2_PROXY_CLIENT_SECRET with the github <Client Secret> +OAUTH2_PROXY_CLIENT_SECRET with the github <Client Secret> OAUTH2_PROXY_COOKIE_SECRET with value of python -c 'import os,base64; print(base64.b64encode(os.urandom(16)).decode("ascii"))' @@ -1307,20 +1307,21 @@ into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using g Customize the contents of the file dashboard-ingress.yaml: -Replace __INGRESS_HOST__ with a valid FQDN and __INGRESS_SECRET__ with a Secret with a valid SSL certificate. +Replace __INGRESS_HOST__ with a valid FQDN and __INGRESS_SECRET__ with a Secret with a valid SSL certificate. Deploy the oauth2 proxy and the ingress rules running: -$ kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml - +$ kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml + -Test the oauth integration accessing the configured URL, like https://foo.bar.com +Test the oauth integration accessing the configured URL, like https://foo.bar.com + @@ -1375,9 +1376,9 @@ into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using g @@ -1387,7 +1388,7 @@ into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using g - + diff --git a/examples/customization/configuration-snippets/index.html b/examples/customization/configuration-snippets/index.html index 93261d5bd..289d164d9 100644 --- a/examples/customization/configuration-snippets/index.html +++ b/examples/customization/configuration-snippets/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1212,15 +1212,16 @@ Configuration Snippets ¶ Ingress ¶ The Ingress in this example adds a custom header to Nginx configuration that only applies to that specific Ingress. If you want to add headers that apply globally to all Ingresses, please have a look at this example. 
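The ingress.yaml applied below is not reproduced in this diff; a minimal sketch of such an Ingress using the configuration-snippet annotation might look like this (the header and host are illustrative):

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-configuration-snippet
  annotations:
    nginx.ingress.kubernetes.io/configuration-snippet: |
      more_set_headers "Request-Id: $req_id";
spec:
  rules:
  - host: custom.configuration.com
    http:
      paths:
      - backend:
          serviceName: http-svc
          servicePort: 80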
-$ kubectl apply -f ingress.yaml - +$ kubectl apply -f ingress.yaml + Test ¶ Check if the contents of the annotation are present in the nginx.conf file using: -kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf +kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf + @@ -1275,9 +1276,9 @@ @@ -1287,7 +1288,7 @@ - + diff --git a/examples/customization/custom-configuration/index.html b/examples/customization/custom-configuration/index.html index 101b098a7..1ffb204f9 100644 --- a/examples/customization/custom-configuration/index.html +++ b/examples/customization/custom-configuration/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1153,7 +1153,7 @@ Custom Configuration ¶ Using a ConfigMap is possible to customize the NGINX configuration For example, if we want to change the timeouts we need to create a ConfigMap: -$ cat configmap.yaml +$ cat configmap.yaml apiVersion: v1 data: proxy-connect-timeout: "10" @@ -1162,16 +1162,17 @@ data: kind: ConfigMap metadata: name: nginx-configuration - + -curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-configuration/configmap.yaml \ - | kubectl apply -f - - +curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-configuration/configmap.yaml \ + | kubectl apply -f - + If the Configmap it is updated, NGINX will be reloaded with the new configuration. + @@ -1226,9 +1227,9 @@ metadata: @@ -1238,7 +1239,7 @@ metadata: - + diff --git a/examples/customization/custom-errors/index.html b/examples/customization/custom-errors/index.html index 8b95dceed..1b33c4505 100644 --- a/examples/customization/custom-errors/index.html +++ b/examples/customization/custom-errors/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1226,49 +1226,49 @@ Custom Errors ¶ This example demonstrates how to use a custom backend to render custom error pages. Customized default backend ¶ -First, create the custom default-backend. It will be used by the Ingress controller later on. -$ kubectl create -f custom-default-backend.yaml +First, create the custom default-backend. It will be used by the Ingress controller later on. +$ kubectl create -f custom-default-backend.yaml service "nginx-errors" created deployment.apps "nginx-errors" created - + -This should have created a Deployment and a Service with the name nginx-errors. -$ kubectl get deploy,svc +This should have created a Deployment and a Service with the name nginx-errors. +$ kubectl get deploy,svc NAME DESIRED CURRENT READY AGE deployment.apps/nginx-errors 1 1 1 10s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/nginx-errors ClusterIP 10.0.0.12 <none> 80/TCP 10s - + Ingress controller configuration ¶ If you do not already have an instance of the NGINX Ingress controller running, deploy it according to the deployment guide, then follow these steps: -Edit the nginx-ingress-controller Deployment and set the value of the --default-backend flag to the name of the +Edit the nginx-ingress-controller Deployment and set the value of the --default-backend flag to the name of the newly created error backend. 
-Edit the nginx-configuration ConfigMap and create the key custom-http-errors with a value of 404,503. +Edit the nginx-configuration ConfigMap and create the key custom-http-errors with a value of 404,503. Take note of the IP address assigned to the NGINX Ingress controller Service. - $ kubectl get svc ingress-nginx + $ kubectl get svc ingress-nginx NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE ingress-nginx ClusterIP 10.0.0.13 <none> 80/TCP,443/TCP 10m - + Note -The ingress-nginx Service is of type ClusterIP in this example. This may vary depending on your environment. +The ingress-nginx Service is of type ClusterIP in this example. This may vary depending on your environment. Make sure you can use the Service to reach NGINX before proceeding with the rest of this example. Testing error pages ¶ Let us send a couple of HTTP requests using cURL and validate everything is working as expected. A request to the default backend returns a 404 error with a custom message: -$ curl -D- http://10.0.0.13/ +$ curl -D- http://10.0.0.13/ HTTP/1.1 404 Not Found Server: nginx/1.13.12 Date: Tue, 12 Jun 2018 19:11:24 GMT @@ -1277,10 +1277,10 @@ Transfer-Encoding: chunked Connection: keep-alive <span>The page you're looking for could not be found.</span> - + -A request with a custom Accept header returns the corresponding document type (JSON): -$ curl -D- -H 'Accept: application/json' http://10.0.0.13/ +A request with a custom Accept header returns the corresponding document type (JSON): +$ curl -D- -H 'Accept: application/json' http://10.0.0.13/ HTTP/1.1 404 Not Found Server: nginx/1.13.12 Date: Tue, 12 Jun 2018 19:12:36 GMT @@ -1290,13 +1290,14 @@ Connection: keep-alive Vary: Accept-Encoding { "message": "The page you're looking for could not be found" } - + To go further with this example, feel free to deploy your own applications and Ingress objects, and validate that the responses are still in the correct format when a backend returns 503 (eg. if you scale a Deployment down to 0 replica). + @@ -1351,9 +1352,9 @@ responses are still in the correct format when a backend returns 503 (eg. if you @@ -1363,7 +1364,7 @@ responses are still in the correct format when a backend returns 503 (eg. if you - + diff --git a/examples/customization/custom-headers/index.html b/examples/customization/custom-headers/index.html index 0f1e35542..dc86c2fb3 100644 --- a/examples/customization/custom-headers/index.html +++ b/examples/customization/custom-headers/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1199,21 +1199,22 @@ This example demonstrates configuration of the nginx ingress controller via a ConfigMap to pass a custom list of headers to the upstream server. -custom-headers.yaml defines a ConfigMap in the ingress-nginx namespace named custom-headers, holding several custom X-prefixed HTTP headers. -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml - +custom-headers.yaml defines a ConfigMap in the ingress-nginx namespace named custom-headers, holding several custom X-prefixed HTTP headers. +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml + -configmap.yaml defines a ConfigMap in the ingress-nginx namespace named nginx-configuration. 
This controls the global configuration of the ingress controller, and already exists in a standard installation. The key proxy-set-headers is set to cite the previously-created ingress-nginx/custom-headers ConfigMap. -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml - +configmap.yaml defines a ConfigMap in the ingress-nginx namespace named nginx-configuration. This controls the global configuration of the ingress controller, and already exists in a standard installation. The key proxy-set-headers is set to cite the previously-created ingress-nginx/custom-headers ConfigMap. +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml + -The nginx ingress controller will read the ingress-nginx/nginx-configuration ConfigMap, find the proxy-set-headers key, read HTTP headers from the ingress-nginx/custom-headers ConfigMap, and include those HTTP headers in all requests flowing from nginx to the backends. +The nginx ingress controller will read the ingress-nginx/nginx-configuration ConfigMap, find the proxy-set-headers key, read HTTP headers from the ingress-nginx/custom-headers ConfigMap, and include those HTTP headers in all requests flowing from nginx to the backends. Test ¶ Check the contents of the ConfigMaps are present in the nginx.conf file using: -kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf +kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf + @@ -1268,9 +1269,9 @@ server. @@ -1280,7 +1281,7 @@ server. - + diff --git a/examples/customization/external-auth-headers/index.html b/examples/customization/external-auth-headers/index.html index 563e25515..7b8140135 100644 --- a/examples/customization/external-auth-headers/index.html +++ b/examples/customization/external-auth-headers/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1156,8 +1156,8 @@ to backend service. Sample configuration includes: Sample authentication service producing several response headers -Authentication logic is based on HTTP header: requests with header User containing string internal are considered authenticated -After successful authentication service generates response headers UserID and UserRole +Authentication logic is based on HTTP header: requests with header User containing string internal are considered authenticated +After successful authentication service generates response headers UserID and UserRole Sample echo service displaying header information Two ingress objects pointing to echo service Public, which allows access from unauthenticated users @@ -1165,7 +1165,7 @@ to backend service. 
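The secure Ingress in this sample delegates authentication and forwards the auth service's response headers to the backend; a hedged sketch of the relevant annotations (the in-cluster auth URL is illustrative):

metadata:
  annotations:
    nginx.ingress.kubernetes.io/auth-url: http://demo-auth-service.default.svc.cluster.local
    nginx.ingress.kubernetes.io/auth-response-headers: UserID, UserRole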
You can deploy the controller as follows: -$ kubectl create -f deploy/ +$ kubectl create -f deploy/ deployment "demo-auth-service" created service "demo-auth-service" created ingress "demo-auth-service" created @@ -1183,10 +1183,10 @@ follows: NAME HOSTS ADDRESS PORTS AGE public-demo-echo-service public-demo-echo-service.kube.local 80 1m secure-demo-echo-service secure-demo-echo-service.kube.local 80 1m - + Test 1: public service with no auth header -$ curl -H 'Host: public-demo-echo-service.kube.local' -v 192.168.99.100 +$ curl -H 'Host: public-demo-echo-service.kube.local' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) @@ -1204,10 +1204,10 @@ follows: < * Connection #0 to host 192.168.99.100 left intact UserID: , UserRole: - + Test 2: secure service with no auth header -$ curl -H 'Host: secure-demo-echo-service.kube.local' -v 192.168.99.100 +$ curl -H 'Host: secure-demo-echo-service.kube.local' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) @@ -1231,10 +1231,10 @@ follows: </body> </html> * Connection #0 to host 192.168.99.100 left intact - + Test 3: public service with valid auth header -$ curl -H 'Host: public-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100 +$ curl -H 'Host: public-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) @@ -1253,10 +1253,10 @@ follows: < * Connection #0 to host 192.168.99.100 left intact UserID: 1443635317331776148, UserRole: admin - + Test 4: secure service with valid auth header -$ curl -H 'Host: secure-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100 +$ curl -H 'Host: secure-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100 * Rebuilt URL to: 192.168.99.100/ * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 80 (#0) @@ -1275,10 +1275,11 @@ follows: < * Connection #0 to host 192.168.99.100 left intact UserID: 605394647632969758, UserRole: admin - + + @@ -1333,9 +1334,9 @@ follows: @@ -1345,7 +1346,7 @@ follows: - + diff --git a/examples/customization/ssl-dh-param/index.html b/examples/customization/ssl-dh-param/index.html index 07d812b28..cd185cf34 100644 --- a/examples/customization/ssl-dh-param/index.html +++ b/examples/customization/ssl-dh-param/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1228,7 +1228,7 @@ use a ConfigMap to configure custom Diffie-Hellman parameters file to help with "Perfect Forward Secrecy". Custom configuration ¶ -$ cat configmap.yaml +$ cat configmap.yaml apiVersion: v1 data: ssl-dh-param: "ingress-nginx/lb-dhparam" @@ -1239,17 +1239,17 @@ use a ConfigMap to configure custom Diffie-Hellman parameters file to help with labels: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - + -$ kubectl create -f configmap.yaml - +$ kubectl create -f configmap.yaml + Custom DH parameters secret ¶ -$> openssl dhparam 1024 2> /dev/null | base64 +$> openssl dhparam 1024 2> /dev/null | base64 LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ... - + -$ cat ssl-dh-param.yaml +$ cat ssl-dh-param.yaml apiVersion: v1 data: dhparam.pem: "LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ..." 
@@ -1260,17 +1260,18 @@ use a ConfigMap to configure custom Diffie-Hellman parameters file to help with labels: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx - + -$ kubectl create -f ssl-dh-param.yaml - +$ kubectl create -f ssl-dh-param.yaml + Test ¶ Check the contents of the configmap is present in the nginx.conf file using: -kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf +kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf + @@ -1325,9 +1326,9 @@ use a ConfigMap to configure custom Diffie-Hellman parameters file to help with @@ -1337,7 +1338,7 @@ use a ConfigMap to configure custom Diffie-Hellman parameters file to help with - + diff --git a/examples/customization/sysctl/index.html b/examples/customization/sysctl/index.html index 585df06e7..cd3e3d6b0 100644 --- a/examples/customization/sysctl/index.html +++ b/examples/customization/sysctl/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1151,20 +1151,21 @@ Sysctl tuning ¶ -This example aims to demonstrate the use of an Init Container to adjust sysctl default values using kubectl patch -kubectl patch deployment -n ingress-nginx nginx-ingress-controller \ +This example aims to demonstrate the use of an Init Container to adjust sysctl default values using kubectl patch +kubectl patch deployment -n ingress-nginx nginx-ingress-controller \ --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/sysctl/patch.json)" - + Changes: -Backlog Queue setting net.core.somaxconn from 128 to 32768 -Ephemeral Ports setting net.ipv4.ip_local_port_range from 32768 60999 to 1024 65000 +Backlog Queue setting net.core.somaxconn from 128 to 32768 +Ephemeral Ports setting net.ipv4.ip_local_port_range from 32768 60999 to 1024 65000 In a post from the NGINX blog, it is possible to see an explanation for the changes. + @@ -1219,9 +1220,9 @@ @@ -1231,7 +1232,7 @@ - + diff --git a/examples/docker-registry/index.html b/examples/docker-registry/index.html index aba072e88..e8cd9daa2 100644 --- a/examples/docker-registry/index.html +++ b/examples/docker-registry/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1251,42 +1251,43 @@ This example demonstrates how to deploy a docker registry in the cluster and configure Ingress enable access from Internet Deployment ¶ First we deploy the docker registry in the cluster: -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/deployment.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/deployment.yaml + Important DO NOT RUN THIS IN PRODUCTION -This deployment uses emptyDir in the volumeMount which means the contents of the registry will be deleted when the pod dies. +This deployment uses emptyDir in the volumeMount which means the contents of the registry will be deleted when the pod dies. The next required step is creation of the ingress rules. 
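Before that, note that for anything beyond a demo the emptyDir volume above would typically be replaced with a PersistentVolumeClaim; a hedged sketch of the relevant pod spec fragment (the volume and claim names are illustrative, not part of the published manifests):

volumes:
- name: image-store
  persistentVolumeClaim:
    claimName: registry-storage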
To do this we have two options: with and without TLS Without TLS ¶ -Download and edit the yaml deployment replacing registry.<your domain> with a valid DNS name pointing to the ingress controller: -wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-without-tls.yaml - +Download and edit the yaml deployment replacing registry.<your domain> with a valid DNS name pointing to the ingress controller: +wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-without-tls.yaml + Important - Running a docker registry without TLS requires we configure our local docker daemon with the insecure registry flag. + Please check deploy a plain http registry With TLS ¶ -Download and edit the yaml deployment replacing registry.<your domain> with a valid DNS name pointing to the ingress controller: -wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-with-tls.yaml - +Download and edit the yaml deployment replacing registry.<your domain> with a valid DNS name pointing to the ingress controller: +wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-with-tls.yaml + Deploy kube lego use Let's Encrypt certificates or edit the ingress rule to use a secret with an existing SSL certificate. Testing ¶ To test the registry is working correctly we download a known image from docker hub, create a tag pointing to the new registry and upload the image: -docker pull ubuntu:16.04 +docker pull ubuntu:16.04 docker tag ubuntu:16.04 `registry.<your domain>/ubuntu:16.04` docker push `registry.<your domain>/ubuntu:16.04` - + -Please replace registry.<your domain> with your domain. +Please replace registry.<your domain> with your domain. + @@ -1341,9 +1342,9 @@ @@ -1353,7 +1354,7 @@ - + diff --git a/examples/grpc/index.html b/examples/grpc/index.html index b8c22f513..789251326 100644 --- a/examples/grpc/index.html +++ b/examples/grpc/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1295,9 +1295,9 @@ nginx controller. Prerequisites ¶ You have a kubernetes cluster running. -You have a domain name such as example.com that is configured to route +You have a domain name such as example.com that is configured to route traffic to the ingress controller. Replace references to - fortune-teller.stack.build (the domain name used in this example) to your + fortune-teller.stack.build (the domain name used in this example) to your own domain name (you're also responsible for provisioning an SSL certificate for the ingress). You have the nginx-ingress controller installed in typical fashion (must be @@ -1309,59 +1309,59 @@ nginx controller. fortune-teller application provided here as an example. -Step 1: kubernetes Deployment ¶ -$ kubectl create -f app.yaml - +Step 1: kubernetes Deployment ¶ +$ kubectl create -f app.yaml + This is a standard kubernetes deployment object. It is running a grpc service -listening on port 50051. +listening on port 50051. The sample application fortune-teller-app is a grpc server implemented in go. 
Here's the stripped-down implementation: -func main() { +func main() { grpcServer := grpc.NewServer() fortune.RegisterFortuneTellerServer(grpcServer, &FortuneTeller{}) lis, _ := net.Listen("tcp", ":50051") grpcServer.Serve(lis) } - + The takeaway is that we are not doing any TLS configuration on the server (as we are terminating TLS at the ingress level, grpc traffic will travel unencrypted inside the cluster and arrive "insecure"). For your own application you may or may not want to do this. If you prefer to forward encrypted traffic to your Pod and terminate TLS at the gRPC server -itself, add the ingress annotation nginx.ingress.kubernetes.io/backend-protocol: "GRPCS". -Step 2: the kubernetes Service ¶ -$ kubectl create -f svc.yaml - +itself, add the ingress annotation nginx.ingress.kubernetes.io/backend-protocol: "GRPCS". +Step 2: the kubernetes Service ¶ +$ kubectl create -f svc.yaml + Here we have a typical service. Nothing special, just routing traffic to the -backend application on port 50051. -Step 3: the kubernetes Ingress ¶ -$ kubectl create -f ingress.yaml - +backend application on port 50051. +Step 3: the kubernetes Ingress ¶ +$ kubectl create -f ingress.yaml + A few things to note: We've tagged the ingress with the annotation - nginx.ingress.kubernetes.io/backend-protocol: "GRPC". This is the magic + nginx.ingress.kubernetes.io/backend-protocol: "GRPC". This is the magic ingredient that sets up the appropriate nginx configuration to route http/2 traffic to our service. We're terminating TLS at the ingress and have configured an SSL certificate - fortune-teller.stack.build. The ingress matches traffic arriving as - https://fortune-teller.stack.build:443 and routes unencrypted messages to + fortune-teller.stack.build. The ingress matches traffic arriving as + https://fortune-teller.stack.build:443 and routes unencrypted messages to our kubernetes service. Step 4: test the connection ¶ Once we've applied our configuration to kubernetes, it's time to test that we can actually talk to the backend. To do this, we'll use the grpcurl utility: -$ grpcurl fortune-teller.stack.build:443 build.stack.fortune.FortuneTeller/Predict +$ grpcurl fortune-teller.stack.build:443 build.stack.fortune.FortuneTeller/Predict { "message": "Let us endeavor so to live that when we come to die even the undertaker will be sorry.\n\t\t-- Mark Twain, \"Pudd'nhead Wilson's Calendar\"" } - + Debugging Hints ¶ @@ -1369,7 +1369,7 @@ can actually talk to the backend. To do this, we'll use the Watch the logs for the nginx-ingress-controller (increasing verbosity as needed). Double-check your address and ports. -Set the GODEBUG=http2debug=2 environment variable to get detailed http/2 +Set the GODEBUG=http2debug=2 environment variable to get detailed http/2 logging on the client and/or server. Study RFC 7540 (http/2) https://tools.ietf.org/html/rfc7540. @@ -1381,18 +1381,19 @@ to help make it easier for your users to consume your API. Notes on using response/request streams ¶ -If your server does only response streaming and you expect a stream to be open longer than 60 seconds, you will have to change the grpc_read_timeout to acommodate for this. +If your server does only response streaming and you expect a stream to be open longer than 60 seconds, you will have to change the grpc_read_timeout to accommodate this. If your service does only request streaming and you expect a stream to be open longer than 60 seconds, you have to change the -grpc_send_timeout and the client_body_timeout.
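For illustration, a hedged sketch of raising all three timeouts at once through the server-snippet annotation discussed just below; the 1200s value is an arbitrary assumption, and the snippet simply emits the named nginx directives into the server block:

metadata:
  annotations:
    # assumes a TLS-terminating gRPC ingress as in this example
    nginx.ingress.kubernetes.io/server-snippet: |
      grpc_read_timeout 1200s;
      grpc_send_timeout 1200s;
      client_body_timeout 1200s;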
-If you do both response and request streaming with an open stream longer than 60 seconds, you have to change all three timeouts: grpc_read_timeout, grpc_send_timeout and client_body_timeout. +grpc_send_timeout and the client_body_timeout. +If you do both response and request streaming with an open stream longer than 60 seconds, you have to change all three timeouts: grpc_read_timeout, grpc_send_timeout and client_body_timeout. -Values for the timeouts must be specified as e.g. "1200s". +Values for the timeouts must be specified as e.g. "1200s". -On the most recent versions of nginx-ingress, changing these timeouts requires using the nginx.ingress.kubernetes.io/server-snippet annotation. There are plans for future releases to allow using the Kubernetes annotations to define each timeout seperately. +On the most recent versions of nginx-ingress, changing these timeouts requires using the nginx.ingress.kubernetes.io/server-snippet annotation. There are plans for future releases to allow using the Kubernetes annotations to define each timeout separately. + @@ -1447,9 +1448,9 @@ to help make it easier for your users to consume your API. @@ -1459,7 +1460,7 @@ to help make it easier for your users to consume your API. - + diff --git a/examples/index.html b/examples/index.html index beece6329..e426ce67e 100644 --- a/examples/index.html +++ b/examples/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1268,6 +1268,7 @@ Please review the prerequisites before trying them. + @@ -1322,9 +1323,9 @@ Please review the prerequisites before trying them. @@ -1334,7 +1335,7 @@ Please review the prerequisites before trying them. - + diff --git a/examples/multi-tls/index.html b/examples/multi-tls/index.html index 0a95b6873..3458b36a9 100644 --- a/examples/multi-tls/index.html +++ b/examples/multi-tls/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1156,7 +1156,7 @@ Create multi-tls.yaml This should generate a segment like: -$ kubectl exec -it nginx-ingress-controller-6vwd1 -- cat /etc/nginx/nginx.conf | grep "foo.bar.com" -B 7 -A 35 +$ kubectl exec -it nginx-ingress-controller-6vwd1 -- cat /etc/nginx/nginx.conf | grep "foo.bar.com" -B 7 -A 35 server { listen 80; listen 443 ssl http2; @@ -1198,9 +1198,9 @@ proxy_pass http://default-http-svc-80; } - + And you should be able to reach your nginx service or http-svc service using a hostname switch: -$ kubectl get ing +$ kubectl get ing NAME RULE BACKEND ADDRESS AGE foo-tls - 104.154.30.67 13m foo.bar.com @@ -1237,10 +1237,11 @@ $ curl 104.154.30.67 default backend - 404 - + + @@ -1295,9 +1296,9 @@ @@ -1307,7 +1308,7 @@ - + diff --git a/examples/psp/index.html b/examples/psp/index.html index 9e1fbfc85..0ba8ab905 100644 --- a/examples/psp/index.html +++ b/examples/psp/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1157,8 +1157,8 @@ Kubernetes however provides a more fine-grained authorization policy called If you have PSP enabled on the cluster and you deploy ingress-nginx, you will need to provide the Deployment with the permissions to create pods.
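Concretely, "permissions to create pods" here means letting the controller's ServiceAccount use the policy. A minimal hedged sketch of the RBAC involved (all names are assumptions; the upstream psp.yaml applied next bundles the real objects):

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: ingress-nginx-psp            # hypothetical name
  namespace: ingress-nginx
rules:
  - apiGroups: ["policy"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["ingress-nginx"]  # hypothetical PSP name
    verbs: ["use"]

plus a matching RoleBinding from this Role to the controller's ServiceAccount.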
Before applying any objects, first apply the PSP permissions by running: -kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/psp/psp.yaml - +kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/psp/psp.yaml + Now that the pod security policy is applied, we can continue as usual by applying the mandatory.yaml according to the Installation Guide. @@ -1168,6 +1168,7 @@ only after deleting them and reapplying mandatory.yaml. + @@ -1208,9 +1209,9 @@ only after deleting them and reapplying mandatory.yaml. @@ -1220,7 +1221,7 @@ only after deleting them and reapplying mandatory.yaml. - + diff --git a/examples/rewrite/index.html b/examples/rewrite/index.html index d9b590faa..c370ddf13 100644 --- a/examples/rewrite/index.html +++ b/examples/rewrite/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1309,14 +1309,14 @@ and that you have an ingress controller running in y Rewrite Target ¶ Attention -Starting in Version 0.22.0, ingress definitions using the annotation nginx.ingress.kubernetes.io/rewrite-target are not backwards compatible with previous versions. In Version 0.22.0 and beyond, any substrings within the request URI that need to be passed to the rewritten path must explicitly be defined in a capture group. +Starting in Version 0.22.0, ingress definitions using the annotation nginx.ingress.kubernetes.io/rewrite-target are not backwards compatible with previous versions. In Version 0.22.0 and beyond, any substrings within the request URI that need to be passed to the rewritten path must explicitly be defined in a capture group. Note -Captured groups are saved in numbered placeholders, chronologically, in the form $1, $2 ... $n. These placeholders can be used as parameters in the rewrite-target annotation. +Captured groups are saved in numbered placeholders, chronologically, in the form $1, $2 ... $n. These placeholders can be used as parameters in the rewrite-target annotation. Create an Ingress rule with a rewrite annotation: -$ echo ' +$ echo ' apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: @@ -1334,18 +1334,18 @@ and that you have an ingress controller running in y servicePort: 80 path: /something(/|$)(.*) ' | kubectl create -f - - + -In this ingress definition, any characters captured by (.*) will be assigned to the placeholder $2, which is then used as a parameter in the rewrite-target annotation. +In this ingress definition, any characters captured by (.*) will be assigned to the placeholder $2, which is then used as a parameter in the rewrite-target annotation. 
For example, the ingress definition above will result in the following rewrites: -rewrite.bar.com/something rewrites to rewrite.bar.com/ -rewrite.bar.com/something/ rewrites to rewrite.bar.com/ -rewrite.bar.com/something/new rewrites to rewrite.bar.com/new +rewrite.bar.com/something rewrites to rewrite.bar.com/ +rewrite.bar.com/something/ rewrites to rewrite.bar.com/ +rewrite.bar.com/something/new rewrites to rewrite.bar.com/new App Root ¶ Create an Ingress rule with an app-root annotation: -$ echo " +$ echo " apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: @@ -1363,9 +1363,9 @@ and that you have an ingress controller running in y servicePort: 80 path: / " | kubectl create -f - - + Check that the rewrite is working -$ curl -I -k http://approot.bar.com/ +$ curl -I -k http://approot.bar.com/ HTTP/1.1 302 Moved Temporarily Server: nginx/1.11.10 Date: Mon, 13 Mar 2017 14:57:15 GMT @@ -1373,10 +1373,11 @@ Content-Type: text/html Content-Length: 162 Location: http://stickyingress.example.com/app1 Connection: keep-alive - + + @@ -1431,9 +1432,9 @@ Connection: keep-alive @@ -1443,7 +1444,7 @@ Connection: keep-alive - + diff --git a/examples/static-ip/index.html b/examples/static-ip/index.html index 383c3dd75..971bac511 100644 --- a/examples/static-ip/index.html +++ b/examples/static-ip/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1262,27 +1262,27 @@ by default nginx Ingresses will only get static IPs if your cloudprovider supports static IP assignments to nodes. On GKE/GCE, for example, even though nodes get static IPs, the IPs are not retained across upgrades. To acquire a static IP for the nginx ingress controller, simply put it -behind a Service of Type=LoadBalancer. +behind a Service of Type=LoadBalancer. First, create a loadbalancer Service and wait for it to acquire an IP: -$ kubectl create -f static-ip-svc.yaml +$ kubectl create -f static-ip-svc.yaml service "nginx-ingress-lb" created $ kubectl get svc nginx-ingress-lb NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE nginx-ingress-lb 10.0.138.113 104.154.109.191 80:31457/TCP,443:32240/TCP 15m - + Then, update the ingress controller so it adopts the static IP of the Service -by passing the --publish-service flag (the example yaml used in the next step +by passing the --publish-service flag (the example yaml used in the next step already has it set to "nginx-ingress-lb"). -$ kubectl create -f nginx-ingress-controller.yaml +$ kubectl create -f nginx-ingress-controller.yaml deployment "nginx-ingress-controller" created - + Assigning the IP to an Ingress ¶ -From here on every Ingress created with the ingress.class annotation set to -nginx will get the IP allocated in the previous step -$ kubectl create -f nginx-ingress.yaml +From here on, every Ingress created with the ingress.class annotation set to +nginx will get the IP allocated in the previous step: +$ kubectl create -f nginx-ingress.yaml
$ kubectl get ing nginx-ingress NAME HOSTS ADDRESS PORTS AGE nginx-ingress * 104.154.109.191 80, 443 13m - + Note that unlike the GCE Ingress, the same load balancer IP is shared amongst all @@ -1320,14 +1320,14 @@ controllers. Promote ephemeral to static IP ¶ To promote the allocated IP to static, you can update the Service manifest: -$ kubectl patch svc nginx-ingress-lb -p '{"spec": {"loadBalancerIP": "104.154.109.191"}}' +$ kubectl patch svc nginx-ingress-lb -p '{"spec": {"loadBalancerIP": "104.154.109.191"}}' "nginx-ingress-lb" patched - + and promote the IP to static (promotion works differently across cloud providers; the example provided is for GKE/GCE): -$ gcloud compute addresses create nginx-ingress-lb --addresses 104.154.109.191 --region us-central1 +$ gcloud compute addresses create nginx-ingress-lb --addresses 104.154.109.191 --region us-central1 Created [https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb]. --- address: 104.154.109.191 @@ -1341,12 +1341,13 @@ provided example is for GKE/GCE) status: IN_USE users: - us-central1/forwardingRules/a09f6913ae80e11e6a8c542010af0000 - + Now even if the Service is deleted, the IP will persist, so you can recreate the -Service with spec.loadBalancerIP set to 104.154.109.191. +Service with spec.loadBalancerIP set to 104.154.109.191. + @@ -1401,9 +1402,9 @@ Service with spec.loadBalancerIP set to powered by - MkDocs + MkDocs and - + Material for MkDocs @@ -1413,7 +1414,7 @@ Service with spec.loadBalancerIP set to - + diff --git a/examples/tls-termination/index.html b/examples/tls-termination/index.html index 7710b5fd5..546263674 100644 --- a/examples/tls-termination/index.html +++ b/examples/tls-termination/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1226,8 +1226,8 @@ Prerequisites ¶ You need a TLS cert and a test HTTP service for this example. Deployment ¶ -Create a values.yaml file. -apiVersion: networking.k8s.io/v1beta1 +Create an ingress.yaml file. +apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: name: nginx-test @@ -1247,16 +1247,16 @@ # This assumes http-svc exists and routes to healthy endpoints serviceName: http-svc servicePort: 80 - + The following command instructs the controller to terminate traffic using the provided TLS cert, and forward un-encrypted HTTP traffic to the test HTTP service. -kubectl apply -f ingress.yaml - +kubectl apply -f ingress.yaml + Validation ¶ You can confirm that the Ingress works. -$ kubectl describe ing nginx-test +$ kubectl describe ing nginx-test Name: nginx-test Namespace: default Address: 104.198.183.6 @@ -1303,10 +1303,11 @@ TLS cert, and forward un-encrypted HTTP traffic to the test HTTP service. x-forwarded-for=104.132.0.80, 35.186.221.137 x-forwarded-proto=https BODY: - + + @@ -1361,9 +1362,9 @@ TLS cert, and forward un-encrypted HTTP traffic to the test HTTP service. @@ -1373,7 +1374,7 @@ TLS cert, and forward un-encrypted HTTP traffic to the test HTTP service.
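As a reminder of where the prerequisite TLS cert above comes from: the Ingress references a TLS secret, which you would typically create from an existing key/certificate pair along these lines (the secret and file names are assumptions that must match the tls section of your manifest):

$ kubectl create secret tls tls-secret --key tls.key --cert tls.crt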
- + diff --git a/how-it-works/index.html b/how-it-works/index.html index a19c7855e..d00e28ac4 100644 --- a/how-it-works/index.html +++ b/how-it-works/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1292,10 +1292,10 @@ How it works ¶ The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one. NGINX configuration ¶ -The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. Though it is important to note that we don't reload Nginx on changes that impact only an upstream configuration (i.e Endpoints change when you deploy your app). We use lua-nginx-module to achieve this. Check below to learn more about how it's done. +The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. It is important to note, though, that we don't reload Nginx on changes that impact only an upstream configuration (e.g., an Endpoints change when you deploy your app). We use lua-nginx-module to achieve this. Check below to learn more about how it's done. NGINX model ¶ Usually, a Kubernetes Controller utilizes the synchronization loop pattern to check if the desired state in the controller is updated or a change is required. For this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps to generate a point-in-time configuration file that reflects the state of the cluster. -To get these objects from the cluster, we use Kubernetes Informers, in particular, FilteredSharedInformer. These informers allow reacting to changes using callbacks when a new object is added, modified, or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore, on every change, we have to rebuild a new model from scratch based on the state of the cluster and compare it to the current model. If the new model equals the current one, then we avoid generating a new NGINX configuration and triggering a reload.
Otherwise, we check if the difference is only about Endpoints. If so, we then send the new list of Endpoints to a Lua handler running inside Nginx using an HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between the running and the new model involves more than just Endpoints, we create a new NGINX configuration based on the new model, replace the current model, and trigger a reload. One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions. The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template. Building the NGINX model ¶ @@ -1303,7 +1303,7 @@ Operations to build the model: -Order Ingress rules by CreationTimestamp field, i.e., old rules first. +Order Ingress rules by CreationTimestamp field, i.e., old rules first. If the same path for the same host is defined in more than one Ingress, the oldest rule wins. @@ -1325,7 +1325,7 @@ New Ingress Resource Created. TLS section is added to existing Ingress. -Change in Ingress annotations that impacts more than just upstream configuration. For instance load-balance annotation does not require a reload. +Change in Ingress annotations that impacts more than just upstream configuration. For instance, the load-balance annotation does not require a reload. A path is added/removed from an Ingress. An Ingress, Service, or Secret is removed. Some missing referenced object from the Ingress becomes available, like a Service or Secret. @@ -1334,15 +1334,16 @@ Avoiding reloads ¶ In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point would make no sense. This can change only if NGINX changes the way new configurations are read: basically, new changes do not replace worker processes. Avoiding reloads on Endpoints changes ¶ -On every endpoint change, the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then, for every request, Lua code running in the balancer_by_lua context detects which endpoints it should choose the upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this includes annotation changes that affect only upstream configuration in Nginx as well.
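To make the synchronization decision just described concrete, here is a hedged Go-style sketch; every type and helper is invented for illustration, and this is not the controller's actual code:

package main

import "reflect"

// Invented, minimal stand-ins for the controller's real structures.
type Model struct {
	Servers  []string // simplified: hosts, paths, TLS, annotations...
	Backends []string // simplified: the controller uses rich Backend objects
}

func buildModel() Model            { return Model{} } // rebuild from Ingresses, Services, Endpoints, Secrets, Configmaps
func postBackendsToLua(b []string) {}                 // HTTP POST to the Lua handler; no reload
func writeNginxConf(m Model)       {}                 // render nginx.conf from the Go template
func reloadNginx()                 {}

// sync mirrors the decision flow: skip, dynamic update, or full reload.
func sync(current Model) Model {
	newModel := buildModel()
	if reflect.DeepEqual(newModel, current) {
		return current // identical model: no new config, no reload
	}
	if reflect.DeepEqual(newModel.Servers, current.Servers) {
		postBackendsToLua(newModel.Backends) // only Endpoints changed
		return newModel
	}
	writeNginxConf(newModel) // more than Endpoints changed:
	reloadNginx()            // generate a new nginx.conf and reload
	return newModel
}

func main() { sync(Model{}) }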
In relatively big clusters with frequently deployed apps, this feature saves a significant number of Nginx reloads, which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing), and so on. Avoiding outage from wrong configuration ¶ -Because the ingress controller works using the synchronization loop pattern, it is applying the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, does not reload and hence no more ingresses will be taken into account. +Because the ingress controller works using the synchronization loop pattern, it applies the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, does not reload, and hence no further Ingresses will be taken into account. To prevent this situation from happening, the nginx ingress controller optionally exposes a validating admission webhook server to ensure the validity of incoming ingress objects. This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration and calls nginx to ensure the configuration has no syntax errors. + @@ -1397,9 +1398,9 @@ This webhook appends the incoming ingress objects to the list of ingresses, gene @@ -1409,7 +1410,7 @@ This webhook appends the incoming ingress objects to the list of ingresses, gene - + diff --git a/index.html b/index.html index f800d9138..c2ac90a9f 100644 --- a/index.html +++ b/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1202,6 +1202,7 @@ + @@ -1242,9 +1243,9 @@ @@ -1254,7 +1255,7 @@ - + diff --git a/kubectl-plugin/index.html b/kubectl-plugin/index.html index 85c81899f..0010e8b0b 100644 --- a/kubectl-plugin/index.html +++ b/kubectl-plugin/index.html @@ -34,7 +34,7 @@ - + @@ -42,7 +42,7 @@ - + @@ -53,12 +53,12 @@ - + - + @@ -114,7 +114,7 @@ - + Skip to content @@ -123,7 +123,7 @@ - + public @@ -154,7 +154,7 @@ - + @@ -1384,15 +1384,15 @@ Do not move it without providing redirects. The ingress-nginx kubectl plugin ¶ Installation ¶ Install krew, then run -kubectl krew install ingress-nginx - +kubectl krew install ingress-nginx + to install the plugin. Then run -kubectl ingress-nginx --help - +kubectl ingress-nginx --help + to make sure the plugin is properly installed and to get a list of commands: -kubectl ingress-nginx --help +kubectl ingress-nginx --help A kubectl plugin for inspecting your ingress-nginx deployments Usage: @@ -1430,29 +1430,29 @@ Do not move it without providing redirects. --user string The name of the kubeconfig user to use Use "ingress-nginx [command] --help" for more information about a command. - + -If a new ingress-nginx version has just been released, the plugin may not yet have been updated inside the repository.
In that case, you can install the latest version of the plugin by running: +( set -x; cd "$(mktemp -d)" && curl -fsSLO "https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}" && kubectl krew install \ --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz ) - + -Replacing 0.24.0 with the recently released version. +Replace 0.24.0 with the most recently released version. Common Flags ¶ -Every subcommand supports the basic kubectl configuration flags like --namespace, --context, --client-key and so on. -Subcommands that act on a particular ingress-nginx pod (backends, certs, conf, exec, general, logs, ssh), support the --deployment <deployment> and --pod <pod> flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller. -Subcommands that inspect resources (ingresses, lint) support the --all-namespaces flag, which causes them to inspect resources in every namespace. +Every subcommand supports the basic kubectl configuration flags like --namespace, --context, --client-key and so on. +Subcommands that act on a particular ingress-nginx pod (backends, certs, conf, exec, general, logs, ssh) support the --deployment <deployment> and --pod <pod> flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller. +Subcommands that inspect resources (ingresses, lint) support the --all-namespaces flag, which causes them to inspect resources in every namespace. Subcommands ¶ -Note that backends, general, certs, and conf require ingress-nginx version 0.23.0 or higher. +Note that backends, general, certs, and conf require ingress-nginx version 0.23.0 or higher. backends ¶ -Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about: -$ kubectl ingress-nginx backends -n ingress-nginx +Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about: +$ kubectl ingress-nginx backends -n ingress-nginx [ { "name": "default-apple-service-5678", @@ -1513,13 +1513,13 @@ Do not move it without providing redirects. ... } ] - + -Add the --list option to show only the backend names. Add the --backend <backend> option to show only the backend with the given name. +Add the --list option to show only the backend names. Add the --backend <backend> option to show only the backend with the given name. certs ¶ -Use kubectl ingress-nginx certs --host <hostname> to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0). +Use kubectl ingress-nginx certs --host <hostname> to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0). WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere. -$ kubectl ingress-nginx certs -n ingress-nginx --host testaddr.local -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- @@ -1530,11 +1530,11 @@ Do not move it without providing redirects. -----BEGIN RSA PRIVATE KEY----- <REDACTED!
DO NOT SHARE THIS!> -----END RSA PRIVATE KEY----- - + conf ¶ -Use kubectl ingress-nginx conf to dump the generated nginx.conf file. Add the --host <hostname> option to view only the server block for that host: -kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local +Use kubectl ingress-nginx conf to dump the generated nginx.conf file. Add the --host <hostname> option to view only the server block for that host: +kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local server { server_name testaddr.local ; @@ -1556,11 +1556,11 @@ Do not move it without providing redirects. set $location_path "/"; ... - + exec ¶ -kubectl ingress-nginx exec is exactly the same as kubectl exec, with the same command flags. It will automatically choose an ingress-nginx pod to run the command in. -$ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx +kubectl ingress-nginx exec is exactly the same as kubectl exec, with the same command flags. It will automatically choose an ingress-nginx pod to run the command in. +$ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx fastcgi_params geoip lua @@ -1571,44 +1571,44 @@ Do not move it without providing redirects. opentracing.json owasp-modsecurity-crs template - + general ¶ -kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod. -$ kubectl ingress-nginx general -n ingress-nginx +kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod. +$ kubectl ingress-nginx general -n ingress-nginx { "controllerPodsCount": 1 } - + info ¶ -Shows the internal and external IP/CNAMES for an ingress-nginx service. -$ kubectl ingress-nginx info -n ingress-nginx +Shows the internal and external IP/CNAMES for an ingress-nginx service. +$ kubectl ingress-nginx info -n ingress-nginx Service cluster IP address: 10.187.253.31 LoadBalancer IP|CNAME: 35.123.123.123 - + -Use the --service <service> flag if your ingress-nginx LoadBalancer service is not named ingress-nginx. +Use the --service <service> flag if your ingress-nginx LoadBalancer service is not named ingress-nginx. ingresses ¶ -kubectl ingress-nginx ingresses, alternately kubectl ingress-nginx ing, shows a more detailed view of the ingress definitions in a namespace. Compare: -$ kubectl get ingresses --all-namespaces +kubectl ingress-nginx ingresses, alternately kubectl ingress-nginx ing, shows a more detailed view of the ingress definitions in a namespace. Compare: +$ kubectl get ingresses --all-namespaces NAMESPACE NAME HOSTS ADDRESS PORTS AGE default example-ingress1 testaddr.local,testaddr2.local localhost 80 5d default test-ingress-2 * localhost 80 5d - + vs -$ kubectl ingress-nginx ingresses --all-namespaces +$ kubectl ingress-nginx ingresses --all-namespaces NAMESPACE INGRESS NAME HOST+PATH ADDRESSES TLS SERVICE SERVICE PORT ENDPOINTS default example-ingress1 testaddr.local/etameta localhost NO pear-service 5678 5 default example-ingress1 testaddr2.local/otherpath localhost NO apple-service 5678 1 default example-ingress1 testaddr2.local/otherotherpath localhost NO pear-service 5678 5 default test-ingress-2 * localhost NO echo-service 8080 2 - + lint ¶ -kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions. 
-$ kubectl ingress-nginx lint --all-namespaces --verbose +kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions. +$ kubectl ingress-nginx lint --all-namespaces --verbose Checking ingresses... ✗ anamespace/this-nginx - Contains the removed session-cookie-hash annotation. @@ -1627,10 +1627,10 @@ Do not move it without providing redirects. - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 - + -to show the lints added only for a particular ingress-nginx release, use the --from-version and --to-version flags: -$ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0 +to show the lints added only for a particular ingress-nginx release, use the --from-version and --to-version flags: +$ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0 Checking ingresses... ✗ anamespace/this-nginx - Contains the removed session-cookie-hash annotation. @@ -1642,11 +1642,11 @@ Do not move it without providing redirects. - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 - + logs ¶ -kubectl ingress-nginx logs is almost the same as kubectl logs, with fewer flags. It will automatically choose an ingress-nginx pod to read logs from. -$ kubectl ingress-nginx logs -n ingress-nginx +kubectl ingress-nginx logs is almost the same as kubectl logs, with fewer flags. It will automatically choose an ingress-nginx pod to read logs from. +$ kubectl ingress-nginx logs -n ingress-nginx ------------------------------------------------------------------------------- NGINX Ingress controller Release: dev @@ -1662,16 +1662,17 @@ Do not move it without providing redirects. I0405 16:53:46.183359 7 nginx.go:265] Starting NGINX Ingress controller I0405 16:53:46.193913 7 event.go:209] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"udp-services", UID:"82258915-563e-11e9-9c52-025000000001", APIVersion:"v1", ResourceVersion:"494", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services ... - + ssh ¶ -kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash. Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container. -$ kubectl ingress-nginx ssh -n ingress-nginx +kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash. Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container. +$ kubectl ingress-nginx ssh -n ingress-nginx www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$ - + + @@ -1726,9 +1727,9 @@ Do not move it without providing redirects. @@ -1738,7 +1739,7 @@ Do not move it without providing redirects. - + diff --git a/search/search_index.json b/search/search_index.json index 573e83c48..1340c42d6 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Welcome \u00b6 This is the documentation for the NGINX Ingress Controller. It is built around the Kubernetes Ingress resource , using a ConfigMap to store the NGINX configuration. Learn more about using Ingress on k8s.io . 
Getting Started \u00b6 See Deployment for a whirlwind tour that will get you started.","title":"Welcome"},{"location":"#welcome","text":"This is the documentation for the NGINX Ingress Controller. It is built around the Kubernetes Ingress resource , using a ConfigMap to store the NGINX configuration. Learn more about using Ingress on k8s.io .","title":"Welcome"},{"location":"#getting-started","text":"See Deployment for a whirlwind tour that will get you started.","title":"Getting Started"},{"location":"development/","text":"Developing for NGINX Ingress Controller \u00b6 This document explains how to get started with developing for NGINX Ingress controller. It includes how to build, test, and release ingress controllers. Quick Start \u00b6 Getting the code \u00b6 The code must be checked out as a subdirectory of k8s.io, and not github.com. mkdir -p $GOPATH/src/k8s.io cd $GOPATH/src/k8s.io # Replace \"$YOUR_GITHUB_USERNAME\" below with your github username git clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git cd ingress-nginx Initial developer environment build \u00b6 Prequisites : Minikube must be installed. See releases for installation instructions. If you are using MacOS and deploying to minikube , the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx : $ make dev-env Updating the deployment \u00b6 The nginx controller container image can be rebuilt using: $ ARCH = amd64 TAG = dev REGISTRY = $USER /ingress-controller make build container The image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up: $ kubectl get pods -n ingress-nginx $ kubectl delete pod -n ingress-nginx nginx-ingress-controller- Dependencies \u00b6 The build uses dependencies in the vendor directory, which must be installed before building a binary/image. Occasionally, you might need to update the dependencies. This guide requires you to install go 1.13 or newer. This will automatically save the dependencies to the vendor/ directory. $ go get $ make dep-ensure Building \u00b6 All ingress controllers are built through a Makefile. Depending on your requirements you can build a raw server binary, a local container image, or push an image to a remote repository. In order to use your local Docker, you may need to set the following environment variables: # \"gcloud docker\" ( default ) or \"docker\" $ export DOCKER = # \"quay.io/kubernetes-ingress-controller\" ( default ) , \"index.docker.io\" , or your own registry $ export REGISTRY = To find the registry simply run: docker system info | grep Registry Building the e2e test image \u00b6 The e2e test image can also be built through the Makefile. $ make e2e-test-image You can then make this image available on your minikube host by exporting the image and loading it with the minikube docker context: $ docker save nginx-ingress-controller:e2e | ( eval $( minikube docker-env ) && docker load ) Nginx Controller \u00b6 Build a raw server binary $ make build TODO : add more specific instructions needed for raw server binary. Build a local container image $ TAG = REGISTRY = $USER /ingress-controller make container Push the container image to a remote repository $ TAG = REGISTRY = $USER /ingress-controller make push Deploying \u00b6 There are several ways to deploy the ingress controller onto a cluster. 
Please check the deployment guide Testing \u00b6 To run unit-tests, just run $ cd $GOPATH /src/k8s.io/ingress-nginx $ make test If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo. $ cd $GOPATH /src/k8s.io/ingress-nginx $ make e2e-test NOTE: if your e2e pod keeps hanging in an ImagePullBackoff, make sure you've made your e2e nginx-ingress-controller image available to minikube as explained in the Building the e2e test image section To run unit-tests for lua code locally, run: $ cd $GOPATH /src/k8s.io/ingress-nginx $ ./rootfs/etc/nginx/lua/test/up.sh $ make lua-test Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test . When creating a new test file it must follow the naming convention _test.lua or it will be ignored. Releasing \u00b6 All Makefiles will produce a release binary, as shown above. To publish this to a wider Kubernetes user base, push the image to a container registry, like gcr.io . All release images are hosted under gcr.io/google_containers and tagged according to a semver scheme. An example release might look like: $ make release Please follow these guidelines to cut a release: Update the release page with a short description of the major changes that correspond to a given image tag. Cut a release branch, if appropriate. Release branches follow the format of controller-release-version . Typically, pre-releases are cut from HEAD. All major feature work is done in HEAD. Specific bug fixes are cherry-picked into a release branch. If you're not confident about the stability of the code, tag it as alpha or beta. Typically, a release branch should have stable code.","title":"Development"},{"location":"development/#developing-for-nginx-ingress-controller","text":"This document explains how to get started with developing for NGINX Ingress controller. It includes how to build, test, and release ingress controllers.","title":"Developing for NGINX Ingress Controller"},{"location":"development/#quick-start","text":"","title":"Quick Start"},{"location":"development/#getting-the-code","text":"The code must be checked out as a subdirectory of k8s.io, and not github.com. mkdir -p $GOPATH/src/k8s.io cd $GOPATH/src/k8s.io # Replace \"$YOUR_GITHUB_USERNAME\" below with your github username git clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git cd ingress-nginx","title":"Getting the code"},{"location":"development/#initial-developer-environment-build","text":"Prequisites : Minikube must be installed. See releases for installation instructions. If you are using MacOS and deploying to minikube , the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx : $ make dev-env","title":"Initial developer environment build"},{"location":"development/#updating-the-deployment","text":"The nginx controller container image can be rebuilt using: $ ARCH = amd64 TAG = dev REGISTRY = $USER /ingress-controller make build container The image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up: $ kubectl get pods -n ingress-nginx $ kubectl delete pod -n ingress-nginx nginx-ingress-controller-","title":"Updating the deployment"},{"location":"development/#dependencies","text":"The build uses dependencies in the vendor directory, which must be installed before building a binary/image. Occasionally, you might need to update the dependencies. 
This guide requires you to install go 1.13 or newer. This will automatically save the dependencies to the vendor/ directory. $ go get $ make dep-ensure","title":"Dependencies"},{"location":"development/#building","text":"All ingress controllers are built through a Makefile. Depending on your requirements you can build a raw server binary, a local container image, or push an image to a remote repository. In order to use your local Docker, you may need to set the following environment variables: # \"gcloud docker\" ( default ) or \"docker\" $ export DOCKER = # \"quay.io/kubernetes-ingress-controller\" ( default ) , \"index.docker.io\" , or your own registry $ export REGISTRY = To find the registry simply run: docker system info | grep Registry","title":"Building"},{"location":"development/#building-the-e2e-test-image","text":"The e2e test image can also be built through the Makefile. $ make e2e-test-image You can then make this image available on your minikube host by exporting the image and loading it with the minikube docker context: $ docker save nginx-ingress-controller:e2e | ( eval $( minikube docker-env ) && docker load )","title":"Building the e2e test image"},{"location":"development/#nginx-controller","text":"Build a raw server binary $ make build TODO : add more specific instructions needed for raw server binary. Build a local container image $ TAG = REGISTRY = $USER /ingress-controller make container Push the container image to a remote repository $ TAG = REGISTRY = $USER /ingress-controller make push","title":"Nginx Controller"},{"location":"development/#deploying","text":"There are several ways to deploy the ingress controller onto a cluster. Please check the deployment guide","title":"Deploying"},{"location":"development/#testing","text":"To run unit-tests, just run $ cd $GOPATH /src/k8s.io/ingress-nginx $ make test If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo. $ cd $GOPATH /src/k8s.io/ingress-nginx $ make e2e-test NOTE: if your e2e pod keeps hanging in an ImagePullBackoff, make sure you've made your e2e nginx-ingress-controller image available to minikube as explained in the Building the e2e test image section To run unit-tests for lua code locally, run: $ cd $GOPATH /src/k8s.io/ingress-nginx $ ./rootfs/etc/nginx/lua/test/up.sh $ make lua-test Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test . When creating a new test file it must follow the naming convention _test.lua or it will be ignored.","title":"Testing"},{"location":"development/#releasing","text":"All Makefiles will produce a release binary, as shown above. To publish this to a wider Kubernetes user base, push the image to a container registry, like gcr.io . All release images are hosted under gcr.io/google_containers and tagged according to a semver scheme. An example release might look like: $ make release Please follow these guidelines to cut a release: Update the release page with a short description of the major changes that correspond to a given image tag. Cut a release branch, if appropriate. Release branches follow the format of controller-release-version . Typically, pre-releases are cut from HEAD. All major feature work is done in HEAD. Specific bug fixes are cherry-picked into a release branch. If you're not confident about the stability of the code, tag it as alpha or beta. 
Typically, a release branch should have stable code.","title":"Releasing"},{"location":"how-it-works/","text":"How it works \u00b6 The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one. NGINX configuration \u00b6 The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. Though it is important to note that we don't reload Nginx on changes that impact only an upstream configuration (i.e Endpoints change when you deploy your app) . We use lua-nginx-module to achieve this. Check below to learn more about how it's done. NGINX model \u00b6 Usually, a Kubernetes Controller utilizes the synchronization loop pattern to check if the desired state in the controller is updated or a change is required. To this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps to generate a point in time configuration file that reflects the state of the cluster. To get this object from the cluster, we use Kubernetes Informers , in particular, FilteredSharedInformer . This informers allows reacting to changes in using callbacks to individual changes when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of cluster and compare it to the current model. If the new model equals to the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so we then send the new list of Endpoints to a Lua handler running inside Nginx using HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between running and new model is about more than just Endpoints we create a new NGINX configuration based on the new model, replace the current model and trigger a reload. One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions. The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template. Building the NGINX model \u00b6 Building a model is an expensive operation, for this reason, the use of the synchronization loop is a must. By using a work queue it is possible to not lose changes and remove the use of sync.Mutex to force a single execution of the sync loop and additionally it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. It is important to understand that any change in the cluster could generate events that the informer will send to the controller and one of the reasons for the work queue . Operations to build the model: Order Ingress rules by CreationTimestamp field, i.e., old rules first. If the same path for the same host is defined in more than one Ingress, the oldest rule wins. If more than one Ingress contains a TLS section for the same host, the oldest rule wins. If multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins. 
Create a list of NGINX Servers (per hostname) Create a list of NGINX Upstreams If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Annotations are applied to all the paths in the Ingress. Multiple Ingresses can define different annotations. These definitions are not shared between Ingresses. When a reload is required \u00b6 The next list describes the scenarios when a reload is required: New Ingress Resource Created. TLS section is added to existing Ingress. Change in Ingress annotations that impacts more than just upstream configuration. For instance load-balance annotation does not require a reload. A path is added/removed from an Ingress. An Ingress, Service, Secret is removed. Some missing referenced object from the Ingress is available, like a Service or Secret. A Secret is updated. Avoiding reloads \u00b6 In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point makes no sense. This can change only if NGINX changes the way new configurations are read, basically, new changes do not replace worker processes. Avoiding reloads on Endpoints changes \u00b6 On every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then for every request Lua code running in balancer_by_lua context detects what endpoints it should choose upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this includes annotation changes that affects only upstream configuration in Nginx as well. In a relatively big clusters with frequently deploying apps this feature saves significant number of Nginx reloads which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing) and so on. Avoiding outage from wrong configuration \u00b6 Because the ingress controller works using the synchronization loop pattern , it is applying the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, does not reload and hence no more ingresses will be taken into account. To prevent this situation to happen, the nginx ingress controller optionally exposes a validating admission webhook server to ensure the validity of incoming ingress objects. This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration and calls nginx to ensure the configuration has no syntax errors.","title":"How it works"},{"location":"how-it-works/#how-it-works","text":"The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one.","title":"How it works"},{"location":"how-it-works/#nginx-configuration","text":"The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). 
The main implication of this requirement is the need to reload NGINX after any change in the configuration file. Though it is important to note that we don't reload Nginx on changes that impact only an upstream configuration (i.e Endpoints change when you deploy your app) . We use lua-nginx-module to achieve this. Check below to learn more about how it's done.","title":"NGINX configuration"},{"location":"how-it-works/#nginx-model","text":"Usually, a Kubernetes Controller utilizes the synchronization loop pattern to check if the desired state in the controller is updated or a change is required. To this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps to generate a point in time configuration file that reflects the state of the cluster. To get this object from the cluster, we use Kubernetes Informers , in particular, FilteredSharedInformer . This informers allows reacting to changes in using callbacks to individual changes when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of cluster and compare it to the current model. If the new model equals to the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so we then send the new list of Endpoints to a Lua handler running inside Nginx using HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between running and new model is about more than just Endpoints we create a new NGINX configuration based on the new model, replace the current model and trigger a reload. One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions. The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template.","title":"NGINX model"},{"location":"how-it-works/#building-the-nginx-model","text":"Building a model is an expensive operation, for this reason, the use of the synchronization loop is a must. By using a work queue it is possible to not lose changes and remove the use of sync.Mutex to force a single execution of the sync loop and additionally it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. It is important to understand that any change in the cluster could generate events that the informer will send to the controller and one of the reasons for the work queue . Operations to build the model: Order Ingress rules by CreationTimestamp field, i.e., old rules first. If the same path for the same host is defined in more than one Ingress, the oldest rule wins. If more than one Ingress contains a TLS section for the same host, the oldest rule wins. If multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins. Create a list of NGINX Servers (per hostname) Create a list of NGINX Upstreams If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Annotations are applied to all the paths in the Ingress. 
Multiple Ingresses can define different annotations. These definitions are not shared between Ingresses.","title":"Building the NGINX model"},{"location":"how-it-works/#when-a-reload-is-required","text":"The next list describes the scenarios when a reload is required: A new Ingress Resource is created. A TLS section is added to an existing Ingress. A change in Ingress annotations that impacts more than just the upstream configuration; for instance, the load-balance annotation does not require a reload. A path is added/removed from an Ingress. An Ingress, Service, or Secret is removed. A previously missing object referenced by the Ingress becomes available, like a Service or Secret. A Secret is updated.","title":"When a reload is required"},{"location":"how-it-works/#avoiding-reloads","text":"In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point would make no sense. This can change only if NGINX changes the way new configurations are read, basically, so that new changes do not replace the worker processes.","title":"Avoiding reloads"},{"location":"how-it-works/#avoiding-reloads-on-endpoints-changes","text":"On every endpoint change, the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then, for every request, Lua code running in the balancer_by_lua context detects which endpoints it should choose an upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this also covers annotation changes that affect only the upstream configuration in Nginx. In relatively big clusters with frequently deployed apps, this feature saves a significant number of Nginx reloads, which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing) and so on.","title":"Avoiding reloads on Endpoints changes"},{"location":"how-it-works/#avoiding-outage-from-wrong-configuration","text":"Because the ingress controller works using the synchronization loop pattern , it applies the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, NGINX does not reload, and hence no further Ingresses will be taken into account. To prevent this situation from happening, the nginx ingress controller optionally exposes a validating admission webhook server to ensure the validity of incoming ingress objects. This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration and calls nginx to ensure the configuration has no syntax errors.","title":"Avoiding outage from wrong configuration"},{"location":"kubectl-plugin/","text":"The ingress-nginx kubectl plugin \u00b6 Installation \u00b6 Install krew , then run kubectl krew install ingress-nginx to install the plugin.
Then run kubectl ingress-nginx --help to make sure the plugin is properly installed and to get a list of commands: kubectl ingress-nginx --help A kubectl plugin for inspecting your ingress-nginx deployments Usage: ingress-nginx [command] Available Commands: backends Inspect the dynamic backend information of an ingress-nginx instance certs Output the certificate data stored in an ingress-nginx pod conf Inspect the generated nginx.conf exec Execute a command inside an ingress-nginx pod general Inspect the other dynamic ingress-nginx information help Help about any command info Show information about the ingress-nginx service ingresses Provide a short summary of all of the ingress definitions lint Inspect kubernetes resources for possible issues logs Get the kubernetes logs for an ingress-nginx pod ssh ssh into a running ingress-nginx pod Flags: --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. --cache-dir string Default HTTP cache directory (default \"/Users/alexkursell/.kube/http-cache\") --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use -h, --help help for ingress-nginx --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to the kubeconfig file to use for CLI requests. -n, --namespace string If present, the namespace scope for this CLI request --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\") -s, --server string The address and port of the Kubernetes API server --token string Bearer token for authentication to the API server --user string The name of the kubeconfig user to use Use \"ingress-nginx [command] --help\" for more information about a command. If a new ingress-nginx version has just been released, the plugin may not yet have been updated inside the repository. In that case, you can install the latest version of the plugin by running: ( set -x; cd \"$(mktemp -d)\" && curl -fsSLO \"https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}\" && kubectl krew install \\ --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz ) Replace 0.24.0 with the recently released version. Common Flags \u00b6 Every subcommand supports the basic kubectl configuration flags like --namespace , --context , --client-key and so on. Subcommands that act on a particular ingress-nginx pod ( backends , certs , conf , exec , general , logs , ssh ) support the --deployment and --pod flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller . Subcommands that inspect resources ( ingresses , lint ) support the --all-namespaces flag, which causes them to inspect resources in every namespace.
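For instance, the pod-scoped subcommands above can be pointed at a specific controller; the deployment and pod names here are illustrative:

```bash
# Pick a pod from a deployment that does not use the default name...
kubectl ingress-nginx conf -n ingress-nginx --deployment my-ingress-controller

# ...or target one pod explicitly.
kubectl ingress-nginx backends -n ingress-nginx --pod nginx-ingress-controller-7cbf77c976-wx5pn
```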
Subcommands \u00b6 Note that backends , general , certs , and conf require ingress-nginx version 0.23.0 or higher. backends \u00b6 Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about: $ kubectl ingress-nginx backends -n ingress-nginx [ { \"name\": \"default-apple-service-5678\", \"service\": { \"metadata\": { \"creationTimestamp\": null }, \"spec\": { \"ports\": [ { \"protocol\": \"TCP\", \"port\": 5678, \"targetPort\": 5678 } ], \"selector\": { \"app\": \"apple\" }, \"clusterIP\": \"10.97.230.121\", \"type\": \"ClusterIP\", \"sessionAffinity\": \"None\" }, \"status\": { \"loadBalancer\": {} } }, \"port\": 0, \"sslPassthrough\": false, \"endpoints\": [ { \"address\": \"10.1.3.86\", \"port\": \"5678\" } ], \"sessionAffinityConfig\": { \"name\": \"\", \"cookieSessionAffinity\": { \"name\": \"\" } }, \"upstreamHashByConfig\": { \"upstream-hash-by-subset-size\": 3 }, \"noServer\": false, \"trafficShapingPolicy\": { \"weight\": 0, \"header\": \"\", \"headerValue\": \"\", \"cookie\": \"\" } }, { \"name\": \"default-echo-service-8080\", ... }, { \"name\": \"upstream-default-backend\", ... } ] Add the --list option to show only the backend names. Add the --backend option to show only the backend with the given name. certs \u00b6 Use kubectl ingress-nginx certs --host to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0 ). WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere. $ kubectl ingress-nginx certs -n ingress-nginx --host testaddr.local -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN RSA PRIVATE KEY----- -----END RSA PRIVATE KEY----- conf \u00b6 Use kubectl ingress-nginx conf to dump the generated nginx.conf file. Add the --host option to view only the server block for that host: kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local server { server_name testaddr.local ; listen 80; set $proxy_upstream_name \"-\"; set $pass_access_scheme $scheme; set $pass_server_port $server_port; set $best_http_host $http_host; set $pass_port $pass_server_port; location / { set $namespace \"\"; set $ingress_name \"\"; set $service_name \"\"; set $service_port \"0\"; set $location_path \"/\"; ... exec \u00b6 kubectl ingress-nginx exec is exactly the same as kubectl exec , with the same command flags. It will automatically choose an ingress-nginx pod to run the command in. $ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx fastcgi_params geoip lua mime.types modsecurity modules nginx.conf opentracing.json owasp-modsecurity-crs template general \u00b6 kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod. $ kubectl ingress-nginx general -n ingress-nginx { \"controllerPodsCount\": 1 } info \u00b6 Shows the internal and external IP/CNAMES for an ingress-nginx service. $ kubectl ingress-nginx info -n ingress-nginx Service cluster IP address: 10.187.253.31 LoadBalancer IP|CNAME: 35.123.123.123 Use the --service flag if your ingress-nginx LoadBalancer service is not named ingress-nginx . ingresses \u00b6 kubectl ingress-nginx ingresses , alternatively kubectl ingress-nginx ing , shows a more detailed view of the ingress definitions in a namespace.
Compare: $ kubectl get ingresses --all-namespaces NAMESPACE NAME HOSTS ADDRESS PORTS AGE default example-ingress1 testaddr.local,testaddr2.local localhost 80 5d default test-ingress-2 * localhost 80 5d vs $ kubectl ingress-nginx ingresses --all-namespaces NAMESPACE INGRESS NAME HOST+PATH ADDRESSES TLS SERVICE SERVICE PORT ENDPOINTS default example-ingress1 testaddr.local/etameta localhost NO pear-service 5678 5 default example-ingress1 testaddr2.local/otherpath localhost NO apple-service 5678 1 default example-ingress1 testaddr2.local/otherotherpath localhost NO pear-service 5678 5 default test-ingress-2 * localhost NO echo-service 8080 2 lint \u00b6 kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions. $ kubectl ingress-nginx lint --all-namespaces --verbose Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 \u2717 othernamespace/ingress-definition-blah - The rewrite-target annotation value does not reference a capture group Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3174 Checking deployments... \u2717 namespace2/nginx-ingress-controller - Uses removed config flag --sort-backends Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3655 - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 To show only the lints added for a particular ingress-nginx release, use the --from-version and --to-version flags: $ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0 Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 Checking deployments... \u2717 namespace2/nginx-ingress-controller - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 logs \u00b6 kubectl ingress-nginx logs is almost the same as kubectl logs , with fewer flags. It will automatically choose an ingress-nginx pod to read logs from. $ kubectl ingress-nginx logs -n ingress-nginx ------------------------------------------------------------------------------- NGINX Ingress controller Release: dev Build: git-48dc3a867 Repository: git@github.com:kubernetes/ingress-nginx.git ------------------------------------------------------------------------------- W0405 16:53:46.061589 7 flags.go:214] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false) nginx version: nginx/1.15.9 W0405 16:53:46.070093 7 client_config.go:549] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.
I0405 16:53:46.070499 7 main.go:205] Creating API client for https://10.96.0.1:443 I0405 16:53:46.077784 7 main.go:249] Running in Kubernetes cluster version v1.10 (v1.10.11) - git (clean) commit 637c7e288581ee40ab4ca210618a89a555b6e7e9 - platform linux/amd64 I0405 16:53:46.183359 7 nginx.go:265] Starting NGINX Ingress controller I0405 16:53:46.193913 7 event.go:209] Event(v1.ObjectReference{Kind:\"ConfigMap\", Namespace:\"ingress-nginx\", Name:\"udp-services\", UID:\"82258915-563e-11e9-9c52-025000000001\", APIVersion:\"v1\", ResourceVersion:\"494\", FieldPath:\"\"}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services ... ssh \u00b6 kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash . Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container. $ kubectl ingress-nginx ssh -n ingress-nginx www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$","title":"kubectl plugin"},{"location":"kubectl-plugin/#the-ingress-nginx-kubectl-plugin","text":"","title":"The ingress-nginx kubectl plugin"},{"location":"kubectl-plugin/#installation","text":"Install krew , then run kubectl krew install ingress-nginx to install the plugin. Then run kubectl ingress-nginx --help to make sure the plugin is properly installed and to get a list of commands: kubectl ingress-nginx --help A kubectl plugin for inspecting your ingress-nginx deployments Usage: ingress-nginx [command] Available Commands: backends Inspect the dynamic backend information of an ingress-nginx instance certs Output the certificate data stored in an ingress-nginx pod conf Inspect the generated nginx.conf exec Execute a command inside an ingress-nginx pod general Inspect the other dynamic ingress-nginx information help Help about any command info Show information about the ingress-nginx service ingresses Provide a short summary of all of the ingress definitions lint Inspect kubernetes resources for possible issues logs Get the kubernetes logs for an ingress-nginx pod ssh ssh into a running ingress-nginx pod Flags: --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. --cache-dir string Default HTTP cache directory (default \"/Users/alexkursell/.kube/http-cache\") --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use -h, --help help for ingress-nginx --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to the kubeconfig file to use for CLI requests. -n, --namespace string If present, the namespace scope for this CLI request --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\") -s, --server string The address and port of the Kubernetes API server --token string Bearer token for authentication to the API server --user string The name of the kubeconfig user to use Use \"ingress-nginx [command] --help\" for more information about a command. 
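Besides running the plugin itself, both krew and kubectl can confirm that the plugin was installed correctly; a quick sketch:

```bash
# List the plugins krew manages; ingress-nginx should appear once installed.
kubectl krew list

# kubectl discovers plugins on the PATH as kubectl-* binaries.
kubectl plugin list | grep ingress_nginx
```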
If a new ingress-nginx version has just been released, the plugin may not yet have been updated inside the repository. In that case, you can install the latest version of the plugin by running: ( set -x; cd \"$(mktemp -d)\" && curl -fsSLO \"https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}\" && kubectl krew install \\ --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz ) Replace 0.24.0 with the recently released version.","title":"Installation"},{"location":"kubectl-plugin/#common-flags","text":"Every subcommand supports the basic kubectl configuration flags like --namespace , --context , --client-key and so on. Subcommands that act on a particular ingress-nginx pod ( backends , certs , conf , exec , general , logs , ssh ) support the --deployment and --pod flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller . Subcommands that inspect resources ( ingresses , lint ) support the --all-namespaces flag, which causes them to inspect resources in every namespace.","title":"Common Flags"},{"location":"kubectl-plugin/#subcommands","text":"Note that backends , general , certs , and conf require ingress-nginx version 0.23.0 or higher.","title":"Subcommands"},{"location":"kubectl-plugin/#backends","text":"Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about: $ kubectl ingress-nginx backends -n ingress-nginx [ { \"name\": \"default-apple-service-5678\", \"service\": { \"metadata\": { \"creationTimestamp\": null }, \"spec\": { \"ports\": [ { \"protocol\": \"TCP\", \"port\": 5678, \"targetPort\": 5678 } ], \"selector\": { \"app\": \"apple\" }, \"clusterIP\": \"10.97.230.121\", \"type\": \"ClusterIP\", \"sessionAffinity\": \"None\" }, \"status\": { \"loadBalancer\": {} } }, \"port\": 0, \"sslPassthrough\": false, \"endpoints\": [ { \"address\": \"10.1.3.86\", \"port\": \"5678\" } ], \"sessionAffinityConfig\": { \"name\": \"\", \"cookieSessionAffinity\": { \"name\": \"\" } }, \"upstreamHashByConfig\": { \"upstream-hash-by-subset-size\": 3 }, \"noServer\": false, \"trafficShapingPolicy\": { \"weight\": 0, \"header\": \"\", \"headerValue\": \"\", \"cookie\": \"\" } }, { \"name\": \"default-echo-service-8080\", ... }, { \"name\": \"upstream-default-backend\", ... } ] Add the --list option to show only the backend names. Add the --backend option to show only the backend with the given name.","title":"backends"},{"location":"kubectl-plugin/#certs","text":"Use kubectl ingress-nginx certs --host to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0 ). WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere. $ kubectl ingress-nginx certs -n ingress-nginx --host testaddr.local -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN RSA PRIVATE KEY----- -----END RSA PRIVATE KEY-----","title":"certs"},{"location":"kubectl-plugin/#conf","text":"Use kubectl ingress-nginx conf to dump the generated nginx.conf file.
Add the --host option to view only the server block for that host: kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local server { server_name testaddr.local ; listen 80; set $proxy_upstream_name \"-\"; set $pass_access_scheme $scheme; set $pass_server_port $server_port; set $best_http_host $http_host; set $pass_port $pass_server_port; location / { set $namespace \"\"; set $ingress_name \"\"; set $service_name \"\"; set $service_port \"0\"; set $location_path \"/\"; ...","title":"conf"},{"location":"kubectl-plugin/#exec","text":"kubectl ingress-nginx exec is exactly the same as kubectl exec , with the same command flags. It will automatically choose an ingress-nginx pod to run the command in. $ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx fastcgi_params geoip lua mime.types modsecurity modules nginx.conf opentracing.json owasp-modsecurity-crs template","title":"exec"},{"location":"kubectl-plugin/#general","text":"kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod. $ kubectl ingress-nginx general -n ingress-nginx { \"controllerPodsCount\": 1 }","title":"general"},{"location":"kubectl-plugin/#info","text":"Shows the internal and external IP/CNAMES for an ingress-nginx service. $ kubectl ingress-nginx info -n ingress-nginx Service cluster IP address: 10.187.253.31 LoadBalancer IP|CNAME: 35.123.123.123 Use the --service flag if your ingress-nginx LoadBalancer service is not named ingress-nginx .","title":"info"},{"location":"kubectl-plugin/#ingresses","text":"kubectl ingress-nginx ingresses , alternatively kubectl ingress-nginx ing , shows a more detailed view of the ingress definitions in a namespace. Compare: $ kubectl get ingresses --all-namespaces NAMESPACE NAME HOSTS ADDRESS PORTS AGE default example-ingress1 testaddr.local,testaddr2.local localhost 80 5d default test-ingress-2 * localhost 80 5d vs $ kubectl ingress-nginx ingresses --all-namespaces NAMESPACE INGRESS NAME HOST+PATH ADDRESSES TLS SERVICE SERVICE PORT ENDPOINTS default example-ingress1 testaddr.local/etameta localhost NO pear-service 5678 5 default example-ingress1 testaddr2.local/otherpath localhost NO apple-service 5678 1 default example-ingress1 testaddr2.local/otherotherpath localhost NO pear-service 5678 5 default test-ingress-2 * localhost NO echo-service 8080 2","title":"ingresses"},{"location":"kubectl-plugin/#lint","text":"kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions. $ kubectl ingress-nginx lint --all-namespaces --verbose Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 \u2717 othernamespace/ingress-definition-blah - The rewrite-target annotation value does not reference a capture group Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3174 Checking deployments...
\u2717 namespace2/nginx-ingress-controller - Uses removed config flag --sort-backends Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3655 - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 To show only the lints added for a particular ingress-nginx release, use the --from-version and --to-version flags: $ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0 Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 Checking deployments... \u2717 namespace2/nginx-ingress-controller - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808","title":"lint"},{"location":"kubectl-plugin/#logs","text":"kubectl ingress-nginx logs is almost the same as kubectl logs , with fewer flags. It will automatically choose an ingress-nginx pod to read logs from. $ kubectl ingress-nginx logs -n ingress-nginx ------------------------------------------------------------------------------- NGINX Ingress controller Release: dev Build: git-48dc3a867 Repository: git@github.com:kubernetes/ingress-nginx.git ------------------------------------------------------------------------------- W0405 16:53:46.061589 7 flags.go:214] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false) nginx version: nginx/1.15.9 W0405 16:53:46.070093 7 client_config.go:549] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. I0405 16:53:46.070499 7 main.go:205] Creating API client for https://10.96.0.1:443 I0405 16:53:46.077784 7 main.go:249] Running in Kubernetes cluster version v1.10 (v1.10.11) - git (clean) commit 637c7e288581ee40ab4ca210618a89a555b6e7e9 - platform linux/amd64 I0405 16:53:46.183359 7 nginx.go:265] Starting NGINX Ingress controller I0405 16:53:46.193913 7 event.go:209] Event(v1.ObjectReference{Kind:\"ConfigMap\", Namespace:\"ingress-nginx\", Name:\"udp-services\", UID:\"82258915-563e-11e9-9c52-025000000001\", APIVersion:\"v1\", ResourceVersion:\"494\", FieldPath:\"\"}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services ...","title":"logs"},{"location":"kubectl-plugin/#ssh","text":"kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash . Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container. $ kubectl ingress-nginx ssh -n ingress-nginx www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$","title":"ssh"},{"location":"troubleshooting/","text":"Troubleshooting \u00b6 Ingress-Controller Logs and Events \u00b6 There are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting methods to obtain more information.
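Before the checks below, it is often worth confirming that the controller itself is healthy; a small sketch (the namespace and label are the common defaults and may need adjusting):

```bash
# Confirm the controller pod is Running and note its restart count.
kubectl get pods -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx

# Recent events often surface scheduling or image-pull problems immediately.
kubectl get events -n ingress-nginx --sort-by=.metadata.creationTimestamp | tail -n 10
```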
Check the Ingress Resource Events $ kubectl get ing -n NAME HOSTS ADDRESS PORTS AGE cafe-ingress cafe.com 10.0.2.15 80 25s $ kubectl describe ing -n Name: cafe-ingress Namespace: default Address: 10.0.2.15 Default backend: default-http-backend:80 (172.17.0.5:8080) Rules: Host Path Backends ---- ---- -------- cafe.com /tea tea-svc:80 () /coffee coffee-svc:80 () Annotations: kubectl.kubernetes.io/last-applied-configuration: {\"apiVersion\":\"networking.k8s.io/v1beta1\",\"kind\":\"Ingress\",\"metadata\":{\"annotations\":{},\"name\":\"cafe-ingress\",\"namespace\":\"default\",\"selfLink\":\"/apis/networking/v1beta1/namespaces/default/ingresses/cafe-ingress\"},\"spec\":{\"rules\":[{\"host\":\"cafe.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"tea-svc\",\"servicePort\":80},\"path\":\"/tea\"},{\"backend\":{\"serviceName\":\"coffee-svc\",\"servicePort\":80},\"path\":\"/coffee\"}]}}]},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"169.48.142.110\"}]}}} Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal CREATE 1m nginx-ingress-controller Ingress default/cafe-ingress Normal UPDATE 58s nginx-ingress-controller Ingress default/cafe-ingress Check the Ingress Controller Logs $ kubectl get pods -n NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl logs -n nginx-ingress-controller-67956bf89d-fv58j ------------------------------------------------------------------------------- NGINX Ingress controller Release: 0.14.0 Build: git-734361d Repository: https://github.com/kubernetes/ingress-nginx ------------------------------------------------------------------------------- .... Check the Nginx Configuration $ kubectl get pods -n NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl exec -it -n nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf daemon off; worker_processes 2; pid /run/nginx.pid; worker_rlimit_nofile 523264; worker_shutdown_timeout 240s; events { multi_accept on; worker_connections 16384; use epoll; } http { .... Check if used Services Exist $ kubectl get svc --all-namespaces NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default coffee-svc ClusterIP 10.106.154.35 80/TCP 18m default kubernetes ClusterIP 10.96.0.1 443/TCP 30m default tea-svc ClusterIP 10.104.172.12 80/TCP 18m kube-system default-http-backend NodePort 10.108.189.236 80:30001/TCP 30m kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 30m kube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m Debug Logging \u00b6 Using the flag --v=XX it is possible to increase the level of logging. This is performed by editing the deployment. $ kubectl get deploy -n NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE default-http-backend 1 1 1 1 35m nginx-ingress-controller 1 1 1 1 35m $ kubectl edit deploy -n nginx-ingress-controller # Add --v=X to \"- args\" , where X is an integer --v=2 shows details using diff about the changes in the configuration in nginx --v=3 shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format --v=5 configures NGINX in debug mode Authentication to the Kubernetes API Server \u00b6 A number of components are involved in the authentication process and the first step is to narrow down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file.
Both authentications must work: +-------------+ service +------------+ | | authentication | | + apiserver +<-------------------+ ingress | | | | controller | +-------------+ +------------+ Service authentication The Ingress controller needs information from apiserver. Therefore, authentication is required, which can be achieved in two different ways: Service Account: This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details. Kubeconfig file: In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the --kubeconfig flag. The value of the flag is a path to a file specifying how to connect to the API server. Using --kubeconfig does not require the flag --apiserver-host . The format of the file is identical to ~/.kube/config which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. Using the flag --apiserver-host : With --apiserver-host=http://localhost:8080 it is possible to specify an unsecured API server or reach a remote kubernetes cluster using kubectl proxy . Please do not use this approach in production. In the diagram below you can see the full authentication flow with all options, starting with the browser on the lower left hand side. Kubernetes Workstation +---------------------------------------------------+ +------------------+ | | | | | +-----------+ apiserver +------------+ | | +------------+ | | | | proxy | | | | | | | | | apiserver | | ingress | | | | ingress | | | | | | controller | | | | controller | | | | | | | | | | | | | | | | | | | | | | | | | service account/ | | | | | | | | | | kubeconfig | | | | | | | | | +<-------------------+ | | | | | | | | | | | | | | | | | +------+----+ kubeconfig +------+-----+ | | +------+-----+ | | |<--------------------------------------------------------| | | | | | +---------------------------------------------------+ +------------------+ Service Account \u00b6 If using a service account to connect to the API server, the Ingress controller expects the file /var/run/secrets/kubernetes.io/serviceaccount/token to be present. It provides a secret token that is required to authenticate with the API server.
Verify with the following commands: # start a container that contains curl $ kubectl run test --image=tutum/curl -- sleep 10000 # check that container is running $ kubectl get pods NAME READY STATUS RESTARTS AGE test-701078429-s5kca 1/1 Running 0 16s # check if secret exists $ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ ca.crt namespace token # get service IP of master $ kubectl get services NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes 10.0.0.1 443/TCP 1d # check base connectivity from cluster inside $ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 Unauthorized # connect using tokens $ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) $ echo $TOKEN_VALUE eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H \"Authorization: Bearer $TOKEN_VALUE \" https://10.0.0.1 { \"paths\": [ \"/api\", \"/api/v1\", \"/apis\", \"/apis/apps\", \"/apis/apps/v1alpha1\", \"/apis/authentication.k8s.io\", \"/apis/authentication.k8s.io/v1beta1\", \"/apis/authorization.k8s.io\", \"/apis/authorization.k8s.io/v1beta1\", \"/apis/autoscaling\", \"/apis/autoscaling/v1\", \"/apis/batch\", \"/apis/batch/v1\", \"/apis/batch/v2alpha1\", \"/apis/certificates.k8s.io\", \"/apis/certificates.k8s.io/v1alpha1\", \"/apis/networking\", \"/apis/networking/v1beta1\", \"/apis/policy\", \"/apis/policy/v1alpha1\", \"/apis/rbac.authorization.k8s.io\", \"/apis/rbac.authorization.k8s.io/v1alpha1\", \"/apis/storage.k8s.io\", \"/apis/storage.k8s.io/v1beta1\", \"/healthz\", \"/healthz/ping\", \"/logs\", \"/metrics\", \"/swaggerapi/\", \"/ui/\", \"/version\" ] } If it is not working, there are two possible reasons: The contents of the tokens are invalid. Find the secret name with kubectl get secrets | grep service-account and delete it with kubectl delete secret . It will automatically be recreated. You have a non-standard Kubernetes installation and the file containing the token may not be present. The API server will mount a volume containing this file, but only if the API server is configured to use the ServiceAccount admission controller. If you experience this error, verify that your API server is using the ServiceAccount admission controller. If you are configuring the API server by hand, you can set this with the --admission-control parameter. Note that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers. More information: User Guide: Service Accounts Cluster Administrator Guide: Managing Service Accounts Kube-Config \u00b6 If you want to use a kubeconfig file for authentication, follow the deploy procedure and add the flag --kubeconfig=/etc/kubernetes/kubeconfig.yaml to the args section of the deployment. Using GDB with Nginx \u00b6 Gdb can be used with nginx to perform a configuration dump. This allows us to see which configuration is being used, as well as older configurations. Note: The below is based on the nginx documentation .
SSH into the worker $ ssh user@workerIP Obtain the Docker Container Running nginx $ docker ps | grep nginx-ingress-controller CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES d9e1d243156a quay.io/kubernetes-ingress-controller/nginx-ingress-controller \"/usr/bin/dumb-init \u2026\" 19 minutes ago Up 19 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0 Exec into the container $ docker exec -it --user=0 --privileged d9e1d243156a bash Make sure nginx is running with --with-debug $ nginx -V 2>&1 | grep -- '--with-debug' Get list of processes running on container $ ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 20:23 ? 00:00:00 /usr/bin/dumb-init /nginx-ingres root 5 1 0 20:23 ? 00:00:05 /nginx-ingress-controller --defa root 21 5 0 20:23 ? 00:00:00 nginx: master process /usr/sbin/ nobody 106 21 0 20:23 ? 00:00:00 nginx: worker process nobody 107 21 0 20:23 ? 00:00:00 nginx: worker process root 172 0 0 20:43 pts/0 00:00:00 bash Attach gdb to the nginx master process $ gdb -p 21 .... Attaching to process 21 Reading symbols from /usr/sbin/nginx...done. .... (gdb) Copy and paste the following: set $cd = ngx_cycle->config_dump set $nelts = $cd.nelts set $elts = (ngx_conf_dump_t*)($cd.elts) while ($nelts-- > 0) set $name = $elts[$nelts]->name.data printf \"Dumping %s to nginx_conf.txt\\n\", $name append memory nginx_conf.txt $elts[$nelts]->buffer.start $elts[$nelts]->buffer.end end Quit GDB by pressing CTRL+D Open nginx_conf.txt cat nginx_conf.txt","title":"Troubleshooting"},{"location":"troubleshooting/#troubleshooting","text":"","title":"Troubleshooting"},{"location":"troubleshooting/#ingress-controller-logs-and-events","text":"There are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting methods to obtain more information.
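If only the configuration currently loaded from disk is needed (the GDB procedure above additionally recovers older, in-memory configurations), nginx can validate and print it directly; a simpler sketch, assuming the usual controller labels:

```bash
# Validate and dump every configuration file nginx has read from disk.
POD=$(kubectl get pods -n ingress-nginx \
  -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}')
kubectl exec -n ingress-nginx "$POD" -- nginx -T | less
```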
Check the Ingress Resource Events $ kubectl get ing -n NAME HOSTS ADDRESS PORTS AGE cafe-ingress cafe.com 10.0.2.15 80 25s $ kubectl describe ing -n Name: cafe-ingress Namespace: default Address: 10.0.2.15 Default backend: default-http-backend:80 (172.17.0.5:8080) Rules: Host Path Backends ---- ---- -------- cafe.com /tea tea-svc:80 () /coffee coffee-svc:80 () Annotations: kubectl.kubernetes.io/last-applied-configuration: {\"apiVersion\":\"networking.k8s.io/v1beta1\",\"kind\":\"Ingress\",\"metadata\":{\"annotations\":{},\"name\":\"cafe-ingress\",\"namespace\":\"default\",\"selfLink\":\"/apis/networking/v1beta1/namespaces/default/ingresses/cafe-ingress\"},\"spec\":{\"rules\":[{\"host\":\"cafe.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"tea-svc\",\"servicePort\":80},\"path\":\"/tea\"},{\"backend\":{\"serviceName\":\"coffee-svc\",\"servicePort\":80},\"path\":\"/coffee\"}]}}]},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"169.48.142.110\"}]}}} Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal CREATE 1m nginx-ingress-controller Ingress default/cafe-ingress Normal UPDATE 58s nginx-ingress-controller Ingress default/cafe-ingress Check the Ingress Controller Logs $ kubectl get pods -n NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl logs -n nginx-ingress-controller-67956bf89d-fv58j ------------------------------------------------------------------------------- NGINX Ingress controller Release: 0.14.0 Build: git-734361d Repository: https://github.com/kubernetes/ingress-nginx ------------------------------------------------------------------------------- .... Check the Nginx Configuration $ kubectl get pods -n NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl exec -it -n nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf daemon off; worker_processes 2; pid /run/nginx.pid; worker_rlimit_nofile 523264; worker_shutdown_timeout 240s; events { multi_accept on; worker_connections 16384; use epoll; } http { .... Check if used Services Exist $ kubectl get svc --all-namespaces NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default coffee-svc ClusterIP 10.106.154.35 80/TCP 18m default kubernetes ClusterIP 10.96.0.1 443/TCP 30m default tea-svc ClusterIP 10.104.172.12 80/TCP 18m kube-system default-http-backend NodePort 10.108.189.236 80:30001/TCP 30m kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 30m kube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m","title":"Ingress-Controller Logs and Events"},{"location":"troubleshooting/#debug-logging","text":"Using the flag --v=XX it is possible to increase the level of logging. This is performed by editing the deployment. 
$ kubectl get deploy -n NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE default-http-backend 1 1 1 1 35m nginx-ingress-controller 1 1 1 1 35m $ kubectl edit deploy -n nginx-ingress-controller # Add --v=X to \"- args\" , where X is an integer --v=2 shows details using diff about the changes in the configuration in nginx --v=3 shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format --v=5 configures NGINX in debug mode","title":"Debug Logging"},{"location":"troubleshooting/#authentication-to-the-kubernetes-api-server","text":"A number of components are involved in the authentication process and the first step is to narrow down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file. Both authentications must work: +-------------+ service +------------+ | | authentication | | + apiserver +<-------------------+ ingress | | | | controller | +-------------+ +------------+ Service authentication The Ingress controller needs information from apiserver. Therefore, authentication is required, which can be achieved in two different ways: Service Account: This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details. Kubeconfig file: In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the --kubeconfig flag. The value of the flag is a path to a file specifying how to connect to the API server. Using --kubeconfig does not require the flag --apiserver-host . The format of the file is identical to ~/.kube/config which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. Using the flag --apiserver-host : With --apiserver-host=http://localhost:8080 it is possible to specify an unsecured API server or reach a remote kubernetes cluster using kubectl proxy . Please do not use this approach in production. In the diagram below you can see the full authentication flow with all options, starting with the browser on the lower left hand side. Kubernetes Workstation +---------------------------------------------------+ +------------------+ | | | | | +-----------+ apiserver +------------+ | | +------------+ | | | | proxy | | | | | | | | | apiserver | | ingress | | | | ingress | | | | | | controller | | | | controller | | | | | | | | | | | | | | | | | | | | | | | | | service account/ | | | | | | | | | | kubeconfig | | | | | | | | | +<-------------------+ | | | | | | | | | | | | | | | | | +------+----+ kubeconfig +------+-----+ | | +------+-----+ | | |<--------------------------------------------------------| | | | | | +---------------------------------------------------+ +------------------+","title":"Authentication to the Kubernetes API Server"},{"location":"troubleshooting/#service-account","text":"If using a service account to connect to the API server, the Ingress controller expects the file /var/run/secrets/kubernetes.io/serviceaccount/token to be present. It provides a secret token that is required to authenticate with the API server.
Verify with the following commands: # start a container that contains curl $ kubectl run test --image=tutum/curl -- sleep 10000 # check that container is running $ kubectl get pods NAME READY STATUS RESTARTS AGE test-701078429-s5kca 1/1 Running 0 16s # check if secret exists $ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ ca.crt namespace token # get service IP of master $ kubectl get services NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes 10.0.0.1 443/TCP 1d # check base connectivity from cluster inside $ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 Unauthorized # connect using tokens $ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) $ echo $TOKEN_VALUE eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H \"Authorization: Bearer $TOKEN_VALUE \" https://10.0.0.1 { \"paths\": [ \"/api\", \"/api/v1\", \"/apis\", \"/apis/apps\", \"/apis/apps/v1alpha1\", \"/apis/authentication.k8s.io\", \"/apis/authentication.k8s.io/v1beta1\", \"/apis/authorization.k8s.io\", \"/apis/authorization.k8s.io/v1beta1\", \"/apis/autoscaling\", \"/apis/autoscaling/v1\", \"/apis/batch\", \"/apis/batch/v1\", \"/apis/batch/v2alpha1\", \"/apis/certificates.k8s.io\", \"/apis/certificates.k8s.io/v1alpha1\", \"/apis/networking\", \"/apis/networking/v1beta1\", \"/apis/policy\", \"/apis/policy/v1alpha1\", \"/apis/rbac.authorization.k8s.io\", \"/apis/rbac.authorization.k8s.io/v1alpha1\", \"/apis/storage.k8s.io\", \"/apis/storage.k8s.io/v1beta1\", \"/healthz\", \"/healthz/ping\", \"/logs\", \"/metrics\", \"/swaggerapi/\", \"/ui/\", \"/version\" ] } If it is not working, there are two possible reasons: The contents of the tokens are invalid. Find the secret name with kubectl get secrets | grep service-account and delete it with kubectl delete secret . It will automatically be recreated. You have a non-standard Kubernetes installation and the file containing the token may not be present. The API server will mount a volume containing this file, but only if the API server is configured to use the ServiceAccount admission controller. If you experience this error, verify that your API server is using the ServiceAccount admission controller. If you are configuring the API server by hand, you can set this with the --admission-control parameter. Note that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers. More information: User Guide: Service Accounts Cluster Administrator Guide: Managing Service Accounts","title":"Service Account"},{"location":"troubleshooting/#kube-config","text":"If you want to use a kubeconfig file for authentication, follow the deploy procedure and add the flag --kubeconfig=/etc/kubernetes/kubeconfig.yaml to the args section of the deployment.","title":"Kube-Config"},{"location":"troubleshooting/#using-gdb-with-nginx","text":"Gdb can be used with nginx to perform a configuration dump. This allows us to see which configuration is being used, as well as older configurations. Note: The below is based on the nginx documentation .
SSH into the worker $ ssh user@workerIP Obtain the Docker Container Running nginx $ docker ps | grep nginx-ingress-controller CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES d9e1d243156a quay.io/kubernetes-ingress-controller/nginx-ingress-controller \"/usr/bin/dumb-init \u2026\" 19 minutes ago Up 19 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0 Exec into the container $ docker exec -it --user=0 --privileged d9e1d243156a bash Make sure nginx is running with --with-debug $ nginx -V 2>&1 | grep -- '--with-debug' Get list of processes running on container $ ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 20:23 ? 00:00:00 /usr/bin/dumb-init /nginx-ingres root 5 1 0 20:23 ? 00:00:05 /nginx-ingress-controller --defa root 21 5 0 20:23 ? 00:00:00 nginx: master process /usr/sbin/ nobody 106 21 0 20:23 ? 00:00:00 nginx: worker process nobody 107 21 0 20:23 ? 00:00:00 nginx: worker process root 172 0 0 20:43 pts/0 00:00:00 bash Attach gdb to the nginx master process $ gdb -p 21 .... Attaching to process 21 Reading symbols from /usr/sbin/nginx...done. .... (gdb) Copy and paste the following: set $cd = ngx_cycle->config_dump set $nelts = $cd.nelts set $elts = (ngx_conf_dump_t*)($cd.elts) while ($nelts-- > 0) set $name = $elts[$nelts]->name.data printf \"Dumping %s to nginx_conf.txt\\n\", $name append memory nginx_conf.txt $elts[$nelts]->buffer.start $elts[$nelts]->buffer.end end Quit GDB by pressing CTRL+D Open nginx_conf.txt cat nginx_conf.txt","title":"Using GDB with Nginx"},{"location":"deploy/","text":"Installation Guide \u00b6 Contents \u00b6 Prerequisite Generic Deployment Command Provider Specific Steps Docker for Mac minikube AWS GCE - GKE Azure Bare-metal Verify installation Detect installed version Using Helm Prerequisite Generic Deployment Command \u00b6 Attention The default configuration watches Ingress objects from all the namespaces . To change this behavior, use the flag --watch-namespace to limit the scope to a particular namespace. Warning If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Attention If you're using GKE you need to initialize your user as a cluster-admin with the following command: kubectl create clusterrolebinding cluster-admin-binding \\ --clusterrole cluster-admin \\ --user $(gcloud config get-value account) The following Mandatory Command is required for all deployments. kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/mandatory.yaml Tip If you are using a Kubernetes version prior to 1.14, you need to change kubernetes.io/os to beta.kubernetes.io/os at line 217 of mandatory.yaml , see Labels details . Provider Specific Steps \u00b6 There are cloud provider specific yaml files.
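Before picking one of the provider-specific files below, it can help to confirm the mandatory manifest applied cleanly; for example:

```bash
# The mandatory manifest creates the ingress-nginx namespace and the controller workload.
kubectl get all -n ingress-nginx

# The controller prints its version banner shortly after start-up.
kubectl logs -n ingress-nginx -l app.kubernetes.io/name=ingress-nginx --tail=20
```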
Docker for Mac \u00b6 Kubernetes is available in Docker for Mac (from version 18.06.0-ce ) Create a service kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml minikube \u00b6 For standard usage: minikube addons enable ingress For development: Disable the ingress addon: minikube addons disable ingress Execute make dev-env Confirm the nginx-ingress-controller deployment exists: $ kubectl get pods -n ingress-nginx NAME READY STATUS RESTARTS AGE default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s AWS \u00b6 In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer . Since Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB). Please check the elastic load balancing AWS details page Elastic Load Balancer - ELB \u00b6 This setup requires choosing in which layer (L4 or L7) we want to configure the ELB: Layer 4 : use TCP as the listener protocol for ports 80 and 443. Layer 7 : use HTTP as the listener protocol for port 80 and terminate TLS in the ELB For L4: Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l4.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l4.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l4.yaml For L7: Change the line of the file provider/aws/service-l7.yaml replacing the dummy id with a valid one \"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX\" Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l7.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l7.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l7.yaml This example creates an ELB with just two listeners, one on port 80 and another on port 443 ELB Idle Timeouts \u00b6 In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the keepalive_timeout that is configured for NGINX. By default NGINX keepalive_timeout is set to 75s . The default ELB idle timeout will work for most scenarios, unless the NGINX keepalive_timeout has been modified, in which case service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout will need to be modified to ensure it is less than the keepalive_timeout the user has configured. Please Note: An idle timeout of 3600s is recommended when using WebSockets.
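The idle-timeout annotation discussed above can also be adjusted on a live Service instead of editing the yaml; a sketch, assuming the Service keeps the default ingress-nginx name and namespace:

```bash
# Set the ELB idle timeout in seconds (3600s is the WebSockets recommendation above);
# remember to keep NGINX's keepalive_timeout consistent with it.
kubectl annotate service ingress-nginx -n ingress-nginx --overwrite \
  service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout="3600"
```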
More information with regards to idle timeouts for your Load Balancer can be found in the official AWS documentation . Network Load Balancer (NLB) \u00b6 This type of load balancer is supported since v1.10.0 as an ALPHA feature. kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-nlb.yaml GCE-GKE \u00b6 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml Important Note: proxy protocol is not supported in GCE/GKE Azure \u00b6 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml Bare-metal \u00b6 Using NodePort : kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/baremetal/service-nodeport.yaml Tip For extended notes regarding deployments on bare-metal, see Bare-metal considerations . Verify installation \u00b6 To check if the ingress controller pods have started, run the following command: kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch Once the ingress controller pods are running, you can cancel the above command by typing Ctrl+C . Now, you are ready to create your first ingress. Detect installed version \u00b6 To detect which version of the ingress controller is running, exec into the pod and run the nginx-ingress-controller --version command. POD_NAMESPACE=ingress-nginx POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version Using Helm \u00b6 The NGINX Ingress controller can be installed via Helm using the chart stable/nginx-ingress from the official charts repository. To install the chart with the release name my-nginx : helm install my-nginx stable/nginx-ingress If the kubernetes cluster has RBAC enabled, then run: helm install my-nginx stable/nginx-ingress --set rbac.create=true If you are using Helm 2, then specify the release name using the --name flag helm install stable/nginx-ingress --name my-nginx or helm install stable/nginx-ingress --name my-nginx --set rbac.create=true Detect installed version: POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version","title":"Installation Guide"},{"location":"deploy/#installation-guide","text":"","title":"Installation Guide"},{"location":"deploy/#contents","text":"Prerequisite Generic Deployment Command Provider Specific Steps Docker for Mac minikube AWS GCE - GKE Azure Bare-metal Verify installation Detect installed version Using Helm","title":"Contents"},{"location":"deploy/#prerequisite-generic-deployment-command","text":"Attention The default configuration watches Ingress objects from all the namespaces . To change this behavior, use the flag --watch-namespace to limit the scope to a particular namespace. Warning If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Attention If you're using GKE you need to initialize your user as a cluster-admin with the following command: kubectl create clusterrolebinding cluster-admin-binding \\ --clusterrole cluster-admin \\ --user $(gcloud config get-value account) The following Mandatory Command is required for all deployments.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/mandatory.yaml Tip If you are using a Kubernetes version prior to 1.14, you need to change kubernetes.io/os to beta.kubernetes.io/os at line 217 of mandatory.yaml , see Labels details .","title":"Prerequisite Generic Deployment Command"},{"location":"deploy/#provider-specific-steps","text":"There are cloud provider specific yaml files.","title":"Provider Specific Steps"},{"location":"deploy/#docker-for-mac","text":"Kubernetes is available in Docker for Mac (from version 18.06.0-ce ) Create a service kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml","title":"Docker for Mac"},{"location":"deploy/#minikube","text":"For standard usage: minikube addons enable ingress For development: Disable the ingress addon: minikube addons disable ingress Execute make dev-env Confirm the nginx-ingress-controller deployment exists: $ kubectl get pods -n ingress-nginx NAME READY STATUS RESTARTS AGE default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s","title":"minikube"},{"location":"deploy/#aws","text":"In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer . Since Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB). Please check the elastic load balancing AWS details page","title":"AWS"},{"location":"deploy/#elastic-load-balancer-elb","text":"This setup requires choosing in which layer (L4 or L7) we want to configure the ELB: Layer 4 : use TCP as the listener protocol for ports 80 and 443. Layer 7 : use HTTP as the listener protocol for port 80 and terminate TLS in the ELB For L4: Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l4.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l4.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l4.yaml For L7: Change the line of the file provider/aws/service-l7.yaml replacing the dummy id with a valid one \"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX\" Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information.
If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l7.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l7.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l7.yaml This example creates an ELB with just two listeners, one in port 80 and another in port 443","title":"Elastic Load Balancer - ELB"},{"location":"deploy/#elb-idle-timeouts","text":"In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the keepalive_timeout that is configured for NGINX. By default NGINX keepalive_timeout is set to 75s . The default ELB idle timeout will work for most scenarios, unless the NGINX keepalive_timeout has been modified, in which case service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout will need to be modified to ensure it is less than the keepalive_timeout the user has configured. Please Note: An idle timeout of 3600s is recommended when using WebSockets. More information with regards to idle timeouts for your Load Balancer can be found in the official AWS documentation .","title":"ELB Idle Timeouts"},{"location":"deploy/#network-load-balancer-nlb","text":"This type of load balancer is supported since v1.10.0 as an ALPHA feature. kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-nlb.yaml","title":"Network Load Balancer (NLB)"},{"location":"deploy/#gce-gke","text":"kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml Important Note: proxy protocol is not supported in GCE/GKE","title":"GCE-GKE"},{"location":"deploy/#azure","text":"kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml","title":"Azure"},{"location":"deploy/#bare-metal","text":"Using NodePort : kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/baremetal/service-nodeport.yaml Tip For extended notes regarding deployments on bare-metal, see Bare-metal considerations .","title":"Bare-metal"},{"location":"deploy/#verify-installation","text":"To check if the ingress controller pods have started, run the following command: kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch Once the operator pods are running, you can cancel the above command by typing Ctrl+C . Now, you are ready to create your first ingress.","title":"Verify installation"},{"location":"deploy/#detect-installed-version","text":"To detect which version of the ingress controller is running, exec into the pod and run nginx-ingress-controller version command. POD_NAMESPACE=ingress-nginx POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version","title":"Detect installed version"},{"location":"deploy/#using-helm","text":"NGINX Ingress controller can be installed via Helm using the chart stable/nginx-ingress from the official charts repository. 
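If the stable charts repository has not been added to the local Helm client yet, that is done first; a brief sketch (the repository URL below is an assumption based on the charts repository in use at the time of writing):

```bash
helm repo add stable https://kubernetes-charts.storage.googleapis.com
helm repo update
```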
To install the chart with the release name my-nginx : helm install my-nginx stable/nginx-ingress If the kubernetes cluster has RBAC enabled, then run: helm install my-nginx stable/nginx-ingress --set rbac.create=true If you are using Helm 2 then specify release name using --name flag helm install stable/nginx-ingress --name my-nginx or helm install stable/nginx-ingress --name my-nginx --set rbac.create=true Detect installed version: POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version","title":"Using Helm"},{"location":"deploy/baremetal/","text":"Bare-metal considerations \u00b6 In traditional cloud environments, where network load balancers are available on-demand, a single Kubernetes manifest suffices to provide a single point of contact to the NGINX Ingress controller to external clients and, indirectly, to any application running inside the cluster. Bare-metal environments lack this commodity, requiring a slightly different setup to offer the same kind of access to external consumers. The rest of this document describes a few recommended approaches to deploying the NGINX Ingress controller inside a Kubernetes cluster running on bare-metal. A pure software solution: MetalLB \u00b6 MetalLB provides a network load-balancer implementation for Kubernetes clusters that do not run on a supported cloud provider, effectively allowing the usage of LoadBalancer Services within any cluster. This section demonstrates how to use the Layer 2 configuration mode of MetalLB together with the NGINX Ingress controller in a Kubernetes cluster that has publicly accessible nodes . In this mode, one node attracts all the traffic for the ingress-nginx Service IP. See Traffic policies for more details. Note The description of other supported configuration modes is off-scope for this document. Warning MetalLB is currently in beta . Read about the Project maturity and make sure you inform yourself by reading the official documentation thoroughly. MetalLB can be deployed either with a simple Kubernetes manifest or with Helm. The rest of this example assumes MetalLB was deployed following the Installation instructions. MetalLB requires a pool of IP addresses in order to be able to take ownership of the ingress-nginx Service. This pool can be defined in a ConfigMap named config located in the same namespace as the MetalLB controller. This pool of IPs must be dedicated to MetalLB's use, you can't reuse the Kubernetes node IPs or IPs handed out by a DHCP server. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 After creating the following ConfigMap, MetalLB takes ownership of one of the IP addresses in the pool and updates the loadBalancer IP field of the ingress-nginx Service accordingly. 
apiVersion : v1 kind : ConfigMap metadata : namespace : metallb-system name : config data : config : | address-pools: - name: default protocol: layer2 addresses: - 203.0.113.10-203.0.113.15 $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx LoadBalancer 10.0.220.217 203.0.113.10 80:30100/TCP,443:30101/TCP As soon as MetalLB sets the external IP address of the ingress-nginx LoadBalancer Service, the corresponding entries are created in the iptables NAT table and the node with the selected IP address starts responding to HTTP requests on the ports configured in the LoadBalancer Service: $ curl -D- http://203.0.113.3 -H 'Host: myapp.example.com' HTTP/1.1 200 OK Server: nginx/1.15.2 Tip In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the Local traffic policy. Traffic policies are described in more details in Traffic policies as well as in the next section. Over a NodePort Service \u00b6 Due to its simplicity, this is the setup a user will deploy by default when following the steps described in the installation guide . Info A Service of type NodePort exposes, via the kube-proxy component, the same unprivileged port (default: 30000-32767) on every Kubernetes node, masters included. For more information, see Services . In this configuration, the NGINX container remains isolated from the host network. As a result, it can safely bind to any port, including the standard HTTP ports 80 and 443. However, due to the container namespace isolation, a client located outside the cluster network (e.g. on the public internet) is not able to access Ingress hosts directly on ports 80 and 443. Instead, the external client must append the NodePort allocated to the ingress-nginx Service to HTTP requests. Example Given the NodePort 30100 allocated to the ingress-nginx Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP and a Kubernetes node with the public IP address 203.0.113.2 (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 a client would reach an Ingress with host : myapp . example . com at http://myapp.example.com:30100 , where the myapp.example.com subdomain resolves to the 203.0.113.2 IP address. Impact on the host system While it may sound tempting to reconfigure the NodePort range using the --service-node-port-range API server flag to include unprivileged ports and be able to expose ports 80 and 443, doing so may result in unexpected issues including (but not limited to) the use of ports otherwise reserved to system daemons and the necessity to grant kube-proxy privileges it may otherwise not require. This practice is therefore discouraged . See the other approaches proposed in this page for alternatives. This approach has a few other limitations one ought to be aware of: Source IP address Services of type NodePort perform source address translation by default. This means the source IP of a HTTP request is always the IP address of the Kubernetes node that received the request from the perspective of NGINX. 
The recommended way to preserve the source IP in a NodePort setup is to set the value of the externalTrafficPolicy field of the ingress-nginx Service spec to Local ( example ). Warning This setting effectively drops packets sent to Kubernetes nodes which are not running any instance of the NGINX Ingress controller. Consider assigning NGINX Pods to specific nodes in order to control on what nodes the NGINX Ingress controller should be scheduled or not scheduled. Example In a Kubernetes cluster composed of 3 nodes (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 with a nginx-ingress-controller Deployment composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-cf9ff8c96-8vvf8 1/1 Running 172.17.0.3 host-3 nginx-ingress-controller-cf9ff8c96-pxsds 1/1 Running 172.17.1.4 host-2 Requests sent to host-2 and host-3 would be forwarded to NGINX and original client's IP would be preserved, while requests to host-1 would get dropped because there is no NGINX replica running on that node. Ingress status Because NodePort Services do not get a LoadBalancerIP assigned by definition, the NGINX Ingress controller does not update the status of Ingress objects it manages . $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Despite the fact there is no load balancer providing a public IP address to the NGINX Ingress controller, it is possible to force the status update of all managed Ingress objects by setting the externalIPs field of the ingress-nginx Service. Warning There is more to setting externalIPs than just enabling the NGINX Ingress controller to update the status of Ingress objects. Please read about this option in the Services page of official Kubernetes documentation as well as the section about External IPs in this document for more information. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 one could edit the ingress-nginx Service and add the following field to the object spec spec : externalIPs : - 203.0.113.1 - 203.0.113.2 - 203.0.113.3 which would in turn be reflected on Ingress objects as follows: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.1,203.0.113.2,203.0.113.3 80 Redirects As NGINX is not aware of the port translation operated by the NodePort Service , backend applications are responsible for generating redirect URLs that take into account the URL used by external clients, including the NodePort. Example Redirects generated by NGINX, for instance HTTP to HTTPS or domain to www.domain , are generated without NodePort: $ curl -D- http://myapp.example.com:30100 ` HTTP/1.1 308 Permanent Redirect Server: nginx/1.15.2 Location: https://myapp.example.com/ #-> missing NodePort in HTTPS redirect Via the host network \u00b6 In a setup where there is no external load balancer available but using NodePorts is not an option, one can configure ingress-nginx Pods to use the network of the host they run on instead of a dedicated network namespace. 
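Before turning to that approach, here is a minimal sketch of the externalTrafficPolicy change recommended earlier in this section (the Service name and namespace are assumed to match the installation guide):

```bash
kubectl -n ingress-nginx patch svc ingress-nginx \
  --type merge \
  -p '{"spec":{"externalTrafficPolicy":"Local"}}'
```

That aside, the host-network setup introduced above behaves as follows.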
The benefit of this approach is that the NGINX Ingress controller can bind ports 80 and 443 directly to Kubernetes nodes' network interfaces, without the extra network translation imposed by NodePort Services. Note This approach does not leverage any Service object to expose the NGINX Ingress controller. If the ingress-nginx Service exists in the target cluster, it is recommended to delete it . This can be achieved by enabling the hostNetwork option in the Pods' spec. template : spec : hostNetwork : true Security considerations Enabling this option exposes every system daemon to the NGINX Ingress controller on any network interface, including the host's loopback. Please evaluate the impact this may have on the security of your system carefully. Example Consider this nginx-ingress-controller Deployment composed of 2 replicas, NGINX Pods inherit from the IP address of their host instead of an internal Pod IP. $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 One major limitation of this deployment approach is that only a single NGINX Ingress controller Pod may be scheduled on each cluster node, because binding the same port multiple times on the same network interface is technically impossible. Pods that are unschedulable due to such situation fail with the following event: $ kubectl -n ingress-nginx describe pod ... Events: Type Reason From Message ---- ------ ---- ------- Warning FailedScheduling default-scheduler 0/3 nodes are available: 3 node(s) didn't have free ports for the requested pod ports. One way to ensure only schedulable Pods are created is to deploy the NGINX Ingress controller as a DaemonSet instead of a traditional Deployment. Info A DaemonSet schedules exactly one type of Pod per cluster node, masters included, unless a node is configured to repel those Pods . For more information, see DaemonSet . Because most properties of DaemonSet objects are identical to Deployment objects, this documentation page leaves the configuration of the corresponding manifest at the user's discretion. Like with NodePorts, this approach has a few quirks it is important to be aware of. DNS resolution Pods configured with hostNetwork : true do not use the internal DNS resolver (i.e. kube-dns or CoreDNS ), unless their dnsPolicy spec field is set to ClusterFirstWithHostNet . Consider using this setting if NGINX is expected to resolve internal names for any reason. Ingress status Because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply and the status of all Ingress objects remains blank. $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Instead, and because bare-metal nodes usually don't have an ExternalIP, one has to enable the --report-node-internal-ip-address flag, which sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. 
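Pulling the pieces of this section together, a controller Pod template using the host network might look roughly as follows (a hedged, abridged sketch showing only the fields discussed above; all other fields and arguments are elided):

```yaml
spec:
  template:
    spec:
      hostNetwork: true
      # Needed so NGINX can still resolve cluster-internal names:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: nginx-ingress-controller
          args:
            - /nginx-ingress-controller
            # Publish node-internal IPs in the status of Ingress objects:
            - --report-node-internal-ip-address
```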
Example Given a nginx-ingress-controller DaemonSet composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 the controller sets the status of all Ingress objects it manages to the following value: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.2,203.0.113.3 80 Note Alternatively, it is possible to override the address written to Ingress objects using the --publish-status-address flag. See Command line arguments . Using a self-provisioned edge \u00b6 Similarly to cloud environments, this deployment approach requires an edge network component providing a public entrypoint to the Kubernetes cluster. This edge component can be either hardware (e.g. vendor appliance) or software (e.g. HAproxy ) and is usually managed outside of the Kubernetes landscape by operations teams. Such deployment builds upon the NodePort Service described above in Over a NodePort Service , with one significant difference: external clients do not access cluster nodes directly, only the edge component does. This is particularly suitable for private Kubernetes clusters where none of the nodes has a public IP address. On the edge side, the only prerequisite is to dedicate a public IP address that forwards all HTTP traffic to Kubernetes nodes and/or masters. Incoming traffic on TCP ports 80 and 443 is forwarded to the corresponding HTTP and HTTPS NodePort on the target nodes as shown in the diagram below: External IPs \u00b6 Source IP address This method does not allow preserving the source IP of HTTP requests in any manner, it is therefore not recommended to use it despite its apparent simplicity. The externalIPs Service option was previously mentioned in the NodePort section. As per the Services page of the official Kubernetes documentation, the externalIPs option causes kube-proxy to route traffic sent to arbitrary IP addresses and on the Service ports to the endpoints of that Service. These IP addresses must belong to the target node . Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 and the following ingress-nginx NodePort Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP One could set the following external IPs in the Service spec, and NGINX would become available on both the NodePort and the Service port: spec : externalIPs : - 203.0.113.2 - 203.0.113.3 $ curl -D- http://myapp.example.com:30100 HTTP/1.1 200 OK Server: nginx/1.15.2 $ curl -D- http://myapp.example.com HTTP/1.1 200 OK Server: nginx/1.15.2 We assume the myapp.example.com subdomain above resolves to both 203.0.113.2 and 203.0.113.3 IP addresses.","title":"Bare-metal considerations"},{"location":"deploy/baremetal/#bare-metal-considerations","text":"In traditional cloud environments, where network load balancers are available on-demand, a single Kubernetes manifest suffices to provide a single point of contact to the NGINX Ingress controller to external clients and, indirectly, to any application running inside the cluster. 
Bare-metal environments lack this commodity, requiring a slightly different setup to offer the same kind of access to external consumers. The rest of this document describes a few recommended approaches to deploying the NGINX Ingress controller inside a Kubernetes cluster running on bare-metal.","title":"Bare-metal considerations"},{"location":"deploy/baremetal/#a-pure-software-solution-metallb","text":"MetalLB provides a network load-balancer implementation for Kubernetes clusters that do not run on a supported cloud provider, effectively allowing the usage of LoadBalancer Services within any cluster. This section demonstrates how to use the Layer 2 configuration mode of MetalLB together with the NGINX Ingress controller in a Kubernetes cluster that has publicly accessible nodes . In this mode, one node attracts all the traffic for the ingress-nginx Service IP. See Traffic policies for more details. Note The description of other supported configuration modes is off-scope for this document. Warning MetalLB is currently in beta . Read about the Project maturity and make sure you inform yourself by reading the official documentation thoroughly. MetalLB can be deployed either with a simple Kubernetes manifest or with Helm. The rest of this example assumes MetalLB was deployed following the Installation instructions. MetalLB requires a pool of IP addresses in order to be able to take ownership of the ingress-nginx Service. This pool can be defined in a ConfigMap named config located in the same namespace as the MetalLB controller. This pool of IPs must be dedicated to MetalLB's use, you can't reuse the Kubernetes node IPs or IPs handed out by a DHCP server. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 After creating the following ConfigMap, MetalLB takes ownership of one of the IP addresses in the pool and updates the loadBalancer IP field of the ingress-nginx Service accordingly. apiVersion : v1 kind : ConfigMap metadata : namespace : metallb-system name : config data : config : | address-pools: - name: default protocol: layer2 addresses: - 203.0.113.10-203.0.113.15 $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx LoadBalancer 10.0.220.217 203.0.113.10 80:30100/TCP,443:30101/TCP As soon as MetalLB sets the external IP address of the ingress-nginx LoadBalancer Service, the corresponding entries are created in the iptables NAT table and the node with the selected IP address starts responding to HTTP requests on the ports configured in the LoadBalancer Service: $ curl -D- http://203.0.113.3 -H 'Host: myapp.example.com' HTTP/1.1 200 OK Server: nginx/1.15.2 Tip In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the Local traffic policy. Traffic policies are described in more details in Traffic policies as well as in the next section.","title":"A pure software solution: MetalLB"},{"location":"deploy/baremetal/#over-a-nodeport-service","text":"Due to its simplicity, this is the setup a user will deploy by default when following the steps described in the installation guide . 
Info A Service of type NodePort exposes, via the kube-proxy component, the same unprivileged port (default: 30000-32767) on every Kubernetes node, masters included. For more information, see Services . In this configuration, the NGINX container remains isolated from the host network. As a result, it can safely bind to any port, including the standard HTTP ports 80 and 443. However, due to the container namespace isolation, a client located outside the cluster network (e.g. on the public internet) is not able to access Ingress hosts directly on ports 80 and 443. Instead, the external client must append the NodePort allocated to the ingress-nginx Service to HTTP requests. Example Given the NodePort 30100 allocated to the ingress-nginx Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP and a Kubernetes node with the public IP address 203.0.113.2 (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 a client would reach an Ingress with host : myapp . example . com at http://myapp.example.com:30100 , where the myapp.example.com subdomain resolves to the 203.0.113.2 IP address. Impact on the host system While it may sound tempting to reconfigure the NodePort range using the --service-node-port-range API server flag to include unprivileged ports and be able to expose ports 80 and 443, doing so may result in unexpected issues including (but not limited to) the use of ports otherwise reserved to system daemons and the necessity to grant kube-proxy privileges it may otherwise not require. This practice is therefore discouraged . See the other approaches proposed in this page for alternatives. This approach has a few other limitations one ought to be aware of: Source IP address Services of type NodePort perform source address translation by default. This means the source IP of a HTTP request is always the IP address of the Kubernetes node that received the request from the perspective of NGINX. The recommended way to preserve the source IP in a NodePort setup is to set the value of the externalTrafficPolicy field of the ingress-nginx Service spec to Local ( example ). Warning This setting effectively drops packets sent to Kubernetes nodes which are not running any instance of the NGINX Ingress controller. Consider assigning NGINX Pods to specific nodes in order to control on what nodes the NGINX Ingress controller should be scheduled or not scheduled. Example In a Kubernetes cluster composed of 3 nodes (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 with a nginx-ingress-controller Deployment composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-cf9ff8c96-8vvf8 1/1 Running 172.17.0.3 host-3 nginx-ingress-controller-cf9ff8c96-pxsds 1/1 Running 172.17.1.4 host-2 Requests sent to host-2 and host-3 would be forwarded to NGINX and original client's IP would be preserved, while requests to host-1 would get dropped because there is no NGINX replica running on that node. 
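One way to implement the node-assignment suggestion from the warning above is a nodeSelector on the controller's Pod template; a hedged sketch (the label name and value are hypothetical and would be applied to the chosen nodes beforehand):

```yaml
spec:
  template:
    spec:
      nodeSelector:
        # Hypothetical label identifying the nodes meant to run the controller
        ingress-ready: "true"
```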
Ingress status Because NodePort Services do not get a LoadBalancerIP assigned by definition, the NGINX Ingress controller does not update the status of Ingress objects it manages . $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Despite the fact there is no load balancer providing a public IP address to the NGINX Ingress controller, it is possible to force the status update of all managed Ingress objects by setting the externalIPs field of the ingress-nginx Service. Warning There is more to setting externalIPs than just enabling the NGINX Ingress controller to update the status of Ingress objects. Please read about this option in the Services page of official Kubernetes documentation as well as the section about External IPs in this document for more information. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 one could edit the ingress-nginx Service and add the following field to the object spec spec : externalIPs : - 203.0.113.1 - 203.0.113.2 - 203.0.113.3 which would in turn be reflected on Ingress objects as follows: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.1,203.0.113.2,203.0.113.3 80 Redirects As NGINX is not aware of the port translation operated by the NodePort Service , backend applications are responsible for generating redirect URLs that take into account the URL used by external clients, including the NodePort. Example Redirects generated by NGINX, for instance HTTP to HTTPS or domain to www.domain , are generated without NodePort: $ curl -D- http://myapp.example.com:30100 ` HTTP/1.1 308 Permanent Redirect Server: nginx/1.15.2 Location: https://myapp.example.com/ #-> missing NodePort in HTTPS redirect","title":"Over a NodePort Service"},{"location":"deploy/baremetal/#via-the-host-network","text":"In a setup where there is no external load balancer available but using NodePorts is not an option, one can configure ingress-nginx Pods to use the network of the host they run on instead of a dedicated network namespace. The benefit of this approach is that the NGINX Ingress controller can bind ports 80 and 443 directly to Kubernetes nodes' network interfaces, without the extra network translation imposed by NodePort Services. Note This approach does not leverage any Service object to expose the NGINX Ingress controller. If the ingress-nginx Service exists in the target cluster, it is recommended to delete it . This can be achieved by enabling the hostNetwork option in the Pods' spec. template : spec : hostNetwork : true Security considerations Enabling this option exposes every system daemon to the NGINX Ingress controller on any network interface, including the host's loopback. Please evaluate the impact this may have on the security of your system carefully. Example Consider this nginx-ingress-controller Deployment composed of 2 replicas, NGINX Pods inherit from the IP address of their host instead of an internal Pod IP. 
$ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 One major limitation of this deployment approach is that only a single NGINX Ingress controller Pod may be scheduled on each cluster node, because binding the same port multiple times on the same network interface is technically impossible. Pods that are unschedulable due to such situation fail with the following event: $ kubectl -n ingress-nginx describe pod ... Events: Type Reason From Message ---- ------ ---- ------- Warning FailedScheduling default-scheduler 0/3 nodes are available: 3 node(s) didn't have free ports for the requested pod ports. One way to ensure only schedulable Pods are created is to deploy the NGINX Ingress controller as a DaemonSet instead of a traditional Deployment. Info A DaemonSet schedules exactly one type of Pod per cluster node, masters included, unless a node is configured to repel those Pods . For more information, see DaemonSet . Because most properties of DaemonSet objects are identical to Deployment objects, this documentation page leaves the configuration of the corresponding manifest at the user's discretion. Like with NodePorts, this approach has a few quirks it is important to be aware of. DNS resolution Pods configured with hostNetwork : true do not use the internal DNS resolver (i.e. kube-dns or CoreDNS ), unless their dnsPolicy spec field is set to ClusterFirstWithHostNet . Consider using this setting if NGINX is expected to resolve internal names for any reason. Ingress status Because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply and the status of all Ingress objects remains blank. $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Instead, and because bare-metal nodes usually don't have an ExternalIP, one has to enable the --report-node-internal-ip-address flag, which sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. Example Given a nginx-ingress-controller DaemonSet composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 the controller sets the status of all Ingress objects it manages to the following value: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.2,203.0.113.3 80 Note Alternatively, it is possible to override the address written to Ingress objects using the --publish-status-address flag. See Command line arguments .","title":"Via the host network"},{"location":"deploy/baremetal/#using-a-self-provisioned-edge","text":"Similarly to cloud environments, this deployment approach requires an edge network component providing a public entrypoint to the Kubernetes cluster. This edge component can be either hardware (e.g. vendor appliance) or software (e.g. HAproxy ) and is usually managed outside of the Kubernetes landscape by operations teams. 
Such deployment builds upon the NodePort Service described above in Over a NodePort Service , with one significant difference: external clients do not access cluster nodes directly, only the edge component does. This is particularly suitable for private Kubernetes clusters where none of the nodes has a public IP address. On the edge side, the only prerequisite is to dedicate a public IP address that forwards all HTTP traffic to Kubernetes nodes and/or masters. Incoming traffic on TCP ports 80 and 443 is forwarded to the corresponding HTTP and HTTPS NodePort on the target nodes as shown in the diagram below:","title":"Using a self-provisioned edge"},{"location":"deploy/baremetal/#external-ips","text":"Source IP address This method does not allow preserving the source IP of HTTP requests in any manner, it is therefore not recommended to use it despite its apparent simplicity. The externalIPs Service option was previously mentioned in the NodePort section. As per the Services page of the official Kubernetes documentation, the externalIPs option causes kube-proxy to route traffic sent to arbitrary IP addresses and on the Service ports to the endpoints of that Service. These IP addresses must belong to the target node . Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 and the following ingress-nginx NodePort Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP One could set the following external IPs in the Service spec, and NGINX would become available on both the NodePort and the Service port: spec : externalIPs : - 203.0.113.2 - 203.0.113.3 $ curl -D- http://myapp.example.com:30100 HTTP/1.1 200 OK Server: nginx/1.15.2 $ curl -D- http://myapp.example.com HTTP/1.1 200 OK Server: nginx/1.15.2 We assume the myapp.example.com subdomain above resolves to both 203.0.113.2 and 203.0.113.3 IP addresses.","title":"External IPs"},{"location":"deploy/rbac/","text":"Role Based Access Control (RBAC) \u00b6 Overview \u00b6 This example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled. Role Based Access Control is comprised of four layers: ClusterRole - permissions assigned to a role that apply to an entire cluster ClusterRoleBinding - binding a ClusterRole to a specific account Role - permissions assigned to a role that apply to a specific namespace RoleBinding - binding a Role to a specific account In order for RBAC to be applied to an nginx-ingress-controller, that controller should be assigned to a ServiceAccount . That ServiceAccount should be bound to the Role s and ClusterRole s defined for the nginx-ingress-controller. Service Accounts created in this example \u00b6 One ServiceAccount is created in this example, nginx-ingress-serviceaccount . Permissions Granted in this example \u00b6 There are two sets of permissions defined in this example. Cluster-wide permissions defined by the ClusterRole named nginx-ingress-clusterrole , and namespace specific permissions defined by the Role named nginx-ingress-role . Cluster Permissions \u00b6 These permissions are granted in order for the nginx-ingress-controller to be able to function as an ingress across the cluster. 
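As a concrete sketch of what part of this ClusterRole can look like in manifest form (the apiGroups and rule grouping below are assumptions; the authoritative permission list follows):

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
rules:
  - apiGroups: [""]
    resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"]
    verbs: ["list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
  - apiGroups: ["", "extensions"]
    resources: ["services", "ingresses"]
    verbs: ["get", "list", "watch"]
```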
These permissions are granted to the ClusterRole named nginx-ingress-clusterrole configmaps , endpoints , nodes , pods , secrets : list, watch nodes : get services , ingresses : get, list, watch events : create, patch ingresses/status : update Namespace Permissions \u00b6 These permissions are granted specific to the nginx-ingress namespace. These permissions are granted to the Role named nginx-ingress-role configmaps , pods , secrets : get endpoints : get Furthermore to support leader-election, the nginx-ingress-controller needs to have access to a configmap using the resourceName ingress-controller-leader-nginx Note that resourceNames can NOT be used to limit requests using the \u201ccreate\u201d verb because authorizers only have access to information that can be obtained from the request URL, method, and headers (resource names in a \u201ccreate\u201d request are part of the request body). configmaps : get, update (for resourceName ingress-controller-leader-nginx ) configmaps : create This resourceName is the concatenation of the election-id and the ingress-class as defined by the ingress-controller, which defaults to: election-id : ingress-controller-leader ingress-class : nginx resourceName : - Please adapt accordingly if you overwrite either parameter when launching the nginx-ingress-controller. Bindings \u00b6 The ServiceAccount nginx-ingress-serviceaccount is bound to the Role nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole . The serviceAccountName associated with the containers in the deployment must match the serviceAccount. The namespace references in the Deployment metadata, container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace.","title":"Role Based Access Control (RBAC)"},{"location":"deploy/rbac/#role-based-access-control-rbac","text":"","title":"Role Based Access Control (RBAC)"},{"location":"deploy/rbac/#overview","text":"This example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled. Role Based Access Control is comprised of four layers: ClusterRole - permissions assigned to a role that apply to an entire cluster ClusterRoleBinding - binding a ClusterRole to a specific account Role - permissions assigned to a role that apply to a specific namespace RoleBinding - binding a Role to a specific account In order for RBAC to be applied to an nginx-ingress-controller, that controller should be assigned to a ServiceAccount . That ServiceAccount should be bound to the Role s and ClusterRole s defined for the nginx-ingress-controller.","title":"Overview"},{"location":"deploy/rbac/#service-accounts-created-in-this-example","text":"One ServiceAccount is created in this example, nginx-ingress-serviceaccount .","title":"Service Accounts created in this example"},{"location":"deploy/rbac/#permissions-granted-in-this-example","text":"There are two sets of permissions defined in this example. Cluster-wide permissions defined by the ClusterRole named nginx-ingress-clusterrole , and namespace specific permissions defined by the Role named nginx-ingress-role .","title":"Permissions Granted in this example"},{"location":"deploy/rbac/#cluster-permissions","text":"These permissions are granted in order for the nginx-ingress-controller to be able to function as an ingress across the cluster. 
These permissions are granted to the ClusterRole named nginx-ingress-clusterrole configmaps , endpoints , nodes , pods , secrets : list, watch nodes : get services , ingresses : get, list, watch events : create, patch ingresses/status : update","title":"Cluster Permissions"},{"location":"deploy/rbac/#namespace-permissions","text":"These permissions are granted specific to the nginx-ingress namespace. These permissions are granted to the Role named nginx-ingress-role configmaps , pods , secrets : get endpoints : get Furthermore to support leader-election, the nginx-ingress-controller needs to have access to a configmap using the resourceName ingress-controller-leader-nginx Note that resourceNames can NOT be used to limit requests using the \u201ccreate\u201d verb because authorizers only have access to information that can be obtained from the request URL, method, and headers (resource names in a \u201ccreate\u201d request are part of the request body). configmaps : get, update (for resourceName ingress-controller-leader-nginx ) configmaps : create This resourceName is the concatenation of the election-id and the ingress-class as defined by the ingress-controller, which defaults to: election-id : ingress-controller-leader ingress-class : nginx resourceName : - Please adapt accordingly if you overwrite either parameter when launching the nginx-ingress-controller.","title":"Namespace Permissions"},{"location":"deploy/rbac/#bindings","text":"The ServiceAccount nginx-ingress-serviceaccount is bound to the Role nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole . The serviceAccountName associated with the containers in the deployment must match the serviceAccount. The namespace references in the Deployment metadata, container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace.","title":"Bindings"},{"location":"deploy/upgrade/","text":"Upgrading \u00b6 Important No matter the method you use for upgrading, if you use template overrides, make sure your templates are compatible with the new version of ingress-nginx . Without Helm \u00b6 To upgrade your ingress-nginx installation, it should be enough to change the version of the image in the controller Deployment. I.e. if your deployment resource looks like (partial example): kind : Deployment metadata : name : nginx-ingress-controller namespace : ingress-nginx spec : replicas : 1 selector : ... template : metadata : ... spec : containers : - name : nginx-ingress-controller image : quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 args : ... simply change the 0.9.0 tag to the version you wish to upgrade to. The easiest way to do this is e.g. (do note you may need to change the name parameter according to your installation): kubectl set image deployment/nginx-ingress-controller \\ nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0 For interactive editing, use kubectl edit deployment nginx-ingress-controller . 
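After changing the image, the rollout can be verified along these lines (a sketch; the namespace and Deployment name are taken from the partial example above):

```bash
kubectl -n ingress-nginx rollout status deployment/nginx-ingress-controller
kubectl -n ingress-nginx get deployment nginx-ingress-controller \
  -o jsonpath='{.spec.template.spec.containers[0].image}'
```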
With Helm \u00b6 If you installed ingress-nginx using the Helm command in the deployment docs so its name is ngx-ingress , you should be able to upgrade using helm upgrade --reuse-values ngx-ingress stable/nginx-ingress","title":"Upgrade"},{"location":"deploy/upgrade/#upgrading","text":"Important No matter the method you use for upgrading, if you use template overrides, make sure your templates are compatible with the new version of ingress-nginx .","title":"Upgrading"},{"location":"deploy/upgrade/#without-helm","text":"To upgrade your ingress-nginx installation, it should be enough to change the version of the image in the controller Deployment. I.e. if your deployment resource looks like (partial example): kind : Deployment metadata : name : nginx-ingress-controller namespace : ingress-nginx spec : replicas : 1 selector : ... template : metadata : ... spec : containers : - name : nginx-ingress-controller image : quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 args : ... simply change the 0.9.0 tag to the version you wish to upgrade to. The easiest way to do this is e.g. (do note you may need to change the name parameter according to your installation): kubectl set image deployment/nginx-ingress-controller \\ nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0 For interactive editing, use kubectl edit deployment nginx-ingress-controller .","title":"Without Helm"},{"location":"deploy/upgrade/#with-helm","text":"If you installed ingress-nginx using the Helm command in the deployment docs so its name is ngx-ingress , you should be able to upgrade using helm upgrade --reuse-values ngx-ingress stable/nginx-ingress","title":"With Helm"},{"location":"deploy/validating-webhook/","text":"Validating webhook (admission controller) \u00b6 Overview \u00b6 Nginx ingress controller offers the option to validate ingresses before they enter the cluster, ensuring controller will generate a valid configuration. This controller is called, when ValidatingAdmissionWebhook is enabled, by the Kubernetes API server each time a new ingress is to enter the cluster, and rejects objects for which the generated nginx configuration fails to be validated. This feature requires some further configuration of the cluster, hence it is an optional feature, this section explains how to enable it for your cluster. Configure the webhook \u00b6 Generate the webhook certificate \u00b6 Self signed certificate \u00b6 Validating webhook must be served using TLS, you need to generate a certificate. Note that kube API server is checking the hostname of the certificate, the common name of your certificate will need to match the service name. Example To run the validating webhook with a service named ingress-validation-webhook in the namespace ingress-nginx , run openssl req -x509 -newkey rsa:2048 -keyout certificate.pem -out key.pem -days 365 -nodes -subj \"/CN=ingress-validation-webhook.ingress-nginx.svc\" Using Kubernetes CA \u00b6 Kubernetes also provides primitives to sign a certificate request. 
Here is an example on how to use it Example #!/bin/bash SERVICE_NAME = ingress-nginx NAMESPACE = ingress-nginx TEMP_DIRECTORY = $( mktemp -d ) echo \"creating certs in directory ${ TEMP_DIRECTORY } \" cat <> ${TEMP_DIRECTORY}/csr.conf [req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment extendedKeyUsage = serverAuth subjectAltName = @alt_names [alt_names] DNS.1 = ${SERVICE_NAME} DNS.2 = ${SERVICE_NAME}.${NAMESPACE} DNS.3 = ${SERVICE_NAME}.${NAMESPACE}.svc EOF openssl genrsa -out ${ TEMP_DIRECTORY } /server-key.pem 2048 openssl req -new -key ${ TEMP_DIRECTORY } /server-key.pem \\ -subj \"/CN= ${ SERVICE_NAME } . ${ NAMESPACE } .svc\" \\ -out ${ TEMP_DIRECTORY } /server.csr \\ -config ${ TEMP_DIRECTORY } /csr.conf cat < & 2 exit 1 fi echo ${ SERVER_CERT } | openssl base64 -d -A -out ${ TEMP_DIRECTORY } /server-cert.pem kubectl create secret generic ingress-nginx.svc \\ --from-file = key.pem = ${ TEMP_DIRECTORY } /server-key.pem \\ --from-file = cert.pem = ${ TEMP_DIRECTORY } /server-cert.pem \\ -n ${ NAMESPACE } Using helm \u00b6 To generate the certificate using helm, you can use the following snippet Example {{ - $ cn := printf \"%s.%s.svc\" ( include \"nginx-ingress.validatingWebhook.fullname\" . ) .Release.Namespace }} {{ - $ ca := genCA ( printf \"%s-ca\" ( include \"nginx-ingress.validatingWebhook.fullname\" . )) .Values.validatingWebhook.certificateValidity - }} {{ - $ cert := genSignedCert $ cn nil nil .Values.validatingWebhook.certificateValidity $ ca - }} Ingress controller flags \u00b6 To enable the feature in the ingress controller, you need to provide 3 flags to the command line. flag description example usage --validating-webhook The address to start an admission controller on :8080 --validating-webhook-certificate The certificate the webhook is using for its TLS handling /usr/local/certificates/validating-webhook.pem --validating-webhook-key The key the webhook is using for its TLS handling /usr/local/certificates/validating-webhook-key.pem kube API server flags \u00b6 Validating webhook feature requires specific setup on the kube API server side. Depending on your kubernetes version, the flag can, or not, be enabled by default. To check that your kube API server runs with the required flags, please refer to the kubernetes documentation. 
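On clusters where the API server runs as a static Pod (for example kubeadm-based setups), one way to inspect those flags is sketched below (the label selector and Pod layout are assumptions):

```bash
kubectl -n kube-system get pod -l component=kube-apiserver \
  -o jsonpath='{.items[0].spec.containers[0].command}'
# Look for ValidatingAdmissionWebhook in the --enable-admission-plugins value.
```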
Additional kubernetes objects \u00b6 Once both the ingress controller and the kube API server are configured to serve the webhook, you can configure the webhook with the following objects: apiVersion : v1 kind : Service metadata : name : ingress-validation-webhook namespace : ingress-nginx spec : ports : - name : admission port : 443 protocol : TCP targetPort : 8080 selector : app : nginx-ingress component : controller --- apiVersion : admissionregistration.k8s.io/v1beta1 kind : ValidatingWebhookConfiguration metadata : name : check-ingress webhooks : - name : validate.nginx.ingress.kubernetes.io rules : - apiGroups : - networking.k8s.io/v1beta1 apiVersions : - v1beta1 operations : - CREATE - UPDATE resources : - ingresses failurePolicy : Fail clientConfig : service : namespace : ingress-nginx name : ingress-validation-webhook path : /networking.k8s.io/v1beta1/ingress caBundle : ","title":"Validating Webhook (admission controller)"},{"location":"deploy/validating-webhook/#validating-webhook-admission-controller","text":"","title":"Validating webhook (admission controller)"},{"location":"deploy/validating-webhook/#overview","text":"Nginx ingress controller offers the option to validate ingresses before they enter the cluster, ensuring controller will generate a valid configuration. This controller is called, when ValidatingAdmissionWebhook is enabled, by the Kubernetes API server each time a new ingress is to enter the cluster, and rejects objects for which the generated nginx configuration fails to be validated. This feature requires some further configuration of the cluster, hence it is an optional feature, this section explains how to enable it for your cluster.","title":"Overview"},{"location":"deploy/validating-webhook/#configure-the-webhook","text":"","title":"Configure the webhook"},{"location":"deploy/validating-webhook/#generate-the-webhook-certificate","text":"","title":"Generate the webhook certificate"},{"location":"deploy/validating-webhook/#self-signed-certificate","text":"Validating webhook must be served using TLS, you need to generate a certificate. Note that kube API server is checking the hostname of the certificate, the common name of your certificate will need to match the service name. Example To run the validating webhook with a service named ingress-validation-webhook in the namespace ingress-nginx , run openssl req -x509 -newkey rsa:2048 -keyout certificate.pem -out key.pem -days 365 -nodes -subj \"/CN=ingress-validation-webhook.ingress-nginx.svc\"","title":"Self signed certificate"},{"location":"deploy/validating-webhook/#using-kubernetes-ca","text":"Kubernetes also provides primitives to sign a certificate request. Here is an example on how to use it Example #!/bin/bash SERVICE_NAME = ingress-nginx NAMESPACE = ingress-nginx TEMP_DIRECTORY = $( mktemp -d ) echo \"creating certs in directory ${ TEMP_DIRECTORY } \" cat <> ${TEMP_DIRECTORY}/csr.conf [req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment extendedKeyUsage = serverAuth subjectAltName = @alt_names [alt_names] DNS.1 = ${SERVICE_NAME} DNS.2 = ${SERVICE_NAME}.${NAMESPACE} DNS.3 = ${SERVICE_NAME}.${NAMESPACE}.svc EOF openssl genrsa -out ${ TEMP_DIRECTORY } /server-key.pem 2048 openssl req -new -key ${ TEMP_DIRECTORY } /server-key.pem \\ -subj \"/CN= ${ SERVICE_NAME } .
${ NAMESPACE } .svc\" \\ -out ${ TEMP_DIRECTORY } /server.csr \\ -config ${ TEMP_DIRECTORY } /csr.conf cat < & 2 exit 1 fi echo ${ SERVER_CERT } | openssl base64 -d -A -out ${ TEMP_DIRECTORY } /server-cert.pem kubectl create secret generic ingress-nginx.svc \\ --from-file = key.pem = ${ TEMP_DIRECTORY } /server-key.pem \\ --from-file = cert.pem = ${ TEMP_DIRECTORY } /server-cert.pem \\ -n ${ NAMESPACE }","title":"Using Kubernetes CA"},{"location":"deploy/validating-webhook/#using-helm","text":"To generate the certificate using helm, you can use the following snippet Example {{ - $ cn := printf \"%s.%s.svc\" ( include \"nginx-ingress.validatingWebhook.fullname\" . ) .Release.Namespace }} {{ - $ ca := genCA ( printf \"%s-ca\" ( include \"nginx-ingress.validatingWebhook.fullname\" . )) .Values.validatingWebhook.certificateValidity - }} {{ - $ cert := genSignedCert $ cn nil nil .Values.validatingWebhook.certificateValidity $ ca - }}","title":"Using helm"},{"location":"deploy/validating-webhook/#ingress-controller-flags","text":"To enable the feature in the ingress controller, you need to provide 3 flags to the command line. flag description example usage --validating-webhook The address to start an admission controller on :8080 --validating-webhook-certificate The certificate the webhook is using for its TLS handling /usr/local/certificates/validating-webhook.pem --validating-webhook-key The key the webhook is using for its TLS handling /usr/local/certificates/validating-webhook-key.pem","title":"Ingress controller flags"},{"location":"deploy/validating-webhook/#kube-api-server-flags","text":"Validating webhook feature requires specific setup on the kube API server side. Depending on your kubernetes version, the flag can, or not, be enabled by default. To check that your kube API server runs with the required flags, please refer to the kubernetes documentation.","title":"kube API server flags"},{"location":"deploy/validating-webhook/#additional-kubernetes-objects","text":"Once both the ingress controller and the kube API server are configured to serve the webhook, add the you can configure the webhook with the following objects: apiVersion : v1 kind : Service metadata : name : ingress-validation-webhook namespace : ingress-nginx spec : ports : - name : admission port : 443 protocol : TCP targetPort : 8080 selector : app : nginx-ingress component : controller --- apiVersion : admissionregistration.k8s.io/v1beta1 kind : ValidatingWebhookConfiguration metadata : name : check-ingress webhooks : - name : validate.nginx.ingress.kubernetes.io rules : - apiGroups : - networking.k8s.io/v1beta1 apiVersions : - v1beta1 operations : - CREATE - UPDATE resources : - ingresses failurePolicy : Fail clientConfig : service : namespace : ingress-nginx name : ingress-validation-webhook path : /networking.k8s.io/v1beta1/ingress caBundle : ","title":"Additional kubernetes objects"},{"location":"enhancements/","text":"Kubernetes Enhancement Proposals (KEPs) \u00b6 A Kubernetes Enhancement Proposal (KEP) is a way to propose, communicate and coordinate on new efforts for the Kubernetes project. For this reason, the ingress-nginx project is adopting it. Quick start for the KEP process \u00b6 Follow the process outlined in the KEP template Do I have to use the KEP process? \u00b6 No... but we hope that you will. Over time having a rich set of KEPs in one place will make it easier for people to track what is going on in the community and find a structured historic record. 
KEPs are only required when the changes are wide ranging and impact most of the project. Why would I want to use the KEP process? \u00b6 Our aim with KEPs is to clearly communicate new efforts to the Kubernetes contributor community. As such, we want to build a well-curated set of clear proposals in a common format with useful metadata. Benefits to KEP users (in the limit): Exposure on a Kubernetes-blessed website that is findable via web search engines. Cross-indexing of KEPs so that users can find connections and the current status of any KEP. A clear process with approvers and reviewers for making decisions. This will lead to more structured decisions that stick, as there is a discoverable record around the decisions. We are inspired by IETF RFCs, Python PEPs, and Rust RFCs.","title":"Kubernetes Enhancement Proposals (KEPs)"},{"location":"enhancements/#kubernetes-enhancement-proposals-keps","text":"A Kubernetes Enhancement Proposal (KEP) is a way to propose, communicate and coordinate on new efforts for the Kubernetes project. For this reason, the ingress-nginx project is adopting it.","title":"Kubernetes Enhancement Proposals (KEPs)"},{"location":"enhancements/#quick-start-for-the-kep-process","text":"Follow the process outlined in the KEP template","title":"Quick start for the KEP process"},{"location":"enhancements/#do-i-have-to-use-the-kep-process","text":"No... but we hope that you will. Over time having a rich set of KEPs in one place will make it easier for people to track what is going on in the community and find a structured historic record. KEPs are only required when the changes are wide ranging and impact most of the project.","title":"Do I have to use the KEP process?"},{"location":"enhancements/#why-would-i-want-to-use-the-kep-process","text":"Our aim with KEPs is to clearly communicate new efforts to the Kubernetes contributor community. As such, we want to build a well-curated set of clear proposals in a common format with useful metadata. Benefits to KEP users (in the limit): Exposure on a Kubernetes-blessed website that is findable via web search engines. Cross-indexing of KEPs so that users can find connections and the current status of any KEP. A clear process with approvers and reviewers for making decisions. This will lead to more structured decisions that stick, as there is a discoverable record around the decisions. We are inspired by IETF RFCs, Python PEPs, and Rust RFCs.","title":"Why would I want to use the KEP process?"},{"location":"enhancements/20190724-only-dynamic-ssl/","text":"Remove static SSL configuration mode \u00b6 Table of Contents \u00b6 Summary Motivation Goals Non-Goals Proposal Implementation Details/Notes/Constraints Drawbacks Alternatives Summary \u00b6 Since release 0.19.0 it has been possible to configure SSL certificates without the need for NGINX reloads (thanks to Lua), and since release 0.24.0 the dynamic mode is enabled by default. Motivation \u00b6 The static configuration implies reloads, something that affects the majority of users. Goals \u00b6 Deprecation of the flag --enable-dynamic-certificates . Cleanup of the codebase. Non-Goals \u00b6 Features related to certificate authentication are not changed in any way. Proposal \u00b6 Remove static SSL configuration Implementation Details/Notes/Constraints \u00b6 Deprecate the flag Move the directives ssl_certificate and ssl_certificate_key from each server block to the http section. These settings are required to avoid NGINX errors in the logs.
Remove any action of the flag --enable-dynamic-certificates Drawbacks \u00b6 Alternatives \u00b6 Keep both implementations","title":"Remove static SSL configuration mode"},{"location":"enhancements/20190724-only-dynamic-ssl/#remove-static-ssl-configuration-mode","text":"","title":"Remove static SSL configuration mode"},{"location":"enhancements/20190724-only-dynamic-ssl/#table-of-contents","text":"Summary Motivation Goals Non-Goals Proposal Implementation Details/Notes/Constraints Drawbacks Alternatives","title":"Table of Contents"},{"location":"enhancements/20190724-only-dynamic-ssl/#summary","text":"Since release 0.19.0 it has been possible to configure SSL certificates without the need for NGINX reloads (thanks to Lua), and since release 0.24.0 the dynamic mode is enabled by default.","title":"Summary"},{"location":"enhancements/20190724-only-dynamic-ssl/#motivation","text":"The static configuration implies reloads, something that affects the majority of the users.","title":"Motivation"},{"location":"enhancements/20190724-only-dynamic-ssl/#goals","text":"Deprecation of the flag --enable-dynamic-certificates . Cleanup of the codebase.","title":"Goals"},{"location":"enhancements/20190724-only-dynamic-ssl/#non-goals","text":"Features related to certificate authentication are not changed in any way.","title":"Non-Goals"},{"location":"enhancements/20190724-only-dynamic-ssl/#proposal","text":"Remove static SSL configuration","title":"Proposal"},{"location":"enhancements/20190724-only-dynamic-ssl/#implementation-detailsnotesconstraints","text":"Deprecate the flag Move the directives ssl_certificate and ssl_certificate_key from each server block to the http section. These settings are required to avoid NGINX errors in the logs. Remove any action of the flag --enable-dynamic-certificates","title":"Implementation Details/Notes/Constraints"},{"location":"enhancements/20190724-only-dynamic-ssl/#drawbacks","text":"","title":"Drawbacks"},{"location":"enhancements/20190724-only-dynamic-ssl/#alternatives","text":"Keep both implementations","title":"Alternatives"},{"location":"enhancements/20190815-zone-aware-routing/","text":"Availability zone aware routing \u00b6 Table of Contents \u00b6 Summary Motivation Goals Non-Goals Proposal Implementation History Drawbacks [optional] Summary \u00b6 Teach ingress-nginx about the availability zones in which endpoints are running. This way the ingress-nginx pod will do its best to proxy to a zone-local endpoint. Motivation \u00b6 When users run their services across multiple availability zones they usually pay for egress traffic between zones. Providers such as GCP and Amazon EC2 charge money for that. ingress-nginx, when picking an endpoint to route a request to, does not consider whether the endpoint is in a different zone or the same one. That means it's at least equally likely that it will pick an endpoint from another zone and proxy the request to it. In this situation the response from the endpoint to the ingress-nginx pod is considered inter-zone traffic and costs money. At the time of this writing GCP charges $0.01 per GB of inter-zone egress traffic according to https://cloud.google.com/compute/network-pricing. According to https://datapath.io/resources/blog/what-are-aws-data-transfer-costs-and-how-to-minimize-them/ Amazon also charges the same amount of money as GCP for cross-zone egress traffic. This can be a lot of money depending on one's traffic. By teaching ingress-nginx about zones we can eliminate or at least decrease this cost.
Arguably intra-zone network latency should also be better than cross-zone latency. Goals \u00b6 Given a regional cluster running ingress-nginx, ingress-nginx should make a best effort to pick a zone-local endpoint when proxying This should not impact the canary feature ingress-nginx should be able to operate successfully if there are no zonal endpoints Non-Goals \u00b6 This feature inherently assumes that endpoints are distributed across zones in a way that they can handle all the traffic from ingress-nginx pod(s) in that zone This feature will be relying on https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domainbetakubernetesiozone; it is not this KEP's goal to support other cases Proposal \u00b6 The idea here is to have the controller part of ingress-nginx (1) detect what zone its current pod is running in and (2) detect the zone for every endpoint it knows about. After that it will post that data as part of the endpoints to Lua land. Then the Lua balancer, when picking an endpoint, will try to pick a zone-local endpoint first, and if there is no zone-local endpoint it will fall back to the current behaviour. This feature, at least in the beginning, should be optional, since it is going to make it harder to reason about the load balancing and not everyone might want that. How does the controller know what zone it runs in? We can have the pod spec pass the node name using the downward API as an environment variable. Then on start the controller can get the node details from the API based on the node name. Once the node details are obtained, we can extract the zone from the failure-domain.beta.kubernetes.io/zone annotation. Then we can pass that value to Lua land through the Nginx configuration when loading the lua_ingress.lua module in the init_by_lua phase. How do we extract zones for endpoints? We can have the controller watch create and update events on nodes in the entire cluster and based on that keep a map of nodes to zones in memory. And when we generate the endpoints list, we can access the node name using .subsets.addresses[i].nodeName and based on that fetch the zone from the map in memory and store it as a field on the endpoint. This solution assumes the failure-domain.beta.kubernetes.io/zone annotation does not change until the end of the node's life. Otherwise we have to watch update events on the nodes as well, and that'll add even more overhead. Alternatively, we can get the list of nodes only when there's no node in memory for a given node name. This is probably a better solution because then we would avoid watching for API changes on node resources. We can eagerly fetch all the nodes and build the node name to zone mapping on start. And from then on sync it during endpoints building in the main event loop if no entry exists for the node of an endpoint. This means an extra API call in case the cluster has expanded. How do we make sure we do our best to choose a zone-local endpoint? This will be done on the Lua side. For every backend we will initialize two balancer instances: (1) with all endpoints, (2) with all endpoints corresponding to the current zone for the backend. Then, given the request, once we choose which backend needs to serve the request, we will first try to use the zonal balancer for that backend. If the zonal balancer does not exist (i.e. there is no zonal endpoint) then we will use the general balancer. In case of zonal outages we assume that the readiness probe will fail and the controller will see no endpoints for the backend, and therefore we will use the general balancer. We can enable the feature using a configmap setting.
Doing it this way makes it easier to roll back in case of a problem. Implementation History \u00b6 initial version of KEP is shipped proposal and implementation details are done Drawbacks [optional] \u00b6 More load on the Kubernetes API server.","title":"Availability zone aware routing"},{"location":"enhancements/20190815-zone-aware-routing/#availability-zone-aware-routing","text":"","title":"Availability zone aware routing"},{"location":"enhancements/20190815-zone-aware-routing/#table-of-contents","text":"Summary Motivation Goals Non-Goals Proposal Implementation History Drawbacks [optional]","title":"Table of Contents"},{"location":"enhancements/20190815-zone-aware-routing/#summary","text":"Teach ingress-nginx about the availability zones in which endpoints are running. This way the ingress-nginx pod will do its best to proxy to a zone-local endpoint.","title":"Summary"},{"location":"enhancements/20190815-zone-aware-routing/#motivation","text":"When users run their services across multiple availability zones they usually pay for egress traffic between zones. Providers such as GCP and Amazon EC2 charge money for that. ingress-nginx, when picking an endpoint to route a request to, does not consider whether the endpoint is in a different zone or the same one. That means it's at least equally likely that it will pick an endpoint from another zone and proxy the request to it. In this situation the response from the endpoint to the ingress-nginx pod is considered inter-zone traffic and costs money. At the time of this writing GCP charges $0.01 per GB of inter-zone egress traffic according to https://cloud.google.com/compute/network-pricing. According to https://datapath.io/resources/blog/what-are-aws-data-transfer-costs-and-how-to-minimize-them/ Amazon also charges the same amount of money as GCP for cross-zone egress traffic. This can be a lot of money depending on one's traffic. By teaching ingress-nginx about zones we can eliminate or at least decrease this cost. Arguably intra-zone network latency should also be better than cross-zone latency.","title":"Motivation"},{"location":"enhancements/20190815-zone-aware-routing/#goals","text":"Given a regional cluster running ingress-nginx, ingress-nginx should make a best effort to pick a zone-local endpoint when proxying This should not impact the canary feature ingress-nginx should be able to operate successfully if there are no zonal endpoints","title":"Goals"},{"location":"enhancements/20190815-zone-aware-routing/#non-goals","text":"This feature inherently assumes that endpoints are distributed across zones in a way that they can handle all the traffic from ingress-nginx pod(s) in that zone This feature will be relying on https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domainbetakubernetesiozone; it is not this KEP's goal to support other cases","title":"Non-Goals"},{"location":"enhancements/20190815-zone-aware-routing/#proposal","text":"The idea here is to have the controller part of ingress-nginx (1) detect what zone its current pod is running in and (2) detect the zone for every endpoint it knows about. After that it will post that data as part of the endpoints to Lua land. Then the Lua balancer, when picking an endpoint, will try to pick a zone-local endpoint first, and if there is no zone-local endpoint it will fall back to the current behaviour. This feature, at least in the beginning, should be optional, since it is going to make it harder to reason about the load balancing and not everyone might want that. How does the controller know what zone it runs in?
We can have the pod spec pass the node name using the downward API as an environment variable. Then on start the controller can get the node details from the API based on the node name. Once the node details are obtained, we can extract the zone from the failure-domain.beta.kubernetes.io/zone annotation. Then we can pass that value to Lua land through the Nginx configuration when loading the lua_ingress.lua module in the init_by_lua phase. How do we extract zones for endpoints? We can have the controller watch create and update events on nodes in the entire cluster and based on that keep a map of nodes to zones in memory. And when we generate the endpoints list, we can access the node name using .subsets.addresses[i].nodeName and based on that fetch the zone from the map in memory and store it as a field on the endpoint. This solution assumes the failure-domain.beta.kubernetes.io/zone annotation does not change until the end of the node's life. Otherwise we have to watch update events on the nodes as well, and that'll add even more overhead. Alternatively, we can get the list of nodes only when there's no node in memory for a given node name. This is probably a better solution because then we would avoid watching for API changes on node resources. We can eagerly fetch all the nodes and build the node name to zone mapping on start. And from then on sync it during endpoints building in the main event loop if no entry exists for the node of an endpoint. This means an extra API call in case the cluster has expanded. How do we make sure we do our best to choose a zone-local endpoint? This will be done on the Lua side. For every backend we will initialize two balancer instances: (1) with all endpoints, (2) with all endpoints corresponding to the current zone for the backend. Then, given the request, once we choose which backend needs to serve the request, we will first try to use the zonal balancer for that backend. If the zonal balancer does not exist (i.e. there is no zonal endpoint) then we will use the general balancer. In case of zonal outages we assume that the readiness probe will fail and the controller will see no endpoints for the backend, and therefore we will use the general balancer. We can enable the feature using a configmap setting. Doing it this way makes it easier to roll back in case of a problem.","title":"Proposal"},{"location":"enhancements/20190815-zone-aware-routing/#implementation-history","text":"initial version of KEP is shipped proposal and implementation details are done","title":"Implementation History"},{"location":"enhancements/20190815-zone-aware-routing/#drawbacks-optional","text":"More load on the Kubernetes API server.","title":"Drawbacks [optional]"},{"location":"enhancements/YYYYMMDD-kep-template/","text":"Title \u00b6 This is the title of the KEP. Keep it simple and descriptive. A good title can help communicate what the KEP is and should be considered as part of any review. The title should be lowercased and spaces/punctuation should be replaced with - . To get started with this template: Make a copy of this template. Create a copy of this template and name it YYYYMMDD-my-title.md , where YYYYMMDD is the date the KEP was first drafted. Fill out the \"overview\" sections. This includes the Summary and Motivation sections. These should be easy if you've preflighted the idea of the KEP in an issue. Create a PR. Assign it to folks that are sponsoring this process. Create an issue. When filing an enhancement tracking issue, please ensure you complete all fields in the template. Merge early.
Avoid getting hung up on specific details and instead aim to get the goal of the KEP merged quickly. The best way to do this is to just start with the \"Overview\" sections and fill out details incrementally in follow-on PRs. View anything marked as provisional as a working document and subject to change. Aim for single-topic PRs to keep discussions focused. If you disagree with what is already in a document, open a new PR with suggested changes. The canonical place for the latest set of instructions (and the likely source of this file) is here. The Metadata section above is intended to support the creation of tooling around the KEP process. This will be a YAML section that is fenced as a code block. See the KEP process for details on each of these items. Table of Contents \u00b6 A table of contents is helpful for quickly jumping to sections of a KEP and for highlighting any additional information provided beyond the standard KEP template. Ensure the TOC is wrapped with
This section demonstrates how to use the Layer 2 configuration mode of MetalLB together with the NGINX Ingress controller in a Kubernetes cluster that has publicly accessible nodes. In this mode, one node attracts all the traffic for the ingress-nginx Service IP. See Traffic policies for more details.
Note
MetalLB can be deployed either with a simple Kubernetes manifest or with Helm. The rest of this example assumes MetalLB was deployed following the Installation instructions.
MetalLB requires a pool of IP addresses in order to be able to take ownership of the ingress-nginx Service. This pool can be defined in a ConfigMap named config located in the same namespace as the MetalLB controller. This pool of IPs must be dedicated to MetalLB's use; you can't reuse the Kubernetes node IPs or IPs handed out by a DHCP server.
Example
Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <None>)
$ kubectl get node
NAME     STATUS   ROLES    EXTERNAL-IP
host-1   Ready    master   203.0.113.1
host-2   Ready    node     203.0.113.2
host-3   Ready    node     203.0.113.3
After creating the following ConfigMap, MetalLB takes ownership of one of the IP addresses in the pool and updates the loadBalancer IP field of the ingress-nginx Service accordingly.
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 203.0.113.10-203.0.113.15
$ kubectl -n ingress-nginx get svc
NAME                   TYPE           CLUSTER-IP     EXTERNAL-IP    PORT(S)
default-http-backend   ClusterIP      10.0.64.249    <none>         80/TCP
ingress-nginx          LoadBalancer   10.0.220.217   203.0.113.10   80:30100/TCP,443:30101/TCP
As soon as MetalLB sets the external IP address of the ingress-nginx LoadBalancer Service, the corresponding entries are created in the iptables NAT table and the node with the selected IP address starts responding to HTTP requests on the ports configured in the LoadBalancer Service:
$ curl -D- http://203.0.113.3 -H 'Host: myapp.example.com'
HTTP/1.1 200 OK
Server: nginx/1.15.2
Tip
In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the Local traffic policy. Traffic policies are described in more detail in Traffic policies as well as in the next section.
Info
A Service of type NodePort exposes, via the kube-proxy component, the same unprivileged port (default: 30000-32767) on every Kubernetes node, masters included. For more information, see Services.
In this configuration, the NGINX container remains isolated from the host network. As a result, it can safely bind to any port, including the standard HTTP ports 80 and 443. However, due to the container namespace isolation, a client located outside the cluster network (e.g. on the public internet) is not able to access Ingress hosts directly on ports 80 and 443. Instead, the external client must append the NodePort allocated to the ingress-nginx Service to HTTP requests.
Given the NodePort 30100 allocated to the ingress-nginx Service
$ kubectl -n ingress-nginx get svc
NAME                   TYPE        CLUSTER-IP     PORT(S)
default-http-backend   ClusterIP   10.0.64.249    80/TCP
ingress-nginx          NodePort    10.0.220.217   80:30100/TCP,443:30101/TCP
and a Kubernetes node with the public IP address 203.0.113.2 (the external IP is added as an example, in most bare-metal environments this value is <None>)
a client would reach an Ingress with host: myapp.example.com at http://myapp.example.com:30100, where the myapp.example.com subdomain resolves to the 203.0.113.2 IP address.
Impact on the host system
While it may sound tempting to reconfigure the NodePort range using the --service-node-port-range API server flag to include unprivileged ports and be able to expose ports 80 and 443, doing so may result in unexpected issues including (but not limited to) the use of ports otherwise reserved to system daemons and the necessity to grant kube-proxy privileges it may otherwise not require.
This practice is therefore discouraged. See the other approaches proposed on this page for alternatives.
This approach has a few other limitations one ought to be aware of:
Services of type NodePort perform source address translation by default. This means the source IP of an HTTP request is always the IP address of the Kubernetes node that received the request from the perspective of NGINX.
The recommended way to preserve the source IP in a NodePort setup is to set the value of the externalTrafficPolicy field of the ingress-nginx Service spec to Local (example).
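A minimal sketch of that change (the Service name, namespace, and selector labels below are assumed from the examples elsewhere on this page, not authoritative):
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  type: NodePort
  # Preserve the client source IP; nodes without a controller Pod drop traffic (see Warning below).
  externalTrafficPolicy: Local
  ports:
  - name: http
    port: 80
    targetPort: 80
  - name: https
    port: 443
    targetPort: 443
  selector:
    app: nginx-ingress
    component: controller
Alternatively, the single field can be set on an existing Service, e.g. with kubectl -n ingress-nginx patch svc ingress-nginx -p '{"spec":{"externalTrafficPolicy":"Local"}}'.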
Warning
This setting effectively drops packets sent to Kubernetes nodes which are not running any instance of the NGINX Ingress controller. Consider assigning NGINX Pods to specific nodes in order to control on what nodes the NGINX Ingress controller should be scheduled or not scheduled.
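One way to do such an assignment, sketched here with an illustrative (hypothetical) node label, is a nodeSelector in the controller's Pod template:
template:
  spec:
    # Hypothetical label; nodes must be labelled accordingly first, e.g.
    # kubectl label node host-2 ingress-ready=true
    nodeSelector:
      ingress-ready: "true"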
In a Kubernetes cluster composed of 3 nodes (the external IP is added as an example, in most bare-metal environments this value is <None>)
with a nginx-ingress-controller Deployment composed of 2 replicas
$ kubectl -n ingress-nginx get pod -o wide
NAME                                       READY   STATUS    IP           NODE
default-http-backend-7c5bc89cc9-p86md      1/1     Running   172.17.1.1   host-2
nginx-ingress-controller-cf9ff8c96-8vvf8   1/1     Running   172.17.0.3   host-3
nginx-ingress-controller-cf9ff8c96-pxsds   1/1     Running   172.17.1.4   host-2
Requests sent to host-2 and host-3 would be forwarded to NGINX and the original client's IP would be preserved, while requests to host-1 would get dropped because there is no NGINX replica running on that node.
Because NodePort Services do not get a LoadBalancerIP assigned by definition, the NGINX Ingress controller does not update the status of Ingress objects it manages.
$ kubectl get ingress
NAME           HOSTS               ADDRESS   PORTS
test-ingress   myapp.example.com             80
Despite the fact there is no load balancer providing a public IP address to the NGINX Ingress controller, it is possible to force the status update of all managed Ingress objects by setting the externalIPs field of the ingress-nginx Service.
There is more to setting externalIPs than just enabling the NGINX Ingress controller to update the status of Ingress objects. Please read about this option in the Services page of the official Kubernetes documentation as well as the section about External IPs in this document for more information.
one could edit the ingress-nginx Service and add the following field to the object spec
spec:
  externalIPs:
  - 203.0.113.1
  - 203.0.113.2
  - 203.0.113.3
which would in turn be reflected on Ingress objects as follows:
$ kubectl get ingress -o wide
NAME           HOSTS               ADDRESS                               PORTS
test-ingress   myapp.example.com   203.0.113.1,203.0.113.2,203.0.113.3   80
Redirects generated by NGINX, for instance HTTP to HTTPS or domain to www.domain, are generated without NodePort:
$ curl -D- http://myapp.example.com:30100
HTTP/1.1 308 Permanent Redirect
Server: nginx/1.15.2
Location: https://myapp.example.com/  #-> missing NodePort in HTTPS redirect
In a setup where there is no external load balancer available but using NodePorts is not an option, one can configure ingress-nginx Pods to use the network of the host they run on instead of a dedicated network namespace. The benefit of this approach is that the NGINX Ingress controller can bind ports 80 and 443 directly to Kubernetes nodes' network interfaces, without the extra network translation imposed by NodePort Services.
This approach does not leverage any Service object to expose the NGINX Ingress controller. If the ingress-nginx Service exists in the target cluster, it is recommended to delete it.
This can be achieved by enabling the hostNetwork option in the Pods' spec.
template:
  spec:
    hostNetwork: true
Security considerations
Consider this nginx-ingress-controller Deployment composed of 2 replicas: NGINX Pods inherit the IP address of their host instead of an internal Pod IP.
$ kubectl -n ingress-nginx get pod -o wide
NAME                                       READY   STATUS    IP            NODE
default-http-backend-7c5bc89cc9-p86md      1/1     Running   172.17.1.1    host-2
nginx-ingress-controller-5b4cf5fc6-7lg6c   1/1     Running   203.0.113.3   host-3
nginx-ingress-controller-5b4cf5fc6-lzrls   1/1     Running   203.0.113.2   host-2
One major limitation of this deployment approach is that only a single NGINX Ingress controller Pod may be scheduled on each cluster node, because binding the same port multiple times on the same network interface is technically impossible. Pods that are unschedulable due to this situation fail with the following event:
$ kubectl -n ingress-nginx describe pod <unschedulable-nginx-ingress-controller-pod>
...
Events:
  Type     Reason            From               Message
  ----     ------            ----               -------
  Warning  FailedScheduling  default-scheduler  0/3 nodes are available: 3 node(s) didn't have free ports for the requested pod ports.
One way to ensure only schedulable Pods are created is to deploy the NGINX Ingress controller as a DaemonSet instead of a traditional Deployment.
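A minimal sketch of such a DaemonSet (labels and image tag are illustrative; a complete manifest also needs the ServiceAccount, container args, and probes from the standard deployment files):
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app: nginx-ingress
  template:
    metadata:
      labels:
        app: nginx-ingress
    spec:
      # One controller Pod per node, each binding ports 80/443 on the host.
      hostNetwork: true
      containers:
      - name: nginx-ingress-controller
        image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0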
Pods configured with hostNetwork: true do not use the internal DNS resolver (i.e. kube-dns or CoreDNS), unless their dnsPolicy spec field is set to ClusterFirstWithHostNet. Consider using this setting if NGINX is expected to resolve internal names for any reason.
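For example, a minimal sketch of the relevant Pod template fields:
template:
  spec:
    hostNetwork: true
    # Required for in-cluster DNS resolution when hostNetwork is enabled.
    dnsPolicy: ClusterFirstWithHostNet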
Because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply and the status of all Ingress objects remains blank.
Instead, and because bare-metal nodes usually don't have an ExternalIP, one has to enable the --report-node-internal-ip-address flag, which sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller.
Given a nginx-ingress-controller DaemonSet composed of 2 replicas
$ kubectl -n ingress-nginx get pod -o wide
NAME                                       READY   STATUS    IP            NODE
default-http-backend-7c5bc89cc9-p86md      1/1     Running   172.17.1.1    host-2
nginx-ingress-controller-5b4cf5fc6-7lg6c   1/1     Running   203.0.113.3   host-3
nginx-ingress-controller-5b4cf5fc6-lzrls   1/1     Running   203.0.113.2   host-2
the controller sets the status of all Ingress objects it manages to the following value:
$ kubectl get ingress -o wide
NAME           HOSTS               ADDRESS                   PORTS
test-ingress   myapp.example.com   203.0.113.2,203.0.113.3   80
Alternatively, it is possible to override the address written to Ingress objects using the --publish-status-address flag. See Command line arguments.
Similarly to cloud environments, this deployment approach requires an edge network component providing a public entrypoint to the Kubernetes cluster […] on the target nodes as shown in the diagram below:
This method does not allow preserving the source IP of HTTP requests in any manner; it is therefore not recommended to use it despite its apparent simplicity.
The externalIPs Service option was previously mentioned in the NodePort section.
As per the Services page of the official Kubernetes documentation, the externalIPs option causes kube-proxy to route traffic sent to arbitrary IP addresses and on the Service ports to the endpoints of that Service. These IP addresses must belong to the target node.
and the following ingress-nginx NodePort Service
$ kubectl -n ingress-nginx get svc
NAME            TYPE       CLUSTER-IP     PORT(S)
ingress-nginx   NodePort   10.0.220.217   80:30100/TCP,443:30101/TCP
One could set the following external IPs in the Service spec, and NGINX would become available on both the NodePort and the Service port:
spec:
  externalIPs:
  - 203.0.113.2
  - 203.0.113.3
$ curl -D- http://myapp.example.com:30100
HTTP/1.1 200 OK
Server: nginx/1.15.2

$ curl -D- http://myapp.example.com
HTTP/1.1 200 OK
Server: nginx/1.15.2
We assume the myapp.example.com subdomain above resolves to both 203.0.113.2 and 203.0.113.3 IP addresses.
Attention
The default configuration watches Ingress objects in all namespaces. To change this behavior, use the flag --watch-namespace to limit the scope to a particular namespace.

Attention
If you're using GKE you need to initialize your user as a cluster-admin with the following command:
kubectl create clusterrolebinding cluster-admin-binding \
  --clusterrole cluster-admin \
  --user $(gcloud config get-value account)

The following Mandatory Command is required for all deployments.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/mandatory.yaml

Tip
If you are using a Kubernetes version previous to 1.14, you need to change kubernetes.io/os to beta.kubernetes.io/os at line 217 of mandatory.yaml, see Labels details.

Provider Specific Steps ¶
There are cloud provider specific yaml files.

Docker for Mac ¶
Kubernetes is available in Docker for Mac (from version 18.06.0-ce)
Create a service
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml

minikube ¶
For standard usage:
minikube addons enable ingress

For development:
Disable the ingress addon:
minikube addons disable ingress
Execute make dev-env
Confirm the nginx-ingress-controller deployment exists:
$ kubectl get pods -n ingress-nginx
NAME                                       READY   STATUS    RESTARTS   AGE
default-http-backend-66b447d9cf-rrlf9      1/1     Running   0          12s
nginx-ingress-controller-fdcdcd6dd-vvpgs   1/1     Running   0          11s

AWS ¶
In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer. Since Kubernetes v1.9.0 it is possible to use a classic load balancer (ELB) or network load balancer (NLB). Please check the elastic load balancing AWS details page.

Elastic Load Balancer - ELB ¶
[…] Layer 7: use HTTP as the listener protocol for port 80 and terminate TLS in the ELB
For L4:
Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l4.yaml
Then execute:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l4.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l4.yaml

For L7:
Change line of the file provider/aws/service-l7.yaml replacing the dummy id with a valid one "arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"
Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l7.yaml
Then execute:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l7.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l7.yaml
This example creates an ELB with just two listeners, one in port 80 and another in port 443

ELB Idle Timeouts ¶
In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the keepalive_timeout that is configured for NGINX. By default NGINX keepalive_timeout is set to 75s.
The default ELB idle timeout will work for most scenarios, unless the NGINX keepalive_timeout has been modified, in which case service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout will need to be modified to ensure it is less than the keepalive_timeout the user has configured.
Please Note: An idle timeout of 3600s is recommended when using WebSockets.
More information with regards to idle timeouts for your Load Balancer can be found in the official AWS documentation.

Network Load Balancer (NLB) ¶
This type of load balancer is supported since v1.10.0 as an ALPHA feature.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-nlb.yaml

GCE-GKE ¶
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml
Important Note: proxy protocol is not supported in GCE/GKE

Azure ¶
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml

Bare-metal ¶
Using NodePort:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/baremetal/service-nodeport.yaml
Tip
Please check the […]

Verify installation ¶
To check if the ingress controller pods have started, run the following command:
kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch

Once the operator pods are running, you can cancel the above command by typing Ctrl+C.
Now, you are ready to create your first ingress.

Detect installed version ¶
To detect which version of the ingress controller is running, exec into the pod and run the nginx-ingress-controller version command.
POD_NAMESPACE=ingress-nginx
POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version

Using Helm ¶
NGINX Ingress controller can be installed via Helm using the chart stable/nginx-ingress from the official charts repository.
To install the chart with the release name my-nginx:
helm install my-nginx stable/nginx-ingress

If the kubernetes cluster has RBAC enabled, then run:
helm install my-nginx stable/nginx-ingress --set rbac.create=true

If you are using Helm 2 then specify the release name using the --name flag
helm install stable/nginx-ingress --name my-nginx
or
helm install stable/nginx-ingress --name my-nginx --set rbac.create=true

Detect installed version:
POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}')
kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version
This example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled.
Role-Based Access Control is composed of four layers:
ClusterRole
ClusterRoleBinding
Role
RoleBinding
In order for RBAC to be applied to an nginx-ingress-controller, that controller should be assigned to a ServiceAccount. That ServiceAccount should be bound to the Roles and ClusterRoles defined for the nginx-ingress-controller.
One ServiceAccount is created in this example, nginx-ingress-serviceaccount.
There are two sets of permissions defined in this example: cluster-wide permissions defined by the ClusterRole named nginx-ingress-clusterrole, and namespace-specific permissions defined by the Role named nginx-ingress-role.
These permissions are granted in order for the nginx-ingress-controller to be able to function as an ingress across the cluster. These permissions are granted to the ClusterRole named nginx-ingress-clusterrole for the following resources (see the sketch after this list):
configmaps
endpoints
nodes
pods
secrets
services
ingresses
events
ingresses/status
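As an illustrative sketch of what such a ClusterRole looks like (the verbs shown are assumptions; the authoritative rules live in the mandatory.yaml manifests):
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
rules:
- apiGroups: [""]
  resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"]
  verbs: ["list", "watch"]
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["extensions", "networking.k8s.io"]
  resources: ["ingresses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
- apiGroups: ["extensions", "networking.k8s.io"]
  resources: ["ingresses/status"]
  verbs: ["update"]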
These permissions are specific to the nginx-ingress namespace. These permissions are granted to the Role named nginx-ingress-role.
Furthermore, to support leader election, the nginx-ingress-controller needs to have access to a configmap using the resourceName ingress-controller-leader-nginx.
Note that resourceNames can NOT be used to limit requests using the “create” verb, because authorizers only have access to information that can be obtained from the request URL, method, and headers (resource names in a “create” request are part of the request body).
This resourceName is the concatenation of the election-id and the ingress-class as defined by the ingress-controller, which defaults to:
election-id: ingress-controller-leader
ingress-class: nginx
resourceName: <election-id>-<ingress-class>, i.e. ingress-controller-leader-nginx by default
Please adapt accordingly if you overwrite either parameter when launching the nginx-ingress-controller.
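A hedged sketch of the namespaced Role, showing how the leader-election configmap is restricted by resourceName (verbs are assumptions; see the mandatory.yaml manifests for the authoritative rules):
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  # "create" cannot be limited by resourceName (see the note above).
  verbs: ["create"]
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["ingress-controller-leader-nginx"]
  verbs: ["get", "update"]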
The ServiceAccount nginx-ingress-serviceaccount is bound to the Role nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole.
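A minimal sketch of those bindings (the binding names are illustrative):
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-binding
  namespace: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
- kind: ServiceAccount
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx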
The serviceAccountName associated with the containers in the deployment must match the serviceAccount. The namespace references in the Deployment metadata, container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace.
I.e. if your deployment resource looks like (partial example):
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  ...
  template:
    ...
    spec:
      containers:
      - name: nginx-ingress-controller
        image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0
        args: ...
simply change the 0.9.0 tag to the version you wish to upgrade to. The easiest way to do this is e.g. (do note you may need to change the name parameter according to your installation):
kubectl set image deployment/nginx-ingress-controller \
  nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0
For interactive editing, use kubectl edit deployment nginx-ingress-controller.
If you installed ingress-nginx using the Helm command in the deployment docs so its name is ngx-ingress, you should be able to upgrade using:
helm upgrade --reuse-values ngx-ingress stable/nginx-ingress
The validating webhook must be served over TLS, so you need to generate a certificate. Note that the kube API server checks the hostname of the certificate: the common name of your certificate will need to match the service name.
To run the validating webhook with a service named ingress-validation-webhook in the namespace ingress-nginx, run
openssl req -x509 -newkey rsa:2048 -keyout key.pem -out certificate.pem -days 365 -nodes -subj "/CN=ingress-validation-webhook.ingress-nginx.svc"
Kubernetes also provides primitives to sign a certificate request. Here is an example of how to use it:
#!/bin/bash

SERVICE_NAME=ingress-nginx
NAMESPACE=ingress-nginx

...

kubectl create secret generic ingress-nginx.svc \
  --from-file=key.pem=${TEMP_DIRECTORY}/server-key.pem \
  --from-file=cert.pem=${TEMP_DIRECTORY}/server-cert.pem \
  -n ${NAMESPACE}
To generate the certificate using helm, you can use the following snippet:
{{- $cn := printf "%s.%s.svc" ( include "nginx-ingress.validatingWebhook.fullname" . ) .Release.Namespace }}
{{- $ca := genCA (printf "%s-ca" ( include "nginx-ingress.validatingWebhook.fullname" . )) .Values.validatingWebhook.certificateValidity -}}
{{- $cert := genSignedCert $cn nil nil .Values.validatingWebhook.certificateValidity $ca -}}
--validating-webhook: :8080
--validating-webhook-certificate: /usr/local/certificates/validating-webhook.pem
--validating-webhook-key: /usr/local/certificates/validating-webhook-key.pem
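A sketch of how these flags might appear in the controller container args, assuming the certificate and key are mounted from a secret at the paths above:

args:
  - /nginx-ingress-controller
  - --validating-webhook=:8080
  - --validating-webhook-certificate=/usr/local/certificates/validating-webhook.pem
  - --validating-webhook-key=/usr/local/certificates/validating-webhook-key.pem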
Once both the ingress controller and the kube API server are configured to serve the webhook, you can configure the webhook with the following objects:
apiVersion: v1
kind: Service
metadata:
  name: ingress-validation-webhook
...
      name: ingress-validation-webhook
      path: /networking.k8s.io/v1beta1/ingress
    caBundle: <pem encoded ca cert that signs the server cert used by the webhook>
The code must be checked out as a subdirectory of k8s.io, and not github.com.
mkdir -p $GOPATH/src/k8s.io
cd $GOPATH/src/k8s.io
# Replace "$YOUR_GITHUB_USERNAME" below with your github username
git clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git
cd ingress-nginx
Prerequisites: Minikube must be installed. See releases for installation instructions.
If you are using macOS and deploying to minikube, the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx:
$ make dev-env
The nginx controller container image can be rebuilt using:
$ ARCH=amd64 TAG=dev REGISTRY=$USER/ingress-controller make build container
The image will only be used by pods created after the rebuild. To delete old pods (which will cause new ones to spin up):
$ kubectl get pods -n ingress-nginx
$ kubectl delete pod -n ingress-nginx nginx-ingress-controller-<unique-pod-id>
The build uses dependencies in the vendor directory, which must be installed before building a binary/image. Occasionally, you might need to update the dependencies.
This guide requires you to install go 1.13 or newer.
This will automatically save the dependencies to the vendor/ directory.

$ go get
$ make dep-ensure
All ingress controllers are built through a Makefile. Depending on your requirements you can build a raw server binary, a local container image, or push an image to a remote repository.
In order to use your local Docker, you may need to set the following environment variables:
# "gcloud docker" (default) or "docker" +# "gcloud docker" (default) or "docker" $ export DOCKER=<docker> # "quay.io/kubernetes-ingress-controller" (default), "index.docker.io", or your own registry $ export REGISTRY=<your-docker-registry> - +
# "gcloud docker" (default) or "docker" $ export DOCKER=<docker> # "quay.io/kubernetes-ingress-controller" (default), "index.docker.io", or your own registry $ export REGISTRY=<your-docker-registry> -
To find the registry simply run: docker system info | grep Registry
The e2e test image can also be built through the Makefile.
$ make e2e-test-image
You can then make this image available on your minikube host by exporting the image and loading it with the minikube docker context:
$ docker save nginx-ingress-controller:e2e | (eval $(minikube docker-env) && docker load)
Build a raw server binary
$ make build
TODO: add more specific instructions needed for raw server binary.
Build a local container image
$ TAG=<tag> REGISTRY=$USER/ingress-controller make container
Push the container image to a remote repository
$ TAG=<tag> REGISTRY=$USER/ingress-controller make push
There are several ways to deploy the ingress controller onto a cluster. Please check the deployment guide.
To run unit tests, just run:
$ cd $GOPATH/src/k8s.io/ingress-nginx
$ make test
If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo.
$ cd $GOPATH/src/k8s.io/ingress-nginx
$ make e2e-test
NOTE: if your e2e pod keeps hanging in an ImagePullBackOff, make sure you've made your e2e nginx-ingress-controller image available to minikube as explained in the Building the e2e test image section.
To run unit tests for Lua code locally, run:
$ cd $GOPATH/src/k8s.io/ingress-nginx
$ ./rootfs/etc/nginx/lua/test/up.sh
$ make lua-test
Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test. When creating a new test file it must follow the naming convention <mytest>_test.lua or it will be ignored.
All Makefiles will produce a release binary, as shown above. To publish this to a wider Kubernetes user base, push the image to a container registry, like gcr.io. All release images are hosted under gcr.io/google_containers and tagged according to a semver scheme.
An example release might look like:

$ make release
Please follow these guidelines to cut a release:
controller-release-version
The static configuration implies reloads, something that affects the majority of the users.
--enable-dynamic-certificates
ssl_certificate
ssl_certificate_key
http
How does the controller know what zone it runs in? We can have the pod spec pass the node name using the downward API as an environment variable. Then, on start, the controller can get the node details from the API based on the node name. Once the node details are obtained, we can extract the zone from the failure-domain.beta.kubernetes.io/zone annotation. Then we can pass that value to Lua land through the Nginx configuration when loading the lua_ingress.lua module in the init_by_lua phase.
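A minimal sketch of the downward-API wiring described above (the NODE_NAME variable name is an assumption; fieldPath: spec.nodeName is the standard way to obtain the node name):

env:
  - name: NODE_NAME
    valueFrom:
      fieldRef:
        # The downward API exposes the name of the node the pod was scheduled on
        fieldPath: spec.nodeName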
How do we extract zones for endpoints? We can have the controller watch create and update events on nodes in the entire cluster and, based on that, keep a map of nodes to zones in memory. When we generate the endpoints list, we can access the node name using .subsets.addresses[i].nodeName and, based on that, fetch the zone from the map in memory and store it as a field on the endpoint. This solution assumes the failure-domain.beta.kubernetes.io/zone annotation does not change until the end of a node's life. Otherwise we would have to watch update events on the nodes as well, and that would add even more overhead.
Alternatively, we can get the list of nodes only when there's no node in memory for a given node name. This is probably a better solution, because then we would avoid watching for API changes on node resources. We can eagerly fetch all the nodes and build the node-name-to-zone mapping on start. ... we will see no endpoints for the backend and therefore we will use the general balancer.
This is the title of the KEP. Keep it simple and descriptive. A good title can help communicate what the KEP is and should be considered as part of any review.
The title should be lowercased and spaces/punctuation should be replaced with -.
To get started with this template:
YYYYMMDD-my-title.md
provisional
The canonical place for the latest set of instructions (and the likely source of this file) is here.
The Metadata section above is intended to support the creation of tooling around the KEP process. This will be a YAML section that is fenced as a code block. See the KEP process for details on each of these items.
A table of contents is helpful for quickly jumping to sections of a KEP and for highlighting any additional information provided beyond the standard KEP template.
Ensure the TOC is wrapped with <!-- toc --><!-- /toc --> tags, and then generate with hack/update-toc.sh.
The Summary section is incredibly important for producing high-quality, user-focused documentation such as release notes or a development roadmap. It should be possible to collect this information before implementation begins, in order to avoid requiring implementors to split their attention between writing release notes and implementing the feature itself.
A good summary is probably at least a paragraph in length.
Major milestones in the life cycle of a KEP should be tracked in Implementation History. Major milestones might include:
Motivation
Proposal
Why should this KEP not be implemented?
Similar to the Drawbacks section, the Alternatives section is used to highlight and record other possible approaches to delivering the value proposed by a KEP.
A Kubernetes Enhancement Proposal (KEP) is a way to propose, communicate and coordinate on new efforts for the Kubernetes project. For this reason, the ingress-nginx project is adopting it.
Follow the process outlined in the KEP template.
Unless otherwise mentioned, the TLS secret used in examples is a 2048-bit RSA key/cert pair with an arbitrarily chosen hostname, created as follows:
$ openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=nginxsvc/O=nginxsvc"
Generating a 2048 bit RSA private key
................+++
................+++
...

$ kubectl create secret tls tls-secret --key tls.key --cert tls.crt
secret "tls-secret" created
Note: If using CA Authentication, described below, you will need to sign the server certificate with the CA.
These instructions are based on the following blog
Generate the CA Key and Certificate:
openssl req -x509 -sha256 -newkey rsa:4096 -keyout ca.key -out ca.crt -days 365 -nodes -subj '/CN=My Cert Authority'
Generate the Server Key and Certificate, and Sign with the CA Certificate:
openssl req -new -newkey rsa:4096 -keyout server.key -out server.csr -nodes -subj '/CN=mydomain.com'
openssl x509 -req -sha256 -days 365 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
Generate the Client Key and Certificate, and Sign with the CA Certificate:
openssl req -new -newkey rsa:4096 -keyout client.key -out client.csr -nodes -subj '/CN=My Client'
openssl x509 -req -sha256 -days 365 -in client.csr -CA ca.crt -CAkey ca.key -set_serial 02 -out client.crt
Once this is complete, you can continue to follow the instructions here.
All examples that require a test HTTP Service use the standard http-svc pod, which you can deploy as follows
$ kubectl create -f http-svc.yaml
service "http-svc" created
replicationcontroller "http-svc" created

...

$ kubectl get svc
NAME       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
http-svc   10.0.122.116   <pending>     80:30301/TCP   1d
You can test that the HTTP Service works by exposing it temporarily
$ kubectl patch svc http-svc -p '{"spec":{"type": "LoadBalancer"}}'
"http-svc" patched

$ kubectl get svc http-svc
...

$ kubectl patch svc http-svc -p '{"spec":{"type": "NodePort"}}'
"http-svc" patched
cookie
balanced
persistent
INGRESSCOOKIE
None
Lax
Strict
SameSite=None
"true"
"false"
Max-Age
Expires
false
true
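The values listed above belong to the session-affinity annotations documented for the controller. A hedged sketch of an Ingress metadata block using a few of them (the max-age value is an illustrative assumption):

metadata:
  annotations:
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/session-cookie-name: "INGRESSCOOKIE"
    nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"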
You can create the example Ingress to test this:
kubectl create -f ingress.yaml
You can confirm that the Ingress works:
$ kubectl describe ing nginx-test
Name:           nginx-test
Namespace:      default
Address:
...
Last-Modified: Tue, 24 Jan 2017 14:02:19 GMT
ETag: "58875e6b-264"
Accept-Ranges: bytes
In the example above, you can see that the response contains a Set-Cookie header with the settings we have defined. This cookie is created by NGINX; it contains a randomly generated key corresponding to the upstream used for that request (selected using consistent hashing) and has an Expires directive. If the user changes this cookie, NGINX creates a new one and redirects the user to another upstream.
If the backend pool grows, NGINX will keep sending requests through the same server as the first request, even if it's overloaded.
When the backend server is removed, the requests are re-routed to another upstream server. This does not require the cookie to be updated because the key's consistent hash will change.
This example shows how to add authentication to an Ingress rule using a secret that contains a file generated with htpasswd. It's important that the generated file is named auth (actually, that the secret has a key data.auth); otherwise the ingress-controller returns a 503.

$ htpasswd -c auth foo
New password: <bar>
New password:
Re-type new password:
Adding password for user foo
$ kubectl create secret generic basic-auth --from-file=auth
secret "basic-auth" created
$ kubectl get secret basic-auth -o yaml
apiVersion: v1
data:
  auth: Zm9vOiRhcHIxJE9GRzNYeWJwJGNrTDBGSERBa29YWUlsSDkuY3lzVDAK
kind: Secret
metadata:
  name: basic-auth
  namespace: default
type: Opaque
echo " +echo " apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: @@ -1197,9 +1197,9 @@ It's important the file generated is named auth serviceName: http-svc servicePort: 80 " | kubectl create -f - - +
echo " apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: @@ -1197,9 +1197,9 @@ It's important the file generated is named auth serviceName: http-svc servicePort: 80 " | kubectl create -f - -
$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com'
*   Trying 10.2.29.4...
* Connected to 10.2.29.4 (10.2.29.4) port 80 (#0)
> GET / HTTP/1.1
...
</body>
</html>
* Connection #0 to host 10.2.29.4 left intact
$ curl -v http://10.2.29.4/ -H 'Host: foo.bar.com' -u 'foo:bar'
*   Trying 10.2.29.4...
* Connected to 10.2.29.4 (10.2.29.4) port 80 (#0)
* Server auth using Basic with user 'foo'
...
x-real-ip=10.2.29.1

BODY:
* Connection #0 to host 10.2.29.4 left intact
-no body in request-
For more details on the generation process, check out the Prerequisite docs.
You can have as many certificates as you want. If they're in the binary DER format, you can convert them as follows:
openssl x509 -in certificate.der -inform der -out certificate.crt -outform pem
Then, you can concatenate them all into a single file, named 'ca.crt', as follows:
cat certificate1.crt certificate2.crt certificate3.crt >> ca.crt
Note: Make sure that the key size is greater than 1024 and the hashing algorithm (digest) is something better than md5 for each certificate generated, otherwise you will receive an error.
You can create a secret containing just the CA certificate and another Secret containing the Server Certificate which is Signed by the CA.
kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt
kubectl create secret generic tls-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key
You can create a secret containing the CA certificate along with the server certificate that can be used for both TLS and client auth.
kubectl create secret generic ca-secret --from-file=tls.crt=server.crt --from-file=tls.key=server.key --from-file=ca.crt=ca.crt
If you also want to enable Certificate Revocation List verification, you can create the secret containing the CRL file in PEM format as well:

kubectl create secret generic ca-secret --from-file=ca.crt=ca.crt --from-file=ca.crl=ca.crl
Note: The CA Certificate must contain the trusted certificate authority chain to verify client certificates.
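A hedged sketch of the Ingress annotations that consume these secrets (the annotation names are the documented client-certificate-authentication annotations; the namespace/secret value follows this example):

metadata:
  annotations:
    # Enable client certificate verification
    nginx.ingress.kubernetes.io/auth-tls-verify-client: "on"
    # The secret containing the trusted CA certificate (and optionally the CRL)
    nginx.ingress.kubernetes.io/auth-tls-secret: "default/ca-secret"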
Use an external service (Basic Auth) located at https://httpbin.org.

$ kubectl create -f ingress.yaml
ingress "external-auth" created

$ kubectl get ing external-auth
...
status:
  ingress:
  - ip: 172.17.4.99
$
Test 1: no username/password (expect code 401)
$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com'
* Rebuilt URL to: http://172.17.4.99/
*   Trying 172.17.4.99...
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
...
</body>
</html>
* Connection #0 to host 172.17.4.99 left intact
Test 2: valid username/password (expect code 200)
$ curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:passwd'
* Rebuilt URL to: http://172.17.4.99/
*   Trying 172.17.4.99...
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
...
x-real-ip=10.2.60.1

BODY:
* Connection #0 to host 172.17.4.99 left intact
-no body in request-
Test 3: invalid username/password (expect code 401)
curl -k http://172.17.4.99 -v -H 'Host: external-auth-01.sample.com' -u 'user:user'
* Rebuilt URL to: http://172.17.4.99/
*   Trying 172.17.4.99...
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
...
BODY:
</body>
</html>
* Connection #0 to host 172.17.4.99 left intact
The auth-url and auth-signin annotations allow you to use an external authentication provider to protect your Ingress resources.
Important
This annotation requires nginx-ingress-controller v0.9.0 or greater.
This functionality is enabled by deploying multiple Ingress objects for a single host. One Ingress object has no special annotations and handles authentication.
Other Ingress objects can then be annotated in such a way that requires the user to authenticate against the first Ingress's endpoint, and can redirect 401s to the same endpoint.
Sample:
...
metadata:
  name: application
  annotations:
    nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
    nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
...
This example will show you how to deploy oauth2_proxy into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using GitHub as the OAuth2 provider.
kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.10.1.yaml
https://foo.bar.com
/oauth2
https://foo.bar.com/oauth2
Configure oauth2_proxy values in the file oauth2-proxy.yaml with the values:
OAUTH2_PROXY_CLIENT_ID with the github <Client ID>
OAUTH2_PROXY_CLIENT_SECRET with the github <Client Secret>
OAUTH2_PROXY_COOKIE_SECRET with the value of python -c 'import os,base64; print(base64.b64encode(os.urandom(16)).decode("ascii"))'
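A sketch of how these values might be wired into the oauth2_proxy container env (the placeholder values are assumptions to be replaced with your own):

env:
  - name: OAUTH2_PROXY_CLIENT_ID
    value: <Client ID>
  - name: OAUTH2_PROXY_CLIENT_SECRET
    value: <Client Secret>
  - name: OAUTH2_PROXY_COOKIE_SECRET
    value: <output of the python one-liner above>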
Customize the contents of the file dashboard-ingress.yaml:
Replace __INGRESS_HOST__ with a valid FQDN and __INGRESS_SECRET__ with a Secret with a valid SSL certificate.
$ kubectl create -f oauth2-proxy.yaml,dashboard-ingress.yaml
Test the OAuth integration by accessing the configured URL, e.g. https://foo.bar.com.
The Ingress in this example adds a custom header to Nginx configuration that only applies to that specific Ingress. If you want to add headers that apply globally to all Ingresses, please have a look at this example.
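A hedged sketch of such a per-Ingress header annotation (the header name is an illustrative assumption; configuration-snippet is the documented annotation for injecting nginx directives into a single Ingress's configuration):

metadata:
  annotations:
    nginx.ingress.kubernetes.io/configuration-snippet: |
      # Add a custom header to responses for this Ingress only
      more_set_headers "Request-Id: $req_id";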
$ kubectl apply -f ingress.yaml
Check if the contents of the annotation are present in the nginx.conf file using:

kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf
Using a ConfigMap, it is possible to customize the NGINX configuration.
For example, if we want to change the timeouts we need to create a ConfigMap:
$ cat configmap.yaml
apiVersion: v1
data:
  proxy-connect-timeout: "10"
...
kind: ConfigMap
metadata:
  name: nginx-configuration
curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-configuration/configmap.yaml \
  | kubectl apply -f -
If the ConfigMap is updated, NGINX will be reloaded with the new configuration.
This example demonstrates how to use a custom backend to render custom error pages.
First, create the custom default-backend. It will be used by the Ingress controller later on.

$ kubectl create -f custom-default-backend.yaml
service "nginx-errors" created
deployment.apps "nginx-errors" created
This should have created a Deployment and a Service with the name nginx-errors.

$ kubectl get deploy,svc
NAME                           DESIRED   CURRENT   READY   AGE
deployment.apps/nginx-errors   1         1         1       10s

NAME                   TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
service/nginx-errors   ClusterIP   10.0.0.12    <none>        80/TCP    10s
If you do not already have an instance of the NGINX Ingress controller running, deploy it according to the deployment guide, then follow these steps:
Edit the nginx-ingress-controller Deployment and set the value of the --default-backend flag to the name of the newly created error backend.
Edit the nginx-configuration ConfigMap and create the key custom-http-errors with a value of 404,503.
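A sketch of the resulting ConfigMap entry (other keys are elided; the name, key, and value follow this example):

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration
data:
  # Ask the controller to intercept these status codes and serve the custom error backend
  custom-http-errors: "404,503"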
Take note of the IP address assigned to the NGINX Ingress controller Service.

$ kubectl get svc ingress-nginx
NAME            TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)          AGE
ingress-nginx   ClusterIP   10.0.0.13    <none>        80/TCP,443/TCP   10m
The ingress-nginx Service is of type ClusterIP in this example. This may vary depending on your environment. Make sure you can use the Service to reach NGINX before proceeding with the rest of this example.
Let us send a couple of HTTP requests using cURL and validate everything is working as expected.
A request to the default backend returns a 404 error with a custom message:
$ curl -D- http://10.0.0.13/
HTTP/1.1 404 Not Found
Server: nginx/1.13.12
Date: Tue, 12 Jun 2018 19:11:24 GMT
...
Transfer-Encoding: chunked
Connection: keep-alive

<span>The page you're looking for could not be found.</span>
A request with a custom Accept header returns the corresponding document type (JSON):

$ curl -D- -H 'Accept: application/json' http://10.0.0.13/
HTTP/1.1 404 Not Found
Server: nginx/1.13.12
Date: Tue, 12 Jun 2018 19:12:36 GMT
...
Connection: keep-alive
Vary: Accept-Encoding

{ "message": "The page you're looking for could not be found" }
To go further with this example, feel free to deploy your own applications and Ingress objects, and validate that the responses are still in the correct format when a backend returns 503 (e.g. if you scale a Deployment down to 0 replicas).
This example demonstrates configuration of the nginx ingress controller via a ConfigMap to pass a custom list of headers to the upstream server.
custom-headers.yaml defines a ConfigMap in the ingress-nginx namespace named custom-headers, holding several custom X-prefixed HTTP headers.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/custom-headers.yaml
configmap.yaml defines a ConfigMap in the ingress-nginx namespace named nginx-configuration. This controls the global configuration of the ingress controller, and already exists in a standard installation. The key proxy-set-headers is set to cite the previously-created ingress-nginx/custom-headers ConfigMap.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/custom-headers/configmap.yaml
The nginx ingress controller will read the ingress-nginx/nginx-configuration ConfigMap, find the proxy-set-headers key, read HTTP headers from the ingress-nginx/custom-headers ConfigMap, and include those HTTP headers in all requests flowing from nginx to the backends.
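A sketch of the global ConfigMap described above (both names come from this example; other keys are elided):

apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
data:
  # Points at the ConfigMap holding the custom headers to pass to the backends
  proxy-set-headers: "ingress-nginx/custom-headers"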
Check that the contents of the ConfigMaps are present in the nginx.conf file using:

kubectl exec nginx-ingress-controller-873061567-4n3k2 -n ingress-nginx cat /etc/nginx/nginx.conf
Sample configuration includes:
User
internal
UserID
UserRole
You can deploy the controller as follows:
$ kubectl create -f deploy/
deployment "demo-auth-service" created
service "demo-auth-service" created
ingress "demo-auth-service" created
...
NAME                       HOSTS                                 ADDRESS   PORTS   AGE
public-demo-echo-service   public-demo-echo-service.kube.local             80      1m
secure-demo-echo-service   secure-demo-echo-service.kube.local             80      1m
Test 1: public service with no auth header
$ curl -H 'Host: public-demo-echo-service.kube.local' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
...
<
* Connection #0 to host 192.168.99.100 left intact
UserID: , UserRole:
Test 2: secure service with no auth header
$ curl -H 'Host: secure-demo-echo-service.kube.local' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
...
</body>
</html>
* Connection #0 to host 192.168.99.100 left intact
Test 3: public service with valid auth header
$ curl -H 'Host: public-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
...
<
* Connection #0 to host 192.168.99.100 left intact
UserID: 1443635317331776148, UserRole: admin
Test 4: secure service with valid auth header
$ curl -H 'Host: secure-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
...
<
* Connection #0 to host 192.168.99.100 left intact
UserID: 605394647632969758, UserRole: admin
$ cat configmap.yaml
apiVersion: v1
data:
  ssl-dh-param: "ingress-nginx/lb-dhparam"
...
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
$ kubectl create -f configmap.yaml
$> openssl dhparam 1024 2> /dev/null | base64
LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ...
$ cat ssl-dh-param.yaml
apiVersion: v1
data:
  dhparam.pem: "LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ..."
...
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
$ kubectl create -f ssl-dh-param.yaml
Check that the contents of the configmap are present in the nginx.conf file using:

kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf
This example aims to demonstrate the use of an Init Container to adjust sysctl default values using kubectl patch:

kubectl patch deployment -n ingress-nginx nginx-ingress-controller \
  --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/customization/sysctl/patch.json)"
Changes:
net.core.somaxconn: from 128 to 32768
net.ipv4.ip_local_port_range: from 32768 60999 to 1024 65000
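A hedged sketch of what such an init container might look like (the image and command are illustrative assumptions; the authoritative patch lives in patch.json):

initContainers:
  - name: sysctl
    image: alpine:3.10
    securityContext:
      # Privileges are required to change kernel parameters
      privileged: true
    command:
      - sh
      - -c
      - sysctl -w net.core.somaxconn=32768 && sysctl -w net.ipv4.ip_local_port_range="1024 65000"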
A post on the NGINX blog explains the reasoning behind these changes.
This example demonstrates how to deploy a docker registry in the cluster and configure an Ingress to enable access from the Internet.
First we deploy the docker registry in the cluster:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/deployment.yaml
DO NOT RUN THIS IN PRODUCTION
This deployment uses emptyDir in the volumeMount which means the contents of the registry will be deleted when the pod dies.
The next required step is the creation of the ingress rules. To do this we have two options: with and without TLS.
Download and edit the yaml deployment replacing registry.<your domain> with a valid DNS name pointing to the ingress controller:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-without-tls.yaml
Running a docker registry without TLS requires us to configure our local docker daemon with the insecure registry flag.
Please check deploy a plain http registry
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/docker-registry/ingress-with-tls.yaml
Deploy kube-lego to use Let's Encrypt certificates, or edit the ingress rule to use a secret with an existing SSL certificate.
To test that the registry is working correctly, we download a known image from docker hub, create a tag pointing to the new registry, and upload the image:
docker pull ubuntu:16.04
docker tag ubuntu:16.04 registry.<your domain>/ubuntu:16.04
docker push registry.<your domain>/ubuntu:16.04
Please replace registry.<your domain> with your domain.
example.com
fortune-teller.stack.build
Deployment
$ kubectl create -f app.yaml
This is a standard kubernetes deployment object. It is running a grpc service listening on port 50051.
The sample application fortune-teller-app is a grpc server implemented in go. Here's the stripped-down implementation:
func main() {
    grpcServer := grpc.NewServer()
    fortune.RegisterFortuneTellerServer(grpcServer, &FortuneTeller{})
    lis, _ := net.Listen("tcp", ":50051")
    grpcServer.Serve(lis)
}
The takeaway is that we are not doing any TLS configuration on the server (as we are terminating TLS at the ingress level, grpc traffic will travel unencrypted inside the cluster and arrive "insecure").
For your own application you may or may not want to do this. If you prefer to forward encrypted traffic to your pod and terminate TLS at the gRPC server itself, add the ingress annotation nginx.ingress.kubernetes.io/backend-protocol: "GRPCS".
Service
$ kubectl create -f svc.yaml
Here we have a typical service. Nothing special, just routing traffic to the backend application on port 50051.
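A hedged sketch of such a Service (the name and selector are illustrative assumptions; the port matches the deployment above):

apiVersion: v1
kind: Service
metadata:
  name: fortune-teller-app
spec:
  selector:
    app: fortune-teller-app
  ports:
    - name: grpc
      port: 50051
      targetPort: 50051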
Ingress
$ kubectl create -f ingress.yaml
A few things to note (a sketch follows the list below):
nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
https://fortune-teller.stack.build:443
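A hedged sketch of the pieces the notes above refer to (the TLS secret name is an illustrative assumption; the annotation and host come from this example):

metadata:
  annotations:
    # Tell nginx to proxy to the backend with gRPC instead of HTTP/1.1
    nginx.ingress.kubernetes.io/backend-protocol: "GRPC"
spec:
  tls:
    - hosts:
        - fortune-teller.stack.build
      secretName: fortune-teller-tls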
Once we've applied our configuration to kubernetes, it's time to test that we can actually talk to the backend. To do this, we'll use the grpcurl utility:
$ grpcurl fortune-teller.stack.build:443 build.stack.fortune.FortuneTeller/Predict
{
  "message": "Let us endeavor so to live that when we come to die even the undertaker will be sorry.\n\t\t-- Mark Twain, \"Pudd'nhead Wilson's Calendar\""
}
GODEBUG=http2debug=2
grpc_read_timeout
grpc_send_timeout
client_body_timeout
Values for the timeouts must be specified as e.g. "1200s".
"1200s"
On the most recent versions of nginx-ingress, changing these timeouts requires using the nginx.ingress.kubernetes.io/server-snippet annotation. There are plans for future releases to allow using the Kubernetes annotations to define each timeout separately.
This should generate a segment like:

$ kubectl exec -it nginx-ingress-controller-6vwd1 -- cat /etc/nginx/nginx.conf | grep "foo.bar.com" -B 7 -A 35
server {
    listen 80;
    listen 443 ssl http2;
...
    proxy_pass http://default-http-svc-80;
}
And you should be able to reach your nginx service or http-svc service using a hostname switch:

$ kubectl get ing
NAME      RULE          BACKEND   ADDRESS         AGE
foo-tls   -                       104.154.30.67   13m
          foo.bar.com
...
$ curl 104.154.30.67
default backend - 404
Before applying any objects, first apply the PSP permissions by running:

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/docs/examples/psp/psp.yaml
Now that the pod security policy is applied, we can continue as usual by applying the mandatory.yaml according to the Installation Guide.
Starting in Version 0.22.0, ingress definitions using the annotation nginx.ingress.kubernetes.io/rewrite-target are not backwards compatible with previous versions. In Version 0.22.0 and beyond, any substrings within the request URI that need to be passed to the rewritten path must explicitly be defined in a capture group.
Captured groups are saved in numbered placeholders, chronologically, in the form $1, $2 ... $n. These placeholders can be used as parameters in the rewrite-target annotation.
Create an Ingress rule with a rewrite annotation:
$ echo '
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
...
          servicePort: 80
        path: /something(/|$)(.*)
' | kubectl create -f -
In this ingress definition, any characters captured by (.*) will be assigned to the placeholder $2, which is then used as a parameter in the rewrite-target annotation.
(.*)
For example, the ingress definition above will result in the following rewrites:
rewrite.bar.com/something rewrites to rewrite.bar.com/
rewrite.bar.com/something/ rewrites to rewrite.bar.com/
rewrite.bar.com/something/new rewrites to rewrite.bar.com/new
Create an Ingress rule with an app-root annotation:

$ echo "
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
...
          servicePort: 80
        path: /
" | kubectl create -f -
Check that the rewrite is working:
$ curl -I -k http://approot.bar.com/
HTTP/1.1 302 Moved Temporarily
Server: nginx/1.11.10
Date: Mon, 13 Mar 2017 14:57:15 GMT
...
Content-Type: text/html
Content-Length: 162
Location: http://stickyingress.example.com/app1
Connection: keep-alive
To acquire a static IP for the nginx ingress controller, simply put it behind a Service of Type=LoadBalancer. First, create a loadbalancer Service and wait for it to acquire an IP:
$ kubectl create -f static-ip-svc.yaml
service "nginx-ingress-lb" created

$ kubectl get svc nginx-ingress-lb
NAME               CLUSTER-IP     EXTERNAL-IP       PORT(S)                      AGE
nginx-ingress-lb   10.0.138.113   104.154.109.191   80:31457/TCP,443:32240/TCP   15m
Then, update the ingress controller so it adopts the static IP of the Service by passing the --publish-service flag (the example yaml used in the next step already has it set to "nginx-ingress-lb").
$ kubectl create -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created
From here on, every Ingress created with the ingress.class annotation set to nginx will get the IP allocated in the previous step.

$ kubectl create -f nginx-ingress.yaml
ingress "nginx-ingress" created

$ kubectl get ing ingress-nginx
...
request_version=1.1
request_uri=http://104.154.109.191:8080/
...
You can test retention by deleting the Ingress:
$ kubectl delete ing nginx-ingress
ingress "nginx-ingress" deleted

$ kubectl create -f nginx-ingress.yaml
...
$ kubectl get ing nginx-ingress
NAME            HOSTS   ADDRESS           PORTS     AGE
nginx-ingress   *       104.154.109.191   80, 443   13m
Note that unlike the GCE Ingress, the same loadbalancer IP is shared amongst all Ingresses, because all requests are proxied through the same set of nginx controllers.
To promote the allocated IP to static, you can update the Service manifest
$ kubectl patch svc nginx-ingress-lb -p '{"spec": {"loadBalancerIP": "104.154.109.191"}}' +$ kubectl patch svc nginx-ingress-lb -p '{"spec": {"loadBalancerIP": "104.154.109.191"}}' "nginx-ingress-lb" patched - +
and promote the IP to static (promotion works differently across cloud providers; the provided example is for GKE/GCE):
$ gcloud compute addresses create nginx-ingress-lb --addresses 104.154.109.191 --region us-central1
Created [https://www.googleapis.com/compute/v1/projects/kubernetesdev/regions/us-central1/addresses/nginx-ingress-lb].
---
address: 104.154.109.191
...
status: IN_USE
users:
- us-central1/forwardingRules/a09f6913ae80e11e6a8c542010af0000
Now even if the Service is deleted, the IP will persist, so you can recreate the Service with spec.loadBalancerIP set to 104.154.109.191.
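For example (a sketch; it assumes you first add spec.loadBalancerIP: 104.154.109.191 to static-ip-svc.yaml):

$ kubectl delete svc nginx-ingress-lb
service "nginx-ingress-lb" deleted

$ kubectl create -f static-ip-svc.yaml
service "nginx-ingress-lb" created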
diff --git a/examples/tls-termination/index.html b/examples/tls-termination/index.html

Prerequisites ¶
You need a TLS cert and a test HTTP service for this example.

Deployment ¶
Create an ingress.yaml file:

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-test
...
    # This assumes http-svc exists and routes to healthy endpoints
    serviceName: http-svc
    servicePort: 80

The following command instructs the controller to terminate traffic using the provided TLS cert, and forward un-encrypted HTTP traffic to the test HTTP service.

kubectl apply -f ingress.yaml

Validation ¶
You can confirm that the Ingress works:

$ kubectl describe ing nginx-test
Name:             nginx-test
Namespace:        default
Address:          104.198.183.6
...
x-forwarded-for=104.132.0.80, 35.186.221.137
x-forwarded-proto=https
BODY:

diff --git a/how-it-works/index.html b/how-it-works/index.html

How it works ¶
The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one.

NGINX configuration ¶
The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. It is important to note, though, that we don't reload Nginx on changes that impact only an upstream configuration (i.e., Endpoints change when you deploy your app); we use lua-nginx-module to achieve this. Check below to learn more about how it's done.

NGINX model ¶
Usually, a Kubernetes Controller utilizes the synchronization loop pattern to check if the desired state in the controller is updated or a change is required. For this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps, to generate a point-in-time configuration file that reflects the state of the cluster.
To get these objects from the cluster, we use Kubernetes Informers, in particular, FilteredSharedInformer. These informers allow reacting to changes using callbacks when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of the cluster and compare it to the current model. If the new model equals the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so, we send the new list of Endpoints to a Lua handler running inside Nginx using an HTTP POST request, and again avoid generating a new NGINX configuration and triggering a reload. If the difference between the running and new model is about more than just Endpoints, we create a new NGINX configuration based on the new model, replace the current model and trigger a reload.
One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions.
The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template.

Building the NGINX model ¶
Building a model is an expensive operation; for this reason, the use of the synchronization loop is a must. By using a work queue it is possible to not lose changes and remove the use of sync.Mutex to force a single execution of the sync loop, and additionally it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. It is important to understand that any change in the cluster could generate events that the informer will send to the controller; this is one of the reasons for the work queue.

Operations to build the model:
- Order Ingress rules by CreationTimestamp field, i.e., old rules first.
- If the same path for the same host is defined in more than one Ingress, the oldest rule wins.
- If more than one Ingress contains a TLS section for the same host, the oldest rule wins.
- If multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins.
- Create a list of NGINX Servers (per hostname).
- Create a list of NGINX Upstreams.
- If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions.
- Annotations are applied to all the paths in the Ingress.
- Multiple Ingresses can define different annotations. These definitions are not shared between Ingresses.

When a reload is required ¶
The next list describes the scenarios when a reload is required:
- A new Ingress Resource is created.
- A TLS section is added to an existing Ingress.
- An Ingress annotation changes in a way that impacts more than just upstream configuration (for instance, the load-balance annotation does not require a reload).
- A path is added to or removed from an Ingress.
- An Ingress, Service, or Secret is removed.
- Some missing object referenced from the Ingress becomes available, like a Service or Secret.
- A Secret is updated.

Avoiding reloads ¶
In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely.
This would require an incredible amount of work and at some point would make no sense. This can change only if NGINX changes the way new configurations are read; basically, new changes would not replace worker processes.

Avoiding reloads on Endpoints changes ¶
On every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then, for every request, the Lua code running in the balancer_by_lua context detects which endpoints it should choose the upstream peer from and applies the configured load balancing algorithm to choose the peer. Nginx then takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this also covers annotation changes that affect only the upstream configuration in Nginx.
In relatively big clusters with frequently deployed apps this feature saves a significant number of Nginx reloads, which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing), and so on.

Avoiding outage from wrong configuration ¶
Because the ingress controller works using the synchronization loop pattern, it applies the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, does not reload, and hence no more ingresses will be taken into account.
To prevent this situation from happening, the nginx ingress controller optionally exposes a validating admission webhook server to ensure the validity of incoming ingress objects. This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration and calls nginx to ensure the configuration has no syntax errors.
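To make the failure mode concrete, a hypothetical snippet like the one below (the header directive is purely illustrative) is enough to invalidate the whole generated nginx.conf, since the missing semicolon is an nginx syntax error; the admission webhook exists to reject such objects before they reach the running configuration:

metadata:
  annotations:
    nginx.ingress.kubernetes.io/configuration-snippet: |
      # missing trailing semicolon -> nginx refuses the generated config
      more_set_headers "X-Demo: example"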
diff --git a/kubectl-plugin/index.html b/kubectl-plugin/index.html

The ingress-nginx kubectl plugin ¶

Installation ¶
Install krew, then run

kubectl krew install ingress-nginx

to install the plugin. Then run

kubectl ingress-nginx --help

to make sure the plugin is properly installed and to get a list of commands:

$ kubectl ingress-nginx --help
A kubectl plugin for inspecting your ingress-nginx deployments

Usage:
  ingress-nginx [command]

Available Commands:
  backends    Inspect the dynamic backend information of an ingress-nginx instance
  certs       Output the certificate data stored in an ingress-nginx pod
  conf        Inspect the generated nginx.conf
  exec        Execute a command inside an ingress-nginx pod
  general     Inspect the other dynamic ingress-nginx information
  help        Help about any command
  info        Show information about the ingress-nginx service
  ingresses   Provide a short summary of all of the ingress definitions
  lint        Inspect kubernetes resources for possible issues
  logs        Get the kubernetes logs for an ingress-nginx pod
  ssh         ssh into a running ingress-nginx pod

Flags:
      --as string                      Username to impersonate for the operation
      --as-group stringArray           Group to impersonate for the operation, this flag can be repeated to specify multiple groups.
      --cache-dir string               Default HTTP cache directory (default "/Users/alexkursell/.kube/http-cache")
      --certificate-authority string   Path to a cert file for the certificate authority
      --client-certificate string      Path to a client certificate file for TLS
      --client-key string              Path to a client key file for TLS
      --cluster string                 The name of the kubeconfig cluster to use
      --context string                 The name of the kubeconfig context to use
  -h, --help                           help for ingress-nginx
      --insecure-skip-tls-verify       If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure
      --kubeconfig string              Path to the kubeconfig file to use for CLI requests.
  -n, --namespace string               If present, the namespace scope for this CLI request
      --request-timeout string         The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default "0")
  -s, --server string                  The address and port of the Kubernetes API server
      --token string                   Bearer token for authentication to the API server
      --user string                    The name of the kubeconfig user to use

Use "ingress-nginx [command] --help" for more information about a command.

If a new ingress-nginx version has just been released, the plugin may not yet have been updated inside the repository. In that case, you can install the latest version of the plugin by running:

( set -x; cd "$(mktemp -d)" &&
  curl -fsSLO "https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}" &&
  kubectl krew install \
    --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz
)

Replace 0.24.0 with the most recently released version.

Common Flags ¶
Every subcommand supports the basic kubectl configuration flags like --namespace, --context, --client-key and so on.
Subcommands that act on a particular ingress-nginx pod (backends, certs, conf, exec, general, logs, ssh) support the --deployment <deployment> and --pod <pod> flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller.
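For instance, to inspect the configuration generated by a controller deployed under a non-default name (my-ingress-controller is hypothetical):

$ kubectl ingress-nginx conf -n ingress-nginx --deployment my-ingress-controller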
Subcommands that inspect resources (ingresses, lint) support the --all-namespaces flag, which causes them to inspect resources in every namespace.

Subcommands ¶
Note that backends, general, certs, and conf require ingress-nginx version 0.23.0 or higher.

backends ¶
Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about:

$ kubectl ingress-nginx backends -n ingress-nginx
[
  {
    "name": "default-apple-service-5678",
    "service": { ... },
    "port": 0,
    "sslPassthrough": false,
    "endpoints": [
      {
        "address": "10.1.3.86",
        "port": "5678"
      }
    ],
    ...
  },
  {
    "name": "default-echo-service-8080",
    ...
  },
  {
    "name": "upstream-default-backend",
    ...
  }
]

Add the --list option to show only the backend names. Add the --backend <backend> option to show only the backend with the given name.

certs ¶
Use kubectl ingress-nginx certs --host <hostname> to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0).
WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere.

$ kubectl ingress-nginx certs -n ingress-nginx --host testaddr.local
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
<REDACTED! DO NOT SHARE THIS!>
-----END RSA PRIVATE KEY-----

conf ¶
Use kubectl ingress-nginx conf to dump the generated nginx.conf file. Add the --host <hostname> option to view only the server block for that host:

kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local

server {
        server_name testaddr.local ;

        listen 80;

        set $proxy_upstream_name "-";
        set $pass_access_scheme $scheme;
        set $pass_server_port $server_port;
        set $best_http_host $http_host;
        set $pass_port $pass_server_port;

        location / {
            set $namespace      "";
            set $ingress_name   "";
            set $service_name   "";
            set $service_port   "0";
            set $location_path  "/";
            ...

exec ¶
kubectl ingress-nginx exec is exactly the same as kubectl exec, with the same command flags. It will automatically choose an ingress-nginx pod to run the command in.

$ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx
fastcgi_params
geoip
lua
mime.types
modsecurity
modules
nginx.conf
opentracing.json
owasp-modsecurity-crs
template

general ¶
kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object.
Currently it just shows the number of controller pods known to a particular controller pod.

$ kubectl ingress-nginx general -n ingress-nginx
{
  "controllerPodsCount": 1
}

info ¶
Shows the internal and external IP/CNAMES for an ingress-nginx service.

$ kubectl ingress-nginx info -n ingress-nginx
Service cluster IP address: 10.187.253.31
LoadBalancer IP|CNAME: 35.123.123.123

Use the --service <service> flag if your ingress-nginx LoadBalancer service is not named ingress-nginx.

ingresses ¶
kubectl ingress-nginx ingresses, alternately kubectl ingress-nginx ing, shows a more detailed view of the ingress definitions in a namespace. Compare:

$ kubectl get ingresses --all-namespaces
NAMESPACE   NAME               HOSTS                            ADDRESS     PORTS   AGE
default     example-ingress1   testaddr.local,testaddr2.local   localhost   80      5d
default     test-ingress-2     *                                localhost   80      5d

vs

$ kubectl ingress-nginx ingresses --all-namespaces
NAMESPACE   INGRESS NAME       HOST+PATH                        ADDRESSES   TLS   SERVICE         SERVICE PORT   ENDPOINTS
default     example-ingress1   testaddr.local/etameta           localhost   NO    pear-service    5678           5
default     example-ingress1   testaddr2.local/otherpath        localhost   NO    apple-service   5678           1
default     example-ingress1   testaddr2.local/otherotherpath   localhost   NO    pear-service    5678           5
default     test-ingress-2     *                                localhost   NO    echo-service    8080           2

lint ¶
kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions.

$ kubectl ingress-nginx lint --all-namespaces --verbose
Checking ingresses...
✗ anamespace/this-nginx
  - Contains the removed session-cookie-hash annotation.
       Lint added for version 0.24.0
       https://github.com/kubernetes/ingress-nginx/issues/3743
✗ othernamespace/ingress-definition-blah
  - The rewrite-target annotation value does not reference a capture group
       Lint added for version 0.22.0
       https://github.com/kubernetes/ingress-nginx/issues/3174

Checking deployments...
✗ namespace2/nginx-ingress-controller
  - Uses removed config flag --sort-backends
       Lint added for version 0.22.0
       https://github.com/kubernetes/ingress-nginx/issues/3655
  - Uses removed config flag --enable-dynamic-certificates
       Lint added for version 0.24.0
       https://github.com/kubernetes/ingress-nginx/issues/3808

To show the lints added only for a particular ingress-nginx release, use the --from-version and --to-version flags:

$ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0
Checking ingresses...
✗ anamespace/this-nginx
  - Contains the removed session-cookie-hash annotation.
       Lint added for version 0.24.0
       https://github.com/kubernetes/ingress-nginx/issues/3743

Checking deployments...
✗ namespace2/nginx-ingress-controller
  - Uses removed config flag --enable-dynamic-certificates
       Lint added for version 0.24.0
       https://github.com/kubernetes/ingress-nginx/issues/3808

logs ¶
kubectl ingress-nginx logs is almost the same as kubectl logs, with fewer flags. It will automatically choose an ingress-nginx pod to read logs from.
$ kubectl ingress-nginx logs -n ingress-nginx
-------------------------------------------------------------------------------
NGINX Ingress controller
  Release:    dev
  Build:      git-48dc3a867
  Repository: git@github.com:kubernetes/ingress-nginx.git
-------------------------------------------------------------------------------
W0405 16:53:46.061589       7 flags.go:214] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false)
nginx version: nginx/1.15.9
...
I0405 16:53:46.183359       7 nginx.go:265] Starting NGINX Ingress controller
I0405 16:53:46.193913       7 event.go:209] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"udp-services", UID:"82258915-563e-11e9-9c52-025000000001", APIVersion:"v1", ResourceVersion:"494", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services
...

ssh ¶
kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash. Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container.

$ kubectl ingress-nginx ssh -n ingress-nginx
www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$
If you are using MacOS and deploying to minikube , the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx : $ make dev-env Updating the deployment \u00b6 The nginx controller container image can be rebuilt using: $ ARCH = amd64 TAG = dev REGISTRY = $USER /ingress-controller make build container The image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up: $ kubectl get pods -n ingress-nginx $ kubectl delete pod -n ingress-nginx nginx-ingress-controller- Dependencies \u00b6 The build uses dependencies in the vendor directory, which must be installed before building a binary/image. Occasionally, you might need to update the dependencies. This guide requires you to install go 1.13 or newer. This will automatically save the dependencies to the vendor/ directory. $ go get $ make dep-ensure Building \u00b6 All ingress controllers are built through a Makefile. Depending on your requirements you can build a raw server binary, a local container image, or push an image to a remote repository. In order to use your local Docker, you may need to set the following environment variables: # \"gcloud docker\" ( default ) or \"docker\" $ export DOCKER = # \"quay.io/kubernetes-ingress-controller\" ( default ) , \"index.docker.io\" , or your own registry $ export REGISTRY = To find the registry simply run: docker system info | grep Registry Building the e2e test image \u00b6 The e2e test image can also be built through the Makefile. $ make e2e-test-image You can then make this image available on your minikube host by exporting the image and loading it with the minikube docker context: $ docker save nginx-ingress-controller:e2e | ( eval $( minikube docker-env ) && docker load ) Nginx Controller \u00b6 Build a raw server binary $ make build TODO : add more specific instructions needed for raw server binary. Build a local container image $ TAG = REGISTRY = $USER /ingress-controller make container Push the container image to a remote repository $ TAG = REGISTRY = $USER /ingress-controller make push Deploying \u00b6 There are several ways to deploy the ingress controller onto a cluster. Please check the deployment guide Testing \u00b6 To run unit-tests, just run $ cd $GOPATH /src/k8s.io/ingress-nginx $ make test If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo. $ cd $GOPATH /src/k8s.io/ingress-nginx $ make e2e-test NOTE: if your e2e pod keeps hanging in an ImagePullBackoff, make sure you've made your e2e nginx-ingress-controller image available to minikube as explained in the Building the e2e test image section To run unit-tests for lua code locally, run: $ cd $GOPATH /src/k8s.io/ingress-nginx $ ./rootfs/etc/nginx/lua/test/up.sh $ make lua-test Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test . When creating a new test file it must follow the naming convention _test.lua or it will be ignored. Releasing \u00b6 All Makefiles will produce a release binary, as shown above. To publish this to a wider Kubernetes user base, push the image to a container registry, like gcr.io . All release images are hosted under gcr.io/google_containers and tagged according to a semver scheme. 
An example release might look like: $ make release Please follow these guidelines to cut a release: Update the release page with a short description of the major changes that correspond to a given image tag. Cut a release branch, if appropriate. Release branches follow the format of controller-release-version . Typically, pre-releases are cut from HEAD. All major feature work is done in HEAD. Specific bug fixes are cherry-picked into a release branch. If you're not confident about the stability of the code, tag it as alpha or beta. Typically, a release branch should have stable code.","title":"Development"},{"location":"development/#developing-for-nginx-ingress-controller","text":"This document explains how to get started with developing for NGINX Ingress controller. It includes how to build, test, and release ingress controllers.","title":"Developing for NGINX Ingress Controller"},{"location":"development/#quick-start","text":"","title":"Quick Start"},{"location":"development/#getting-the-code","text":"The code must be checked out as a subdirectory of k8s.io, and not github.com. mkdir -p $GOPATH/src/k8s.io cd $GOPATH/src/k8s.io # Replace \"$YOUR_GITHUB_USERNAME\" below with your github username git clone https://github.com/$YOUR_GITHUB_USERNAME/ingress-nginx.git cd ingress-nginx","title":"Getting the code"},{"location":"development/#initial-developer-environment-build","text":"Prequisites : Minikube must be installed. See releases for installation instructions. If you are using MacOS and deploying to minikube , the following command will build the local nginx controller container image and deploy the ingress controller onto a minikube cluster with RBAC enabled in the namespace ingress-nginx : $ make dev-env","title":"Initial developer environment build"},{"location":"development/#updating-the-deployment","text":"The nginx controller container image can be rebuilt using: $ ARCH = amd64 TAG = dev REGISTRY = $USER /ingress-controller make build container The image will only be used by pods created after the rebuild. To delete old pods which will cause new ones to spin up: $ kubectl get pods -n ingress-nginx $ kubectl delete pod -n ingress-nginx nginx-ingress-controller-","title":"Updating the deployment"},{"location":"development/#dependencies","text":"The build uses dependencies in the vendor directory, which must be installed before building a binary/image. Occasionally, you might need to update the dependencies. This guide requires you to install go 1.13 or newer. This will automatically save the dependencies to the vendor/ directory. $ go get $ make dep-ensure","title":"Dependencies"},{"location":"development/#building","text":"All ingress controllers are built through a Makefile. Depending on your requirements you can build a raw server binary, a local container image, or push an image to a remote repository. In order to use your local Docker, you may need to set the following environment variables: # \"gcloud docker\" ( default ) or \"docker\" $ export DOCKER = # \"quay.io/kubernetes-ingress-controller\" ( default ) , \"index.docker.io\" , or your own registry $ export REGISTRY = To find the registry simply run: docker system info | grep Registry","title":"Building"},{"location":"development/#building-the-e2e-test-image","text":"The e2e test image can also be built through the Makefile. 
$ make e2e-test-image You can then make this image available on your minikube host by exporting the image and loading it with the minikube docker context: $ docker save nginx-ingress-controller:e2e | ( eval $( minikube docker-env ) && docker load )","title":"Building the e2e test image"},{"location":"development/#nginx-controller","text":"Build a raw server binary $ make build TODO : add more specific instructions needed for raw server binary. Build a local container image $ TAG = REGISTRY = $USER /ingress-controller make container Push the container image to a remote repository $ TAG = REGISTRY = $USER /ingress-controller make push","title":"Nginx Controller"},{"location":"development/#deploying","text":"There are several ways to deploy the ingress controller onto a cluster. Please check the deployment guide","title":"Deploying"},{"location":"development/#testing","text":"To run unit-tests, just run $ cd $GOPATH /src/k8s.io/ingress-nginx $ make test If you have access to a Kubernetes cluster, you can also run e2e tests using ginkgo. $ cd $GOPATH /src/k8s.io/ingress-nginx $ make e2e-test NOTE: if your e2e pod keeps hanging in an ImagePullBackoff, make sure you've made your e2e nginx-ingress-controller image available to minikube as explained in the Building the e2e test image section To run unit-tests for lua code locally, run: $ cd $GOPATH /src/k8s.io/ingress-nginx $ ./rootfs/etc/nginx/lua/test/up.sh $ make lua-test Lua tests are located in $GOPATH/src/k8s.io/ingress-nginx/rootfs/etc/nginx/lua/test . When creating a new test file it must follow the naming convention _test.lua or it will be ignored.","title":"Testing"},{"location":"development/#releasing","text":"All Makefiles will produce a release binary, as shown above. To publish this to a wider Kubernetes user base, push the image to a container registry, like gcr.io . All release images are hosted under gcr.io/google_containers and tagged according to a semver scheme. An example release might look like: $ make release Please follow these guidelines to cut a release: Update the release page with a short description of the major changes that correspond to a given image tag. Cut a release branch, if appropriate. Release branches follow the format of controller-release-version . Typically, pre-releases are cut from HEAD. All major feature work is done in HEAD. Specific bug fixes are cherry-picked into a release branch. If you're not confident about the stability of the code, tag it as alpha or beta. Typically, a release branch should have stable code.","title":"Releasing"},{"location":"how-it-works/","text":"How it works \u00b6 The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one. NGINX configuration \u00b6 The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. Though it is important to note that we don't reload Nginx on changes that impact only an upstream configuration (i.e Endpoints change when you deploy your app) . We use lua-nginx-module to achieve this. Check below to learn more about how it's done. NGINX model \u00b6 Usually, a Kubernetes Controller utilizes the synchronization loop pattern to check if the desired state in the controller is updated or a change is required. 
To this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps to generate a point in time configuration file that reflects the state of the cluster. To get this object from the cluster, we use Kubernetes Informers , in particular, FilteredSharedInformer . This informers allows reacting to changes in using callbacks to individual changes when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of cluster and compare it to the current model. If the new model equals to the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so we then send the new list of Endpoints to a Lua handler running inside Nginx using HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between running and new model is about more than just Endpoints we create a new NGINX configuration based on the new model, replace the current model and trigger a reload. One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions. The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template. Building the NGINX model \u00b6 Building a model is an expensive operation, for this reason, the use of the synchronization loop is a must. By using a work queue it is possible to not lose changes and remove the use of sync.Mutex to force a single execution of the sync loop and additionally it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. It is important to understand that any change in the cluster could generate events that the informer will send to the controller and one of the reasons for the work queue . Operations to build the model: Order Ingress rules by CreationTimestamp field, i.e., old rules first. If the same path for the same host is defined in more than one Ingress, the oldest rule wins. If more than one Ingress contains a TLS section for the same host, the oldest rule wins. If multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins. Create a list of NGINX Servers (per hostname) Create a list of NGINX Upstreams If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Annotations are applied to all the paths in the Ingress. Multiple Ingresses can define different annotations. These definitions are not shared between Ingresses. When a reload is required \u00b6 The next list describes the scenarios when a reload is required: New Ingress Resource Created. TLS section is added to existing Ingress. Change in Ingress annotations that impacts more than just upstream configuration. For instance load-balance annotation does not require a reload. A path is added/removed from an Ingress. An Ingress, Service, Secret is removed. Some missing referenced object from the Ingress is available, like a Service or Secret. A Secret is updated. 
Avoiding reloads \u00b6 In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point makes no sense. This can change only if NGINX changes the way new configurations are read, basically, new changes do not replace worker processes. Avoiding reloads on Endpoints changes \u00b6 On every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then for every request Lua code running in balancer_by_lua context detects what endpoints it should choose upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this includes annotation changes that affects only upstream configuration in Nginx as well. In a relatively big clusters with frequently deploying apps this feature saves significant number of Nginx reloads which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing) and so on. Avoiding outage from wrong configuration \u00b6 Because the ingress controller works using the synchronization loop pattern , it is applying the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, does not reload and hence no more ingresses will be taken into account. To prevent this situation to happen, the nginx ingress controller optionally exposes a validating admission webhook server to ensure the validity of incoming ingress objects. This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration and calls nginx to ensure the configuration has no syntax errors.","title":"How it works"},{"location":"how-it-works/#how-it-works","text":"The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one.","title":"How it works"},{"location":"how-it-works/#nginx-configuration","text":"The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. Though it is important to note that we don't reload Nginx on changes that impact only an upstream configuration (i.e Endpoints change when you deploy your app) . We use lua-nginx-module to achieve this. Check below to learn more about how it's done.","title":"NGINX configuration"},{"location":"how-it-works/#nginx-model","text":"Usually, a Kubernetes Controller utilizes the synchronization loop pattern to check if the desired state in the controller is updated or a change is required. To this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps to generate a point in time configuration file that reflects the state of the cluster. 
To get this object from the cluster, we use Kubernetes Informers , in particular, FilteredSharedInformer . This informers allows reacting to changes in using callbacks to individual changes when a new object is added, modified or removed. Unfortunately, there is no way to know if a particular change is going to affect the final configuration file. Therefore on every change, we have to rebuild a new model from scratch based on the state of cluster and compare it to the current model. If the new model equals to the current one, then we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check if the difference is only about Endpoints. If so we then send the new list of Endpoints to a Lua handler running inside Nginx using HTTP POST request and again avoid generating a new NGINX configuration and triggering a reload. If the difference between running and new model is about more than just Endpoints we create a new NGINX configuration based on the new model, replace the current model and trigger a reload. One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions. The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template.","title":"NGINX model"},{"location":"how-it-works/#building-the-nginx-model","text":"Building a model is an expensive operation, for this reason, the use of the synchronization loop is a must. By using a work queue it is possible to not lose changes and remove the use of sync.Mutex to force a single execution of the sync loop and additionally it is possible to create a time window between the start and end of the sync loop that allows us to discard unnecessary updates. It is important to understand that any change in the cluster could generate events that the informer will send to the controller and one of the reasons for the work queue . Operations to build the model: Order Ingress rules by CreationTimestamp field, i.e., old rules first. If the same path for the same host is defined in more than one Ingress, the oldest rule wins. If more than one Ingress contains a TLS section for the same host, the oldest rule wins. If multiple Ingresses define an annotation that affects the configuration of the Server block, the oldest rule wins. Create a list of NGINX Servers (per hostname) Create a list of NGINX Upstreams If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Annotations are applied to all the paths in the Ingress. Multiple Ingresses can define different annotations. These definitions are not shared between Ingresses.","title":"Building the NGINX model"},{"location":"how-it-works/#when-a-reload-is-required","text":"The next list describes the scenarios when a reload is required: New Ingress Resource Created. TLS section is added to existing Ingress. Change in Ingress annotations that impacts more than just upstream configuration. For instance load-balance annotation does not require a reload. A path is added/removed from an Ingress. An Ingress, Service, Secret is removed. Some missing referenced object from the Ingress is available, like a Service or Secret. A Secret is updated.","title":"When a reload is required"},{"location":"how-it-works/#avoiding-reloads","text":"In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. 
It is out of the scope of this Ingress controller to remove reloads completely. This would require an incredible amount of work and at some point makes no sense. This can change only if NGINX changes the way new configurations are read, basically, new changes do not replace worker processes.","title":"Avoiding reloads"},{"location":"how-it-works/#avoiding-reloads-on-endpoints-changes","text":"On every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then for every request Lua code running in balancer_by_lua context detects what endpoints it should choose upstream peer from and applies the configured load balancing algorithm to choose the peer. Then Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this includes annotation changes that affects only upstream configuration in Nginx as well. In a relatively big clusters with frequently deploying apps this feature saves significant number of Nginx reloads which can otherwise affect response latency, load balancing quality (after every reload Nginx resets the state of load balancing) and so on.","title":"Avoiding reloads on Endpoints changes"},{"location":"how-it-works/#avoiding-outage-from-wrong-configuration","text":"Because the ingress controller works using the synchronization loop pattern , it is applying the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, does not reload and hence no more ingresses will be taken into account. To prevent this situation to happen, the nginx ingress controller optionally exposes a validating admission webhook server to ensure the validity of incoming ingress objects. This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration and calls nginx to ensure the configuration has no syntax errors.","title":"Avoiding outage from wrong configuration"},{"location":"kubectl-plugin/","text":"The ingress-nginx kubectl plugin \u00b6 Installation \u00b6 Install krew , then run kubectl krew install ingress-nginx to install the plugin. Then run kubectl ingress-nginx --help to make sure the plugin is properly installed and to get a list of commands: kubectl ingress-nginx --help A kubectl plugin for inspecting your ingress-nginx deployments Usage: ingress-nginx [command] Available Commands: backends Inspect the dynamic backend information of an ingress-nginx instance certs Output the certificate data stored in an ingress-nginx pod conf Inspect the generated nginx.conf exec Execute a command inside an ingress-nginx pod general Inspect the other dynamic ingress-nginx information help Help about any command info Show information about the ingress-nginx service ingresses Provide a short summary of all of the ingress definitions lint Inspect kubernetes resources for possible issues logs Get the kubernetes logs for an ingress-nginx pod ssh ssh into a running ingress-nginx pod Flags: --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. 
--cache-dir string Default HTTP cache directory (default \"/Users/alexkursell/.kube/http-cache\") --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use -h, --help help for ingress-nginx --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to the kubeconfig file to use for CLI requests. -n, --namespace string If present, the namespace scope for this CLI request --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\") -s, --server string The address and port of the Kubernetes API server --token string Bearer token for authentication to the API server --user string The name of the kubeconfig user to use Use \"ingress-nginx [command] --help\" for more information about a command. If a new ingress-nginx version has just been released, the plugin may not yet have been updated inside the repository. In that case, you can install the latest version of the plugin by running: ( set -x; cd \"$(mktemp -d)\" && curl -fsSLO \"https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}\" && kubectl krew install \\ --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz ) Replacing 0.24.0 with the recently released version. Common Flags \u00b6 Every subcommand supports the basic kubectl configuration flags like --namespace , --context , --client-key and so on. Subcommands that act on a particular ingress-nginx pod ( backends , certs , conf , exec , general , logs , ssh ), support the --deployment and --pod flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller . Subcommands that inspect resources ( ingresses , lint ) support the --all-namespaces flag, which causes them to inspect resources in every namespace. Subcommands \u00b6 Note that backends , general , certs , and conf require ingress-nginx version 0.23.0 or higher. 
backends \u00b6 Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about: $ kubectl ingress-nginx backends -n ingress-nginx [ { \"name\": \"default-apple-service-5678\", \"service\": { \"metadata\": { \"creationTimestamp\": null }, \"spec\": { \"ports\": [ { \"protocol\": \"TCP\", \"port\": 5678, \"targetPort\": 5678 } ], \"selector\": { \"app\": \"apple\" }, \"clusterIP\": \"10.97.230.121\", \"type\": \"ClusterIP\", \"sessionAffinity\": \"None\" }, \"status\": { \"loadBalancer\": {} } }, \"port\": 0, \"sslPassthrough\": false, \"endpoints\": [ { \"address\": \"10.1.3.86\", \"port\": \"5678\" } ], \"sessionAffinityConfig\": { \"name\": \"\", \"cookieSessionAffinity\": { \"name\": \"\" } }, \"upstreamHashByConfig\": { \"upstream-hash-by-subset-size\": 3 }, \"noServer\": false, \"trafficShapingPolicy\": { \"weight\": 0, \"header\": \"\", \"headerValue\": \"\", \"cookie\": \"\" } }, { \"name\": \"default-echo-service-8080\", ... }, { \"name\": \"upstream-default-backend\", ... } ] Add the --list option to show only the backend names. Add the --backend option to show only the backend with the given name. certs \u00b6 Use kubectl ingress-nginx certs --host to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0 ). WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere. $ kubectl ingress-nginx certs -n ingress-nginx --host testaddr.local -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN RSA PRIVATE KEY----- -----END RSA PRIVATE KEY----- conf \u00b6 Use kubectl ingress-nginx conf to dump the generated nginx.conf file. Add the --host option to view only the server block for that host: kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local server { server_name testaddr.local ; listen 80; set $proxy_upstream_name \"-\"; set $pass_access_scheme $scheme; set $pass_server_port $server_port; set $best_http_host $http_host; set $pass_port $pass_server_port; location / { set $namespace \"\"; set $ingress_name \"\"; set $service_name \"\"; set $service_port \"0\"; set $location_path \"/\"; ... exec \u00b6 kubectl ingress-nginx exec is exactly the same as kubectl exec , with the same command flags. It will automatically choose an ingress-nginx pod to run the command in. $ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx fastcgi_params geoip lua mime.types modsecurity modules nginx.conf opentracing.json owasp-modsecurity-crs template general \u00b6 kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod. $ kubectl ingress-nginx general -n ingress-nginx { \"controllerPodsCount\": 1 } info \u00b6 Shows the internal and external IP/CNAMES for an ingress-nginx service. $ kubectl ingress-nginx info -n ingress-nginx Service cluster IP address: 10.187.253.31 LoadBalancer IP|CNAME: 35.123.123.123 Use the --service flag if your ingress-nginx LoadBalancer service is not named ingress-nginx . ingresses \u00b6 kubectl ingress-nginx ingresses , alternately kubectl ingress-nginx ing , shows a more detailed view of the ingress definitions in a namespace. 
Compare: $ kubectl get ingresses --all-namespaces NAMESPACE NAME HOSTS ADDRESS PORTS AGE default example-ingress1 testaddr.local,testaddr2.local localhost 80 5d default test-ingress-2 * localhost 80 5d vs $ kubectl ingress-nginx ingresses --all-namespaces NAMESPACE INGRESS NAME HOST+PATH ADDRESSES TLS SERVICE SERVICE PORT ENDPOINTS default example-ingress1 testaddr.local/etameta localhost NO pear-service 5678 5 default example-ingress1 testaddr2.local/otherpath localhost NO apple-service 5678 1 default example-ingress1 testaddr2.local/otherotherpath localhost NO pear-service 5678 5 default test-ingress-2 * localhost NO echo-service 8080 2 lint \u00b6 kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions. $ kubectl ingress-nginx lint --all-namespaces --verbose Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 \u2717 othernamespace/ingress-definition-blah - The rewrite-target annotation value does not reference a capture group Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3174 Checking deployments... \u2717 namespace2/nginx-ingress-controller - Uses removed config flag --sort-backends Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3655 - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 to show the lints added only for a particular ingress-nginx release, use the --from-version and --to-version flags: $ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0 .24.0 --to-version 0 .24.0 Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 Checking deployments... \u2717 namespace2/nginx-ingress-controller - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 logs \u00b6 kubectl ingress-nginx logs is almost the same as kubectl logs , with fewer flags. It will automatically choose an ingress-nginx pod to read logs from. $ kubectl ingress-nginx logs -n ingress-nginx ------------------------------------------------------------------------------- NGINX Ingress controller Release: dev Build: git-48dc3a867 Repository: git@github.com:kubernetes/ingress-nginx.git ------------------------------------------------------------------------------- W0405 16:53:46.061589 7 flags.go:214] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false) nginx version: nginx/1.15.9 W0405 16:53:46.070093 7 client_config.go:549] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. 
I0405 16:53:46.070499 7 main.go:205] Creating API client for https://10.96.0.1:443 I0405 16:53:46.077784 7 main.go:249] Running in Kubernetes cluster version v1.10 (v1.10.11) - git (clean) commit 637c7e288581ee40ab4ca210618a89a555b6e7e9 - platform linux/amd64 I0405 16:53:46.183359 7 nginx.go:265] Starting NGINX Ingress controller I0405 16:53:46.193913 7 event.go:209] Event(v1.ObjectReference{Kind:\"ConfigMap\", Namespace:\"ingress-nginx\", Name:\"udp-services\", UID:\"82258915-563e-11e9-9c52-025000000001\", APIVersion:\"v1\", ResourceVersion:\"494\", FieldPath:\"\"}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services ... ssh \u00b6 kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash . Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container. $ kubectl ingress-nginx ssh -n ingress-nginx www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$","title":"kubectl plugin"},{"location":"kubectl-plugin/#the-ingress-nginx-kubectl-plugin","text":"","title":"The ingress-nginx kubectl plugin"},{"location":"kubectl-plugin/#installation","text":"Install krew , then run kubectl krew install ingress-nginx to install the plugin. Then run kubectl ingress-nginx --help to make sure the plugin is properly installed and to get a list of commands: kubectl ingress-nginx --help A kubectl plugin for inspecting your ingress-nginx deployments Usage: ingress-nginx [command] Available Commands: backends Inspect the dynamic backend information of an ingress-nginx instance certs Output the certificate data stored in an ingress-nginx pod conf Inspect the generated nginx.conf exec Execute a command inside an ingress-nginx pod general Inspect the other dynamic ingress-nginx information help Help about any command info Show information about the ingress-nginx service ingresses Provide a short summary of all of the ingress definitions lint Inspect kubernetes resources for possible issues logs Get the kubernetes logs for an ingress-nginx pod ssh ssh into a running ingress-nginx pod Flags: --as string Username to impersonate for the operation --as-group stringArray Group to impersonate for the operation, this flag can be repeated to specify multiple groups. --cache-dir string Default HTTP cache directory (default \"/Users/alexkursell/.kube/http-cache\") --certificate-authority string Path to a cert file for the certificate authority --client-certificate string Path to a client certificate file for TLS --client-key string Path to a client key file for TLS --cluster string The name of the kubeconfig cluster to use --context string The name of the kubeconfig context to use -h, --help help for ingress-nginx --insecure-skip-tls-verify If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure --kubeconfig string Path to the kubeconfig file to use for CLI requests. -n, --namespace string If present, the namespace scope for this CLI request --request-timeout string The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. (default \"0\") -s, --server string The address and port of the Kubernetes API server --token string Bearer token for authentication to the API server --user string The name of the kubeconfig user to use Use \"ingress-nginx [command] --help\" for more information about a command. 
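To double-check that krew registered the plugin, you can also list installed plugins; a sanity check, with output trimmed and illustrative:

$ kubectl krew list | grep ingress-nginx
ingress-nginx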
If a new ingress-nginx version has just been released, the plugin may not yet have been updated inside the repository. In that case, you can install the latest version of the plugin by running: ( set -x; cd \"$(mktemp -d)\" && curl -fsSLO \"https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}\" && kubectl krew install \\ --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz ) Replace 0.24.0 with the recently released version.","title":"Installation"},{"location":"kubectl-plugin/#common-flags","text":"Every subcommand supports the basic kubectl configuration flags like --namespace , --context , --client-key and so on. Subcommands that act on a particular ingress-nginx pod ( backends , certs , conf , exec , general , logs , ssh ) support the --deployment and --pod flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller . Subcommands that inspect resources ( ingresses , lint ) support the --all-namespaces flag, which causes them to inspect resources in every namespace.","title":"Common Flags"},{"location":"kubectl-plugin/#subcommands","text":"Note that backends , general , certs , and conf require ingress-nginx version 0.23.0 or higher.","title":"Subcommands"},{"location":"kubectl-plugin/#backends","text":"Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about: $ kubectl ingress-nginx backends -n ingress-nginx [ { \"name\": \"default-apple-service-5678\", \"service\": { \"metadata\": { \"creationTimestamp\": null }, \"spec\": { \"ports\": [ { \"protocol\": \"TCP\", \"port\": 5678, \"targetPort\": 5678 } ], \"selector\": { \"app\": \"apple\" }, \"clusterIP\": \"10.97.230.121\", \"type\": \"ClusterIP\", \"sessionAffinity\": \"None\" }, \"status\": { \"loadBalancer\": {} } }, \"port\": 0, \"sslPassthrough\": false, \"endpoints\": [ { \"address\": \"10.1.3.86\", \"port\": \"5678\" } ], \"sessionAffinityConfig\": { \"name\": \"\", \"cookieSessionAffinity\": { \"name\": \"\" } }, \"upstreamHashByConfig\": { \"upstream-hash-by-subset-size\": 3 }, \"noServer\": false, \"trafficShapingPolicy\": { \"weight\": 0, \"header\": \"\", \"headerValue\": \"\", \"cookie\": \"\" } }, { \"name\": \"default-echo-service-8080\", ... }, { \"name\": \"upstream-default-backend\", ... } ] Add the --list option to show only the backend names. Add the --backend option to show only the backend with the given name.","title":"backends"},{"location":"kubectl-plugin/#certs","text":"Use kubectl ingress-nginx certs --host <hostname> to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0 ). WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere. $ kubectl ingress-nginx certs -n ingress-nginx --host testaddr.local -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- ... -----END CERTIFICATE----- -----BEGIN RSA PRIVATE KEY----- -----END RSA PRIVATE KEY-----","title":"certs"},{"location":"kubectl-plugin/#conf","text":"Use kubectl ingress-nginx conf to dump the generated nginx.conf file.
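Run without extra flags, it prints the entire rendered configuration, which can be paged or searched like any file; a sketch only, since the full output typically runs to thousands of lines:

$ kubectl ingress-nginx conf -n ingress-nginx | less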
Add the --host option to view only the server block for that host: kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local server { server_name testaddr.local ; listen 80; set $proxy_upstream_name \"-\"; set $pass_access_scheme $scheme; set $pass_server_port $server_port; set $best_http_host $http_host; set $pass_port $pass_server_port; location / { set $namespace \"\"; set $ingress_name \"\"; set $service_name \"\"; set $service_port \"0\"; set $location_path \"/\"; ...","title":"conf"},{"location":"kubectl-plugin/#exec","text":"kubectl ingress-nginx exec is exactly the same as kubectl exec , with the same command flags. It will automatically choose an ingress-nginx pod to run the command in. $ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx fastcgi_params geoip lua mime.types modsecurity modules nginx.conf opentracing.json owasp-modsecurity-crs template","title":"exec"},{"location":"kubectl-plugin/#general","text":"kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod. $ kubectl ingress-nginx general -n ingress-nginx { \"controllerPodsCount\": 1 }","title":"general"},{"location":"kubectl-plugin/#info","text":"Shows the internal and external IP/CNAMEs for an ingress-nginx service. $ kubectl ingress-nginx info -n ingress-nginx Service cluster IP address: 10.187.253.31 LoadBalancer IP|CNAME: 35.123.123.123 Use the --service flag if your ingress-nginx LoadBalancer service is not named ingress-nginx .","title":"info"},{"location":"kubectl-plugin/#ingresses","text":"kubectl ingress-nginx ingresses , alternatively kubectl ingress-nginx ing , shows a more detailed view of the ingress definitions in a namespace. Compare: $ kubectl get ingresses --all-namespaces NAMESPACE NAME HOSTS ADDRESS PORTS AGE default example-ingress1 testaddr.local,testaddr2.local localhost 80 5d default test-ingress-2 * localhost 80 5d vs $ kubectl ingress-nginx ingresses --all-namespaces NAMESPACE INGRESS NAME HOST+PATH ADDRESSES TLS SERVICE SERVICE PORT ENDPOINTS default example-ingress1 testaddr.local/etameta localhost NO pear-service 5678 5 default example-ingress1 testaddr2.local/otherpath localhost NO apple-service 5678 1 default example-ingress1 testaddr2.local/otherotherpath localhost NO pear-service 5678 5 default test-ingress-2 * localhost NO echo-service 8080 2","title":"ingresses"},{"location":"kubectl-plugin/#lint","text":"kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions. $ kubectl ingress-nginx lint --all-namespaces --verbose Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 \u2717 othernamespace/ingress-definition-blah - The rewrite-target annotation value does not reference a capture group Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3174 Checking deployments...
\u2717 namespace2/nginx-ingress-controller - Uses removed config flag --sort-backends Lint added for version 0.22.0 https://github.com/kubernetes/ingress-nginx/issues/3655 - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808 To show only the lints added for a particular ingress-nginx release, use the --from-version and --to-version flags: $ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0 Checking ingresses... \u2717 anamespace/this-nginx - Contains the removed session-cookie-hash annotation. Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3743 Checking deployments... \u2717 namespace2/nginx-ingress-controller - Uses removed config flag --enable-dynamic-certificates Lint added for version 0.24.0 https://github.com/kubernetes/ingress-nginx/issues/3808","title":"lint"},{"location":"kubectl-plugin/#logs","text":"kubectl ingress-nginx logs is almost the same as kubectl logs , with fewer flags. It will automatically choose an ingress-nginx pod to read logs from. $ kubectl ingress-nginx logs -n ingress-nginx ------------------------------------------------------------------------------- NGINX Ingress controller Release: dev Build: git-48dc3a867 Repository: git@github.com:kubernetes/ingress-nginx.git ------------------------------------------------------------------------------- W0405 16:53:46.061589 7 flags.go:214] SSL certificate chain completion is disabled (--enable-ssl-chain-completion=false) nginx version: nginx/1.15.9 W0405 16:53:46.070093 7 client_config.go:549] Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work. I0405 16:53:46.070499 7 main.go:205] Creating API client for https://10.96.0.1:443 I0405 16:53:46.077784 7 main.go:249] Running in Kubernetes cluster version v1.10 (v1.10.11) - git (clean) commit 637c7e288581ee40ab4ca210618a89a555b6e7e9 - platform linux/amd64 I0405 16:53:46.183359 7 nginx.go:265] Starting NGINX Ingress controller I0405 16:53:46.193913 7 event.go:209] Event(v1.ObjectReference{Kind:\"ConfigMap\", Namespace:\"ingress-nginx\", Name:\"udp-services\", UID:\"82258915-563e-11e9-9c52-025000000001\", APIVersion:\"v1\", ResourceVersion:\"494\", FieldPath:\"\"}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services ...","title":"logs"},{"location":"kubectl-plugin/#ssh","text":"kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash . Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container. $ kubectl ingress-nginx ssh -n ingress-nginx www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$","title":"ssh"},{"location":"troubleshooting/","text":"Troubleshooting \u00b6 Ingress-Controller Logs and Events \u00b6 There are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting methods to obtain more information.
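Before working through the checks below, it can help to confirm that the controller pod itself is up; this sketch assumes the ingress-nginx namespace and reuses the pod name from the examples that follow, both of which will differ in your cluster:

$ kubectl get pods -n ingress-nginx
NAME                                        READY   STATUS    RESTARTS   AGE
nginx-ingress-controller-67956bf89d-fv58j   1/1     Running   0          1m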
Check the Ingress Resource Events $ kubectl get ing -n <namespace> NAME HOSTS ADDRESS PORTS AGE cafe-ingress cafe.com 10.0.2.15 80 25s $ kubectl describe ing <ingress-name> -n <namespace> Name: cafe-ingress Namespace: default Address: 10.0.2.15 Default backend: default-http-backend:80 (172.17.0.5:8080) Rules: Host Path Backends ---- ---- -------- cafe.com /tea tea-svc:80 () /coffee coffee-svc:80 () Annotations: kubectl.kubernetes.io/last-applied-configuration: {\"apiVersion\":\"networking.k8s.io/v1beta1\",\"kind\":\"Ingress\",\"metadata\":{\"annotations\":{},\"name\":\"cafe-ingress\",\"namespace\":\"default\",\"selfLink\":\"/apis/networking/v1beta1/namespaces/default/ingresses/cafe-ingress\"},\"spec\":{\"rules\":[{\"host\":\"cafe.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"tea-svc\",\"servicePort\":80},\"path\":\"/tea\"},{\"backend\":{\"serviceName\":\"coffee-svc\",\"servicePort\":80},\"path\":\"/coffee\"}]}}]},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"169.48.142.110\"}]}}} Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal CREATE 1m nginx-ingress-controller Ingress default/cafe-ingress Normal UPDATE 58s nginx-ingress-controller Ingress default/cafe-ingress Check the Ingress Controller Logs $ kubectl get pods -n <namespace> NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl logs -n <namespace> nginx-ingress-controller-67956bf89d-fv58j ------------------------------------------------------------------------------- NGINX Ingress controller Release: 0.14.0 Build: git-734361d Repository: https://github.com/kubernetes/ingress-nginx ------------------------------------------------------------------------------- .... Check the Nginx Configuration $ kubectl get pods -n <namespace> NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl exec -it -n <namespace> nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf daemon off; worker_processes 2; pid /run/nginx.pid; worker_rlimit_nofile 523264; worker_shutdown_timeout 240s; events { multi_accept on; worker_connections 16384; use epoll; } http { .... Check if used Services Exist $ kubectl get svc --all-namespaces NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default coffee-svc ClusterIP 10.106.154.35 80/TCP 18m default kubernetes ClusterIP 10.96.0.1 443/TCP 30m default tea-svc ClusterIP 10.104.172.12 80/TCP 18m kube-system default-http-backend NodePort 10.108.189.236 80:30001/TCP 30m kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 30m kube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m Debug Logging \u00b6 Using the flag --v=XX , it is possible to increase the level of logging. This is done by editing the deployment. $ kubectl get deploy -n <namespace> NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE default-http-backend 1 1 1 1 35m nginx-ingress-controller 1 1 1 1 35m $ kubectl edit deploy -n <namespace> nginx-ingress-controller # Add --v=X to \"- args\", where X is an integer --v=2 shows details using diff about the changes in the configuration in nginx --v=3 shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format --v=5 configures NGINX in debug mode Authentication to the Kubernetes API Server \u00b6 A number of components are involved in the authentication process and the first step is to narrow down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file.
Both authentications must work: +-------------+ service +------------+ | | authentication | | + apiserver +<-------------------+ ingress | | | | controller | +-------------+ +------------+ Service authentication The Ingress controller needs information from the apiserver. Therefore, authentication is required, which can be achieved in two different ways: Service Account: This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details. Kubeconfig file: In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the --kubeconfig flag. The value of the flag is a path to a file specifying how to connect to the API server. Using --kubeconfig does not require the flag --apiserver-host . The format of the file is identical to ~/.kube/config which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. Using the flag --apiserver-host : Using the flag --apiserver-host=http://localhost:8080 , it is possible to specify an unsecured API server or reach a remote kubernetes cluster using kubectl proxy . Please do not use this approach in production. In the diagram below you can see the full authentication flow with all options, starting with the browser on the lower left-hand side. Kubernetes Workstation +---------------------------------------------------+ +------------------+ | | | | | +-----------+ apiserver +------------+ | | +------------+ | | | | proxy | | | | | | | | | apiserver | | ingress | | | | ingress | | | | | | controller | | | | controller | | | | | | | | | | | | | | | | | | | | | | | | | service account/ | | | | | | | | | | kubeconfig | | | | | | | | | +<-------------------+ | | | | | | | | | | | | | | | | | +------+----+ kubeconfig +------+-----+ | | +------+-----+ | | |<--------------------------------------------------------| | | | | | +---------------------------------------------------+ +------------------+ Service Account \u00b6 If using a service account to connect to the API server, the ingress controller expects the file /var/run/secrets/kubernetes.io/serviceaccount/token to be present. It provides a secret token that is required to authenticate with the API server.
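As a quick first check, the token mount can be listed directly in the controller pod; namespace and pod name are placeholders here, and the fuller walkthrough using a dedicated test container follows:

$ kubectl exec -n <namespace> <controller-pod-name> -- ls /var/run/secrets/kubernetes.io/serviceaccount/
ca.crt  namespace  token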
Verify with the following commands: # start a container that contains curl $ kubectl run test --image=tutum/curl -- sleep 10000 # check that container is running $ kubectl get pods NAME READY STATUS RESTARTS AGE test-701078429-s5kca 1/1 Running 0 16s # check if secret exists $ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ ca.crt namespace token # get service IP of master $ kubectl get services NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes 10.0.0.1 443/TCP 1d # check base connectivity from cluster inside $ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 Unauthorized # connect using tokens $ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) $ echo $TOKEN_VALUE eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H \"Authorization: Bearer $TOKEN_VALUE\" https://10.0.0.1 { \"paths\": [ \"/api\", \"/api/v1\", \"/apis\", \"/apis/apps\", \"/apis/apps/v1alpha1\", \"/apis/authentication.k8s.io\", \"/apis/authentication.k8s.io/v1beta1\", \"/apis/authorization.k8s.io\", \"/apis/authorization.k8s.io/v1beta1\", \"/apis/autoscaling\", \"/apis/autoscaling/v1\", \"/apis/batch\", \"/apis/batch/v1\", \"/apis/batch/v2alpha1\", \"/apis/certificates.k8s.io\", \"/apis/certificates.k8s.io/v1alpha1\", \"/apis/networking\", \"/apis/networking/v1beta1\", \"/apis/policy\", \"/apis/policy/v1alpha1\", \"/apis/rbac.authorization.k8s.io\", \"/apis/rbac.authorization.k8s.io/v1alpha1\", \"/apis/storage.k8s.io\", \"/apis/storage.k8s.io/v1beta1\", \"/healthz\", \"/healthz/ping\", \"/logs\", \"/metrics\", \"/swaggerapi/\", \"/ui/\", \"/version\" ] } If it is not working, there are two possible reasons: The contents of the tokens are invalid. Find the secret name with kubectl get secrets | grep service-account and delete it with kubectl delete secret <secret-name> . It will automatically be recreated. You have a non-standard Kubernetes installation and the file containing the token may not be present. The API server will mount a volume containing this file, but only if the API server is configured to use the ServiceAccount admission controller. If you experience this error, verify that your API server is using the ServiceAccount admission controller. If you are configuring the API server by hand, you can set this with the --admission-control parameter. Note that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers. More information: User Guide: Service Accounts Cluster Administrator Guide: Managing Service Accounts Kube-Config \u00b6 If you want to use a kubeconfig file for authentication, follow the deploy procedure and add the flag --kubeconfig=/etc/kubernetes/kubeconfig.yaml to the args section of the deployment. Using GDB with Nginx \u00b6 GDB can be used with nginx to perform a configuration dump. This allows us to see which configuration is being used, as well as older configurations. Note: The below is based on the nginx documentation .
SSH into the worker $ ssh user@workerIP Obtain the Docker Container Running nginx $ docker ps | grep nginx-ingress-controller CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES d9e1d243156a quay.io/kubernetes-ingress-controller/nginx-ingress-controller \"/usr/bin/dumb-init \u2026\" 19 minutes ago Up 19 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0 Exec into the container $ docker exec -it --user=0 --privileged d9e1d243156a bash Make sure nginx is running with --with-debug $ nginx -V 2>&1 | grep -- '--with-debug' Get list of processes running on container $ ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 20:23 ? 00:00:00 /usr/bin/dumb-init /nginx-ingres root 5 1 0 20:23 ? 00:00:05 /nginx-ingress-controller --defa root 21 5 0 20:23 ? 00:00:00 nginx: master process /usr/sbin/ nobody 106 21 0 20:23 ? 00:00:00 nginx: worker process nobody 107 21 0 20:23 ? 00:00:00 nginx: worker process root 172 0 0 20:43 pts/0 00:00:00 bash Attach gdb to the nginx master process $ gdb -p 21 .... Attaching to process 21 Reading symbols from /usr/sbin/nginx...done. .... (gdb) Copy and paste the following: set $cd = ngx_cycle->config_dump set $nelts = $cd.nelts set $elts = (ngx_conf_dump_t*)($cd.elts) while ($nelts-- > 0) set $name = $elts[$nelts]->name.data printf \"Dumping %s to nginx_conf.txt\\n\", $name append memory nginx_conf.txt \\ $elts[$nelts]->buffer.start $elts[$nelts]->buffer.end end Quit GDB by pressing CTRL+D Open nginx_conf.txt cat nginx_conf.txt","title":"Troubleshooting"},{"location":"troubleshooting/#troubleshooting","text":"","title":"Troubleshooting"},{"location":"troubleshooting/#ingress-controller-logs-and-events","text":"There are many ways to troubleshoot the ingress-controller. The following are basic troubleshooting methods to obtain more information.
Check the Ingress Resource Events $ kubectl get ing -n <namespace> NAME HOSTS ADDRESS PORTS AGE cafe-ingress cafe.com 10.0.2.15 80 25s $ kubectl describe ing <ingress-name> -n <namespace> Name: cafe-ingress Namespace: default Address: 10.0.2.15 Default backend: default-http-backend:80 (172.17.0.5:8080) Rules: Host Path Backends ---- ---- -------- cafe.com /tea tea-svc:80 () /coffee coffee-svc:80 () Annotations: kubectl.kubernetes.io/last-applied-configuration: {\"apiVersion\":\"networking.k8s.io/v1beta1\",\"kind\":\"Ingress\",\"metadata\":{\"annotations\":{},\"name\":\"cafe-ingress\",\"namespace\":\"default\",\"selfLink\":\"/apis/networking/v1beta1/namespaces/default/ingresses/cafe-ingress\"},\"spec\":{\"rules\":[{\"host\":\"cafe.com\",\"http\":{\"paths\":[{\"backend\":{\"serviceName\":\"tea-svc\",\"servicePort\":80},\"path\":\"/tea\"},{\"backend\":{\"serviceName\":\"coffee-svc\",\"servicePort\":80},\"path\":\"/coffee\"}]}}]},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"169.48.142.110\"}]}}} Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal CREATE 1m nginx-ingress-controller Ingress default/cafe-ingress Normal UPDATE 58s nginx-ingress-controller Ingress default/cafe-ingress Check the Ingress Controller Logs $ kubectl get pods -n <namespace> NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl logs -n <namespace> nginx-ingress-controller-67956bf89d-fv58j ------------------------------------------------------------------------------- NGINX Ingress controller Release: 0.14.0 Build: git-734361d Repository: https://github.com/kubernetes/ingress-nginx ------------------------------------------------------------------------------- .... Check the Nginx Configuration $ kubectl get pods -n <namespace> NAME READY STATUS RESTARTS AGE nginx-ingress-controller-67956bf89d-fv58j 1/1 Running 0 1m $ kubectl exec -it -n <namespace> nginx-ingress-controller-67956bf89d-fv58j cat /etc/nginx/nginx.conf daemon off; worker_processes 2; pid /run/nginx.pid; worker_rlimit_nofile 523264; worker_shutdown_timeout 240s; events { multi_accept on; worker_connections 16384; use epoll; } http { .... Check if used Services Exist $ kubectl get svc --all-namespaces NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE default coffee-svc ClusterIP 10.106.154.35 80/TCP 18m default kubernetes ClusterIP 10.96.0.1 443/TCP 30m default tea-svc ClusterIP 10.104.172.12 80/TCP 18m kube-system default-http-backend NodePort 10.108.189.236 80:30001/TCP 30m kube-system kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 30m kube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m","title":"Ingress-Controller Logs and Events"},{"location":"troubleshooting/#debug-logging","text":"Using the flag --v=XX , it is possible to increase the level of logging. This is done by editing the deployment.
$ kubectl get deploy -n <namespace> NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE default-http-backend 1 1 1 1 35m nginx-ingress-controller 1 1 1 1 35m $ kubectl edit deploy -n <namespace> nginx-ingress-controller # Add --v=X to \"- args\", where X is an integer --v=2 shows details using diff about the changes in the configuration in nginx --v=3 shows details about the service, Ingress rule, endpoint changes and it dumps the nginx configuration in JSON format --v=5 configures NGINX in debug mode","title":"Debug Logging"},{"location":"troubleshooting/#authentication-to-the-kubernetes-api-server","text":"A number of components are involved in the authentication process and the first step is to narrow down the source of the problem, namely whether it is a problem with service authentication or with the kubeconfig file. Both authentications must work: +-------------+ service +------------+ | | authentication | | + apiserver +<-------------------+ ingress | | | | controller | +-------------+ +------------+ Service authentication The Ingress controller needs information from the apiserver. Therefore, authentication is required, which can be achieved in two different ways: Service Account: This is recommended, because nothing has to be configured. The Ingress controller will use information provided by the system to communicate with the API server. See 'Service Account' section for details. Kubeconfig file: In some Kubernetes environments service accounts are not available. In this case a manual configuration is required. The Ingress controller binary can be started with the --kubeconfig flag. The value of the flag is a path to a file specifying how to connect to the API server. Using --kubeconfig does not require the flag --apiserver-host . The format of the file is identical to ~/.kube/config which is used by kubectl to connect to the API server. See 'kubeconfig' section for details. Using the flag --apiserver-host : Using the flag --apiserver-host=http://localhost:8080 , it is possible to specify an unsecured API server or reach a remote kubernetes cluster using kubectl proxy . Please do not use this approach in production. In the diagram below you can see the full authentication flow with all options, starting with the browser on the lower left-hand side. Kubernetes Workstation +---------------------------------------------------+ +------------------+ | | | | | +-----------+ apiserver +------------+ | | +------------+ | | | | proxy | | | | | | | | | apiserver | | ingress | | | | ingress | | | | | | controller | | | | controller | | | | | | | | | | | | | | | | | | | | | | | | | service account/ | | | | | | | | | | kubeconfig | | | | | | | | | +<-------------------+ | | | | | | | | | | | | | | | | | +------+----+ kubeconfig +------+-----+ | | +------+-----+ | | |<--------------------------------------------------------| | | | | | +---------------------------------------------------+ +------------------+","title":"Authentication to the Kubernetes API Server"},{"location":"troubleshooting/#service-account","text":"If using a service account to connect to the API server, the ingress controller expects the file /var/run/secrets/kubernetes.io/serviceaccount/token to be present. It provides a secret token that is required to authenticate with the API server.
Verify with the following commands: # start a container that contains curl $ kubectl run test --image=tutum/curl -- sleep 10000 # check that container is running $ kubectl get pods NAME READY STATUS RESTARTS AGE test-701078429-s5kca 1/1 Running 0 16s # check if secret exists $ kubectl exec test-701078429-s5kca ls /var/run/secrets/kubernetes.io/serviceaccount/ ca.crt namespace token # get service IP of master $ kubectl get services NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes 10.0.0.1 443/TCP 1d # check base connectivity from cluster inside $ kubectl exec test-701078429-s5kca -- curl -k https://10.0.0.1 Unauthorized # connect using tokens $ TOKEN_VALUE=$(kubectl exec test-701078429-s5kca -- cat /var/run/secrets/kubernetes.io/serviceaccount/token) $ echo $TOKEN_VALUE eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3Mi....9A $ kubectl exec test-701078429-s5kca -- curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H \"Authorization: Bearer $TOKEN_VALUE\" https://10.0.0.1 { \"paths\": [ \"/api\", \"/api/v1\", \"/apis\", \"/apis/apps\", \"/apis/apps/v1alpha1\", \"/apis/authentication.k8s.io\", \"/apis/authentication.k8s.io/v1beta1\", \"/apis/authorization.k8s.io\", \"/apis/authorization.k8s.io/v1beta1\", \"/apis/autoscaling\", \"/apis/autoscaling/v1\", \"/apis/batch\", \"/apis/batch/v1\", \"/apis/batch/v2alpha1\", \"/apis/certificates.k8s.io\", \"/apis/certificates.k8s.io/v1alpha1\", \"/apis/networking\", \"/apis/networking/v1beta1\", \"/apis/policy\", \"/apis/policy/v1alpha1\", \"/apis/rbac.authorization.k8s.io\", \"/apis/rbac.authorization.k8s.io/v1alpha1\", \"/apis/storage.k8s.io\", \"/apis/storage.k8s.io/v1beta1\", \"/healthz\", \"/healthz/ping\", \"/logs\", \"/metrics\", \"/swaggerapi/\", \"/ui/\", \"/version\" ] } If it is not working, there are two possible reasons: The contents of the tokens are invalid. Find the secret name with kubectl get secrets | grep service-account and delete it with kubectl delete secret <secret-name> . It will automatically be recreated. You have a non-standard Kubernetes installation and the file containing the token may not be present. The API server will mount a volume containing this file, but only if the API server is configured to use the ServiceAccount admission controller. If you experience this error, verify that your API server is using the ServiceAccount admission controller. If you are configuring the API server by hand, you can set this with the --admission-control parameter. Note that you should use other admission controllers as well. Before configuring this option, you should read about admission controllers. More information: User Guide: Service Accounts Cluster Administrator Guide: Managing Service Accounts","title":"Service Account"},{"location":"troubleshooting/#kube-config","text":"If you want to use a kubeconfig file for authentication, follow the deploy procedure and add the flag --kubeconfig=/etc/kubernetes/kubeconfig.yaml to the args section of the deployment.","title":"Kube-Config"},{"location":"troubleshooting/#using-gdb-with-nginx","text":"GDB can be used with nginx to perform a configuration dump. This allows us to see which configuration is being used, as well as older configurations. Note: The below is based on the nginx documentation .
SSH into the worker $ ssh user@workerIP Obtain the Docker Container Running nginx $ docker ps | grep nginx-ingress-controller CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES d9e1d243156a quay.io/kubernetes-ingress-controller/nginx-ingress-controller \"/usr/bin/dumb-init \u2026\" 19 minutes ago Up 19 minutes k8s_nginx-ingress-controller_nginx-ingress-controller-67956bf89d-mqxzt_kube-system_079f31ec-aa37-11e8-ad39-080027a227db_0 Exec into the container $ docker exec -it --user=0 --privileged d9e1d243156a bash Make sure nginx is running with --with-debug $ nginx -V 2>&1 | grep -- '--with-debug' Get list of processes running on container $ ps -ef UID PID PPID C STIME TTY TIME CMD root 1 0 0 20:23 ? 00:00:00 /usr/bin/dumb-init /nginx-ingres root 5 1 0 20:23 ? 00:00:05 /nginx-ingress-controller --defa root 21 5 0 20:23 ? 00:00:00 nginx: master process /usr/sbin/ nobody 106 21 0 20:23 ? 00:00:00 nginx: worker process nobody 107 21 0 20:23 ? 00:00:00 nginx: worker process root 172 0 0 20:43 pts/0 00:00:00 bash Attach gdb to the nginx master process $ gdb -p 21 .... Attaching to process 21 Reading symbols from /usr/sbin/nginx...done. .... (gdb) Copy and paste the following: set $cd = ngx_cycle->config_dump set $nelts = $cd.nelts set $elts = (ngx_conf_dump_t*)($cd.elts) while ($nelts-- > 0) set $name = $elts[$nelts]->name.data printf \"Dumping %s to nginx_conf.txt\\n\", $name append memory nginx_conf.txt \\ $elts[$nelts]->buffer.start $elts[$nelts]->buffer.end end Quit GDB by pressing CTRL+D Open nginx_conf.txt cat nginx_conf.txt","title":"Using GDB with Nginx"},{"location":"deploy/","text":"Installation Guide \u00b6 Contents \u00b6 Prerequisite Generic Deployment Command Provider Specific Steps Docker for Mac minikube AWS GCE - GKE Azure Bare-metal Verify installation Detect installed version Using Helm Prerequisite Generic Deployment Command \u00b6 Attention The default configuration watches Ingress objects from all the namespaces . To change this behavior use the flag --watch-namespace to limit the scope to a particular namespace. Warning If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Attention If you're using GKE you need to initialize your user as a cluster-admin with the following command: kubectl create clusterrolebinding cluster-admin-binding \\ --clusterrole cluster-admin \\ --user $(gcloud config get-value account) The following Mandatory Command is required for all deployments. kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/mandatory.yaml Tip If you are using a Kubernetes version prior to 1.14, you need to change kubernetes.io/os to beta.kubernetes.io/os at line 217 of mandatory.yaml , see Labels details . Provider Specific Steps \u00b6 There are cloud provider specific yaml files.
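Before moving on to the provider-specific steps below, it can help to confirm that the mandatory command created the controller resources; a sketch, with output illustrative and formatted as elsewhere in this guide:

$ kubectl get deploy -n ingress-nginx
NAME                       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
nginx-ingress-controller   1         1         1            1           1m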
Docker for Mac \u00b6 Kubernetes is available in Docker for Mac (from version 18.06.0-ce ) Create a service kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml minikube \u00b6 For standard usage: minikube addons enable ingress For development: Disable the ingress addon: minikube addons disable ingress Execute make dev-env Confirm the nginx-ingress-controller deployment exists: $ kubectl get pods -n ingress-nginx NAME READY STATUS RESTARTS AGE default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s AWS \u00b6 In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer . Since Kubernetes v1.9.0, it is possible to use a classic load balancer (ELB) or network load balancer (NLB). Please check the elastic load balancing AWS details page Elastic Load Balancer - ELB \u00b6 This setup requires choosing in which layer (L4 or L7) we want to configure the ELB: Layer 4 : use TCP as the listener protocol for ports 80 and 443. Layer 7 : use HTTP as the listener protocol for port 80 and terminate TLS in the ELB For L4: Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l4.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l4.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l4.yaml For L7: Change the line of the file provider/aws/service-l7.yaml , replacing the dummy certificate ID with a valid one \"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX\" Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l7.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l7.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l7.yaml This example creates an ELB with just two listeners, one on port 80 and another on port 443 ELB Idle Timeouts \u00b6 In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the keepalive_timeout that is configured for NGINX. By default NGINX keepalive_timeout is set to 75s . The default ELB idle timeout will work for most scenarios, unless the NGINX keepalive_timeout has been modified, in which case service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout will need to be modified to ensure it is less than the keepalive_timeout the user has configured. Please Note: An idle timeout of 3600s is recommended when using WebSockets.
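For instance, the annotation might be set on the LoadBalancer Service metadata like this; a sketch only, with 3600 following the WebSockets recommendation above:

metadata:
  annotations:
    # idle timeout in seconds; keep it below the NGINX keepalive_timeout
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"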
More information with regards to idle timeouts for your Load Balancer can be found in the official AWS documentation . Network Load Balancer (NLB) \u00b6 This type of load balancer is supported since v1.10.0 as an ALPHA feature. kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-nlb.yaml GCE-GKE \u00b6 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml Important Note: proxy protocol is not supported in GCE/GKE Azure \u00b6 kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml Bare-metal \u00b6 Using NodePort : kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/baremetal/service-nodeport.yaml Tip For extended notes regarding deployments on bare-metal, see Bare-metal considerations . Verify installation \u00b6 To check if the ingress controller pods have started, run the following command: kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch Once the ingress controller pods are running, you can cancel the above command by typing Ctrl+C . Now, you are ready to create your first ingress. Detect installed version \u00b6 To detect which version of the ingress controller is running, exec into the pod and run the nginx-ingress-controller --version command. POD_NAMESPACE=ingress-nginx POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version Using Helm \u00b6 NGINX Ingress controller can be installed via Helm using the chart stable/nginx-ingress from the official charts repository. To install the chart with the release name my-nginx : helm install my-nginx stable/nginx-ingress If the Kubernetes cluster has RBAC enabled, then run: helm install my-nginx stable/nginx-ingress --set rbac.create=true If you are using Helm 2, specify the release name using the --name flag helm install stable/nginx-ingress --name my-nginx or helm install stable/nginx-ingress --name my-nginx --set rbac.create=true Detect installed version: POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version","title":"Installation Guide"},{"location":"deploy/#installation-guide","text":"","title":"Installation Guide"},{"location":"deploy/#contents","text":"Prerequisite Generic Deployment Command Provider Specific Steps Docker for Mac minikube AWS GCE - GKE Azure Bare-metal Verify installation Detect installed version Using Helm","title":"Contents"},{"location":"deploy/#prerequisite-generic-deployment-command","text":"Attention The default configuration watches Ingress objects from all the namespaces . To change this behavior use the flag --watch-namespace to limit the scope to a particular namespace. Warning If multiple Ingresses define different paths for the same host, the ingress controller will merge the definitions. Attention If you're using GKE you need to initialize your user as a cluster-admin with the following command: kubectl create clusterrolebinding cluster-admin-binding \\ --clusterrole cluster-admin \\ --user $(gcloud config get-value account) The following Mandatory Command is required for all deployments.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/mandatory.yaml Tip If you are using a Kubernetes version prior to 1.14, you need to change kubernetes.io/os to beta.kubernetes.io/os at line 217 of mandatory.yaml , see Labels details .","title":"Prerequisite Generic Deployment Command"},{"location":"deploy/#provider-specific-steps","text":"There are cloud provider specific yaml files.","title":"Provider Specific Steps"},{"location":"deploy/#docker-for-mac","text":"Kubernetes is available in Docker for Mac (from version 18.06.0-ce ) Create a service kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml","title":"Docker for Mac"},{"location":"deploy/#minikube","text":"For standard usage: minikube addons enable ingress For development: Disable the ingress addon: minikube addons disable ingress Execute make dev-env Confirm the nginx-ingress-controller deployment exists: $ kubectl get pods -n ingress-nginx NAME READY STATUS RESTARTS AGE default-http-backend-66b447d9cf-rrlf9 1/1 Running 0 12s nginx-ingress-controller-fdcdcd6dd-vvpgs 1/1 Running 0 11s","title":"minikube"},{"location":"deploy/#aws","text":"In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of Type=LoadBalancer . Since Kubernetes v1.9.0, it is possible to use a classic load balancer (ELB) or network load balancer (NLB). Please check the elastic load balancing AWS details page","title":"AWS"},{"location":"deploy/#elastic-load-balancer-elb","text":"This setup requires choosing in which layer (L4 or L7) we want to configure the ELB: Layer 4 : use TCP as the listener protocol for ports 80 and 443. Layer 7 : use HTTP as the listener protocol for port 80 and terminate TLS in the ELB For L4: Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information. If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l4.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l4.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l4.yaml For L7: Change the line of the file provider/aws/service-l7.yaml , replacing the dummy certificate ID with a valid one \"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX\" Check that no change is necessary with regards to the ELB idle timeout. In some scenarios, users may want to modify the ELB idle timeout, so please check the ELB Idle Timeouts section for additional information.
If a change is required, users will need to update the value of service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout in provider/aws/service-l7.yaml Then execute: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-l7.yaml kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/patch-configmap-l7.yaml This example creates an ELB with just two listeners, one on port 80 and another on port 443","title":"Elastic Load Balancer - ELB"},{"location":"deploy/#elb-idle-timeouts","text":"In some scenarios users will need to modify the value of the ELB idle timeout. Users need to ensure the idle timeout is less than the keepalive_timeout that is configured for NGINX. By default NGINX keepalive_timeout is set to 75s . The default ELB idle timeout will work for most scenarios, unless the NGINX keepalive_timeout has been modified, in which case service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout will need to be modified to ensure it is less than the keepalive_timeout the user has configured. Please Note: An idle timeout of 3600s is recommended when using WebSockets. More information with regards to idle timeouts for your Load Balancer can be found in the official AWS documentation .","title":"ELB Idle Timeouts"},{"location":"deploy/#network-load-balancer-nlb","text":"This type of load balancer is supported since v1.10.0 as an ALPHA feature. kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/aws/service-nlb.yaml","title":"Network Load Balancer (NLB)"},{"location":"deploy/#gce-gke","text":"kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml Important Note: proxy protocol is not supported in GCE/GKE","title":"GCE-GKE"},{"location":"deploy/#azure","text":"kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/cloud-generic.yaml","title":"Azure"},{"location":"deploy/#bare-metal","text":"Using NodePort : kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.28.0/deploy/static/provider/baremetal/service-nodeport.yaml Tip For extended notes regarding deployments on bare-metal, see Bare-metal considerations .","title":"Bare-metal"},{"location":"deploy/#verify-installation","text":"To check if the ingress controller pods have started, run the following command: kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch Once the ingress controller pods are running, you can cancel the above command by typing Ctrl+C . Now, you are ready to create your first ingress.","title":"Verify installation"},{"location":"deploy/#detect-installed-version","text":"To detect which version of the ingress controller is running, exec into the pod and run the nginx-ingress-controller --version command. POD_NAMESPACE=ingress-nginx POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version","title":"Detect installed version"},{"location":"deploy/#using-helm","text":"NGINX Ingress controller can be installed via Helm using the chart stable/nginx-ingress from the official charts repository.
To install the chart with the release name my-nginx : helm install my-nginx stable/nginx-ingress If the Kubernetes cluster has RBAC enabled, then run: helm install my-nginx stable/nginx-ingress --set rbac.create=true If you are using Helm 2, specify the release name using the --name flag helm install stable/nginx-ingress --name my-nginx or helm install stable/nginx-ingress --name my-nginx --set rbac.create=true Detect installed version: POD_NAME=$(kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o jsonpath='{.items[0].metadata.name}') kubectl exec -it $POD_NAME -- /nginx-ingress-controller --version","title":"Using Helm"},{"location":"deploy/baremetal/","text":"Bare-metal considerations \u00b6 In traditional cloud environments, where network load balancers are available on-demand, a single Kubernetes manifest suffices to provide a single point of contact to the NGINX Ingress controller to external clients and, indirectly, to any application running inside the cluster. Bare-metal environments lack this commodity, requiring a slightly different setup to offer the same kind of access to external consumers. The rest of this document describes a few recommended approaches to deploying the NGINX Ingress controller inside a Kubernetes cluster running on bare-metal. A pure software solution: MetalLB \u00b6 MetalLB provides a network load-balancer implementation for Kubernetes clusters that do not run on a supported cloud provider, effectively allowing the usage of LoadBalancer Services within any cluster. This section demonstrates how to use the Layer 2 configuration mode of MetalLB together with the NGINX Ingress controller in a Kubernetes cluster that has publicly accessible nodes . In this mode, one node attracts all the traffic for the ingress-nginx Service IP. See Traffic policies for more details. Note The description of other supported configuration modes is out of scope for this document. Warning MetalLB is currently in beta . Read about the Project maturity and make sure you inform yourself by reading the official documentation thoroughly. MetalLB can be deployed either with a simple Kubernetes manifest or with Helm. The rest of this example assumes MetalLB was deployed following the Installation instructions. MetalLB requires a pool of IP addresses in order to be able to take ownership of the ingress-nginx Service. This pool can be defined in a ConfigMap named config located in the same namespace as the MetalLB controller. This pool of IPs must be dedicated to MetalLB's use; you can't reuse the Kubernetes node IPs or IPs handed out by a DHCP server. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <None> ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 After creating the following ConfigMap, MetalLB takes ownership of one of the IP addresses in the pool and updates the loadBalancer IP field of the ingress-nginx Service accordingly.
apiVersion: v1 kind: ConfigMap metadata: namespace: metallb-system name: config data: config: | address-pools: - name: default protocol: layer2 addresses: - 203.0.113.10-203.0.113.15 $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx LoadBalancer 10.0.220.217 203.0.113.10 80:30100/TCP,443:30101/TCP As soon as MetalLB sets the external IP address of the ingress-nginx LoadBalancer Service, the corresponding entries are created in the iptables NAT table and the node with the selected IP address starts responding to HTTP requests on the ports configured in the LoadBalancer Service: $ curl -D- http://203.0.113.3 -H 'Host: myapp.example.com' HTTP/1.1 200 OK Server: nginx/1.15.2 Tip In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the Local traffic policy. Traffic policies are described in more detail in Traffic policies as well as in the next section. Over a NodePort Service \u00b6 Due to its simplicity, this is the setup a user will deploy by default when following the steps described in the installation guide . Info A Service of type NodePort exposes, via the kube-proxy component, the same unprivileged port (default: 30000-32767) on every Kubernetes node, masters included. For more information, see Services . In this configuration, the NGINX container remains isolated from the host network. As a result, it can safely bind to any port, including the standard HTTP ports 80 and 443. However, due to the container namespace isolation, a client located outside the cluster network (e.g. on the public internet) is not able to access Ingress hosts directly on ports 80 and 443. Instead, the external client must append the NodePort allocated to the ingress-nginx Service to HTTP requests. Example Given the NodePort 30100 allocated to the ingress-nginx Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP and a Kubernetes node with the public IP address 203.0.113.2 (the external IP is added as an example, in most bare-metal environments this value is <None> ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 a client would reach an Ingress with host: myapp.example.com at http://myapp.example.com:30100 , where the myapp.example.com subdomain resolves to the 203.0.113.2 IP address. Impact on the host system While it may sound tempting to reconfigure the NodePort range using the --service-node-port-range API server flag to include unprivileged ports and be able to expose ports 80 and 443, doing so may result in unexpected issues including (but not limited to) the use of ports otherwise reserved to system daemons and the necessity to grant kube-proxy privileges it may otherwise not require. This practice is therefore discouraged . See the other approaches proposed on this page for alternatives. This approach has a few other limitations one ought to be aware of: Source IP address Services of type NodePort perform source address translation by default. This means the source IP of an HTTP request is always the IP address of the Kubernetes node that received the request from the perspective of NGINX.
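This can be confirmed by reading the traffic policy currently set on the Service; Cluster , the Kubernetes default, is the mode that performs this translation (namespace as in the examples above):

$ kubectl -n ingress-nginx get svc ingress-nginx -o jsonpath='{.spec.externalTrafficPolicy}'
Cluster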
The recommended way to preserve the source IP in a NodePort setup is to set the value of the externalTrafficPolicy field of the ingress-nginx Service spec to Local ( example ). Warning This setting effectively drops packets sent to Kubernetes nodes which are not running any instance of the NGINX Ingress controller. Consider assigning NGINX Pods to specific nodes in order to control on which nodes the NGINX Ingress controller should or should not be scheduled. Example In a Kubernetes cluster composed of 3 nodes (the external IP is added as an example, in most bare-metal environments this value is <None> ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 with a nginx-ingress-controller Deployment composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-cf9ff8c96-8vvf8 1/1 Running 172.17.0.3 host-3 nginx-ingress-controller-cf9ff8c96-pxsds 1/1 Running 172.17.1.4 host-2 Requests sent to host-2 and host-3 would be forwarded to NGINX and the original client's IP would be preserved, while requests to host-1 would get dropped because there is no NGINX replica running on that node. Ingress status Because NodePort Services do not get a LoadBalancerIP assigned by definition, the NGINX Ingress controller does not update the status of Ingress objects it manages . $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Despite the fact there is no load balancer providing a public IP address to the NGINX Ingress controller, it is possible to force the status update of all managed Ingress objects by setting the externalIPs field of the ingress-nginx Service. Warning There is more to setting externalIPs than just enabling the NGINX Ingress controller to update the status of Ingress objects. Please read about this option in the Services page of the official Kubernetes documentation as well as the section about External IPs in this document for more information. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <None> ) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 one could edit the ingress-nginx Service and add the following field to the object spec spec: externalIPs: - 203.0.113.1 - 203.0.113.2 - 203.0.113.3 which would in turn be reflected on Ingress objects as follows: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.1,203.0.113.2,203.0.113.3 80 Redirects As NGINX is not aware of the port translation operated by the NodePort Service , backend applications are responsible for generating redirect URLs that take into account the URL used by external clients, including the NodePort. Example Redirects generated by NGINX, for instance HTTP to HTTPS or domain to www.domain , are generated without NodePort: $ curl -D- http://myapp.example.com:30100 HTTP/1.1 308 Permanent Redirect Server: nginx/1.15.2 Location: https://myapp.example.com/ #-> missing NodePort in HTTPS redirect Via the host network \u00b6 In a setup where there is no external load balancer available but using NodePorts is not an option, one can configure ingress-nginx Pods to use the network of the host they run on instead of a dedicated network namespace.
The benefit of this approach is that the NGINX Ingress controller can bind ports 80 and 443 directly to Kubernetes nodes' network interfaces, without the extra network translation imposed by NodePort Services. Note This approach does not leverage any Service object to expose the NGINX Ingress controller. If the ingress-nginx Service exists in the target cluster, it is recommended to delete it . This can be achieved by enabling the hostNetwork option in the Pods' spec. template : spec : hostNetwork : true Security considerations Enabling this option exposes every system daemon to the NGINX Ingress controller on any network interface, including the host's loopback. Please evaluate the impact this may have on the security of your system carefully. Example Consider this nginx-ingress-controller Deployment composed of 2 replicas: NGINX Pods inherit the IP address of their host instead of an internal Pod IP. $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 One major limitation of this deployment approach is that only a single NGINX Ingress controller Pod may be scheduled on each cluster node, because binding the same port multiple times on the same network interface is technically impossible. Pods that are unschedulable in such a situation fail with the following event: $ kubectl -n ingress-nginx describe pod ... Events: Type Reason From Message ---- ------ ---- ------- Warning FailedScheduling default-scheduler 0/3 nodes are available: 3 node(s) didn't have free ports for the requested pod ports. One way to ensure only schedulable Pods are created is to deploy the NGINX Ingress controller as a DaemonSet instead of a traditional Deployment. Info A DaemonSet schedules exactly one type of Pod per cluster node, masters included, unless a node is configured to repel those Pods . For more information, see DaemonSet . Because most properties of DaemonSet objects are identical to Deployment objects, this documentation page leaves the configuration of the corresponding manifest at the user's discretion (a minimal sketch is given at the end of this section). As with NodePorts, this approach has a few quirks it is important to be aware of. DNS resolution Pods configured with hostNetwork : true do not use the internal DNS resolver (i.e. kube-dns or CoreDNS ), unless their dnsPolicy spec field is set to ClusterFirstWithHostNet . Consider using this setting if NGINX is expected to resolve internal names for any reason. Ingress status Because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply and the status of all Ingress objects remains blank. $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Instead, and because bare-metal nodes usually don't have an ExternalIP, one has to enable the --report-node-internal-ip-address flag, which sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller.
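As referenced above, the DaemonSet manifest is left to the user; here is a hedged minimal sketch combining the hostNetwork and dnsPolicy settings discussed in this section. The labels and image tag are illustrative assumptions to adapt from the official manifests:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
spec:
  selector:
    matchLabels:
      app: ingress-nginx
  template:
    metadata:
      labels:
        app: ingress-nginx
    spec:
      hostNetwork: true                   # bind directly to the node's interfaces
      dnsPolicy: ClusterFirstWithHostNet  # keep resolving cluster-internal names
      containers:
      - name: nginx-ingress-controller
        image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0
        ports:
        - containerPort: 80
        - containerPort: 443
```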
Example Given an nginx-ingress-controller DaemonSet composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 the controller sets the status of all Ingress objects it manages to the following value: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.2,203.0.113.3 80 Note Alternatively, it is possible to override the address written to Ingress objects using the --publish-status-address flag. See Command line arguments . Using a self-provisioned edge \u00b6 Similarly to cloud environments, this deployment approach requires an edge network component providing a public entrypoint to the Kubernetes cluster. This edge component can be either hardware (e.g. vendor appliance) or software (e.g. HAProxy ) and is usually managed outside of the Kubernetes landscape by operations teams. Such a deployment builds upon the NodePort Service described above in Over a NodePort Service , with one significant difference: external clients do not access cluster nodes directly, only the edge component does. This is particularly suitable for private Kubernetes clusters where none of the nodes has a public IP address. On the edge side, the only prerequisite is to dedicate a public IP address that forwards all HTTP traffic to Kubernetes nodes and/or masters. Incoming traffic on TCP ports 80 and 443 is forwarded to the corresponding HTTP and HTTPS NodePort on the target nodes; a hedged configuration sketch is given at the end of this section. External IPs \u00b6 Source IP address This method does not allow preserving the source IP of HTTP requests in any manner; it is therefore not recommended to use it despite its apparent simplicity. The externalIPs Service option was previously mentioned in the NodePort section. As per the Services page of the official Kubernetes documentation, the externalIPs option causes kube-proxy to route traffic sent to arbitrary IP addresses and on the Service ports to the endpoints of that Service. These IP addresses must belong to the target node . Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <none>) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 and the following ingress-nginx NodePort Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP One could set the following external IPs in the Service spec, and NGINX would become available on both the NodePort and the Service port: spec : externalIPs : - 203.0.113.2 - 203.0.113.3 $ curl -D- http://myapp.example.com:30100 HTTP/1.1 200 OK Server: nginx/1.15.2 $ curl -D- http://myapp.example.com HTTP/1.1 200 OK Server: nginx/1.15.2 We assume the myapp.example.com subdomain above resolves to both the 203.0.113.2 and 203.0.113.3 IP addresses.","title":"Bare-metal considerations"},{"location":"deploy/baremetal/#bare-metal-considerations","text":"In traditional cloud environments, where network load balancers are available on-demand, a single Kubernetes manifest suffices to provide a single point of contact to the NGINX Ingress controller to external clients and, indirectly, to any application running inside the cluster.
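Referring back to the self-provisioned edge approach above, the following is a hedged haproxy.cfg sketch of the TCP forwarding it describes. The node IPs and NodePorts are taken from the examples on this page; the timeouts are assumptions to tune:

```
defaults
    mode tcp
    timeout connect 5s
    timeout client  50s
    timeout server  50s

frontend ingress-http
    bind :80
    default_backend nodeport-http

frontend ingress-https
    bind :443
    default_backend nodeport-https

backend nodeport-http
    server host-2 203.0.113.2:30100 check
    server host-3 203.0.113.3:30100 check

backend nodeport-https
    server host-2 203.0.113.2:30101 check
    server host-3 203.0.113.3:30101 check
```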
Bare-metal environments lack this convenience, requiring a slightly different setup to offer the same kind of access to external consumers. The rest of this document describes a few recommended approaches to deploying the NGINX Ingress controller inside a Kubernetes cluster running on bare-metal.","title":"Bare-metal considerations"},{"location":"deploy/baremetal/#a-pure-software-solution-metallb","text":"MetalLB provides a network load-balancer implementation for Kubernetes clusters that do not run on a supported cloud provider, effectively allowing the usage of LoadBalancer Services within any cluster. This section demonstrates how to use the Layer 2 configuration mode of MetalLB together with the NGINX Ingress controller in a Kubernetes cluster that has publicly accessible nodes . In this mode, one node attracts all the traffic for the ingress-nginx Service IP. See Traffic policies for more details. Note The description of other supported configuration modes is out of scope for this document. Warning MetalLB is currently in beta . Read about the Project maturity and make sure you inform yourself by reading the official documentation thoroughly. MetalLB can be deployed either with a simple Kubernetes manifest or with Helm. The rest of this example assumes MetalLB was deployed following the Installation instructions. MetalLB requires a pool of IP addresses in order to be able to take ownership of the ingress-nginx Service. This pool can be defined in a ConfigMap named config located in the same namespace as the MetalLB controller. This pool of IPs must be dedicated to MetalLB's use; you can't reuse the Kubernetes node IPs or IPs handed out by a DHCP server. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <none>) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 After creating the following ConfigMap, MetalLB takes ownership of one of the IP addresses in the pool and updates the loadBalancerIP field of the ingress-nginx Service accordingly. apiVersion : v1 kind : ConfigMap metadata : namespace : metallb-system name : config data : config : | address-pools: - name: default protocol: layer2 addresses: - 203.0.113.10-203.0.113.15 $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx LoadBalancer 10.0.220.217 203.0.113.10 80:30100/TCP,443:30101/TCP As soon as MetalLB sets the external IP address of the ingress-nginx LoadBalancer Service, the corresponding entries are created in the iptables NAT table and the node with the selected IP address starts responding to HTTP requests on the ports configured in the LoadBalancer Service: $ curl -D- http://203.0.113.10 -H 'Host: myapp.example.com' HTTP/1.1 200 OK Server: nginx/1.15.2 Tip In order to preserve the source IP address in HTTP requests sent to NGINX, it is necessary to use the Local traffic policy. Traffic policies are described in more detail in Traffic policies as well as in the next section.","title":"A pure software solution: MetalLB"},{"location":"deploy/baremetal/#over-a-nodeport-service","text":"Due to its simplicity, this is the setup a user will deploy by default when following the steps described in the installation guide .
Info A Service of type NodePort exposes, via the kube-proxy component, the same unprivileged port (default: 30000-32767) on every Kubernetes node, masters included. For more information, see Services . In this configuration, the NGINX container remains isolated from the host network. As a result, it can safely bind to any port, including the standard HTTP ports 80 and 443. However, due to the container namespace isolation, a client located outside the cluster network (e.g. on the public internet) is not able to access Ingress hosts directly on ports 80 and 443. Instead, the external client must append the NodePort allocated to the ingress-nginx Service to HTTP requests. Example Given the NodePort 30100 allocated to the ingress-nginx Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) default-http-backend ClusterIP 10.0.64.249 80/TCP ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP and a Kubernetes node with the public IP address 203.0.113.2 (the external IP is added as an example, in most bare-metal environments this value is <none>) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 a client would reach an Ingress with host: myapp.example.com at http://myapp.example.com:30100 , where the myapp.example.com subdomain resolves to the 203.0.113.2 IP address. Impact on the host system While it may sound tempting to reconfigure the NodePort range using the --service-node-port-range API server flag to include unprivileged ports and be able to expose ports 80 and 443, doing so may result in unexpected issues including (but not limited to) the use of ports otherwise reserved for system daemons and the necessity to grant kube-proxy privileges it may otherwise not require. This practice is therefore discouraged . See the other approaches proposed on this page for alternatives. This approach has a few other limitations one ought to be aware of: Source IP address Services of type NodePort perform source address translation by default. This means the source IP of an HTTP request is always the IP address of the Kubernetes node that received the request, from the perspective of NGINX. The recommended way to preserve the source IP in a NodePort setup is to set the value of the externalTrafficPolicy field of the ingress-nginx Service spec to Local ( example ). Warning This setting effectively drops packets sent to Kubernetes nodes which are not running any instance of the NGINX Ingress controller. Consider assigning NGINX Pods to specific nodes in order to control which nodes the NGINX Ingress controller should or should not be scheduled on. Example In a Kubernetes cluster composed of 3 nodes (the external IP is added as an example, in most bare-metal environments this value is <none>) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 with an nginx-ingress-controller Deployment composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-cf9ff8c96-8vvf8 1/1 Running 172.17.0.3 host-3 nginx-ingress-controller-cf9ff8c96-pxsds 1/1 Running 172.17.1.4 host-2 Requests sent to host-2 and host-3 would be forwarded to NGINX and the original client's IP would be preserved, while requests to host-1 would get dropped because there is no NGINX replica running on that node.
Ingress status Because NodePort Services do not get a LoadBalancerIP assigned by definition, the NGINX Ingress controller does not update the status of Ingress objects it manages . $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Despite the fact that there is no load balancer providing a public IP address to the NGINX Ingress controller, it is possible to force the status update of all managed Ingress objects by setting the externalIPs field of the ingress-nginx Service. Warning There is more to setting externalIPs than just enabling the NGINX Ingress controller to update the status of Ingress objects. Please read about this option in the Services page of the official Kubernetes documentation as well as the section about External IPs in this document for more information. Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <none>) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 one could edit the ingress-nginx Service and add the following field to the object spec spec : externalIPs : - 203.0.113.1 - 203.0.113.2 - 203.0.113.3 which would in turn be reflected on Ingress objects as follows: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.1,203.0.113.2,203.0.113.3 80 Redirects As NGINX is not aware of the port translation operated by the NodePort Service , backend applications are responsible for generating redirect URLs that take into account the URL used by external clients, including the NodePort. Example Redirects generated by NGINX, for instance HTTP to HTTPS or domain to www.domain , are generated without the NodePort: $ curl -D- http://myapp.example.com:30100 HTTP/1.1 308 Permanent Redirect Server: nginx/1.15.2 Location: https://myapp.example.com/ #-> missing NodePort in HTTPS redirect","title":"Over a NodePort Service"},{"location":"deploy/baremetal/#via-the-host-network","text":"In a setup where there is no external load balancer available but using NodePorts is not an option, one can configure ingress-nginx Pods to use the network of the host they run on instead of a dedicated network namespace. The benefit of this approach is that the NGINX Ingress controller can bind ports 80 and 443 directly to Kubernetes nodes' network interfaces, without the extra network translation imposed by NodePort Services. Note This approach does not leverage any Service object to expose the NGINX Ingress controller. If the ingress-nginx Service exists in the target cluster, it is recommended to delete it . This can be achieved by enabling the hostNetwork option in the Pods' spec. template : spec : hostNetwork : true Security considerations Enabling this option exposes every system daemon to the NGINX Ingress controller on any network interface, including the host's loopback. Please evaluate the impact this may have on the security of your system carefully. Example Consider this nginx-ingress-controller Deployment composed of 2 replicas: NGINX Pods inherit the IP address of their host instead of an internal Pod IP.
$ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 One major limitation of this deployment approach is that only a single NGINX Ingress controller Pod may be scheduled on each cluster node, because binding the same port multiple times on the same network interface is technically impossible. Pods that are unschedulable in such a situation fail with the following event: $ kubectl -n ingress-nginx describe pod ... Events: Type Reason From Message ---- ------ ---- ------- Warning FailedScheduling default-scheduler 0/3 nodes are available: 3 node(s) didn't have free ports for the requested pod ports. One way to ensure only schedulable Pods are created is to deploy the NGINX Ingress controller as a DaemonSet instead of a traditional Deployment. Info A DaemonSet schedules exactly one type of Pod per cluster node, masters included, unless a node is configured to repel those Pods . For more information, see DaemonSet . Because most properties of DaemonSet objects are identical to Deployment objects, this documentation page leaves the configuration of the corresponding manifest at the user's discretion. As with NodePorts, this approach has a few quirks it is important to be aware of. DNS resolution Pods configured with hostNetwork : true do not use the internal DNS resolver (i.e. kube-dns or CoreDNS ), unless their dnsPolicy spec field is set to ClusterFirstWithHostNet . Consider using this setting if NGINX is expected to resolve internal names for any reason. Ingress status Because there is no Service exposing the NGINX Ingress controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply and the status of all Ingress objects remains blank. $ kubectl get ingress NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 80 Instead, and because bare-metal nodes usually don't have an ExternalIP, one has to enable the --report-node-internal-ip-address flag, which sets the status of all Ingress objects to the internal IP address of all nodes running the NGINX Ingress controller. Example Given an nginx-ingress-controller DaemonSet composed of 2 replicas $ kubectl -n ingress-nginx get pod -o wide NAME READY STATUS IP NODE default-http-backend-7c5bc89cc9-p86md 1/1 Running 172.17.1.1 host-2 nginx-ingress-controller-5b4cf5fc6-7lg6c 1/1 Running 203.0.113.3 host-3 nginx-ingress-controller-5b4cf5fc6-lzrls 1/1 Running 203.0.113.2 host-2 the controller sets the status of all Ingress objects it manages to the following value: $ kubectl get ingress -o wide NAME HOSTS ADDRESS PORTS test-ingress myapp.example.com 203.0.113.2,203.0.113.3 80 Note Alternatively, it is possible to override the address written to Ingress objects using the --publish-status-address flag. See Command line arguments .","title":"Via the host network"},{"location":"deploy/baremetal/#using-a-self-provisioned-edge","text":"Similarly to cloud environments, this deployment approach requires an edge network component providing a public entrypoint to the Kubernetes cluster. This edge component can be either hardware (e.g. vendor appliance) or software (e.g. HAProxy ) and is usually managed outside of the Kubernetes landscape by operations teams.
Such a deployment builds upon the NodePort Service described above in Over a NodePort Service , with one significant difference: external clients do not access cluster nodes directly, only the edge component does. This is particularly suitable for private Kubernetes clusters where none of the nodes has a public IP address. On the edge side, the only prerequisite is to dedicate a public IP address that forwards all HTTP traffic to Kubernetes nodes and/or masters. Incoming traffic on TCP ports 80 and 443 is forwarded to the corresponding HTTP and HTTPS NodePort on the target nodes; see the hedged HAProxy configuration sketch given earlier in this document.","title":"Using a self-provisioned edge"},{"location":"deploy/baremetal/#external-ips","text":"Source IP address This method does not allow preserving the source IP of HTTP requests in any manner; it is therefore not recommended to use it despite its apparent simplicity. The externalIPs Service option was previously mentioned in the NodePort section. As per the Services page of the official Kubernetes documentation, the externalIPs option causes kube-proxy to route traffic sent to arbitrary IP addresses and on the Service ports to the endpoints of that Service. These IP addresses must belong to the target node . Example Given the following 3-node Kubernetes cluster (the external IP is added as an example, in most bare-metal environments this value is <none>) $ kubectl get node NAME STATUS ROLES EXTERNAL-IP host-1 Ready master 203.0.113.1 host-2 Ready node 203.0.113.2 host-3 Ready node 203.0.113.3 and the following ingress-nginx NodePort Service $ kubectl -n ingress-nginx get svc NAME TYPE CLUSTER-IP PORT(S) ingress-nginx NodePort 10.0.220.217 80:30100/TCP,443:30101/TCP One could set the following external IPs in the Service spec, and NGINX would become available on both the NodePort and the Service port: spec : externalIPs : - 203.0.113.2 - 203.0.113.3 $ curl -D- http://myapp.example.com:30100 HTTP/1.1 200 OK Server: nginx/1.15.2 $ curl -D- http://myapp.example.com HTTP/1.1 200 OK Server: nginx/1.15.2 We assume the myapp.example.com subdomain above resolves to both the 203.0.113.2 and 203.0.113.3 IP addresses.","title":"External IPs"},{"location":"deploy/rbac/","text":"Role Based Access Control (RBAC) \u00b6 Overview \u00b6 This example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled. Role Based Access Control comprises four layers: ClusterRole - permissions assigned to a role that apply to an entire cluster ClusterRoleBinding - binding a ClusterRole to a specific account Role - permissions assigned to a role that apply to a specific namespace RoleBinding - binding a Role to a specific account In order for RBAC to be applied to an nginx-ingress-controller, that controller should be assigned to a ServiceAccount . That ServiceAccount should be bound to the Role s and ClusterRole s defined for the nginx-ingress-controller. Service Accounts created in this example \u00b6 One ServiceAccount is created in this example, nginx-ingress-serviceaccount . Permissions Granted in this example \u00b6 There are two sets of permissions defined in this example. Cluster-wide permissions defined by the ClusterRole named nginx-ingress-clusterrole , and namespace-specific permissions defined by the Role named nginx-ingress-role . Cluster Permissions \u00b6 These permissions are granted in order for the nginx-ingress-controller to be able to function as an ingress across the cluster.
These permissions are granted to the ClusterRole named nginx-ingress-clusterrole : configmaps , endpoints , nodes , pods , secrets : list, watch; nodes : get; services , ingresses : get, list, watch; events : create, patch; ingresses/status : update Namespace Permissions \u00b6 These permissions are granted specific to the nginx-ingress namespace. These permissions are granted to the Role named nginx-ingress-role : configmaps , pods , secrets : get; endpoints : get Furthermore, to support leader election, the nginx-ingress-controller needs to have access to a configmap using the resourceName ingress-controller-leader-nginx . Note that resourceNames can NOT be used to limit requests using the \u201ccreate\u201d verb because authorizers only have access to information that can be obtained from the request URL, method, and headers (resource names in a \u201ccreate\u201d request are part of the request body). configmaps : get, update (for resourceName ingress-controller-leader-nginx ) configmaps : create This resourceName is the concatenation of the election-id and the ingress-class as defined by the ingress-controller, which defaults to: election-id : ingress-controller-leader ingress-class : nginx resourceName : <election-id>-<ingress-class> Please adapt accordingly if you override either parameter when launching the nginx-ingress-controller. A hedged YAML sketch of this Role is given below. Bindings \u00b6 The ServiceAccount nginx-ingress-serviceaccount is bound to the Role nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole . The serviceAccountName associated with the containers in the deployment must match the serviceAccount. The namespace references in the Deployment metadata, container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace.","title":"Role Based Access Control (RBAC)"},{"location":"deploy/rbac/#role-based-access-control-rbac","text":"","title":"Role Based Access Control (RBAC)"},{"location":"deploy/rbac/#overview","text":"This example applies to nginx-ingress-controllers being deployed in an environment with RBAC enabled. Role Based Access Control comprises four layers: ClusterRole - permissions assigned to a role that apply to an entire cluster ClusterRoleBinding - binding a ClusterRole to a specific account Role - permissions assigned to a role that apply to a specific namespace RoleBinding - binding a Role to a specific account In order for RBAC to be applied to an nginx-ingress-controller, that controller should be assigned to a ServiceAccount . That ServiceAccount should be bound to the Role s and ClusterRole s defined for the nginx-ingress-controller.","title":"Overview"},{"location":"deploy/rbac/#service-accounts-created-in-this-example","text":"One ServiceAccount is created in this example, nginx-ingress-serviceaccount .","title":"Service Accounts created in this example"},{"location":"deploy/rbac/#permissions-granted-in-this-example","text":"There are two sets of permissions defined in this example. Cluster-wide permissions defined by the ClusterRole named nginx-ingress-clusterrole , and namespace-specific permissions defined by the Role named nginx-ingress-role .","title":"Permissions Granted in this example"},{"location":"deploy/rbac/#cluster-permissions","text":"These permissions are granted in order for the nginx-ingress-controller to be able to function as an ingress across the cluster.
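As referenced in the Namespace Permissions discussion above, here is a hedged sketch of what the namespace-scoped Role could look like, assuming the default election-id and ingress-class (hence the resourceName ingress-controller-leader-nginx); the exact manifest shipped with your version may differ:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: nginx-ingress
rules:
- apiGroups: [""]
  resources: ["configmaps", "pods", "secrets", "endpoints"]
  verbs: ["get"]
# leader election: get/update restricted to the election configmap
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["ingress-controller-leader-nginx"]
  verbs: ["get", "update"]
# "create" cannot be restricted by resourceName (see the note above)
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
```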
These permissions are granted to the ClusterRole named nginx-ingress-clusterrole : configmaps , endpoints , nodes , pods , secrets : list, watch; nodes : get; services , ingresses : get, list, watch; events : create, patch; ingresses/status : update","title":"Cluster Permissions"},{"location":"deploy/rbac/#namespace-permissions","text":"These permissions are granted specific to the nginx-ingress namespace. These permissions are granted to the Role named nginx-ingress-role : configmaps , pods , secrets : get; endpoints : get Furthermore, to support leader election, the nginx-ingress-controller needs to have access to a configmap using the resourceName ingress-controller-leader-nginx . Note that resourceNames can NOT be used to limit requests using the \u201ccreate\u201d verb because authorizers only have access to information that can be obtained from the request URL, method, and headers (resource names in a \u201ccreate\u201d request are part of the request body). configmaps : get, update (for resourceName ingress-controller-leader-nginx ) configmaps : create This resourceName is the concatenation of the election-id and the ingress-class as defined by the ingress-controller, which defaults to: election-id : ingress-controller-leader ingress-class : nginx resourceName : <election-id>-<ingress-class> Please adapt accordingly if you override either parameter when launching the nginx-ingress-controller.","title":"Namespace Permissions"},{"location":"deploy/rbac/#bindings","text":"The ServiceAccount nginx-ingress-serviceaccount is bound to the Role nginx-ingress-role and the ClusterRole nginx-ingress-clusterrole . The serviceAccountName associated with the containers in the deployment must match the serviceAccount. The namespace references in the Deployment metadata, container arguments, and POD_NAMESPACE should be in the nginx-ingress namespace.","title":"Bindings"},{"location":"deploy/upgrade/","text":"Upgrading \u00b6 Important No matter the method you use for upgrading, if you use template overrides, make sure your templates are compatible with the new version of ingress-nginx . Without Helm \u00b6 To upgrade your ingress-nginx installation, it should be enough to change the version of the image in the controller Deployment. That is, if your deployment resource looks like (partial example): kind : Deployment metadata : name : nginx-ingress-controller namespace : ingress-nginx spec : replicas : 1 selector : ... template : metadata : ... spec : containers : - name : nginx-ingress-controller image : quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 args : ... simply change the 0.9.0 tag to the version you wish to upgrade to. The easiest way to do this is, e.g. (do note you may need to change the name parameter according to your installation): kubectl set image deployment/nginx-ingress-controller \\ nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0 For interactive editing, use kubectl edit deployment nginx-ingress-controller .
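After changing the image tag, a hedged way to confirm the rollout completed and to check the image actually running (the deployment name is assumed from the example above):

```console
$ kubectl -n ingress-nginx rollout status deployment/nginx-ingress-controller
$ kubectl -n ingress-nginx get deployment nginx-ingress-controller \
    -o jsonpath='{.spec.template.spec.containers[0].image}'
```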
With Helm \u00b6 If you installed ingress-nginx using the Helm command in the deployment docs (so its release name is ngx-ingress ), you should be able to upgrade using helm upgrade --reuse-values ngx-ingress stable/nginx-ingress","title":"Upgrade"},{"location":"deploy/upgrade/#upgrading","text":"Important No matter the method you use for upgrading, if you use template overrides, make sure your templates are compatible with the new version of ingress-nginx .","title":"Upgrading"},{"location":"deploy/upgrade/#without-helm","text":"To upgrade your ingress-nginx installation, it should be enough to change the version of the image in the controller Deployment. That is, if your deployment resource looks like (partial example): kind : Deployment metadata : name : nginx-ingress-controller namespace : ingress-nginx spec : replicas : 1 selector : ... template : metadata : ... spec : containers : - name : nginx-ingress-controller image : quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 args : ... simply change the 0.9.0 tag to the version you wish to upgrade to. The easiest way to do this is, e.g. (do note you may need to change the name parameter according to your installation): kubectl set image deployment/nginx-ingress-controller \\ nginx-ingress-controller=quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.28.0 For interactive editing, use kubectl edit deployment nginx-ingress-controller .","title":"Without Helm"},{"location":"deploy/upgrade/#with-helm","text":"If you installed ingress-nginx using the Helm command in the deployment docs (so its release name is ngx-ingress ), you should be able to upgrade using helm upgrade --reuse-values ngx-ingress stable/nginx-ingress","title":"With Helm"},{"location":"deploy/validating-webhook/","text":"Validating webhook (admission controller) \u00b6 Overview \u00b6 The Nginx ingress controller offers the option to validate ingresses before they enter the cluster, ensuring the controller will generate a valid configuration. This controller is called, when ValidatingAdmissionWebhook is enabled, by the Kubernetes API server each time a new ingress is to enter the cluster, and rejects objects for which the generated nginx configuration fails to be validated. This feature requires some further configuration of the cluster; it is therefore an optional feature. This section explains how to enable it for your cluster. Configure the webhook \u00b6 Generate the webhook certificate \u00b6 Self signed certificate \u00b6 The validating webhook must be served using TLS, so you need to generate a certificate. Note that the kube API server checks the hostname of the certificate: the common name of your certificate will need to match the service name. Example To run the validating webhook with a service named ingress-validation-webhook in the namespace ingress-nginx , run openssl req -x509 -newkey rsa:2048 -keyout key.pem -out certificate.pem -days 365 -nodes -subj \"/CN=ingress-validation-webhook.ingress-nginx.svc\" Using Kubernetes CA \u00b6 Kubernetes also provides primitives to sign a certificate request.
Here is an example of how to use it Example #!/bin/bash SERVICE_NAME = ingress-nginx NAMESPACE = ingress-nginx TEMP_DIRECTORY = $( mktemp -d ) echo \"creating certs in directory ${ TEMP_DIRECTORY } \" cat <<EOF >> ${TEMP_DIRECTORY}/csr.conf [req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment extendedKeyUsage = serverAuth subjectAltName = @alt_names [alt_names] DNS.1 = ${SERVICE_NAME} DNS.2 = ${SERVICE_NAME}.${NAMESPACE} DNS.3 = ${SERVICE_NAME}.${NAMESPACE}.svc EOF openssl genrsa -out ${ TEMP_DIRECTORY } /server-key.pem 2048 openssl req -new -key ${ TEMP_DIRECTORY } /server-key.pem \\ -subj \"/CN= ${ SERVICE_NAME } . ${ NAMESPACE } .svc\" \\ -out ${ TEMP_DIRECTORY } /server.csr \\ -config ${ TEMP_DIRECTORY } /csr.conf cat <<EOF | kubectl create -f - apiVersion: certificates.k8s.io/v1beta1 kind: CertificateSigningRequest metadata: name: ${SERVICE_NAME}.${NAMESPACE}.svc spec: request: $(base64 < ${TEMP_DIRECTORY}/server.csr | tr -d '\n') usages: - digital signature - key encipherment - server auth EOF kubectl certificate approve ${SERVICE_NAME}.${NAMESPACE}.svc SERVER_CERT = $( kubectl get csr ${SERVICE_NAME}.${NAMESPACE}.svc -o jsonpath = '{.status.certificate}' ) if [ -z \"${ SERVER_CERT }\" ]; then echo \"the signed certificate was not issued\" >&2 exit 1 fi echo ${ SERVER_CERT } | openssl base64 -d -A -out ${ TEMP_DIRECTORY } /server-cert.pem kubectl create secret generic ingress-nginx.svc \\ --from-file = key.pem = ${ TEMP_DIRECTORY } /server-key.pem \\ --from-file = cert.pem = ${ TEMP_DIRECTORY } /server-cert.pem \\ -n ${ NAMESPACE } Using helm \u00b6 To generate the certificate using helm, you can use the following snippet Example {{- $cn := printf \"%s.%s.svc\" ( include \"nginx-ingress.validatingWebhook.fullname\" . ) .Release.Namespace }} {{- $ca := genCA ( printf \"%s-ca\" ( include \"nginx-ingress.validatingWebhook.fullname\" . )) .Values.validatingWebhook.certificateValidity -}} {{- $cert := genSignedCert $cn nil nil .Values.validatingWebhook.certificateValidity $ca -}} Ingress controller flags \u00b6 To enable the feature in the ingress controller, you need to provide 3 flags to the command line: --validating-webhook , the address to start an admission controller on (example: :8080 ); --validating-webhook-certificate , the certificate the webhook uses for its TLS handling (example: /usr/local/certificates/validating-webhook.pem ); and --validating-webhook-key , the key the webhook uses for its TLS handling (example: /usr/local/certificates/validating-webhook-key.pem ). kube API server flags \u00b6 The validating webhook feature requires specific setup on the kube API server side. Depending on your Kubernetes version, the flag may or may not be enabled by default. To check that your kube API server runs with the required flags, please refer to the Kubernetes documentation.
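Putting the three flags together, a hedged sketch of the corresponding controller container arguments (the certificate paths reuse the example values above; the listen address and arg layout are assumptions):

```yaml
args:
- /nginx-ingress-controller
- --validating-webhook=:8080
- --validating-webhook-certificate=/usr/local/certificates/validating-webhook.pem
- --validating-webhook-key=/usr/local/certificates/validating-webhook-key.pem
```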
Additional kubernetes objects \u00b6 Once both the ingress controller and the kube API server are configured to serve the webhook, you can configure the webhook with the following objects: apiVersion : v1 kind : Service metadata : name : ingress-validation-webhook namespace : ingress-nginx spec : ports : - name : admission port : 443 protocol : TCP targetPort : 8080 selector : app : nginx-ingress component : controller --- apiVersion : admissionregistration.k8s.io/v1beta1 kind : ValidatingWebhookConfiguration metadata : name : check-ingress webhooks : - name : validate.nginx.ingress.kubernetes.io rules : - apiGroups : - networking.k8s.io apiVersions : - v1beta1 operations : - CREATE - UPDATE resources : - ingresses failurePolicy : Fail clientConfig : service : namespace : ingress-nginx name : ingress-validation-webhook path : /networking.k8s.io/v1beta1/ingress caBundle : <pem-encoded CA certificate bundle> ","title":"Validating Webhook (admission controller)"},{"location":"deploy/validating-webhook/#validating-webhook-admission-controller","text":"","title":"Validating webhook (admission controller)"},{"location":"deploy/validating-webhook/#overview","text":"The Nginx ingress controller offers the option to validate ingresses before they enter the cluster, ensuring the controller will generate a valid configuration. This controller is called, when ValidatingAdmissionWebhook is enabled, by the Kubernetes API server each time a new ingress is to enter the cluster, and rejects objects for which the generated nginx configuration fails to be validated. This feature requires some further configuration of the cluster; it is therefore an optional feature. This section explains how to enable it for your cluster.","title":"Overview"},{"location":"deploy/validating-webhook/#configure-the-webhook","text":"","title":"Configure the webhook"},{"location":"deploy/validating-webhook/#generate-the-webhook-certificate","text":"","title":"Generate the webhook certificate"},{"location":"deploy/validating-webhook/#self-signed-certificate","text":"The validating webhook must be served using TLS, so you need to generate a certificate. Note that the kube API server checks the hostname of the certificate: the common name of your certificate will need to match the service name. Example To run the validating webhook with a service named ingress-validation-webhook in the namespace ingress-nginx , run openssl req -x509 -newkey rsa:2048 -keyout key.pem -out certificate.pem -days 365 -nodes -subj \"/CN=ingress-validation-webhook.ingress-nginx.svc\"","title":"Self signed certificate"},{"location":"deploy/validating-webhook/#using-kubernetes-ca","text":"Kubernetes also provides primitives to sign a certificate request. Here is an example of how to use it Example #!/bin/bash SERVICE_NAME = ingress-nginx NAMESPACE = ingress-nginx TEMP_DIRECTORY = $( mktemp -d ) echo \"creating certs in directory ${ TEMP_DIRECTORY } \" cat <<EOF >> ${TEMP_DIRECTORY}/csr.conf [req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] [ v3_req ] basicConstraints = CA:FALSE keyUsage = nonRepudiation, digitalSignature, keyEncipherment extendedKeyUsage = serverAuth subjectAltName = @alt_names [alt_names] DNS.1 = ${SERVICE_NAME} DNS.2 = ${SERVICE_NAME}.${NAMESPACE} DNS.3 = ${SERVICE_NAME}.${NAMESPACE}.svc EOF openssl genrsa -out ${ TEMP_DIRECTORY } /server-key.pem 2048 openssl req -new -key ${ TEMP_DIRECTORY } /server-key.pem \\ -subj \"/CN= ${ SERVICE_NAME } .
${ NAMESPACE } .svc\" \\ -out ${ TEMP_DIRECTORY } /server.csr \\ -config ${ TEMP_DIRECTORY } /csr.conf cat <<EOF | kubectl create -f - apiVersion: certificates.k8s.io/v1beta1 kind: CertificateSigningRequest metadata: name: ${SERVICE_NAME}.${NAMESPACE}.svc spec: request: $(base64 < ${TEMP_DIRECTORY}/server.csr | tr -d '\n') usages: - digital signature - key encipherment - server auth EOF kubectl certificate approve ${SERVICE_NAME}.${NAMESPACE}.svc SERVER_CERT = $( kubectl get csr ${SERVICE_NAME}.${NAMESPACE}.svc -o jsonpath = '{.status.certificate}' ) if [ -z \"${ SERVER_CERT }\" ]; then echo \"the signed certificate was not issued\" >&2 exit 1 fi echo ${ SERVER_CERT } | openssl base64 -d -A -out ${ TEMP_DIRECTORY } /server-cert.pem kubectl create secret generic ingress-nginx.svc \\ --from-file = key.pem = ${ TEMP_DIRECTORY } /server-key.pem \\ --from-file = cert.pem = ${ TEMP_DIRECTORY } /server-cert.pem \\ -n ${ NAMESPACE }","title":"Using Kubernetes CA"},{"location":"deploy/validating-webhook/#using-helm","text":"To generate the certificate using helm, you can use the following snippet Example {{- $cn := printf \"%s.%s.svc\" ( include \"nginx-ingress.validatingWebhook.fullname\" . ) .Release.Namespace }} {{- $ca := genCA ( printf \"%s-ca\" ( include \"nginx-ingress.validatingWebhook.fullname\" . )) .Values.validatingWebhook.certificateValidity -}} {{- $cert := genSignedCert $cn nil nil .Values.validatingWebhook.certificateValidity $ca -}}","title":"Using helm"},{"location":"deploy/validating-webhook/#ingress-controller-flags","text":"To enable the feature in the ingress controller, you need to provide 3 flags to the command line: --validating-webhook , the address to start an admission controller on (example: :8080 ); --validating-webhook-certificate , the certificate the webhook uses for its TLS handling (example: /usr/local/certificates/validating-webhook.pem ); and --validating-webhook-key , the key the webhook uses for its TLS handling (example: /usr/local/certificates/validating-webhook-key.pem ).","title":"Ingress controller flags"},{"location":"deploy/validating-webhook/#kube-api-server-flags","text":"The validating webhook feature requires specific setup on the kube API server side. Depending on your Kubernetes version, the flag may or may not be enabled by default. To check that your kube API server runs with the required flags, please refer to the Kubernetes documentation.","title":"kube API server flags"},{"location":"deploy/validating-webhook/#additional-kubernetes-objects","text":"Once both the ingress controller and the kube API server are configured to serve the webhook, you can configure the webhook with the following objects: apiVersion : v1 kind : Service metadata : name : ingress-validation-webhook namespace : ingress-nginx spec : ports : - name : admission port : 443 protocol : TCP targetPort : 8080 selector : app : nginx-ingress component : controller --- apiVersion : admissionregistration.k8s.io/v1beta1 kind : ValidatingWebhookConfiguration metadata : name : check-ingress webhooks : - name : validate.nginx.ingress.kubernetes.io rules : - apiGroups : - networking.k8s.io apiVersions : - v1beta1 operations : - CREATE - UPDATE resources : - ingresses failurePolicy : Fail clientConfig : service : namespace : ingress-nginx name : ingress-validation-webhook path : /networking.k8s.io/v1beta1/ingress caBundle : <pem-encoded CA certificate bundle> ","title":"Additional kubernetes objects"},{"location":"enhancements/","text":"Kubernetes Enhancement Proposals (KEPs) \u00b6 A Kubernetes Enhancement Proposal (KEP) is a way to propose, communicate and coordinate on new efforts for the Kubernetes project. For this reason, the ingress-nginx project is adopting it. Quick start for the KEP process \u00b6 Follow the process outlined in the KEP template Do I have to use the KEP process? \u00b6 No... but we hope that you will. Over time having a rich set of KEPs in one place will make it easier for people to track what is going on in the community and find a structured historic record.
KEPs are only required when the changes are wide-ranging and impact most of the project. Why would I want to use the KEP process? \u00b6 Our aim with KEPs is to clearly communicate new efforts to the Kubernetes contributor community. As such, we want to build a well-curated set of clear proposals in a common format with useful metadata. Benefits to KEP users (in the limit): Exposure on a Kubernetes-blessed website that is findable via web search engines. Cross-indexing of KEPs so that users can find connections and the current status of any KEP. A clear process with approvers and reviewers for making decisions. This will lead to more structured decisions that stick, as there is a discoverable record around the decisions. We are inspired by IETF RFCs, Python PEPs, and Rust RFCs.","title":"Kubernetes Enhancement Proposals (KEPs)"},{"location":"enhancements/#kubernetes-enhancement-proposals-keps","text":"A Kubernetes Enhancement Proposal (KEP) is a way to propose, communicate and coordinate on new efforts for the Kubernetes project. For this reason, the ingress-nginx project is adopting it.","title":"Kubernetes Enhancement Proposals (KEPs)"},{"location":"enhancements/#quick-start-for-the-kep-process","text":"Follow the process outlined in the KEP template","title":"Quick start for the KEP process"},{"location":"enhancements/#do-i-have-to-use-the-kep-process","text":"No... but we hope that you will. Over time having a rich set of KEPs in one place will make it easier for people to track what is going on in the community and find a structured historic record. KEPs are only required when the changes are wide-ranging and impact most of the project.","title":"Do I have to use the KEP process?"},{"location":"enhancements/#why-would-i-want-to-use-the-kep-process","text":"Our aim with KEPs is to clearly communicate new efforts to the Kubernetes contributor community. As such, we want to build a well-curated set of clear proposals in a common format with useful metadata. Benefits to KEP users (in the limit): Exposure on a Kubernetes-blessed website that is findable via web search engines. Cross-indexing of KEPs so that users can find connections and the current status of any KEP. A clear process with approvers and reviewers for making decisions. This will lead to more structured decisions that stick, as there is a discoverable record around the decisions. We are inspired by IETF RFCs, Python PEPs, and Rust RFCs.","title":"Why would I want to use the KEP process?"},{"location":"enhancements/20190724-only-dynamic-ssl/","text":"Remove static SSL configuration mode \u00b6 Table of Contents \u00b6 Summary Motivation Goals Non-Goals Proposal Implementation Details/Notes/Constraints Drawbacks Alternatives Summary \u00b6 Since release 0.19.0 it is possible to configure SSL certificates without the need for NGINX reloads (thanks to Lua), and since release 0.24.0 the dynamic mode is enabled by default. Motivation \u00b6 The static configuration implies reloads, something that affects the majority of users. Goals \u00b6 Deprecation of the flag --enable-dynamic-certificates . Cleanup of the codebase. Non-Goals \u00b6 Features related to certificate authentication are not changed in any way. Proposal \u00b6 Remove static SSL configuration Implementation Details/Notes/Constraints \u00b6 Deprecate the flag Move the directives ssl_certificate and ssl_certificate_key from each server block to the http section. These settings are required to avoid NGINX errors in the logs.
Remove any action of the flag --enable-dynamic-certificates Drawbacks \u00b6 Alternatives \u00b6 Keep both implementations","title":"Remove static SSL configuration mode"},{"location":"enhancements/20190724-only-dynamic-ssl/#remove-static-ssl-configuration-mode","text":"","title":"Remove static SSL configuration mode"},{"location":"enhancements/20190724-only-dynamic-ssl/#table-of-contents","text":"Summary Motivation Goals Non-Goals Proposal Implementation Details/Notes/Constraints Drawbacks Alternatives","title":"Table of Contents"},{"location":"enhancements/20190724-only-dynamic-ssl/#summary","text":"Since release 0.19.0 it is possible to configure SSL certificates without the need for NGINX reloads (thanks to Lua), and since release 0.24.0 the dynamic mode is enabled by default.","title":"Summary"},{"location":"enhancements/20190724-only-dynamic-ssl/#motivation","text":"The static configuration implies reloads, something that affects the majority of users.","title":"Motivation"},{"location":"enhancements/20190724-only-dynamic-ssl/#goals","text":"Deprecation of the flag --enable-dynamic-certificates . Cleanup of the codebase.","title":"Goals"},{"location":"enhancements/20190724-only-dynamic-ssl/#non-goals","text":"Features related to certificate authentication are not changed in any way.","title":"Non-Goals"},{"location":"enhancements/20190724-only-dynamic-ssl/#proposal","text":"Remove static SSL configuration","title":"Proposal"},{"location":"enhancements/20190724-only-dynamic-ssl/#implementation-detailsnotesconstraints","text":"Deprecate the flag Move the directives ssl_certificate and ssl_certificate_key from each server block to the http section. These settings are required to avoid NGINX errors in the logs. Remove any action of the flag --enable-dynamic-certificates","title":"Implementation Details/Notes/Constraints"},{"location":"enhancements/20190724-only-dynamic-ssl/#drawbacks","text":"","title":"Drawbacks"},{"location":"enhancements/20190724-only-dynamic-ssl/#alternatives","text":"Keep both implementations","title":"Alternatives"},{"location":"enhancements/20190815-zone-aware-routing/","text":"Availability zone aware routing \u00b6 Table of Contents \u00b6 Summary Motivation Goals Non-Goals Proposal Implementation History Drawbacks [optional] Summary \u00b6 Teach ingress-nginx about the availability zones where endpoints are running. This way an ingress-nginx pod will do its best to proxy to a zone-local endpoint. Motivation \u00b6 When users run their services across multiple availability zones they usually pay for egress traffic between zones. Providers such as GCP and Amazon EC2 charge money for that. ingress-nginx, when picking an endpoint to route a request to, does not consider whether the endpoint is in a different zone or the same one. That means it's at least equally likely that it will pick an endpoint from another zone and proxy the request to it. In this situation the response from the endpoint to the ingress-nginx pod is considered inter-zone traffic and costs money. At the time of this writing GCP charges $0.01 per GB of inter-zone egress traffic according to https://cloud.google.com/compute/network-pricing. According to https://datapath.io/resources/blog/what-are-aws-data-transfer-costs-and-how-to-minimize-them/ Amazon also charges the same amount of money as GCP for cross-zone egress traffic. This can be a lot of money depending on one's traffic. By teaching ingress-nginx about zones we can eliminate or at least decrease this cost.
Arguably intra-zone network latency should also be better than cross-zone. Goals \u00b6 Given a regional cluster running ingress-nginx, ingress-nginx should do its best to pick a zone-local endpoint when proxying This should not impact the canary feature ingress-nginx should be able to operate successfully if there are no zonal endpoints Non-Goals \u00b6 This feature inherently assumes that endpoints are distributed across zones in a way that they can handle all the traffic from ingress-nginx pod(s) in that zone This feature will be relying on https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domainbetakubernetesiozone, it is not this KEP's goal to support other cases Proposal \u00b6 The idea here is to have the controller part of ingress-nginx (1) detect what zone its current pod is running in and (2) detect the zone for every endpoint it knows about. After that it will post that data as part of the endpoints to Lua land. The Lua balancer, when picking an endpoint, will then try to pick a zone-local endpoint first, and if there is no zone-local endpoint it will fall back to the current behaviour. This feature, at least in the beginning, should be optional, since it is going to make it harder to reason about the load balancing and not everyone might want that. How does the controller know what zone it runs in? We can have the pod spec pass the node name as an environment variable using the downward API. Then on start the controller can get node details from the API based on the node name. Once the node details are obtained, we can extract the zone from the failure-domain.beta.kubernetes.io/zone annotation. Then we can pass that value to Lua land through the Nginx configuration when loading the lua_ingress.lua module in the init_by_lua phase. How do we extract zones for endpoints? We can have the controller watch create and update events on nodes in the entire cluster and based on that keep a map of nodes to zones in memory. And when we generate the endpoints list, we can access the node name using .subsets.addresses[i].nodeName and based on that fetch the zone from the map in memory and store it as a field on the endpoint. This solution assumes the failure-domain.beta.kubernetes.io/zone annotation does not change until the end of a node's life. Otherwise we would have to watch update events on the nodes as well, and that'll add even more overhead. Alternatively, we can get the list of nodes only when there's no node in memory for a given node name. This is probably a better solution because then we would avoid watching for API changes on node resources. We can eagerly fetch all the nodes and build the node name to zone mapping on start. And from thereon sync it during endpoints building in the main event loop iff no entry exists for the node of an endpoint. This means an extra API call in case the cluster has expanded. How do we make sure we do our best to choose the zone-local endpoint? This will be done on the Lua side. For every backend we will initialize two balancer instances: (1) with all endpoints, and (2) with all endpoints corresponding to the current zone for the backend. Then, once we choose which backend needs to serve a given request, we will first try to use the zonal balancer for that backend. If the zonal balancer does not exist (i.e. there's no zonal endpoint) then we will use the general balancer. In case of zonal outages we assume that the readiness probe will fail and the controller will see no endpoints for the backend, and therefore we will use the general balancer. We can enable the feature using a configmap setting.
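As an aside to the proposal above, a hedged sketch of the downward-API wiring it mentions for exposing the node name to the controller (the environment variable name is an assumption):

```yaml
env:
- name: NODE_NAME
  valueFrom:
    fieldRef:
      fieldPath: spec.nodeName  # the name of the node this pod is scheduled on
```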
Enabling it via a configmap this way makes it easier to roll back in case of a problem. Implementation History \u00b6 initial version of KEP is shipped; proposal and implementation details are done Drawbacks [optional] \u00b6 More load on the Kubernetes API server.","title":"Availability zone aware routing"},{"location":"enhancements/20190815-zone-aware-routing/#availability-zone-aware-routing","text":"","title":"Availability zone aware routing"},{"location":"enhancements/20190815-zone-aware-routing/#table-of-contents","text":"Summary Motivation Goals Non-Goals Proposal Implementation History Drawbacks [optional]","title":"Table of Contents"},{"location":"enhancements/20190815-zone-aware-routing/#summary","text":"Teach ingress-nginx about the availability zones where endpoints are running. This way an ingress-nginx pod will do its best to proxy to a zone-local endpoint.","title":"Summary"},{"location":"enhancements/20190815-zone-aware-routing/#motivation","text":"When users run their services across multiple availability zones they usually pay for egress traffic between zones. Providers such as GCP and Amazon EC2 charge money for that. ingress-nginx, when picking an endpoint to route a request to, does not consider whether the endpoint is in a different zone or the same one. That means it's at least equally likely that it will pick an endpoint from another zone and proxy the request to it. In this situation the response from the endpoint to the ingress-nginx pod is considered inter-zone traffic and costs money. At the time of this writing GCP charges $0.01 per GB of inter-zone egress traffic according to https://cloud.google.com/compute/network-pricing. According to https://datapath.io/resources/blog/what-are-aws-data-transfer-costs-and-how-to-minimize-them/ Amazon also charges the same amount of money as GCP for cross-zone egress traffic. This can be a lot of money depending on one's traffic. By teaching ingress-nginx about zones we can eliminate or at least decrease this cost. Arguably intra-zone network latency should also be better than cross-zone.","title":"Motivation"},{"location":"enhancements/20190815-zone-aware-routing/#goals","text":"Given a regional cluster running ingress-nginx, ingress-nginx should do its best to pick a zone-local endpoint when proxying This should not impact the canary feature ingress-nginx should be able to operate successfully if there are no zonal endpoints","title":"Goals"},{"location":"enhancements/20190815-zone-aware-routing/#non-goals","text":"This feature inherently assumes that endpoints are distributed across zones in a way that they can handle all the traffic from ingress-nginx pod(s) in that zone This feature will be relying on https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domainbetakubernetesiozone, it is not this KEP's goal to support other cases","title":"Non-Goals"},{"location":"enhancements/20190815-zone-aware-routing/#proposal","text":"The idea here is to have the controller part of ingress-nginx (1) detect what zone its current pod is running in and (2) detect the zone for every endpoint it knows about. After that it will post that data as part of the endpoints to Lua land. The Lua balancer, when picking an endpoint, will then try to pick a zone-local endpoint first, and if there is no zone-local endpoint it will fall back to the current behaviour. This feature, at least in the beginning, should be optional, since it is going to make it harder to reason about the load balancing and not everyone might want that. How does the controller know what zone it runs in?
We can have the pod spec pass the node name as an environment variable using the downward API. Then on start the controller can get node details from the API based on the node name. Once the node details are obtained, we can extract the zone from the failure-domain.beta.kubernetes.io/zone annotation. Then we can pass that value to Lua land through the Nginx configuration when loading the lua_ingress.lua module in the init_by_lua phase. How do we extract zones for endpoints? We can have the controller watch create and update events on nodes in the entire cluster and based on that keep a map of nodes to zones in memory. And when we generate the endpoints list, we can access the node name using .subsets.addresses[i].nodeName and based on that fetch the zone from the map in memory and store it as a field on the endpoint. This solution assumes the failure-domain.beta.kubernetes.io/zone annotation does not change until the end of a node's life. Otherwise we would have to watch update events on the nodes as well, and that'll add even more overhead. Alternatively, we can get the list of nodes only when there's no node in memory for a given node name. This is probably a better solution because then we would avoid watching for API changes on node resources. We can eagerly fetch all the nodes and build the node name to zone mapping on start. And from thereon sync it during endpoints building in the main event loop iff no entry exists for the node of an endpoint. This means an extra API call in case the cluster has expanded. How do we make sure we do our best to choose the zone-local endpoint? This will be done on the Lua side. For every backend we will initialize two balancer instances: (1) with all endpoints, and (2) with all endpoints corresponding to the current zone for the backend. Then, once we choose which backend needs to serve a given request, we will first try to use the zonal balancer for that backend. If the zonal balancer does not exist (i.e. there's no zonal endpoint) then we will use the general balancer. In case of zonal outages we assume that the readiness probe will fail and the controller will see no endpoints for the backend, and therefore we will use the general balancer. We can enable the feature using a configmap setting. Enabling it via a configmap this way makes it easier to roll back in case of a problem.","title":"Proposal"},{"location":"enhancements/20190815-zone-aware-routing/#implementation-history","text":"initial version of KEP is shipped; proposal and implementation details are done","title":"Implementation History"},{"location":"enhancements/20190815-zone-aware-routing/#drawbacks-optional","text":"More load on the Kubernetes API server.","title":"Drawbacks [optional]"},{"location":"enhancements/YYYYMMDD-kep-template/","text":"Title \u00b6 This is the title of the KEP. Keep it simple and descriptive. A good title can help communicate what the KEP is and should be considered as part of any review. The title should be lowercased and spaces/punctuation should be replaced with - . To get started with this template: Make a copy of this template. Create a copy of this template and name it YYYYMMDD-my-title.md , where YYYYMMDD is the date the KEP was first drafted. Fill out the \"overview\" sections. This includes the Summary and Motivation sections. These should be easy if you've preflighted the idea of the KEP in an issue. Create a PR. Assign it to folks that are sponsoring this process. Create an issue When filing an enhancement tracking issue, please ensure to complete all fields in the template. Merge early.
Avoid getting hung up on specific details and instead aim to get the goal of the KEP merged quickly. The best way to do this is to just start with the \"Overview\" sections and fill out details incrementally in follow on PRs. View anything marked as a provisional as a working document and subject to change. Aim for single topic PRs to keep discussions focused. If you disagree with what is already in a document, open a new PR with suggested changes. The canonical place for the latest set of instructions (and the likely source of this file) is here . The Metadata section above is intended to support the creation of tooling around the KEP process. This will be a YAML section that is fenced as a code block. See the KEP process for details on each of these items. Table of Contents \u00b6 A table of contents is helpful for quickly jumping to sections of a KEP and for highlighting any additional information provided beyond the standard KEP template. Ensure the TOC is wrapped with
You need a TLS cert and a test HTTP service for this example.
Create an ingress.yaml file:

apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: nginx-test
spec:
  ...
        backend:
          # This assumes http-svc exists and routes to healthy endpoints
          serviceName: http-svc
          servicePort: 80
The following command instructs the controller to terminate traffic using the provided TLS cert, and forward un-encrypted HTTP traffic to the test HTTP service.
kubectl apply -f ingress.yaml
You can confirm that the Ingress works.
$ kubectl describe ing nginx-test
Name:             nginx-test
Namespace:        default
Address:          104.198.183.6
...
  x-forwarded-for=104.132.0.80, 35.186.221.137
  x-forwarded-proto=https
BODY:
...
The objective of this document is to explain how the NGINX Ingress controller works, in particular how the NGINX model is built and why we need one.
The goal of this Ingress controller is the assembly of a configuration file (nginx.conf). The main implication of this requirement is the need to reload NGINX after any change in the configuration file. It is important to note, though, that we don't reload Nginx on changes that impact only an upstream configuration (i.e., an Endpoints change when you deploy your app); we use lua-nginx-module to achieve this. Check below to learn more about how it's done.
Usually, a Kubernetes controller utilizes the synchronization loop pattern to check whether the desired state in the controller is updated or a change is required. For this purpose, we need to build a model using different objects from the cluster, in particular (in no special order) Ingresses, Services, Endpoints, Secrets, and Configmaps, to generate a point-in-time configuration file that reflects the state of the cluster.
To get these objects from the cluster, we use Kubernetes Informers, in particular FilteredSharedInformer. These informers allow reacting to changes using callbacks when an individual object is added, modified, or removed. Unfortunately, there is no way to know whether a particular change is going to affect the final configuration file. Therefore, on every change we have to rebuild a new model from scratch based on the state of the cluster and compare it to the current model. If the new model equals the current one, we avoid generating a new NGINX configuration and triggering a reload. Otherwise, we check whether the difference is only about Endpoints. If so, we send the new list of Endpoints to a Lua handler running inside Nginx using an HTTP POST request, and again avoid generating a new NGINX configuration and triggering a reload. If the difference between the running and the new model is about more than just Endpoints, we create a new NGINX configuration based on the new model, replace the current model, and trigger a reload.
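A minimal sketch of this decision flow, with simplified stand-in types: Model, sync, and the printed actions are illustrative, not the controller's actual code.

package main

import (
	"fmt"
	"reflect"
)

// Model is a simplified stand-in for the controller's internal model.
type Model struct {
	Servers   []string            // server/location structure rendered into nginx.conf
	Endpoints map[string][]string // backend name -> endpoint addresses
}

// sync is invoked from the informer callbacks with a freshly built model.
func sync(current, fresh *Model) *Model {
	if reflect.DeepEqual(current, fresh) {
		return current // nothing changed: keep the config, skip the reload
	}
	if reflect.DeepEqual(current.Servers, fresh.Servers) {
		// Only Endpoints differ: push them to the Lua handler and skip the reload.
		fmt.Println("POST new endpoints to the Lua handler")
		return fresh
	}
	// Structural change: regenerate nginx.conf and reload.
	fmt.Println("render a new nginx.conf and reload NGINX")
	return fresh
}

func main() {
	current := &Model{Servers: []string{"example.local"}, Endpoints: map[string][]string{"svc": {"10.0.0.1:80"}}}
	fresh := &Model{Servers: []string{"example.local"}, Endpoints: map[string][]string{"svc": {"10.0.0.1:80", "10.0.0.2:80"}}}
	sync(current, fresh) // only Endpoints differ here, so no reload is triggered
}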
One of the uses of the model is to avoid unnecessary reloads when there's no change in the state and to detect conflicts in definitions.
The final representation of the NGINX configuration is generated from a Go template using the new model as input for the variables required by the template.
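As an illustration of this step, here is a self-contained sketch using Go's text/template. The template and model here are drastically simplified stand-ins for the controller's real nginx.tmpl and model types.

package main

import (
	"os"
	"text/template"
)

type Backend struct {
	Name      string
	Endpoints []string
}

type Model struct {
	ServerName string
	Backends   []Backend
}

// A toy template: the real nginx.tmpl is far larger, but the mechanism
// (render the model into nginx configuration text) is the same.
const nginxTmpl = `http {
{{- range .Backends }}
  upstream {{ .Name }} {
  {{- range .Endpoints }}
    server {{ . }};
  {{- end }}
  }
{{- end }}
  server {
    server_name {{ .ServerName }};
  }
}
`

func main() {
	tmpl := template.Must(template.New("nginx").Parse(nginxTmpl))
	m := Model{
		ServerName: "example.local",
		Backends:   []Backend{{Name: "default-http-svc-80", Endpoints: []string{"10.0.0.1:8080", "10.0.0.2:8080"}}},
	}
	// Render the configuration to stdout; the real controller writes nginx.conf.
	if err := tmpl.Execute(os.Stdout, m); err != nil {
		panic(err)
	}
}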
Operations to build the model (a short sketch of the ordering rule follows this list):
Order Ingress rules by CreationTimestamp field, i.e., old rules first.
If the same path for the same host is defined in more than one Ingress, the oldest rule wins.
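A small sketch of this conflict-resolution rule; the rule type and field names are illustrative, not the controller's actual data structures.

package main

import (
	"fmt"
	"sort"
	"time"
)

type rule struct {
	host, path        string
	backend           string
	creationTimestamp time.Time
}

func main() {
	rules := []rule{
		{"foo.bar.com", "/", "new-svc", time.Date(2019, 8, 1, 0, 0, 0, 0, time.UTC)},
		{"foo.bar.com", "/", "old-svc", time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)},
	}

	// Order rules by CreationTimestamp, oldest first.
	sort.SliceStable(rules, func(i, j int) bool {
		return rules[i].creationTimestamp.Before(rules[j].creationTimestamp)
	})

	// For duplicate host+path definitions, the first (oldest) rule wins.
	winners := map[string]rule{}
	for _, r := range rules {
		key := r.host + r.path
		if _, taken := winners[key]; !taken {
			winners[key] = r
		}
	}
	fmt.Println(winners["foo.bar.com/"].backend) // prints "old-svc"
}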
In some cases, it is possible to avoid reloads, in particular when there is a change in the endpoints, i.e., a pod is started or replaced. It is out of the scope of this Ingress controller to remove reloads completely; this would require an incredible amount of work and at some point would make no sense. This can change only if NGINX changes the way new configurations are read, i.e., if new changes no longer required replacing the worker processes.
On every endpoint change the controller fetches endpoints from all the services it sees and generates corresponding Backend objects. It then sends these objects to a Lua handler running inside Nginx. The Lua code in turn stores those backends in a shared memory zone. Then, for every request, the Lua code running in the balancer_by_lua context determines which endpoints it should choose the upstream peer from and applies the configured load-balancing algorithm to choose the peer. Nginx takes care of the rest. This way we avoid reloading Nginx on endpoint changes. Note that this also covers annotation changes that affect only the upstream configuration in Nginx.
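A hedged sketch of the controller-side push described above. The JSON shape, the local port, and the /configuration/backends path are assumptions for illustration, not a documented API contract.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type Backend struct {
	Name      string   `json:"name"`
	Endpoints []string `json:"endpoints"`
}

// postBackends sends the current set of backends to the Lua handler
// running inside nginx, avoiding a full configuration reload.
func postBackends(backends []Backend) error {
	buf, err := json.Marshal(backends)
	if err != nil {
		return err
	}
	// Assumed local status port and path; the real values are
	// internal details of the controller.
	resp, err := http.Post("http://127.0.0.1:10246/configuration/backends",
		"application/json", bytes.NewReader(buf))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("unexpected status: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	err := postBackends([]Backend{{Name: "default-http-svc-80", Endpoints: []string{"10.0.0.1:8080"}}})
	if err != nil {
		fmt.Println("dynamic update failed:", err)
	}
}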
In relatively big clusters with frequently deployed apps this feature saves a significant number of Nginx reloads, which can otherwise affect response latency, load-balancing quality (after every reload Nginx resets the state of load balancing), and so on.
Because the ingress controller works using the synchronization loop pattern, it applies the configuration for all matching objects. In case some Ingress objects have a broken configuration, for example a syntax error in the nginx.ingress.kubernetes.io/configuration-snippet annotation, the generated configuration becomes invalid, Nginx does not reload, and hence no further Ingress changes are taken into account.
To prevent this situation from happening, the nginx ingress controller optionally exposes a validating admission webhook server to ensure the validity of incoming ingress objects. This webhook appends the incoming ingress objects to the list of ingresses, generates the configuration, and calls nginx to ensure the configuration has no syntax errors.
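A sketch of the syntax check at the heart of that webhook, assuming a candidate configuration has already been rendered; the validate helper is illustrative, not the webhook's actual code.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// validate writes a candidate configuration (which already includes the
// incoming Ingress) to a temp file and asks nginx to syntax-check it
// without applying anything.
func validate(candidateConfig []byte) error {
	tmp, err := os.CreateTemp("", "nginx-*.conf")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name())
	if _, err := tmp.Write(candidateConfig); err != nil {
		return err
	}
	tmp.Close()
	// nginx -t only tests the configuration; if this fails, the webhook
	// rejects the Ingress and the running config stays valid.
	return exec.Command("nginx", "-t", "-c", tmp.Name()).Run()
}

func main() {
	cfg := []byte("events {}\nhttp {}\n")
	if err := validate(cfg); err != nil {
		fmt.Println("rejecting ingress:", err)
		os.Exit(1)
	}
	fmt.Println("configuration is valid")
}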
Install krew, then run
kubectl krew install ingress-nginx
to install the plugin. Then run
kubectl ingress-nginx --help
to make sure the plugin is properly installed and to get a list of commands:
kubectl ingress-nginx --help
A kubectl plugin for inspecting your ingress-nginx deployments

Usage:
...
      --user string   The name of the kubeconfig user to use

Use "ingress-nginx [command] --help" for more information about a command.
If a new ingress-nginx version has just been released, the plugin may not yet have been updated inside the repository. In that case, you can install the latest version of the plugin by running:
(
  set -x; cd "$(mktemp -d)" &&
  curl -fsSLO "https://github.com/kubernetes/ingress-nginx/releases/download/nginx-0.24.0/{ingress-nginx.yaml,kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz}" &&
  kubectl krew install \
    --manifest=ingress-nginx.yaml --archive=kubectl-ingress_nginx-$(uname | tr '[:upper:]' '[:lower:]')-amd64.tar.gz
)
Replace 0.24.0 with the newly released version.
Every subcommand supports the basic kubectl configuration flags, for example --namespace, --context, and --client-key.

Subcommands that act on a particular ingress-nginx pod (backends, certs, conf, exec, general, logs, ssh) support the --deployment <deployment> and --pod <pod> flags to select either a pod from a deployment with the given name, or a pod with the given name. The --deployment flag defaults to nginx-ingress-controller.

Subcommands that inspect resources (such as lint) support the --all-namespaces flag, which causes them to inspect resources in every namespace.
Note that backends, general, certs, and conf require ingress-nginx version 0.23.0 or higher.
Run kubectl ingress-nginx backends to get a JSON array of the backends that an ingress-nginx controller currently knows about:
$ kubectl ingress-nginx backends -n ingress-nginx
[
  {
    "name": "default-apple-service-5678",
    ...
  }
]
Add the --list option to show only the backend names. Add the --backend <backend> option to show only the backend with the given name.
Use kubectl ingress-nginx certs --host <hostname> to dump the SSL cert/key information for a given host. Requires that --enable-dynamic-certificates is true (this is the default as of version 0.24.0).
WARNING: This command will dump sensitive private key information. Don't blindly share the output, and certainly don't log it anywhere.
$ kubectl ingress-nginx certs -n ingress-nginx --host testaddr.local
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
...
-----BEGIN RSA PRIVATE KEY-----
<REDACTED! DO NOT SHARE THIS!>
-----END RSA PRIVATE KEY-----
Use kubectl ingress-nginx conf to dump the generated nginx.conf file. Add the --host <hostname> option to view only the server block for that host:
kubectl ingress-nginx conf -n ingress-nginx --host testaddr.local

	server {
		server_name testaddr.local ;
		...
		set $location_path "/";
		...
kubectl ingress-nginx exec is exactly the same as kubectl exec, with the same command flags. It will automatically choose an ingress-nginx pod to run the command in.
$ kubectl ingress-nginx exec -i -n ingress-nginx -- ls /etc/nginx
fastcgi_params
geoip
lua
...
opentracing.json
owasp-modsecurity-crs
template
kubectl ingress-nginx general dumps miscellaneous controller state as a JSON object. Currently it just shows the number of controller pods known to a particular controller pod.
$ kubectl ingress-nginx general -n ingress-nginx
{
  "controllerPodsCount": 1
}
Shows the internal and external IPs/CNAMEs for an ingress-nginx service.
$ kubectl ingress-nginx info -n ingress-nginx
Service cluster IP address: 10.187.253.31
LoadBalancer IP|CNAME: 35.123.123.123
Use the --service <service> flag if your ingress-nginx LoadBalancer service is not named ingress-nginx.
kubectl ingress-nginx ingresses, or the shorter kubectl ingress-nginx ing, shows a more detailed view of the ingress definitions in a namespace. Compare:
$ kubectl get ingresses --all-namespaces
NAMESPACE   NAME               HOSTS                            ADDRESS     PORTS   AGE
default     example-ingress1   testaddr.local,testaddr2.local   localhost   80      5d
default     test-ingress-2     *                                localhost   80      5d
vs
$ kubectl ingress-nginx ingresses --all-namespaces
NAMESPACE   INGRESS NAME       HOST+PATH                        ADDRESSES   TLS   SERVICE         SERVICE PORT   ENDPOINTS
default     example-ingress1   testaddr.local/etameta           localhost   NO    pear-service    5678           5
default     example-ingress1   testaddr2.local/otherpath        localhost   NO    apple-service   5678           1
default     example-ingress1   testaddr2.local/otherotherpath   localhost   NO    pear-service    5678           5
default     test-ingress-2     *                                localhost   NO    echo-service    8080           2
kubectl ingress-nginx lint can check a namespace or entire cluster for potential configuration issues. This command is especially useful when upgrading between ingress-nginx versions.
$ kubectl ingress-nginx lint --all-namespaces --verbose
Checking ingresses...
✗ anamespace/this-nginx
  - Contains the removed session-cookie-hash annotation.
...
  - Uses removed config flag --enable-dynamic-certificates
      Lint added for version 0.24.0
      https://github.com/kubernetes/ingress-nginx/issues/3808
To show only the lints added for a particular ingress-nginx release, use the --from-version and --to-version flags:
$ kubectl ingress-nginx lint --all-namespaces --verbose --from-version 0.24.0 --to-version 0.24.0
Checking ingresses...
✗ anamespace/this-nginx
  - Contains the removed session-cookie-hash annotation.
...
  - Uses removed config flag --enable-dynamic-certificates
      Lint added for version 0.24.0
      https://github.com/kubernetes/ingress-nginx/issues/3808
kubectl ingress-nginx logs is almost the same as kubectl logs, with fewer flags. It will automatically choose an ingress-nginx pod to read logs from.
$ kubectl ingress-nginx logs -n ingress-nginx
-------------------------------------------------------------------------------
NGINX Ingress controller
  Release:    dev
...
I0405 16:53:46.183359       7 nginx.go:265] Starting NGINX Ingress controller
I0405 16:53:46.193913       7 event.go:209] Event(v1.ObjectReference{Kind:"ConfigMap", Namespace:"ingress-nginx", Name:"udp-services", UID:"82258915-563e-11e9-9c52-025000000001", APIVersion:"v1", ResourceVersion:"494", FieldPath:""}): type: 'Normal' reason: 'CREATE' ConfigMap ingress-nginx/udp-services
...
kubectl ingress-nginx ssh is exactly the same as kubectl ingress-nginx exec -it -- /bin/bash. Use it when you want to quickly be dropped into a shell inside a running ingress-nginx container.
$ kubectl ingress-nginx ssh -n ingress-nginx
www-data@nginx-ingress-controller-7cbf77c976-wx5pn:/etc/nginx$