From 733ae1aca0f351278f6e02a0384f35a25f5d9ef9 Mon Sep 17 00:00:00 2001 From: alexellis Date: Fri, 9 Feb 2024 16:12:40 +0000 Subject: [PATCH] Deployed 311c684 with MkDocs version: 1.4.2 --- .nojekyll | 0 404.html | 949 +++ CNAME | 1 + assets/images/favicon.png | Bin 0 -> 1870 bytes assets/javascripts/bundle.51d95adb.min.js | 29 + assets/javascripts/bundle.51d95adb.min.js.map | 8 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.e5c33ebb.min.js | 42 + .../workers/search.e5c33ebb.min.js.map | 8 + assets/stylesheets/main.558e4712.min.css | 1 + assets/stylesheets/main.558e4712.min.css.map | 1 + 
assets/stylesheets/palette.2505c338.min.css | 1 + .../stylesheets/palette.2505c338.min.css.map | 1 + images/cncf-landscape-left-logo.svg | 1 + images/conceptual.png | Bin 0 -> 91516 bytes images/inlets-hero.png | Bin 0 -> 63748 bytes images/logo-colour.svg | 82 + images/logo.svg | 63 + images/operator-pro-webpage-letsencrypt.png | Bin 0 -> 330882 bytes images/operator-pro-webpage.png | Bin 0 -> 250287 bytes images/tethering-k3s.jpeg | Bin 0 -> 104509 bytes images/uplink-control-plane-dashboard.png | Bin 0 -> 28856 bytes images/uplink-data-plane-dashboard.png | Bin 0 -> 36026 bytes images/uplink-import-dashboard.png | Bin 0 -> 7919 bytes images/uplink-prometheus-data-source.png | Bin 0 -> 24298 bytes index.html | 1389 ++++ reference/faq/index.html | 1275 ++++ reference/index.html | 1074 +++ reference/inlets-operator/index.html | 1255 +++ reference/inletsctl/index.html | 1692 +++++ search/search_index.json | 1 + sitemap.xml | 123 + sitemap.xml.gz | Bin 0 -> 424 bytes tutorial/automated-http-server/index.html | 1179 +++ tutorial/caddy-http-tunnel/index.html | 1173 +++ tutorial/community/index.html | 1041 +++ tutorial/dual-tunnels/index.html | 1073 +++ tutorial/istio-gateway/index.html | 1244 +++ tutorial/kubernetes-api-server/index.html | 1239 +++ tutorial/kubernetes-ingress/index.html | 1332 ++++ tutorial/manual-http-server/index.html | 1090 +++ tutorial/manual-tcp-server/index.html | 1070 +++ tutorial/monitoring-and-metrics/index.html | 1287 ++++ tutorial/postgresql-tcp-tunnel/index.html | 1067 +++ tutorial/ssh-tcp-tunnel/index.html | 1152 +++ uplink/become-a-provider/index.html | 1443 ++++ uplink/connect-to-tunnels/index.html | 1095 +++ uplink/create-tunnels/index.html | 1373 ++++ uplink/ingress-for-tunnels/index.html | 1376 ++++ uplink/manage-tunnels/index.html | 1149 +++ uplink/monitoring-tunnels/index.html | 1171 +++ uplink/overview/index.html | 1015 +++ 79 files changed, 37760 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 
100644 CNAME create mode 100644 assets/images/favicon.png create mode 100644 assets/javascripts/bundle.51d95adb.min.js create mode 100644 assets/javascripts/bundle.51d95adb.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.e5c33ebb.min.js create 
mode 100644 assets/javascripts/workers/search.e5c33ebb.min.js.map create mode 100644 assets/stylesheets/main.558e4712.min.css create mode 100644 assets/stylesheets/main.558e4712.min.css.map create mode 100644 assets/stylesheets/palette.2505c338.min.css create mode 100644 assets/stylesheets/palette.2505c338.min.css.map create mode 100644 images/cncf-landscape-left-logo.svg create mode 100644 images/conceptual.png create mode 100644 images/inlets-hero.png create mode 100644 images/logo-colour.svg create mode 100644 images/logo.svg create mode 100644 images/operator-pro-webpage-letsencrypt.png create mode 100644 images/operator-pro-webpage.png create mode 100644 images/tethering-k3s.jpeg create mode 100644 images/uplink-control-plane-dashboard.png create mode 100644 images/uplink-data-plane-dashboard.png create mode 100644 images/uplink-import-dashboard.png create mode 100644 images/uplink-prometheus-data-source.png create mode 100644 index.html create mode 100644 reference/faq/index.html create mode 100644 reference/index.html create mode 100644 reference/inlets-operator/index.html create mode 100644 reference/inletsctl/index.html create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 tutorial/automated-http-server/index.html create mode 100644 tutorial/caddy-http-tunnel/index.html create mode 100644 tutorial/community/index.html create mode 100644 tutorial/dual-tunnels/index.html create mode 100644 tutorial/istio-gateway/index.html create mode 100644 tutorial/kubernetes-api-server/index.html create mode 100644 tutorial/kubernetes-ingress/index.html create mode 100644 tutorial/manual-http-server/index.html create mode 100644 tutorial/manual-tcp-server/index.html create mode 100644 tutorial/monitoring-and-metrics/index.html create mode 100644 tutorial/postgresql-tcp-tunnel/index.html create mode 100644 tutorial/ssh-tcp-tunnel/index.html create mode 100644 uplink/become-a-provider/index.html 
create mode 100644 uplink/connect-to-tunnels/index.html create mode 100644 uplink/create-tunnels/index.html create mode 100644 uplink/ingress-for-tunnels/index.html create mode 100644 uplink/manage-tunnels/index.html create mode 100644 uplink/monitoring-tunnels/index.html create mode 100644 uplink/overview/index.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..28bc54b --- /dev/null +++ b/404.html @@ -0,0 +1,949 @@ + + + + + + + + + + + + + + + + + + + + + + Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +

404 - Not found

+ +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..9ebe493 --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +docs.inlets.dev diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..1cf13b9f9d978896599290a74f77d5dbe7d1655c GIT binary patch literal 1870 zcmV-U2eJ5xP)Gc)JR9QMau)O=X#!i9;T z37kk-upj^(fsR36MHs_+1RCI)NNu9}lD0S{B^g8PN?Ww(5|~L#Ng*g{WsqleV}|#l zz8@ri&cTzw_h33bHI+12+kK6WN$h#n5cD8OQt`5kw6p~9H3()bUQ8OS4Q4HTQ=1Ol z_JAocz`fLbT2^{`8n~UAo=#AUOf=SOq4pYkt;XbC&f#7lb$*7=$na!mWCQ`dBQsO0 zLFBSPj*N?#u5&pf2t4XjEGH|=pPQ8xh7tpx;US5Cx_Ju;!O`ya-yF`)b%TEt5>eP1ZX~}sjjA%FJF?h7cX8=b!DZl<6%Cv z*G0uvvU+vmnpLZ2paivG-(cd*y3$hCIcsZcYOGh{$&)A6*XX&kXZd3G8m)G$Zz-LV z^GF3VAW^Mdv!)4OM8EgqRiz~*Cji;uzl2uC9^=8I84vNp;ltJ|q-*uQwGp2ma6cY7 z;`%`!9UXO@fr&Ebapfs34OmS9^u6$)bJxrucutf>`dKPKT%%*d3XlFVKunp9 zasduxjrjs>f8V=D|J=XNZp;_Zy^WgQ$9WDjgY=z@stwiEBm9u5*|34&1Na8BMjjgf3+SHcr`5~>oz1Y?SW^=K z^bTyO6>Gar#P_W2gEMwq)ot3; zREHn~U&Dp0l6YT0&k-wLwYjb?5zGK`W6S2v+K>AM(95m2C20L|3m~rN8dprPr@t)5lsk9Hu*W z?pS990s;Ez=+Rj{x7p``4>+c0G5^pYnB1^!TL=(?HLHZ+HicG{~4F1d^5Awl_2!1jICM-!9eoLhbbT^;yHcefyTAaqRcY zmuctDopPT!%k+}x%lZRKnzykr2}}XfG_ne?nRQO~?%hkzo;@RN{P6o`&mMUWBYMTe z6i8ChtjX&gXl`nvrU>jah)2iNM%JdjqoaeaU%yVn!^70x-flljp6Q5tK}5}&X8&&G zX3fpb3E(!rH=zVI_9Gjl45w@{(ITqngWFe7@9{mX;tO25Z_8 zQHEpI+FkTU#4xu>RkN>b3Tnc3UpWzPXWm#o55GKF09j^Mh~)K7{QqbO_~(@CVq! 
zS<8954|P8mXN2MRs86xZ&Q4EfM@JB94b=(YGuk)s&^jiSF=t3*oNK3`rD{H`yQ?d; ztE=laAUoZx5?RC8*WKOj`%LXEkgDd>&^Q4M^z`%u0rg-It=hLCVsq!Z%^6eB-OvOT zFZ28TN&cRmgU}Elrnk43)!>Z1FCPL2K$7}gwzIc48NX}#!A1BpJP?#v5wkNprhV** z?Cpalt1oH&{r!o3eSKc&ap)iz2BTn_VV`4>9M^b3;(YY}4>#ML6{~(4mH+?%07*qo IM6N<$f(jP3KmY&$ literal 0 HcmV?d00001 diff --git a/assets/javascripts/bundle.51d95adb.min.js b/assets/javascripts/bundle.51d95adb.min.js new file mode 100644 index 0000000..b20ec68 --- /dev/null +++ b/assets/javascripts/bundle.51d95adb.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Hi=Object.create;var xr=Object.defineProperty;var Pi=Object.getOwnPropertyDescriptor;var $i=Object.getOwnPropertyNames,kt=Object.getOwnPropertySymbols,Ii=Object.getPrototypeOf,Er=Object.prototype.hasOwnProperty,an=Object.prototype.propertyIsEnumerable;var on=(e,t,r)=>t in e?xr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,P=(e,t)=>{for(var r in t||(t={}))Er.call(t,r)&&on(e,r,t[r]);if(kt)for(var r of kt(t))an.call(t,r)&&on(e,r,t[r]);return e};var sn=(e,t)=>{var r={};for(var n in e)Er.call(e,n)&&t.indexOf(n)<0&&(r[n]=e[n]);if(e!=null&&kt)for(var n of kt(e))t.indexOf(n)<0&&an.call(e,n)&&(r[n]=e[n]);return r};var Ht=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Fi=(e,t,r,n)=>{if(t&&typeof t=="object"||typeof t=="function")for(let o of $i(t))!Er.call(e,o)&&o!==r&&xr(e,o,{get:()=>t[o],enumerable:!(n=Pi(t,o))||n.enumerable});return e};var yt=(e,t,r)=>(r=e!=null?Hi(Ii(e)):{},Fi(t||!e||!e.__esModule?xr(r,"default",{value:e,enumerable:!0}):r,e));var fn=Ht((wr,cn)=>{(function(e,t){typeof wr=="object"&&typeof cn!="undefined"?t():typeof define=="function"&&define.amd?define(t):t()})(wr,function(){"use strict";function e(r){var n=!0,o=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(T){return!!(T&&T!==document&&T.nodeName!=="HTML"&&T.nodeName!=="BODY"&&"classList"in T&&"contains"in T.classList)}function 
f(T){var Ke=T.type,We=T.tagName;return!!(We==="INPUT"&&a[Ke]&&!T.readOnly||We==="TEXTAREA"&&!T.readOnly||T.isContentEditable)}function c(T){T.classList.contains("focus-visible")||(T.classList.add("focus-visible"),T.setAttribute("data-focus-visible-added",""))}function u(T){T.hasAttribute("data-focus-visible-added")&&(T.classList.remove("focus-visible"),T.removeAttribute("data-focus-visible-added"))}function p(T){T.metaKey||T.altKey||T.ctrlKey||(s(r.activeElement)&&c(r.activeElement),n=!0)}function m(T){n=!1}function d(T){s(T.target)&&(n||f(T.target))&&c(T.target)}function h(T){s(T.target)&&(T.target.classList.contains("focus-visible")||T.target.hasAttribute("data-focus-visible-added"))&&(o=!0,window.clearTimeout(i),i=window.setTimeout(function(){o=!1},100),u(T.target))}function v(T){document.visibilityState==="hidden"&&(o&&(n=!0),B())}function B(){document.addEventListener("mousemove",z),document.addEventListener("mousedown",z),document.addEventListener("mouseup",z),document.addEventListener("pointermove",z),document.addEventListener("pointerdown",z),document.addEventListener("pointerup",z),document.addEventListener("touchmove",z),document.addEventListener("touchstart",z),document.addEventListener("touchend",z)}function re(){document.removeEventListener("mousemove",z),document.removeEventListener("mousedown",z),document.removeEventListener("mouseup",z),document.removeEventListener("pointermove",z),document.removeEventListener("pointerdown",z),document.removeEventListener("pointerup",z),document.removeEventListener("touchmove",z),document.removeEventListener("touchstart",z),document.removeEventListener("touchend",z)}function 
z(T){T.target.nodeName&&T.target.nodeName.toLowerCase()==="html"||(n=!1,re())}document.addEventListener("keydown",p,!0),document.addEventListener("mousedown",m,!0),document.addEventListener("pointerdown",m,!0),document.addEventListener("touchstart",m,!0),document.addEventListener("visibilitychange",v,!0),B(),r.addEventListener("focus",d,!0),r.addEventListener("blur",h,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var un=Ht(Sr=>{(function(e){var t=function(){try{return!!Symbol.iterator}catch(c){return!1}},r=t(),n=function(c){var u={next:function(){var p=c.shift();return{done:p===void 0,value:p}}};return r&&(u[Symbol.iterator]=function(){return u}),u},o=function(c){return encodeURIComponent(c).replace(/%20/g,"+")},i=function(c){return decodeURIComponent(String(c).replace(/\+/g," "))},a=function(){var c=function(p){Object.defineProperty(this,"_entries",{writable:!0,value:{}});var m=typeof p;if(m!=="undefined")if(m==="string")p!==""&&this._fromString(p);else if(p instanceof c){var d=this;p.forEach(function(re,z){d.append(z,re)})}else if(p!==null&&m==="object")if(Object.prototype.toString.call(p)==="[object Array]")for(var h=0;hd[0]?1:0}),c._entries&&(c._entries={});for(var p=0;p1?i(d[1]):"")}})})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr);(function(e){var t=function(){try{var o=new e.URL("b","http://a");return o.pathname="c 
d",o.href==="http://a/c%20d"&&o.searchParams}catch(i){return!1}},r=function(){var o=e.URL,i=function(f,c){typeof f!="string"&&(f=String(f)),c&&typeof c!="string"&&(c=String(c));var u=document,p;if(c&&(e.location===void 0||c!==e.location.href)){c=c.toLowerCase(),u=document.implementation.createHTMLDocument(""),p=u.createElement("base"),p.href=c,u.head.appendChild(p);try{if(p.href.indexOf(c)!==0)throw new Error(p.href)}catch(T){throw new Error("URL unable to set base "+c+" due to "+T)}}var m=u.createElement("a");m.href=f,p&&(u.body.appendChild(m),m.href=m.href);var d=u.createElement("input");if(d.type="url",d.value=f,m.protocol===":"||!/:/.test(m.href)||!d.checkValidity()&&!c)throw new TypeError("Invalid URL");Object.defineProperty(this,"_anchorElement",{value:m});var h=new e.URLSearchParams(this.search),v=!0,B=!0,re=this;["append","delete","set"].forEach(function(T){var Ke=h[T];h[T]=function(){Ke.apply(h,arguments),v&&(B=!1,re.search=h.toString(),B=!0)}}),Object.defineProperty(this,"searchParams",{value:h,enumerable:!0});var z=void 0;Object.defineProperty(this,"_updateSearchParams",{enumerable:!1,configurable:!1,writable:!1,value:function(){this.search!==z&&(z=this.search,B&&(v=!1,this.searchParams._fromString(this.search),v=!0))}})},a=i.prototype,s=function(f){Object.defineProperty(a,f,{get:function(){return this._anchorElement[f]},set:function(c){this._anchorElement[f]=c},enumerable:!0})};["hash","host","hostname","port","protocol"].forEach(function(f){s(f)}),Object.defineProperty(a,"search",{get:function(){return this._anchorElement.search},set:function(f){this._anchorElement.search=f,this._updateSearchParams()},enumerable:!0}),Object.defineProperties(a,{toString:{get:function(){var f=this;return function(){return f.href}}},href:{get:function(){return this._anchorElement.href.replace(/\?$/,"")},set:function(f){this._anchorElement.href=f,this._updateSearchParams()},enumerable:!0},pathname:{get:function(){return 
this._anchorElement.pathname.replace(/(^\/?)/,"/")},set:function(f){this._anchorElement.pathname=f},enumerable:!0},origin:{get:function(){var f={"http:":80,"https:":443,"ftp:":21}[this._anchorElement.protocol],c=this._anchorElement.port!=f&&this._anchorElement.port!=="";return this._anchorElement.protocol+"//"+this._anchorElement.hostname+(c?":"+this._anchorElement.port:"")},enumerable:!0},password:{get:function(){return""},set:function(f){},enumerable:!0},username:{get:function(){return""},set:function(f){},enumerable:!0}}),i.createObjectURL=function(f){return o.createObjectURL.apply(o,arguments)},i.revokeObjectURL=function(f){return o.revokeObjectURL.apply(o,arguments)},e.URL=i};if(t()||r(),e.location!==void 0&&!("origin"in e.location)){var n=function(){return e.location.protocol+"//"+e.location.hostname+(e.location.port?":"+e.location.port:"")};try{Object.defineProperty(e.location,"origin",{get:n,enumerable:!0})}catch(o){setInterval(function(){e.location.origin=n()},100)}}})(typeof global!="undefined"?global:typeof window!="undefined"?window:typeof self!="undefined"?self:Sr)});var Qr=Ht((Lt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Lt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Lt=="object"?Lt.ClipboardJS=r():t.ClipboardJS=r()})(Lt,function(){return function(){var e={686:function(n,o,i){"use strict";i.d(o,{default:function(){return ki}});var a=i(279),s=i.n(a),f=i(370),c=i.n(f),u=i(817),p=i.n(u);function m(j){try{return document.execCommand(j)}catch(O){return!1}}var d=function(O){var w=p()(O);return m("cut"),w},h=d;function v(j){var O=document.documentElement.getAttribute("dir")==="rtl",w=document.createElement("textarea");w.style.fontSize="12pt",w.style.border="0",w.style.padding="0",w.style.margin="0",w.style.position="absolute",w.style[O?"right":"left"]="-9999px";var k=window.pageYOffset||document.documentElement.scrollTop;return w.style.top="".concat(k,"px"),w.setAttribute("readonly",""),w.value=j,w}var B=function(O,w){var k=v(O);w.container.appendChild(k);var F=p()(k);return m("copy"),k.remove(),F},re=function(O){var w=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},k="";return typeof O=="string"?k=B(O,w):O instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(O==null?void 0:O.type)?k=B(O.value,w):(k=p()(O),m("copy")),k},z=re;function T(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?T=function(w){return typeof w}:T=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},T(j)}var Ke=function(){var O=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},w=O.action,k=w===void 0?"copy":w,F=O.container,q=O.target,Le=O.text;if(k!=="copy"&&k!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(q!==void 0)if(q&&T(q)==="object"&&q.nodeType===1){if(k==="copy"&&q.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(k==="cut"&&(q.hasAttribute("readonly")||q.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(Le)return z(Le,{container:F});if(q)return k==="cut"?h(q):z(q,{container:F})},We=Ke;function Ie(j){return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(w){return typeof w}:Ie=function(w){return w&&typeof Symbol=="function"&&w.constructor===Symbol&&w!==Symbol.prototype?"symbol":typeof w},Ie(j)}function Ti(j,O){if(!(j instanceof O))throw new TypeError("Cannot call a class as a function")}function nn(j,O){for(var w=0;w0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof F.action=="function"?F.action:this.defaultAction,this.target=typeof F.target=="function"?F.target:this.defaultTarget,this.text=typeof F.text=="function"?F.text:this.defaultText,this.container=Ie(F.container)==="object"?F.container:document.body}},{key:"listenClick",value:function(F){var q=this;this.listener=c()(F,"click",function(Le){return q.onClick(Le)})}},{key:"onClick",value:function(F){var q=F.delegateTarget||F.currentTarget,Le=this.action(q)||"copy",Rt=We({action:Le,container:this.container,target:this.target(q),text:this.text(q)});this.emit(Rt?"success":"error",{action:Le,text:Rt,trigger:q,clearSelection:function(){q&&q.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(F){return yr("action",F)}},{key:"defaultTarget",value:function(F){var q=yr("target",F);if(q)return document.querySelector(q)}},{key:"defaultText",value:function(F){return yr("text",F)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(F){var q=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return z(F,q)}},{key:"cut",value:function(F){return 
h(F)}},{key:"isSupported",value:function(){var F=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],q=typeof F=="string"?[F]:F,Le=!!document.queryCommandSupported;return q.forEach(function(Rt){Le=Le&&!!document.queryCommandSupported(Rt)}),Le}}]),w}(s()),ki=Ri},828:function(n){var o=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,f){for(;s&&s.nodeType!==o;){if(typeof s.matches=="function"&&s.matches(f))return s;s=s.parentNode}}n.exports=a},438:function(n,o,i){var a=i(828);function s(u,p,m,d,h){var v=c.apply(this,arguments);return u.addEventListener(m,v,h),{destroy:function(){u.removeEventListener(m,v,h)}}}function f(u,p,m,d,h){return typeof u.addEventListener=="function"?s.apply(null,arguments):typeof m=="function"?s.bind(null,document).apply(null,arguments):(typeof u=="string"&&(u=document.querySelectorAll(u)),Array.prototype.map.call(u,function(v){return s(v,p,m,d,h)}))}function c(u,p,m,d){return function(h){h.delegateTarget=a(h.target,p),h.delegateTarget&&d.call(u,h)}}n.exports=f},879:function(n,o){o.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},o.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||o.node(i[0]))},o.string=function(i){return typeof i=="string"||i instanceof String},o.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(n,o,i){var a=i(879),s=i(438);function f(m,d,h){if(!m&&!d&&!h)throw new Error("Missing required arguments");if(!a.string(d))throw new TypeError("Second argument must be a String");if(!a.fn(h))throw new TypeError("Third argument must be a Function");if(a.node(m))return c(m,d,h);if(a.nodeList(m))return u(m,d,h);if(a.string(m))return p(m,d,h);throw new 
TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(m,d,h){return m.addEventListener(d,h),{destroy:function(){m.removeEventListener(d,h)}}}function u(m,d,h){return Array.prototype.forEach.call(m,function(v){v.addEventListener(d,h)}),{destroy:function(){Array.prototype.forEach.call(m,function(v){v.removeEventListener(d,h)})}}}function p(m,d,h){return s(document.body,m,d,h)}n.exports=f},817:function(n){function o(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var f=window.getSelection(),c=document.createRange();c.selectNodeContents(i),f.removeAllRanges(),f.addRange(c),a=f.toString()}return a}n.exports=o},279:function(n){function o(){}o.prototype={on:function(i,a,s){var f=this.e||(this.e={});return(f[i]||(f[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var f=this;function c(){f.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),f=0,c=s.length;for(f;f{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var is=/["'&<>]/;Jo.exports=as;function as(e){var t=""+e,r=is.exec(t);if(!r)return t;var n,o="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[n++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function W(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var n=r.call(e),o,i=[],a;try{for(;(t===void 0||t-- >0)&&!(o=n.next()).done;)i.push(o.value)}catch(s){a={error:s}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return i}function D(e,t,r){if(r||arguments.length===2)for(var n=0,o=t.length,i;n1||s(m,d)})})}function s(m,d){try{f(n[m](d))}catch(h){p(i[0][3],h)}}function f(m){m.value instanceof Xe?Promise.resolve(m.value.v).then(c,u):p(i[0][2],m)}function c(m){s("next",m)}function u(m){s("throw",m)}function p(m,d){m(d),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mn(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof xe=="function"?xe(e):e[Symbol.iterator](),r={},n("next"),n("throw"),n("return"),r[Symbol.asyncIterator]=function(){return this},r);function n(i){r[i]=e[i]&&function(a){return new Promise(function(s,f){a=e[i](a),o(s,f,a.done,a.value)})}}function o(i,a,s,f){Promise.resolve(f).then(function(c){i({value:c,done:s})},a)}}function A(e){return typeof e=="function"}function at(e){var t=function(n){Error.call(n),n.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var $t=at(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(n,o){return o+1+") "+n.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function De(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,n,o,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=xe(a),f=s.next();!f.done;f=s.next()){var c=f.value;c.remove(this)}}catch(v){t={error:v}}finally{try{f&&!f.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var u=this.initialTeardown;if(A(u))try{u()}catch(v){i=v instanceof $t?v.errors:[v]}var p=this._finalizers;if(p){this._finalizers=null;try{for(var m=xe(p),d=m.next();!d.done;d=m.next()){var h=d.value;try{dn(h)}catch(v){i=i!=null?i:[],v instanceof $t?i=D(D([],W(i)),W(v.errors)):i.push(v)}}}catch(v){n={error:v}}finally{try{d&&!d.done&&(o=m.return)&&o.call(m)}finally{if(n)throw n.error}}}if(i)throw new $t(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)dn(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&De(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&De(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Or=Fe.EMPTY;function It(e){return e instanceof Fe||e&&"closed"in e&&A(e.remove)&&A(e.add)&&A(e.unsubscribe)}function dn(e){A(e)?e():e.unsubscribe()}var Ae={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var st={setTimeout:function(e,t){for(var r=[],n=2;n0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var n=this,o=this,i=o.hasError,a=o.isStopped,s=o.observers;return i||a?Or:(this.currentObservers=null,s.push(r),new Fe(function(){n.currentObservers=null,De(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var n=this,o=n.hasError,i=n.thrownError,a=n.isStopped;o?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new U;return r.source=this,r},t.create=function(r,n){return new wn(r,n)},t}(U);var wn=function(e){ne(t,e);function t(r,n){var o=e.call(this)||this;return o.destination=r,o.source=n,o}return t.prototype.next=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.next)===null||o===void 0||o.call(n,r)},t.prototype.error=function(r){var n,o;(o=(n=this.destination)===null||n===void 0?void 0:n.error)===null||o===void 0||o.call(n,r)},t.prototype.complete=function(){var r,n;(n=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||n===void 0||n.call(r)},t.prototype._subscribe=function(r){var n,o;return(o=(n=this.source)===null||n===void 0?void 0:n.subscribe(r))!==null&&o!==void 0?o:Or},t}(E);var Et={now:function(){return(Et.delegate||Date).now()},delegate:void 0};var wt=function(e){ne(t,e);function t(r,n,o){r===void 0&&(r=1/0),n===void 0&&(n=1/0),o===void 0&&(o=Et);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=n,i._timestampProvider=o,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=n===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,n),i}return t.prototype.next=function(r){var 
n=this,o=n.isStopped,i=n._buffer,a=n._infiniteTimeWindow,s=n._timestampProvider,f=n._windowTime;o||(i.push(r),!a&&i.push(s.now()+f)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var n=this._innerSubscribe(r),o=this,i=o._infiniteTimeWindow,a=o._buffer,s=a.slice(),f=0;f0?e.prototype.requestAsyncId.call(this,r,n,o):(r.actions.push(this),r._scheduled||(r._scheduled=ut.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,n,o){var i;if(o===void 0&&(o=0),o!=null?o>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,n,o);var a=r.actions;n!=null&&((i=a[a.length-1])===null||i===void 0?void 0:i.id)!==n&&(ut.cancelAnimationFrame(n),r._scheduled=void 0)},t}(Ut);var On=function(e){ne(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var n=this._scheduled;this._scheduled=void 0;var o=this.actions,i;r=r||o.shift();do if(i=r.execute(r.state,r.delay))break;while((r=o[0])&&r.id===n&&o.shift());if(this._active=!1,i){for(;(r=o[0])&&r.id===n&&o.shift();)r.unsubscribe();throw i}},t}(Wt);var we=new On(Tn);var R=new U(function(e){return e.complete()});function Dt(e){return e&&A(e.schedule)}function kr(e){return e[e.length-1]}function Qe(e){return A(kr(e))?e.pop():void 0}function Se(e){return Dt(kr(e))?e.pop():void 0}function Vt(e,t){return typeof kr(e)=="number"?e.pop():t}var pt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function zt(e){return A(e==null?void 0:e.then)}function Nt(e){return A(e[ft])}function qt(e){return Symbol.asyncIterator&&A(e==null?void 0:e[Symbol.asyncIterator])}function Kt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Ki(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var Qt=Ki();function Yt(e){return A(e==null?void 0:e[Qt])}function Gt(e){return ln(this,arguments,function(){var r,n,o,i;return Pt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,Xe(r.read())];case 3:return n=a.sent(),o=n.value,i=n.done,i?[4,Xe(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,Xe(o)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function Bt(e){return A(e==null?void 0:e.getReader)}function $(e){if(e instanceof U)return e;if(e!=null){if(Nt(e))return Qi(e);if(pt(e))return Yi(e);if(zt(e))return Gi(e);if(qt(e))return _n(e);if(Yt(e))return Bi(e);if(Bt(e))return Ji(e)}throw Kt(e)}function Qi(e){return new U(function(t){var r=e[ft]();if(A(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Yi(e){return new U(function(t){for(var r=0;r=2;return function(n){return n.pipe(e?_(function(o,i){return e(o,i,n)}):me,Oe(1),r?He(t):zn(function(){return new Xt}))}}function Nn(){for(var e=[],t=0;t=2,!0))}function fe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new E}:t,n=e.resetOnError,o=n===void 0?!0:n,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,f=s===void 0?!0:s;return function(c){var u,p,m,d=0,h=!1,v=!1,B=function(){p==null||p.unsubscribe(),p=void 0},re=function(){B(),u=m=void 0,h=v=!1},z=function(){var T=u;re(),T==null||T.unsubscribe()};return g(function(T,Ke){d++,!v&&!h&&B();var We=m=m!=null?m:r();Ke.add(function(){d--,d===0&&!v&&!h&&(p=jr(z,f))}),We.subscribe(Ke),!u&&d>0&&(u=new et({next:function(Ie){return 
We.next(Ie)},error:function(Ie){v=!0,B(),p=jr(re,o,Ie),We.error(Ie)},complete:function(){h=!0,B(),p=jr(re,a),We.complete()}}),$(T).subscribe(u))})(c)}}function jr(e,t){for(var r=[],n=2;ne.next(document)),e}function K(e,t=document){return Array.from(t.querySelectorAll(e))}function V(e,t=document){let r=se(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function se(e,t=document){return t.querySelector(e)||void 0}function _e(){return document.activeElement instanceof HTMLElement&&document.activeElement||void 0}function tr(e){return L(b(document.body,"focusin"),b(document.body,"focusout")).pipe(ke(1),l(()=>{let t=_e();return typeof t!="undefined"?e.contains(t):!1}),N(e===_e()),Y())}function Be(e){return{x:e.offsetLeft,y:e.offsetTop}}function Yn(e){return L(b(window,"load"),b(window,"resize")).pipe(Ce(0,we),l(()=>Be(e)),N(Be(e)))}function rr(e){return{x:e.scrollLeft,y:e.scrollTop}}function dt(e){return L(b(e,"scroll"),b(window,"resize")).pipe(Ce(0,we),l(()=>rr(e)),N(rr(e)))}var Bn=function(){if(typeof Map!="undefined")return Map;function e(t,r){var n=-1;return t.some(function(o,i){return o[0]===r?(n=i,!0):!1}),n}return function(){function t(){this.__entries__=[]}return Object.defineProperty(t.prototype,"size",{get:function(){return this.__entries__.length},enumerable:!0,configurable:!0}),t.prototype.get=function(r){var n=e(this.__entries__,r),o=this.__entries__[n];return o&&o[1]},t.prototype.set=function(r,n){var o=e(this.__entries__,r);~o?this.__entries__[o][1]=n:this.__entries__.push([r,n])},t.prototype.delete=function(r){var n=this.__entries__,o=e(n,r);~o&&n.splice(o,1)},t.prototype.has=function(r){return!!~e(this.__entries__,r)},t.prototype.clear=function(){this.__entries__.splice(0)},t.prototype.forEach=function(r,n){n===void 0&&(n=null);for(var 
o=0,i=this.__entries__;o0},e.prototype.connect_=function(){!zr||this.connected_||(document.addEventListener("transitionend",this.onTransitionEnd_),window.addEventListener("resize",this.refresh),xa?(this.mutationsObserver_=new MutationObserver(this.refresh),this.mutationsObserver_.observe(document,{attributes:!0,childList:!0,characterData:!0,subtree:!0})):(document.addEventListener("DOMSubtreeModified",this.refresh),this.mutationEventsAdded_=!0),this.connected_=!0)},e.prototype.disconnect_=function(){!zr||!this.connected_||(document.removeEventListener("transitionend",this.onTransitionEnd_),window.removeEventListener("resize",this.refresh),this.mutationsObserver_&&this.mutationsObserver_.disconnect(),this.mutationEventsAdded_&&document.removeEventListener("DOMSubtreeModified",this.refresh),this.mutationsObserver_=null,this.mutationEventsAdded_=!1,this.connected_=!1)},e.prototype.onTransitionEnd_=function(t){var r=t.propertyName,n=r===void 0?"":r,o=ya.some(function(i){return!!~n.indexOf(i)});o&&this.refresh()},e.getInstance=function(){return this.instance_||(this.instance_=new e),this.instance_},e.instance_=null,e}(),Jn=function(e,t){for(var r=0,n=Object.keys(t);r0},e}(),Zn=typeof WeakMap!="undefined"?new WeakMap:new Bn,eo=function(){function e(t){if(!(this instanceof e))throw new TypeError("Cannot call a class as a function.");if(!arguments.length)throw new TypeError("1 argument required, but only 0 present.");var r=Ea.getInstance(),n=new Ra(t,r,this);Zn.set(this,n)}return e}();["observe","unobserve","disconnect"].forEach(function(e){eo.prototype[e]=function(){var t;return(t=Zn.get(this))[e].apply(t,arguments)}});var ka=function(){return typeof nr.ResizeObserver!="undefined"?nr.ResizeObserver:eo}(),to=ka;var ro=new E,Ha=I(()=>H(new to(e=>{for(let t of e)ro.next(t)}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function de(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){return 
Ha.pipe(S(t=>t.observe(e)),x(t=>ro.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(()=>de(e)))),N(de(e)))}function bt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function ar(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}var no=new E,Pa=I(()=>H(new IntersectionObserver(e=>{for(let t of e)no.next(t)},{threshold:0}))).pipe(x(e=>L(Te,H(e)).pipe(C(()=>e.disconnect()))),J(1));function sr(e){return Pa.pipe(S(t=>t.observe(e)),x(t=>no.pipe(_(({target:r})=>r===e),C(()=>t.unobserve(e)),l(({isIntersecting:r})=>r))))}function oo(e,t=16){return dt(e).pipe(l(({y:r})=>{let n=de(e),o=bt(e);return r>=o.height-n.height-t}),Y())}var cr={drawer:V("[data-md-toggle=drawer]"),search:V("[data-md-toggle=search]")};function io(e){return cr[e].checked}function qe(e,t){cr[e].checked!==t&&cr[e].click()}function je(e){let t=cr[e];return b(t,"change").pipe(l(()=>t.checked),N(t.checked))}function $a(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ia(){return L(b(window,"compositionstart").pipe(l(()=>!0)),b(window,"compositionend").pipe(l(()=>!1))).pipe(N(!1))}function ao(){let e=b(window,"keydown").pipe(_(t=>!(t.metaKey||t.ctrlKey)),l(t=>({mode:io("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),_(({mode:t,type:r})=>{if(t==="global"){let n=_e();if(typeof n!="undefined")return!$a(n,r)}return!0}),fe());return Ia().pipe(x(t=>t?R:e))}function Me(){return new URL(location.href)}function ot(e){location.href=e.href}function so(){return new E}function co(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)co(e,r)}function M(e,t,...r){let n=document.createElement(e);if(t)for(let o of Object.keys(t))typeof 
t[o]!="undefined"&&(typeof t[o]!="boolean"?n.setAttribute(o,t[o]):n.setAttribute(o,""));for(let o of r)co(n,o);return n}function fr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function fo(){return location.hash.substring(1)}function uo(e){let t=M("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Fa(){return b(window,"hashchange").pipe(l(fo),N(fo()),_(e=>e.length>0),J(1))}function po(){return Fa().pipe(l(e=>se(`[id="${e}"]`)),_(e=>typeof e!="undefined"))}function Nr(e){let t=matchMedia(e);return Zt(r=>t.addListener(()=>r(t.matches))).pipe(N(t.matches))}function lo(){let e=matchMedia("print");return L(b(window,"beforeprint").pipe(l(()=>!0)),b(window,"afterprint").pipe(l(()=>!1))).pipe(N(e.matches))}function qr(e,t){return e.pipe(x(r=>r?t():R))}function ur(e,t={credentials:"same-origin"}){return ve(fetch(`${e}`,t)).pipe(ce(()=>R),x(r=>r.status!==200?Tt(()=>new Error(r.statusText)):H(r)))}function Ue(e,t){return ur(e,t).pipe(x(r=>r.json()),J(1))}function mo(e,t){let r=new DOMParser;return ur(e,t).pipe(x(n=>n.text()),l(n=>r.parseFromString(n,"text/xml")),J(1))}function pr(e){let t=M("script",{src:e});return I(()=>(document.head.appendChild(t),L(b(t,"load"),b(t,"error").pipe(x(()=>Tt(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(l(()=>{}),C(()=>document.head.removeChild(t)),Oe(1))))}function ho(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function bo(){return L(b(window,"scroll",{passive:!0}),b(window,"resize",{passive:!0})).pipe(l(ho),N(ho()))}function vo(){return{width:innerWidth,height:innerHeight}}function go(){return b(window,"resize",{passive:!0}).pipe(l(vo),N(vo()))}function yo(){return Q([bo(),go()]).pipe(l(([e,t])=>({offset:e,size:t})),J(1))}function lr(e,{viewport$:t,header$:r}){let n=t.pipe(X("size")),o=Q([n,r]).pipe(l(()=>Be(e)));return Q([r,t,o]).pipe(l(([{height:i},{offset:a,size:s},{x:f,y:c}])=>({offset:{x:a.x-f,y:a.y-c+i},size:s})))}(()=>{function 
e(n,o){parent.postMessage(n,o||"*")}function t(...n){return n.reduce((o,i)=>o.then(()=>new Promise(a=>{let s=document.createElement("script");s.src=i,s.onload=a,document.body.appendChild(s)})),Promise.resolve())}var r=class{constructor(n){this.url=n,this.onerror=null,this.onmessage=null,this.onmessageerror=null,this.m=a=>{a.source===this.w&&(a.stopImmediatePropagation(),this.dispatchEvent(new MessageEvent("message",{data:a.data})),this.onmessage&&this.onmessage(a))},this.e=(a,s,f,c,u)=>{if(s===this.url.toString()){let p=new ErrorEvent("error",{message:a,filename:s,lineno:f,colno:c,error:u});this.dispatchEvent(p),this.onerror&&this.onerror(p)}};let o=new EventTarget;this.addEventListener=o.addEventListener.bind(o),this.removeEventListener=o.removeEventListener.bind(o),this.dispatchEvent=o.dispatchEvent.bind(o);let i=document.createElement("iframe");i.width=i.height=i.frameBorder="0",document.body.appendChild(this.iframe=i),this.w.document.open(),this.w.document.write(` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Inlets documentation

+

Inlets brings secure tunnels to Cloud Native workloads.

+

Inlets logo

+
+

You can visit the inlets homepage at https://inlets.dev/

+
+

With inlets you are in control of your data, unlike with a SaaS tunnel where shared servers mean your data may be at risk. You can use inlets for local development and in your production environment. It works just as well on bare-metal as in VMs, containers and Kubernetes clusters.

+

inlets is not just compatible with tricky networks and Cloud Native architecture, it was purpose-built for them.

+

Common use-cases include:

+
    +
  • Exposing local HTTPS, TCP, or websocket endpoints on the Internet
  • +
  • Replacing SaaS tunnels that are too restrictive
  • +
  • Self-hosting from a homelab or on-premises datacenter
  • +
  • Deploying and monitoring apps across multiple locations
  • +
  • Receiving webhooks and testing OAuth integrations
  • +
  • Remote customer support
  • +
+
+

Do you want to connect to hundreds of remote services without exposing them on the Internet? You may be looking for inlets uplink

+
+

How does it work?

+

Inlets tunnels connect to each other over a secure websocket with TLS encryption. Over that private connection, you can then tunnel HTTPS or TCP traffic to computers in another network or to the Internet.

+

One of the most common use-cases is to expose a local HTTP endpoint on the Internet via a HTTPS tunnel. You may be working with webhooks, integrating with OAuth, sharing a draft of a blog post or integrating with a partner's API.

+

Access a local service remotely

+
+

After deploying an inlets HTTPS server on a public cloud VM, you can then connect the client and access it.

+
+

There is more that inlets can do for you than exposing local endpoints. inlets also supports local forwarding and can be used to replace more cumbersome services like SSH, complex VPNs or expensive direct connect uplinks.

+

Read more in the inlets FAQ.

+

Getting started

+

These guides walk you through a specific use-case with inlets. If you have questions or cannot find what you need, there are options for connecting with the community at the end of this page.

+

Inlets can tunnel either HTTP or TCP traffic:

+
    +
  • HTTP (L7) tunnels can be used to connect one or more HTTP endpoints from one network to another. A single tunnel can expose multiple websites or hosts, including LoadBalancing and multiple clients to one server.
  • +
  • TCP (L4) tunnels can be used to connect TCP services such as a database, a reverse proxy, RDP, Kubernetes or SSH to the Internet. A single tunnel can expose multiple ports on an exit-server and load balance between clients
  • +
+

Downloading inlets

+

inlets is available for Windows, MacOS (including M1) and Linux (including ARM):

+ +

You can also use the container image from ghcr.io: ghcr.io/inlets/inlets-pro:latest

+

Your first HTTPS tunnel with an automated tunnel server (Intermediate)

+

Expose one or more HTTPS domains from your local machine.

+ +

Running a HTTP tunnel server manually (Advanced)

+

If you don't want to use automation tools to create a server for the inlets-pro server, then you can follow this manual guide to generate and install a systemd service instead.

+ +

Tunnelling TCP services

+

inlets is not limited to HTTP connections, you can also tunnel TCP protocols like RDP, VNC, SSH, TLS and databases.

+ +

Running multiple tunnel servers on the same host (Advanced)

+

If you want to mix HTTP and TCP tunnels on the same tunnel server, you could either only use TCP ports, or enable both.

+ +

If you're looking to scale inlets to host many tunnels, then Kubernetes is probably a better option.

+

Local port forwarding (Intermediate)

+ +

Connecting with Kubernetes

+

You may have an on-premises Kubernetes cluster that needs ingress. Perhaps you have a homelab, or Raspberry Pi cluster, that you want to self host services on.

+ +

Some teams want to have dev work like production, with tools like Istio working locally just like in the cloud.

+ +

See also: helm charts

+

Becoming a tunnel provider or operating a hosting service

+

The Inlets Uplink distribution is a Kubernetes operator that makes it quick and easy to onboard hundreds or thousands of customers, each with their own dedicated tunnel. It can also be used for remote management and command and control of IT systems and IoT devices.

+

Learn more: Inlets Uplink

+

Monitoring and metrics

+

Inlets offers you multiple options to monitor your tunnels and get insight in their performance. Find out tunnel statistics, uptime and connected clients with the inlets-pro status command or collect the Prometheus metrics from the monitoring endpoint.

+ +

Reference documentation

+

inletsctl

+

Learn how to use inletsctl to provision tunnel servers on various public clouds.

+ +

inlets-operator

+

Learn how to set up the inlets-operator for Kubernetes, which provisions public cloud VMs and gives IP addresses to your public LoadBalancers.

+ +

Other resources

+

For news, use-cases and guides check out the blog:

+ +

Watch a video, or read a blog post from the community:

+ +

Open Source tools for managing inlets tunnels:

+ +

Connecting with the inlets community

+

Who built inlets? Inlets ® is a commercial solution developed and supported by OpenFaaS Ltd.

+

You can also contact the team via the contact page.

+

The code for this website is open source and available on GitHub

+
+

inlets is proud to be featured on the Cloud Native Landscape in the Service Proxy category.

+
+

CNCF Landscape

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/faq/index.html b/reference/faq/index.html new file mode 100644 index 0000000..212f913 --- /dev/null +++ b/reference/faq/index.html @@ -0,0 +1,1275 @@ + + + + + + + + + + + + + + + + + + + + + + + + Inlets FAQ - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Inlets FAQ

+

Inlets concepts and Frequently Asked Questions (FAQ)

+

Why did we build inlets?

+

We built inlets to make it easy to expose a local service on the Internet and to overcome limitations with SaaS tunnels and VPNs.

+
    +
  • It was built to overcome limitations in SaaS tunnels - such as lack of privacy, control and rate-limits
  • +
  • It doesn't just integrate with containers and Kubernetes, it was purpose-built to run in them
  • +
  • It's easy to run on Windows, Linux and MacOS with a self-contained binary
  • +
  • It doesn't need to run as root, doesn't depend on iptables, doesn't need a tun device or NET_ADMIN capability
  • +
+

There are many different networking tools available such as VPNs and SaaS tunnels - each with its own set of pros and cons, and use-cases. It's very likely that you will use several tools together to get the best out of each of them.

+

How does inlets compare to other tools and solutions?

+

Are you curious about the advantages of using inlets vs. alternatives? We must first ask, advantages vs. what other tool or service.

+

SaaS tunnels provide a convenient way to expose services for the purposes of development, however they are often:

+
    +
  • blocked by corporate IT
  • +
  • running on shared infrastructure (servers) with other customers
  • +
  • subject to stringent rate-limits that affect productivity
  • +
  • priced per subdomain
  • +
  • unable to obtain high value TCP ports like 22, 80, 443 and so on
  • +
+

You run inlets on your own servers, so you do not run into those restrictions. Your data remains your own and is kept private.

+

When compared to VPNs such as Wireguard, Tailscale and OpenVPN, we have to ask what the use-case is.

+

A traditional VPN is built to connect hosts and entire IP ranges together. This can potentially expose a large number of machines and users to each other and requires complex Access Control Lists or authorization rules. If this is your use-case, a traditional VPN is probably the right tool for the job.

+

Inlets is designed to connect or expose services between networks - either HTTP or TCP.

+

For example:

+
    +
  • Receiving webhooks to a local application
  • +
  • Sharing a blog post draft with a colleague or client
  • +
  • Providing remote access to your homelab when away from home
  • +
  • Self-hosting websites or services on Kubernetes clusters
  • +
  • Getting working LoadBalancers with public IPs for local Kubernetes clusters
  • +
+

You can also use inlets to replace Direct Connect or a VPN when you just need to connect a number of services privately and not an entire network range.

+

Many of the inlets community use a VPN alongside inlets, because they are different tools for different use-cases.

+
+

We often write about use-cases for public and private inlets tunnels on the blog.

+
+

What's the difference between inlets, inletsctl and inlets-operator?

+

inlets-pro aka "inlets" is the command-line tool that contains both the client and server required to set up HTTP and TCP tunnels.

+

The inlets-pro server is usually set up on a computer with a public IP address, then the inlets-pro client is run on your own machine, or a separate computer that can reach the service or server you want to expose.

+

You can download inlets-pro and inletsctl with the "curl | sh" commands provided at the start of each tutorial, this works best on a Linux host, or with Git Bash if using Windows.

+
+

Did you know? You can also download binaries for inlets-pro and inletsctl on GitHub, for Windows users you'll want "inlets-pro.exe" and for MacOS, you'll want "inlets-pro-darwin".

+
+

For instance, on Windows machines you'll need "inlets-pro.exe"

+

See also: inlets-pro releases

+

inletsctl is a tool that can set up a tunnel server for you on around a dozen popular clouds. It exists to make it quicker and more convenient to set up a HTTPS or TCP tunnel to expose a local service.

+

It has three jobs:

+
    +
  1. Create the VM for you
  2. +
  3. Install the inlets-pro server in TCP or HTTPS mode (as specified) with systemd
  4. +
  5. Inform you of the token and connection string
  6. +
+

You can download the inletsctl tool with "curl | sh" or from the inletsctl releases page.

+

Find out more: inletsctl reference page

+

inlets-operator is a Kubernetes Operator that will create tunnel servers for you, on your chosen cloud for any LoadBalancers that you expose within a private cluster.

+

Find out more: inlets-operator reference page

+

What is the networking model for inlets?

+

Whilst some networking tools such as Bittorrent use a peer-to-peer network, inlets uses a more traditional client/server model.

+

One or more client tunnels connect to a tunnel server and advertise which services they are able to provide. Then, whenever the server receives traffic for one of those advertised services, it will forward it through the tunnel to the client. The tunnel client will then forward that on to the service it advertised.

+
+

The tunnel server may also be referred to as an "exit" server because it is the connection point for the client to another network or the Internet.

+
+

If you install and run the inlets server on a computer, it can be referred to as a tunnel server or exit server. These servers can also be automated through cloud-init, terraform, or tools maintained by the inlets community such as inletsctl.

+

Conceptual architecture

+
+

Pictured: the website http://127.0.0.1:3000 is exposed through an encrypted tunnel to users at: https://example.com

+
+

For remote forwarding, the client tends to be run within a private network, with an --upstream flag used to specify where incoming traffic needs to be routed. The tunnel server can then be run on an Internet-facing network, or any other network reachable by the client.

+

What kind of layers and protocols are supported?

+

Inlets works at a higher level than traditional VPNs because it is designed to connect services together, rather than hosts directly.

+
    +
  • HTTP - Layer 7 of the OSI model, used for web traffic such as websites and RESTful APIs
  • +
  • TCP - Layer 4 of the OSI model, used for TCP traffic like SSH, TLS, databases, RDP, etc
  • +
+

Because VPNs are designed to connect hosts together over a shared IP space, they also involve tedious IP address management and allocation.

+

Inlets connects services, so for TCP traffic, you need only think about TCP ports.

+

For HTTP traffic, you need only to think about domain names.

+

Do I want a TCP or HTTPS tunnel?

+

If you're exposing websites, blogs, docs, APIs and webhooks, you should use a HTTPS tunnel.

+

For HTTP tunnels, Rate Error and Duration (RED) metrics are collected for any service you expose, even if it doesn't have its own instrumentation support.

+

For anything that doesn't fit into that model, a TCP tunnel may be a better option.

+

Common examples are: RDP, VNC, SSH, TLS, database protocols, legacy medical protocols such as DiCom.

+

TCP tunnels can also be used to forward traffic to a reverse proxy like Nginx, Caddy, or Traefik, sitting behind a firewall or NAT by forwarding port 80 and 443.

+

TCP traffic is forwarded directly between the two hosts without any decryption of bytes. The active connection count and frequency can be monitored along with the amount of throughput.

+

Does inlets use TCP or UDP?

+

Inlets uses a websocket over TCP, so that it can penetrate HTTP proxies, captive portals, firewalls, and other kinds of NAT. As long as the client can make an outbound connection, a tunnel can be established. The use of HTTPS means that inlets will have similar latency and throughput to a HTTPS server or SSH tunnel.

+

Once you have an inlets tunnel established, you can use it to tunnel traffic to TCP and HTTPS sockets within the private network of the client.

+

Most VPNs tend to use UDP for communication due to its low overhead which results in lower latency and higher throughput. Certain tools and products such as OpenVPN, SSH and Tailscale can be configured to emulate a TCP stack over a TCP connection, this can lead to unexpected issues.

+

Inlets connections send data, rather than emulating a TCP over TCP stack, so doesn't suffer from this problem.

+

Are both remote and local forwarding supported?

+

Remote forwarding is where a local service is forwarded from the client's network to the inlets tunnel server.

+

Remote forwarding pushes a local endpoint to a remote host for access on another network

+
+

Remote forwarding pushes a local endpoint to a remote host for access on another network

+
+

This is the most common use-case and would be used to expose a local HTTP server to the public Internet via a tunnel server.

+

Local forwarding is used to forward a service on the tunnel server or tunnel server's network back to the client, so that it can be accessed using a port on localhost.

+

Local forwarding brings a remote service back to localhost for accessing

+
+

Local forwarding brings a remote service back to localhost for accessing

+
+

An example would be that you have a webserver and MySQL database. The HTTP server is public and can access the database via its own loopback adapter, but the Internet cannot. So how do you access that MySQL database from CI, or from your local machine? Connect a client with local forwarding, and bring the MySQL port back to your local machine or CI runner, and then use the MySQL CLI to access it.

+

A developer at the UK Government uses inlets to forward a NATS message queue from a staging environment to his local machine for testing. Learn more

+

What's the difference between the data plane and control plane?

+

The data plane is any service or port that carries traffic from the tunnel server to the tunnel client, and your private TCP or HTTP services. It can be exposed on all interfaces, or only bound to loopback for private access, in a similar way to a VPN.

+

If you were exposing SSH on an internal machine from port 2222, your data-plane may be exposed on port 2222

+

The control-plane is a TLS-encrypted, authenticated websocket that is used to connect clients to servers. All traffic ultimately passes over the control-plane's link, so remains encrypted and private.

+

Your control-plane's port is usually 8123 when used directly, or 443 when used behind a reverse proxy or Kubernetes Ingress Controller.

+

An example from the article: The Simple Way To Connect Existing Apps to Public Cloud

+

A legacy MSSQL server runs on Windows Server behind the firewall in a private datacenter. Your organisation cannot risk migrating it to an AWS EC2 instance at this time, but can move the microservice that needs to access it.

+

The inlets tunnel allows for the MSSQL service to be tunneled privately to the EC2 instance's local network for accessing, but is not exposed on the Internet. All traffic is encrypted over the wire due to the TLS connection of inlets.

+

Hybrid Cloud in action using an inlets tunnel to access the on-premises database

+
+

Hybrid Cloud in action using an inlets tunnel to access the on-premises database

+
+

This concept is referred to as a "split plane" because the control plane is available to public clients on all adapters, and the data plane is only available on local or private adapters on the server.

+

Is there a reference guide to the CLI?

+

The inlets-pro binary has built-in help commands and examples, just run inlets-pro tcp/http client/server --help.

+

A separate CLI reference guide is also available here: inlets-pro CLI reference

+

Is inlets secure?

+

All traffic sent over an inlets tunnel is encapsulated in a TLS-encrypted websocket, which prevents eavesdropping. This is technically similar to HTTPS, but you'll see a URL of wss:// instead.

+

The tunnel client is authenticated using an API token which is generated by the tunnel administrator, or by automated tooling.

+

Additional authentication mechanisms can be set up using a reverse proxy such as Nginx.

+

Do I have to expose services on the Internet to use inlets?

+

No, inlets can be used to tunnel one or more services to another network without exposing them on the Internet.

+

The --data-addr 127.0.0.1: flag for inlets servers binds the data plane to the server's loopback address, meaning that only other processes running on it can access the tunneled services. You could also use a private network adapter or VPC IP address in the --data-addr flag.

+

How do I monitor inlets?

+

See the following blog post for details on the inlets status command and the various Prometheus metrics that are made available.

+

Measure and monitor your inlets tunnels

+

How do you scale inlets?

+

Inlets HTTP servers can support a high number of clients, either for load-balancing the same internal service to a number of clients, or for a number of distinct endpoints.

+

Tunnel servers are easy to scale through the use of containers, and can benefit from the resilience that a Kubernetes cluster can bring:

+

See also: How we scaled inlets to thousands of tunnels with Kubernetes

+

Does inlets support High Availability (HA)?

+

For the inlets client, it is possible to connect multiple inlets tunnel clients for the same service, such as a company blog. Traffic will be distributed across the clients and if one of those clients goes down or crashes, the other will continue to serve requests.

+

For the inlets tunnel server, the easiest option is to run the server in a supervisor that can restart the tunnel service quickly or allow it to run more than one replica. Systemd can be used to restart tunnel servers should they run into issues, likewise you can run the server in a container, or as a Kubernetes Pod.

+

HA VIP

+
+

HA example with an AWS ELB

+
+

For example, you may place a cloud load-balancer in front of the data-plane port of two inlets server processes. Requests to the stable load-balancer IP address will be distributed between the two virtual machines and their respective inlets server tunnel processes.

+

Is IPv6 supported?

+

Yes, see also: How to serve traffic to IPv6 users with inlets

+

What if the websocket disconnects?

+

The client will reconnect automatically and can be configured with systemd or a Windows service to stay running in the background. See also inlets pro tcp/http server/client --generate=systemd for generating systemd unit files.

+

When used in combination with a Kubernetes ingress controller or reverse proxy of your own, then the websocket may timeout. These timeout settings can usually be configured to remove any potential issue.

+

Monitoring in inlets allows for you to monitor the reliability of your clients and servers, which are often running in distinct networks.

+

How much does inlets cost?

+

Monthly and annual subscriptions are available via Gumroad.

+

You can also purchase a static license for offline or air-gapped environments.

+

For more, see the Pricing page

+

What happens when the license expires?

+

If you're using a Gumroad license, and keep your billing relationship active, then the software will work for as long as you keep paying. The Gumroad license server needs to be reachable by the inlets client.

+

If you're using a static license, then the software will continue to run, even after your license has expired, unless you restart the software. You can either rotate the token on your inlets clients in an automated or manual fashion, or purchase a token for a longer period of time up front.

+

Can I get professional help?

+

Inlets is designed to be self-service and is well documented, but perhaps you could use some direction?

+

Business licenses come with support via email, however you are welcome to contact OpenFaaS Ltd to ask about a consulting project.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/index.html b/reference/index.html new file mode 100644 index 0000000..8e2b252 --- /dev/null +++ b/reference/index.html @@ -0,0 +1,1074 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + Overview - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/inlets-operator/index.html b/reference/inlets-operator/index.html new file mode 100644 index 0000000..6bdebfd --- /dev/null +++ b/reference/inlets-operator/index.html @@ -0,0 +1,1255 @@ + + + + + + + + + + + + + + + + + + + + + + + + inlets-operator reference documentation - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

inlets-operator reference documentation

+

The inlets/inlets-operator brings LoadBalancers with public IP addresses to your local Kubernetes clusters.

+
+

It works by creating VMs and running an inlets Pro tunnel server for you, the VM's public IP is then attached to the cluster and an inlets client Pod runs for you.

+
+

You can install the inlets-operator using a single command with arkade or with helm. arkade is an open-source Kubernetes marketplace and easier to use.

+

For each provider, the minimum requirements tend to be:

+
    +
  • An access token - for the operator to create VMs for inlets Pro servers
  • +
  • A region - where to create the VMs
  • +
+
+

You can subscribe to inlets for personal or commercial use via Gumroad

+
+

Install using arkade

+
arkade install inlets-operator \
+ --provider $PROVIDER \ # Name of the cloud provider to provision the exit-node on.
+ --region $REGION \ # Used with cloud providers that require a region.
+ --zone $ZONE \ # Used with cloud providers that require zone (e.g. gce).
+ --token-file $HOME/Downloads/key.json \ # Token file/Service Account Key file with the access to the cloud provider.
+ --license-file $HOME/.inlets/LICENSE
+
+

Install using helm

+

Checkout the inlets-operator helm chart README to know more about the values that can be passed to --set and to see provider specific example commands.

+
# Create a secret to store the service account key file
+kubectl create secret generic inlets-access-key \
+  --from-file=inlets-access-key=key.json
+
+# Add and update the inlets-operator helm repo
+helm repo add inlets https://inlets.github.io/inlets-operator/
+
+# Create a namespace for inlets-operator
+kubectl create namespace inlets
+
+# Create a secret to store the inlets-pro license
+kubectl create secret generic -n inlets \
+  inlets-license --from-file license=$HOME/.inlets/LICENSE
+
+# Update the local repository
+helm repo update
+
+# Install inlets-operator with the required fields
+helm upgrade inlets-operator --install inlets/inlets-operator \
+  --set provider=$PROVIDER,zone=$ZONE,region=$REGION \
+  --set projectID=$PROJECTID \
+  --set inletsProLicense=$LICENSE
+
+

View the code and chart on GitHub: inlets/inlets-operator

+

Instructions per cloud

+

Create tunnel servers on DigitalOcean

+

Install with inlets Pro on DigitalOcean.

+

Assuming you have created an API key and saved it to $HOME/Downloads/do-access-token, run:

+
arkade install inlets-operator \
+ --provider digitalocean \
+ --region lon1 \
+ --token-file $HOME/Downloads/do-access-token \
+ --license-file $HOME/.inlets/LICENSE
+
+

Create tunnel servers on AWS EC2

+

Instructions for AWS EC2

+

To use the instructions below you must have the AWS CLI configured with sufficient permissions to create users and roles.

+
    +
  • Create an AWS IAM Policy with the following:
  • +
+

Create a file named policy.json with the following content

+
{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "ec2:AuthorizeSecurityGroupIngress",
+        "ec2:DescribeInstances",
+        "ec2:DescribeImages",
+        "ec2:TerminateInstances",
+        "ec2:CreateSecurityGroup",
+        "ec2:CreateTags",
+        "ec2:DeleteSecurityGroup",
+        "ec2:RunInstances",
+        "ec2:DescribeInstanceStatus"
+      ],
+      "Resource": ["*"]
+    }
+  ]
+}
+
+

Create the policy in AWS

+
aws iam create-policy --policy-name inlets-automation --policy-document file://policy.json
+
+
    +
  • Create an IAM user
  • +
+
aws iam create-user --user-name inlets-automation
+
+
    +
  • Add the Policy to the IAM user
  • +
+

We need to use the policy arn generated above, it should have been printed to the console on success. It also follows the format below.

+
export AWS_ACCOUNT_NUMBER="Your AWS Account Number"
+aws iam attach-user-policy --user-name inlets-automation --policy-arn arn:aws:iam::${AWS_ACCOUNT_NUMBER}:policy/inlets-automation
+
+
    +
  • Generate an access key for your IAM User
  • +
+

The below commands will create a set of credentials and save them into files for use later on.

+
+

we are using jq here. It can be installed using the link provided. +Alternatively you can print ACCESS_KEY_JSON and create the files manually.

+
+
ACCESS_KEY_JSON=$(aws iam create-access-key --user-name inlets-automation)
+echo $ACCESS_KEY_JSON | jq -r .AccessKey.AccessKeyId > access-key
+echo $ACCESS_KEY_JSON | jq -r .AccessKey.SecretAccessKey > secret-access-key
+
+

Install with inlets Pro:

+
arkade install inlets-operator \
+ --provider ec2 \
+ --region eu-west-1 \
+ --token-file $HOME/Downloads/access-key \
+ --secret-key-file $HOME/Downloads/secret-access-key \
+ --license-file $HOME/.inlets/LICENSE
+
+

Create tunnel servers on Google Compute Engine (GCE)

+

Instructions for Google Cloud

+

It is assumed that you have gcloud installed and configured on your machine. +If not, then follow the instructions here

+

To get your service account key file, follow the steps below:

+
# Get current projectID
+export PROJECTID=$(gcloud config get-value core/project 2>/dev/null)
+
+# Create a service account
+gcloud iam service-accounts create inlets \
+  --description "inlets-operator service account" \
+  --display-name "inlets"
+
+# Get service account email
+export SERVICEACCOUNT=$(gcloud iam service-accounts list | grep inlets | awk '{print $2}')
+
+# Assign appropriate roles to inlets service account
+gcloud projects add-iam-policy-binding $PROJECTID \
+  --member serviceAccount:$SERVICEACCOUNT \
+  --role roles/compute.admin
+
+gcloud projects add-iam-policy-binding $PROJECTID \
+  --member serviceAccount:$SERVICEACCOUNT \
+  --role roles/iam.serviceAccountUser
+
+# Create inlets service account key file
+gcloud iam service-accounts keys create key.json \
+  --iam-account $SERVICEACCOUNT
+
+

Install the operator:

+
arkade install inlets-operator \
+    --provider gce \
+    --project-id $PROJECTID \
+    --zone us-central1-a \
+    --token-file key.json \
+    --license-file $HOME/.inlets/LICENSE
+
+

Create tunnel servers on Azure

+

Instructions for Azure

+

Prerequisites:

+ +

Generate Azure authentication file:

+
SUBSCRIPTION_ID="YOUR_SUBSCRIPTION_ID"
+az ad sp create-for-rbac --role Contributor --scopes "/subscriptions/$SUBSCRIPTION_ID" --sdk-auth \
+  > $HOME/Downloads/client_credentials.json
+
+

Find your region code with:

+
az account list-locations -o table
+
+DisplayName               Name                 RegionalDisplayName
+------------------------  -------------------  -------------------------------------
+United Kingdom            ukwest               United Kingdom
+
+

Install using helm:

+
export SUBSCRIPTION_ID="YOUR_SUBSCRIPTION_ID"
+export AZURE_REGION="ukwest"
+export INLETS_LICENSE="$(cat ~/.inlets/LICENSE)"
+export ACCESS_KEY="$HOME/Downloads/client_credentials.json"
+
+kubectl create secret generic inlets-access-key \
+  --from-file=inlets-access-key=$ACCESS_KEY
+
+helm repo add inlets https://inlets.github.io/inlets-operator/
+helm repo update
+
+helm upgrade inlets-operator --install inlets/inlets-operator \
+  --set provider=azure,region=$AZURE_REGION \
+  --set subscriptionID=$SUBSCRIPTION_ID
+
+

Create tunnel servers on Linode

+

Instructions for Linode

+

Install using helm:

+
# Create a secret to store the service account key file
+kubectl create secret generic inlets-access-key --from-literal inlets-access-key=<Linode API Access Key>
+
+# Add and update the inlets-operator helm repo
+helm repo add inlets https://inlets.github.io/inlets-operator/
+
+helm repo update
+
+# Install inlets-operator with the required fields
+helm upgrade inlets-operator --install inlets/inlets-operator \
+  --set provider=linode \
+  --set region=us-east
+
+

You can also install the inlets-operator using a single command using arkade, arkade runs against any Kubernetes cluster.

+

Install with inlets Pro:

+
arkade install inlets-operator \
+ --provider linode \
+ --region us-east \
+ --access-key $LINODE_ACCESS_KEY \
+ --license-file $HOME/.inlets/LICENSE
+
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/reference/inletsctl/index.html b/reference/inletsctl/index.html new file mode 100644 index 0000000..3ba0949 --- /dev/null +++ b/reference/inletsctl/index.html @@ -0,0 +1,1692 @@ + + + + + + + + + + + + + + + + + + + + + + + + inletsctl reference documentation - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

inletsctl reference documentation

+

inletsctl is an automation tool for inlets/-pro.

+

Features:

+
    +
  • create / delete cloud VMs with inlets/-pro pre-installed via systemd
  • +
  • download [--pro] - download the inlets/-pro binaries to your local computer
  • +
  • kfwd - forward services from a Kubernetes cluster to your local machine using inlets/-pro
  • +
+

View the code on GitHub: inlets/inletsctl

+

Install inletsctl

+

You can install inletsctl using its installer, or from the GitHub releases page

+
# Install to local directory (and for Windows users)
+curl -sLSf https://inletsctl.inlets.dev | sh
+
+# Install directly to /usr/local/bin/
+curl -sLSf https://inletsctl.inlets.dev | sudo sh
+
+

Windows users are encouraged to use git bash to install the inletsctl binary.

+

Downloading inlets-pro

+

The inletsctl download command can be used to download the inlets/-pro binaries.

+

Example usage:

+
# Download the latest inlets-pro binary
+inletsctl download
+
+# Download a specific version of inlets-pro
+inletsctl download --version 0.8.5
+
+

The create command

+

Create a HTTPS tunnel with a custom domain

+

This example uses DigitalOcean to create a cloud VM and then exposes a local service via the newly created exit-server.

+

Let's say we want to expose a Grafana server on our internal network to the Internet via Let's Encrypt and HTTPS.

+
export DOMAIN="grafana.example.com"
+
+inletsctl create \
+  --provider digitalocean \
+  --region="lon1" \
+  --access-token-file $HOME/do-access-token \
+  --letsencrypt-domain $DOMAIN \
+  --letsencrypt-email webmaster@$DOMAIN \
+  --letsencrypt-issuer prod
+
+

You can also use --letsencrypt-issuer with the staging value whilst testing since Let's Encrypt rate-limits how many certificates you can obtain within a week.

+

Create a DNS A record for the IP address so that grafana.example.com for instance resolves to that IP. For instance you could run:

+
doctl compute domain create \
+  --ip-address 46.101.60.161 grafana.example.com
+
+

Now run the command that you were given, and if you wish, change the upstream to point to the domain explicitly:

+
# Obtain a license at https://inlets.dev
+# Store it at $HOME/.inlets/LICENSE or use --help for more options
+
+# Where to route traffic from the inlets server
+export UPSTREAM="grafana.example.com=http://192.168.0.100:3000"
+
+inlets-pro http client --url "wss://46.101.60.161:8123" \
+--token "lRdRELPrkhA0kxwY0eWoaviWvOoYG0tj212d7Ff0zEVgpnAfh5WjygUVVcZ8xJRJ" \
+--upstream $UPSTREAM
+
+To delete:
+  inletsctl delete --provider digitalocean --id "248562460"
+
+

You can also specify more than one domain and upstream for the same tunnel, so you could expose OpenFaaS and Grafana separately for instance.

+

Update the inletsctl create command with multiple domains such as: --letsencrypt-domain openfaas.example.com --letsencrypt-domain grafana.example.com

+

Then for the inlets-pro client command, update the upstream in the same way by repeating the flag once per upstream mapping: --upstream openfaas.example.com=http://127.0.0.1:8080 --upstream grafana.example.com=http://192.168.0.100:3000.

+
+

Note that in previous inlets versions, multiple upstream values were given in a single flag, separated by commas, this has now been deprecated for the above syntax.

+
+

Create a HTTP tunnel

+

This example uses Linode to create a cloud VM and then exposes a local service via the newly created exit-server.

+
export REGION="eu-west"
+
+inletsctl create \
+  --provider linode \
+  --region="$REGION" \
+  --access-token-file $HOME/do-access-token
+
+

You'll see the host being provisioned, it usually takes just a few seconds:

+
Using provider: linode
+Requesting host: peaceful-lewin8 in eu-west, from linode
+2021/06/01 15:56:03 Provisioning host with Linode
+Host: 248561704, status: 
+[1/500] Host: 248561704, status: new
+...
+[11/500] Host: 248561704, status: active
+
+inlets Pro (0.7.0) exit-server summary:
+  IP: 188.166.168.90
+  Auth-token: dZTkeCNYgrTPvFGLifyVYW6mlP78ny3jhyKM1apDL5XjmHMLYY6MsX8S2aUoj8uI
+
+

Now run the command given to you, changing the --upstream URL to match a local URL such as http://localhost:3000

+
# Obtain a license at https://inlets.dev
+export LICENSE="$HOME/.inlets/license"
+
+# Give a single value or comma-separated
+export PORTS="3000"
+
+# Where to route traffic from the inlets server
+export UPSTREAM="localhost"
+
+inlets-pro tcp client --url "wss://188.166.168.90:8123/connect" \
+  --token "dZTkeCNYgrTPvFGLifyVYW6mlP78ny3jhyKM1apDL5XjmHMLYY6MsX8S2aUoj8uI" \
+  --upstream $UPSTREAM \
+  --ports $PORTS
+
+
+

The client will look for your license in $HOME/.inlets/LICENSE, but you can also use the --license/--license-file flag if you wish.

+
+

You can then access your local website via the Internet and the exit-server's IP at:

+

http://188.166.168.90

+

When you're done, you can delete the host using its ID or IP address:

+
  inletsctl delete --provider linode --id "248561704"
+  inletsctl delete --provider linode --ip "188.166.168.90"
+
+

Create a tunnel for a TCP service

+

This example is similar to the previous one, but also adds link-level encryption between your local service and the exit-server.

+

In addition, you can also expose pure TCP traffic such as SSH or Postgresql.

+
inletsctl create \
+  --provider digitalocean \
+  --access-token-file $HOME/do-access-token \
+  --pro
+
+

Note the output:

+
inlets Pro (0.7.0) exit-server summary:
+  IP: 142.93.34.79
+  Auth-token: TUSQ3Dkr9QR1VdHM7go9cnTUouoJ7HVSdiLq49JVzY5MALaJUnlhSa8kimlLwBWb
+
+Command:
+  export LICENSE=""
+  export PORTS="8000"
+  export UPSTREAM="localhost"
+
+  inlets-pro tcp client --url "wss://142.93.34.79:8123/connect" \
+        --token "TUSQ3Dkr9QR1VdHM7go9cnTUouoJ7HVSdiLq49JVzY5MALaJUnlhSa8kimlLwBWb" \
+        --license "$LICENSE" \
+        --upstream $UPSTREAM \
+        --ports $PORTS
+
+To Delete:
+          inletsctl delete --provider digitalocean --id "205463570"
+
+

Run a local service that uses TCP such as MariaDB:

+
head -c 16 /dev/urandom |shasum 
+8cb3efe58df984d3ab89bcf4566b31b49b2b79b9
+
+export PASSWORD="8cb3efe58df984d3ab89bcf4566b31b49b2b79b9"
+
+docker run --name mariadb \
+-p 3306:3306 \
+-e MYSQL_ROOT_PASSWORD=8cb3efe58df984d3ab89bcf4566b31b49b2b79b9 \
+-ti mariadb:latest
+
+

Connect to the tunnel updating the ports to 3306

+
export LICENSE="$(cat ~/LICENSE)"
+export PORTS="3306"
+export UPSTREAM="localhost"
+
+inlets-pro tcp client --url "wss://142.93.34.79:8123/connect" \
+      --token "TUSQ3Dkr9QR1VdHM7go9cnTUouoJ7HVSdiLq49JVzY5MALaJUnlhSa8kimlLwBWb" \
+      --license "$LICENSE" \
+      --upstream $UPSTREAM \
+      --ports $PORTS
+
+

Now connect to your MariaDB instance from its public IP address:

+
export PASSWORD="8cb3efe58df984d3ab89bcf4566b31b49b2b79b9"
+export EXIT_IP="142.93.34.79"
+
+docker run -it --rm mariadb:latest mysql -h $EXIT_IP -P 3306 -uroot -p$PASSWORD
+
+Welcome to the MariaDB monitor.  Commands end with ; or \g.
+Your MariaDB connection id is 3
+Server version: 10.5.5-MariaDB-1:10.5.5+maria~focal mariadb.org binary distribution
+
+Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
+
+Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
+
+MariaDB [(none)]> create database test; 
+Query OK, 1 row affected (0.039 sec)
+
+

Examples for specific cloud providers

+

Example usage with AWS EC2

+

To use the instructions below you must have the AWS CLI configured with sufficient permissions to +create users and roles.

+
    +
  • Create an AWS IAM Policy with the following:
  • +
+

Create a file named policy.json with the following content

+
{
+    "Version": "2012-10-17",
+    "Statement": [  
+        {
+            "Effect": "Allow",
+            "Action": [
+                "ec2:AuthorizeSecurityGroupIngress",
+                "ec2:DescribeInstances",
+                "ec2:DescribeImages",
+                "ec2:TerminateInstances",
+                "ec2:CreateSecurityGroup",
+                "ec2:CreateTags",
+                "ec2:DeleteSecurityGroup",
+                "ec2:RunInstances",
+                "ec2:DescribeInstanceStatus"
+            ],
+            "Resource": ["*"]
+        }
+    ]
+}
+
+

Create the policy in AWS

+
aws iam create-policy --policy-name inlets-automation --policy-document file://policy.json
+
+
    +
  • Create an IAM user
  • +
+
aws iam create-user --user-name inlets-automation
+
+
    +
  • Add the Policy to the IAM user
  • +
+

We need to use the policy arn generated above, it should have been printed to the console on success. It also follows the format below.

+
export AWS_ACCOUNT_NUMBER="Your AWS Account Number"
+aws iam attach-user-policy --user-name inlets-automation --policy-arn arn:aws:iam::${AWS_ACCOUNT_NUMBER}:policy/inlets-automation
+
+
    +
  • Generate an access key for your IAM User
  • +
+

The below commands will create a set of credentials and save them into files for use later on.

+
+

we are using jq here. It can be installed using the link provided. +Alternatively you can print ACCESS_KEY_JSON and create the files manually.

+
+
ACCESS_KEY_JSON=$(aws iam create-access-key --user-name inlets-automation)
+echo $ACCESS_KEY_JSON | jq -r .AccessKey.AccessKeyId > access-key.txt
+echo $ACCESS_KEY_JSON | jq -r .AccessKey.SecretAccessKey > secret-key.txt
+
+
    +
  • Create an exit-server:
  • +
+
inletsctl create \
+  --provider ec2 \
+  --region eu-west-1 \
+  --access-token-file ./access-key.txt \
+  --secret-key-file ./secret-key.txt
+
+
    +
  • Delete an exit-server:
  • +
+
export IP=""
+
+inletsctl create \
+  --provider ec2 \
+  --region eu-west-1 \
+  --access-token-file ./access-key.txt \
+  --secret-key-file ./secret-key.txt \
+  --ip $IP
+
+

Example usage with AWS EC2 Temporary Credentials

+

To use the instructions below you must have the AWS CLI configured with sufficient permissions to +create users and roles.

+

The following instructions use get-session-token to illustrate the concept. However, it is expected that real world usage would more likely make use of assume-role to obtain temporary credentials.

+
    +
  • Create an AWS IAM Policy with the following:
  • +
+

Create a file named policy.json with the following content

+
{
+    "Version": "2012-10-17",
+    "Statement": [  
+        {
+            "Effect": "Allow",
+            "Action": [
+                "ec2:AuthorizeSecurityGroupIngress",
+                "ec2:DescribeInstances",
+                "ec2:DescribeImages",
+                "ec2:TerminateInstances",
+                "ec2:CreateSecurityGroup",
+                "ec2:CreateTags",
+                "ec2:DeleteSecurityGroup",
+                "ec2:RunInstances",
+                "ec2:DescribeInstanceStatus"
+            ],
+            "Resource": ["*"]
+        }
+    ]
+}
+
+
    +
  • Create the policy in AWS
  • +
+
aws iam create-policy --policy-name inlets-automation --policy-document file://policy.json
+
+
    +
  • Create an IAM user
  • +
+
aws iam create-user --user-name inlets-automation
+
+
    +
  • Add the Policy to the IAM user
  • +
+

We need to use the policy arn generated above, it should have been printed to the console on success. It also follows the format below.

+
export AWS_ACCOUNT_NUMBER="Your AWS Account Number"
+aws iam attach-user-policy --user-name inlets-automation --policy-arn arn:aws:iam::${AWS_ACCOUNT_NUMBER}:policy/inlets-automation
+
+
    +
  • Generate an access key for your IAM User
  • +
+

The below commands will create a set of credentials and save them into files for use later on.

+
+

we are using jq here. It can be installed using the link provided. +Alternatively you can print ACCESS_KEY_JSON and create the files manually.

+
+
ACCESS_KEY_JSON=$(aws iam create-access-key --user-name inlets-automation)
+export AWS_ACCESS_KEY_ID=$(echo $ACCESS_KEY_JSON | jq -r .AccessKey.AccessKeyId)
+export AWS_SECRET_ACCESS_KEY=$(echo $ACCESS_KEY_JSON | jq -r .AccessKey.SecretAccessKey)
+
+
    +
  • Check that calls are now being executed by the inlets-automation IAM User.
  • +
+
aws sts get-caller-identity
+
+
    +
  • Ask STS for some temporary credentials
  • +
+
TEMP_CREDS=$(aws sts get-session-token)
+
+
    +
  • Break out the required elements
  • +
+
echo $TEMP_CREDS | jq -r .Credentials.AccessKeyId > access-key.txt    
+echo $TEMP_CREDS | jq -r .Credentials.SecretAccessKey > secret-key.txt
+echo $TEMP_CREDS | jq -r .Credentials.SessionToken > session-token.txt
+
+
    +
  • Create an exit-server using temporary credentials:
  • +
+
inletsctl create \
+  --provider ec2 \
+  --region eu-west-1 \
+  --access-token-file ./access-key.txt \
+  --secret-key-file ./secret-key.txt \
+  --session-token-file ./session-token.txt
+
+
    +
  • Delete an exit-server using temporary credentials:
  • +
+
export INSTANCEID=""
+
+inletsctl delete \
+  --provider ec2 \
+  --id $INSTANCEID \
+  --access-token-file ./access-key.txt \
+  --secret-key-file ./secret-key.txt \
+  --session-token-file ./session-token.txt
+
+

Example usage with Google Compute Engine

+

Bear in mind that standard GCE VMs are created with an ephemeral IP address, which is subject to change. In order to make your tunnel's address stable, you should Reserve a static IP address and assign it to your VM. A static IP costs around 2.88 USD / mo.

+
    +
  • One time setup required for a service account key
  • +
+
+

It is assumed that you have gcloud installed and configured on your machine. +If not, then follow the instructions here

+
+
# Get current projectID
+export PROJECTID=$(gcloud config get-value core/project 2>/dev/null)
+
+# Create a service account
+gcloud iam service-accounts create inlets \
+--description "inlets-operator service account" \
+--display-name "inlets"
+
+# Get service account email
+export SERVICEACCOUNT=$(gcloud iam service-accounts list | grep inlets | awk '{print $2}')
+
+# Assign appropriate roles to inlets service account
+gcloud projects add-iam-policy-binding $PROJECTID \
+--member serviceAccount:$SERVICEACCOUNT \
+--role roles/compute.admin
+
+gcloud projects add-iam-policy-binding $PROJECTID \
+--member serviceAccount:$SERVICEACCOUNT \
+--role roles/iam.serviceAccountUser
+
+# Create inlets service account key file
+gcloud iam service-accounts keys create key.json \
+--iam-account $SERVICEACCOUNT
+
+
    +
  • Create a tunnel using the service account and project ID
  • +
+
# Create a TCP tunnel server
+inletsctl create \
+  --provider gce \
+  --project-id=$PROJECTID \
+  --access-token-file=key.json \
+  --tcp
+
+# Create a HTTP / HTTPS tunnel server
+inletsctl create \
+  -p gce \
+  --project-id=$PROJECTID \
+  -f=key.json
+
+# Or specify any valid Google Cloud zone; by default it gets provisioned in us-central1-a
+inletsctl create -p gce \
+  --project-id=$PROJECTID \
+  -f key.json \
+  --zone=us-central1-a
+
+

If you need the tunnel server for any period of time, remember to Reserve a static IP address and assign it to your VM.

+

Then SSH into the host and make sure you update inlets to make use of it:

+

Edit IP= in /etc/default/inlets-pro then run sudo systemctl daemon-reload && sudo systemctl restart inlets-pro

+

The inlets-pro http/tcp --url wss://... flag should also be updated with the static IP.

+

In a future version of inletsctl, we may automate the above.

+

Example usage with Azure

+

Prerequisites:

+ +

Generate Azure auth file +

SUBSCRIPTION_ID="YOUR_SUBSCRIPTION_ID"
+az ad sp create-for-rbac --role Contributor --scopes "/subscriptions/$SUBSCRIPTION_ID" --sdk-auth \
+  > $HOME/Downloads/client_credentials.json
+

+

List Azure available regions +

az account list-locations -o table
+

+

Create +

inletsctl create --provider=azure --subscription-id=4d68ee0c-7079-48d2-b15c-f294f9b11a9e \
+  --region=eastus --access-token-file=~/Downloads/client_credentials.json 
+

+

Delete +

inletsctl delete --provider=azure --id inlets-clever-volhard8 \
+  --subscription-id=4d68ee0c-7079-48d2-b15c-f294f9b11a9e \
+  --region=eastus --access-token-file=~/Downloads/client_credentials.json
+

+

Example usage with Hetzner

+
# Obtain the API token from Hetzner Cloud Console.
+export TOKEN=""
+
+inletsctl create --provider hetzner \
+  --access-token $TOKEN \
+  --region hel1
+
+

Available regions are hel1 (Helsinki), nur1 (Nuremberg), fsn1 (Falkenstein).

+

Example usage with Linode

+

Prerequisites:

+ +

Create +

inletsctl create --provider=linode --access-token=<API Access Token> --region=us-east
+

+

Delete +

inletsctl delete --provider=linode --access-token=<API Access Token> --id <instance id>
+

+

Example usage with Scaleway

+
# Obtain from your Scaleway dashboard:
+export TOKEN=""
+export SECRET_KEY=""
+export ORG_ID=""
+
+inletsctl create --provider scaleway \
+  --access-token $TOKEN \
+  --secret-key $SECRET_KEY --organisation-id $ORG_ID
+
+

The region is hard-coded to France / Paris 1.

+

Example usage with OVHcloud

+

You need to create API keys for the OVHcloud country/continent you're going to deploy with inletsctl. +For an overview of the available endpoints, check the supported-apis documentation

+

For example, for Europe, visit https://eu.api.ovh.com/createToken to create your API keys.

+

The specific values for the endpoint flag are the following:

+
    +
  • ovh-eu for OVH Europe API
  • +
  • ovh-us for OVH US API
  • +
  • ovh-ca for OVH Canada API
  • +
  • soyoustart-eu for So you Start Europe API
  • +
  • soyoustart-ca for So you Start Canada API
  • +
  • kimsufi-eu for Kimsufi Europe API
  • +
  • kimsufi-ca for Kimsufi Canada API
  • +
+

ovh-eu is the default endpoint and DE1 the default region.

+

For the proper rights choose all HTTP Verbs (GET,PUT,DELETE, POST), and we need only the /cloud/ API.

+
export APPLICATION_KEY=""
+export APPLICATION_SECRET=""
+export CONSUMER_KEY=""
+export ENDPOINT=""
+export PROJECT_ID=""
+
+inletsctl create --provider ovh \
+  --access-token $APPLICATION_KEY \
+  --secret-key $APPLICATION_SECRET \
+  --consumer-key $CONSUMER_KEY \
+  --project-id $PROJECT_ID \
+  --endpoint $ENDPOINT
+
+

The delete command

+

The delete command takes an id or IP address which are given to you at the end of the inletsctl create command. You'll also need to specify your cloud access token.

+
inletsctl delete \
+  --provider digitalocean \
+  --access-token-file ~/Downloads/do-access-token \
+  --id 164857028
+
+

Or delete via IP:

+
inletsctl delete \
+  --provider digitalocean \
+  --access-token-file ~/Downloads/do-access-token \
+  --ip 209.97.131.180
+
+

kfwd - Kubernetes service forwarding

+

kfwd runs an inlets-pro server on your local computer, then deploys an inlets client in your Kubernetes cluster using a Pod. This enables your local computer to access services from within the cluster as if they were running on your laptop.

+

inlets Pro allows you to access any TCP service within the cluster, using an encrypted link:

+

Forward the figlet pod from openfaas-fn on port 8080:

+
inletsctl kfwd \
+  --pro \
+  --license $(cat ~/LICENSE) \
+  --from figlet:8080 \
+  --namespace openfaas-fn \
+  --if 192.168.0.14
+
+

Note the if parameter is the IP address of your local computer, this must be reachable from the Kubernetes cluster.

+

Then access the service via http://127.0.0.1:8080.

+

Troubleshooting

+

inletsctl provisions a host called an exit node or exit server using public cloud APIs. It then +prints out a connection string.

+

Are you unable to connect your client to the exit server?

+

Troubleshooting inlets Pro

+

If using auto-tls (the default), check that port 8123 is accessible. It should be serving a file with a self-signed certificate, run the following:

+
export IP=192.168.0.1
+curl -k https://$IP:8123/.well-known/ca.crt
+
+

If you see connection refused, log in to the host over SSH and check the service via systemctl:

+
sudo systemctl status inlets-pro
+
+# Check its logs
+sudo journalctl -u inlets-pro
+
+

You can also check the configuration in /etc/default/inlets-pro, to make sure that an IP address and token are configured.

+

Configuration using environment variables

+

You may want to set an environment variable that points at your access-token-file or secret-key-file

+

Inlets will look for the following:

+
# For providers that use --access-token-file
+INLETS_ACCESS_TOKEN
+
+# For providers that use --secret-key-file
+INLETS_SECRET_KEY
+
+

With the correct one of these set, you won't need to add the flag on every command execution.

+

You can set the following syntax in your bashrc (or equivalent for your shell)

+
export INLETS_ACCESS_TOKEN=$(cat my-token.txt)
+
+# or set the INLETS_SECRET_KEY for those providers that use this
+export INLETS_SECRET_KEY=$(cat my-token.txt)
+
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000..914eff8 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Inlets documentation","text":"

Inlets brings secure tunnels to Cloud Native workloads.

You can visit the inlets homepage at https://inlets.dev/

With inlets you are in control of your data, unlike with a SaaS tunnel where shared servers mean your data may be at risk. You can use inlets for local development and in your production environment. It works just as well on bare-metal as in VMs, containers and Kubernetes clusters.

inlets is not just compatible with tricky networks and Cloud Native architecture, it was purpose-built for them.

Common use-cases include:

  • Exposing local HTTPS, TCP, or websocket endpoints on the Internet
  • Replacing SaaS tunnels that are too restrictive
  • Self-hosting from a homelab or on-premises datacenter
  • Deploying and monitoring apps across multiple locations
  • Receiving webhooks and testing OAuth integrations
  • Remote customer support

Do you want to connect to hundreds of remote services without exposing them on the Internet? You may be looking for inlets uplink

"},{"location":"#how-does-it-work","title":"How does it work?","text":"

Inlets tunnels connect to each other over a secure websocket with TLS encryption. Over that private connection, you can then tunnel HTTPS or TCP traffic to computers in another network or to the Internet.

One of the most common use-cases is to expose a local HTTP endpoint on the Internet via a HTTPS tunnel. You may be working with webhooks, integrating with OAuth, sharing a draft of a blog post or integrating with a partner's API.

After deploying an inlets HTTPS server on a public cloud VM, you can then connect the client and access it.

There is more that inlets can do for you than exposing local endpoints. inlets also supports local forwarding and can be used to replace more cumbersome services like SSH, complex VPNs or expensive direct connect uplinks.

Read more in the: the inlets FAQ.

"},{"location":"#getting-started","title":"Getting started","text":"

These guides walk you through a specific use-case with inlets. If you have questions or cannot find what you need, there are options for connecting with the community at the end of this page.

Inlets can tunnel either HTTP or TCP traffic:

  • HTTP (L7) tunnels can be used to connect one or more HTTP endpoints from one network to another. A single tunnel can expose multiple websites or hosts, including LoadBalancing and multiple clients to one server.
  • TCP (L4) tunnels can be used to connect TCP services such as a database, a reverse proxy, RDP, Kubernetes or SSH to the Internet. A single tunnel can expose multiple ports on an exit-server and load balance between clients
"},{"location":"#downloading-inlets","title":"Downloading inlets","text":"

inlets is available for Windows, MacOS (including M1) and Linux (including ARM):

  • Download a release

You can also use the container image from ghcr.io: ghcr.io/inlets/inlets-pro:latest

"},{"location":"#your-first-https-tunnel-with-an-automated-tunnel-server-intermediate","title":"Your first HTTPS tunnel with an automated tunnel server (Intermediate)","text":"

Expose one or more HTTPS domains from your local machine.

  • Tutorial: Expose one or more local HTTP services via HTTPS
"},{"location":"#running-a-http-tunnel-server-manually-advanced","title":"Running a HTTP tunnel server manually (Advanced)","text":"

If you don't want to use automation tools to create a server for the inlets-pro server, then you can follow this manual guide to generate and install a systemd service instead.

  • Tutorial: Setting up a HTTP tunnel server manually
"},{"location":"#tunnelling-tcp-services","title":"Tunnelling TCP services","text":"

inlets is not limited to HTTP connections, you can also tunnel TCP protocols like RDP, VNC, SSH, TLS and databases.

  • Tutorial: Expose a private SSH server over a TCP tunnel
  • Tutorial: Tunnel a private Postgresql database
  • Tutorial: Tunnel ports 80 and 443 over TCP for a reverse proxy
"},{"location":"#running-multiple-tunnel-servers-on-the-same-host-advanced","title":"Running multiple tunnel servers on the same host (Advanced)","text":"

If you want to mix HTTP and TCP tunnels on the same tunnel server, you could either only use TCP ports, or enable both.

  • Advanced: Setting up dual TCP and HTTPS tunnels

If you're looking to scale inlets to host many tunnels, then Kubernetes is probably a better option.

"},{"location":"#local-port-forwarding-intermediate","title":"Local port forwarding (Intermediate)","text":"
  • Case-study: Reliable local port-forwarding from Kubernetes
"},{"location":"#connecting-with-kubernetes","title":"Connecting with Kubernetes","text":"

You may have an on-premises Kubernetes cluster that needs ingress. Perhaps you have a homelab, or Raspberry Pi cluster, that you want to self host services on.

  • Tutorial: Expose a local IngressController with the inlets-operator
  • Tutorial: Expose Kubernetes services in short-lived clusters with helm

Some teams want to have dev work like production, with tools like Istio working locally just like in the cloud.

  • Tutorial: Expose an Istio gateway with the inlets-operator

  • Tutorial: Access the Kubernetes API server from anywhere like a managed service

See also: helm charts

"},{"location":"#becoming-a-tunnel-provider-or-operating-a-hosting-service","title":"Becoming a tunnel provider or operating a hosting service","text":"

The Inlets Uplink distribution is a Kubernetes operator that makes it quick and easy to onboard hundreds or thousands of customers, each with their own dedicated tunnel. It can also be used for remote management and command and control of IT systems and IoT devices.

Learn more: Inlets Uplink

"},{"location":"#monitoring-and-metrics","title":"Monitoring and metrics","text":"

Inlets offers you multiple options to monitor your tunnels and get insight in their performance. Find out tunnel statistics, uptime and connected clients with the inlets-pro status command or collect the Prometheus metrics from the monitoring endpoint.

  • Monitoring and metrics
"},{"location":"#reference-documentation","title":"Reference documentation","text":""},{"location":"#inletsctl","title":"inletsctl","text":"

Learn how to use inletsctl to provision tunnel servers on various public clouds.

  • inletsctl reference
"},{"location":"#inlets-operator","title":"inlets-operator","text":"

Learn how to set up the inlets-operator for Kubernetes, which provisions public cloud VMs and gives IP addresses to your public LoadBalancers.

  • inlets-operator reference
"},{"location":"#other-resources","title":"Other resources","text":"

For news, use-cases and guides check out the blog:

  • Official Inlets blog

Watch a video, or read a blog post from the community:

  • Community tutorials

Open Source tools for managing inlets tunnels:

  • Inlets Operator for Kubernetes LoadBalancers
  • inletsctl to provision tunnel servers
  • inlets helm charts for clients and servers
"},{"location":"#connecting-with-the-inlets-community","title":"Connecting with the inlets community","text":"

Who built inlets? Inlets \u00ae is a commercial solution developed and supported by OpenFaaS Ltd.

You can also contact the team via the contact page.

The code for this website is open source and available on GitHub

inlets is proud to be featured on the Cloud Native Landscape in the Service Proxy category.

"},{"location":"reference/","title":"Reference documentation","text":""},{"location":"reference/#inletsctl","title":"inletsctl","text":"

Learn how to use inletsctl to provision tunnel servers on various public clouds.

  • inletsctl reference
"},{"location":"reference/#inlets-operator","title":"inlets-operator","text":"

Learn how to set up the inlets-operator for Kubernetes, which provisions public cloud VMs and gives IP addresses to your public LoadBalancers.

  • inlets-operator reference
"},{"location":"reference/#github-repositories","title":"GitHub repositories","text":"
  • inlets-pro
  • inlets-operator
  • inletsctl
  • inlets helm charts
"},{"location":"reference/faq/","title":"Inlets FAQ","text":"

Inlets concepts and Frequently Asked Questions (FAQ)

"},{"location":"reference/faq/#why-did-we-build-inlets","title":"Why did we build inlets?","text":"

We built inlets to make it easy to expose a local service on the Internet and to overcome limitations with SaaS tunnels and VPNs.

  • It was built to overcome limitations in SaaS tunnels - such as lack of privacy, control and rate-limits
  • It doesn't just integrate with containers and Kubernetes, it was purpose-built to run in them
  • It's easy to run on Windows, Linux and MacOS with a self-contained binary
  • It doesn't need to run as root, doesn't depend on iptables, doesn't need a tun device or NET_ADMIN capability

There are many different networking tools available such as VPNs and SaaS tunnels - each with its own set of pros and cons, and use-cases. It's very likely that you will use several tools together to get the best out of each of them.

"},{"location":"reference/faq/#how-does-inlets-compare-to-other-tools-and-solutions","title":"How does inlets compare to other tools and solutions?","text":"

Are you curious about the advantages of using inlets vs. alternatives? We must first ask, advantages vs. what other tool or service.

SaaS tunnels provide a convenient way to expose services for the purposes of development, however they are often:

  • blocked by corporate IT
  • running on shared infrastructure (servers) with other customers
  • subject to stringent rate-limits that affect productivity
  • priced per subdomain
  • unable to obtain high value TCP ports like 22, 80, 443 and so on

You run inlets on your own servers, so you do not run into those restrictions. Your data remains your own and is kept private.

When compared to VPNs such as Wireguard, Tailscale and OpenVPN, we have to ask what the use-case is.

A traditional VPN is built to connect hosts and entire IP ranges together. This can potentially expose a large number of machines and users to each other and requires complex Access Control Lists or authorization rules. If this is your use-case, a traditional VPN is probably the right tool for the job.

Inlets is designed to connect or expose services between networks - either HTTP or TCP.

For example:

  • Receiving webhooks to a local application
  • Sharing a blog post draft with a colleague or client
  • Providing remote access to your homelab when away from home
  • Self-hosting websites or services on Kubernetes clusters
  • Getting working LoadBalancers with public IPs for local Kubernetes clusters

You can also use inlets to replace Direct Connect or a VPN when you just need to connect a number of services privately and not an entire network range.

Many of the inlets community use a VPN alongside inlets, because they are different tools for different use-cases.

We often write about use-cases for public and private inlets tunnels on the blog.

"},{"location":"reference/faq/#whats-the-difference-between-inlets-inletsctl-and-inlets-operator","title":"What's the difference between inlets, inletsctl and inlets-operator?","text":"

inlets-pro aka \"inlets\" is the command-line tool that contains both the client and server required to set up HTTP and TCP tunnels.

The inlets-pro server is usually set up on a computer with a public IP address, then the inlets-pro client is run on your own machine, or a separate computer that can reach the service or server you want to expose.

You can download inlets-pro and inletsctl with the \"curl | sh\" commands provided at the start of each tutorial, this works best on a Linux host, or with Git Bash if using Windows.

Did you know? You can also download binaries for inlets-pro and inletsctl on GitHub, for Windows users you'll want \"inlets-pro.exe\" and for MacOS, you'll want \"inlets-pro-darwin\".

For instance, on Windows machines you'll need \"inlets-pro.exe\"

See also: inlets-pro releases

inletsctl is a tool that can set up a tunnel server for you on around a dozen popular clouds. It exists to make it quicker and more convenient to set up a HTTPS or TCP tunnel to expose a local service.

It has three jobs:

  1. Create the VM for you
  2. Install the inlets-pro server in TCP or HTTPS mode (as specified) with systemd
  3. Inform you of the token and connection string

You can download the inletsctl tool with \"curl | sh\" or from the inletsctl releases page.

Find out more: inletsctl reference page

inlets-operator is a Kubernetes Operator that will create tunnel servers for you, on your chosen cloud for any LoadBalancers that you expose within a private cluster.

Find out more: inlets-operator reference page

"},{"location":"reference/faq/#what-is-the-networking-model-for-inlets","title":"What is the networking model for inlets?","text":"

Whilst some networking tools such as Bittorrent use a peer-to-peer network, inlets uses a more traditional client/server model.

One or more client tunnels connect to a tunnel server and advertise which services they are able to provide. Then, whenever the server receives traffic for one of those advertised services, it will forward it through the tunnel to the client. The tunnel client will then forward that on to the service it advertised.

The tunnel server may also be referred to as an \"exit\" server because it is the connection point for the client to another network or the Internet.

If you install and run the inlets server on a computer, it can be referred to as a tunnel server or exit server. These servers can also be automated through cloud-init, terraform, or tools maintained by the inlets community such as inletsctl.

Pictured: the website http://127.0.0.1:3000 is exposed through an encrypted tunnel to users at: https://example.com

For remote forwarding, the client tends to be run within a private network, with an --upstream flag used to specify where incoming traffic needs to be routed. The tunnel server can then be run on an Internet-facing network, or any other network reachable by the client.

"},{"location":"reference/faq/#what-kind-of-layers-and-protocols-are-supported","title":"What kind of layers and protocols are supported?","text":"

Inlets works at a higher level than traditional VPNs because it is designed to connect services together, rather than hosts directly.

  • HTTP - Layer 7 of the OSI model, used for web traffic such as websites and RESTful APIs
  • TCP - Layer 4 of the OSI model, used for TCP traffic like SSH, TLS, databases, RDP, etc

Because VPNs are designed to connect hosts together over a shared IP space, they also involve tedious IP address management and allocation.

Inlets connects services, so for TCP traffic, you need only think about TCP ports.

For HTTP traffic, you need only to think about domain names.

"},{"location":"reference/faq/#do-i-want-a-tcp-or-https-tunnel","title":"Do I want a TCP or HTTPS tunnel?","text":"

If you're exposing websites, blogs, docs, APIs and webhooks, you should use a HTTPS tunnel.

For HTTP tunnels, Rate Error and Duration (RED) metrics are collected for any service you expose, even if it doesn't have its own instrumentation support.

For anything that doesn't fit into that model, a TCP tunnel may be a better option.

Common examples are: RDP, VNC, SSH, TLS, database protocols, legacy medical protocols such as DICOM.

TCP tunnels can also be used to forward traffic to a reverse proxy like Nginx, Caddy, or Traefik, sitting behind a firewall or NAT by forwarding port 80 and 443.

TCP traffic is forwarded directly between the two hosts without any decryption of bytes. The active connection count and frequency can be monitored along with the amount of throughput.

"},{"location":"reference/faq/#does-inlets-use-tcp-or-udp","title":"Does inlets use TCP or UDP?","text":"

Inlets uses a websocket over TCP, so that it can penetrate HTTP proxies, captive portals, firewalls, and other kinds of NAT. As long as the client can make an outbound connection, a tunnel can be established. The use of HTTPS means that inlets will have similar latency and throughput to a HTTPS server or SSH tunnel.

Once you have an inlets tunnel established, you can use it to tunnel traffic to TCP and HTTPS sockets within the private network of the client.

Most VPNs tend to use UDP for communication due to its low overhead which results in lower latency and higher throughput. Certain tools and products such as OpenVPN, SSH and Tailscale can be configured to emulate a TCP stack over a TCP connection; this can lead to unexpected issues.

Inlets connections send data, rather than emulating a TCP over TCP stack, so doesn't suffer from this problem.

"},{"location":"reference/faq/#are-both-remote-and-local-forwarding-supported","title":"Are both remote and local forwarding supported?","text":"

Remote forwarding is where a local service is forwarded from the client's network to the inlets tunnel server.

Remote forwarding pushes a local endpoint to a remote host for access on another network

This is the most common use-case and would be used to expose a local HTTP server to the public Internet via a tunnel server.

Local forwarding is used to forward a service on the tunnel server or tunnel server's network back to the client, so that it can be accessed using a port on localhost.

Local forwarding brings a remote service back to localhost for accessing

An example would be that you have a webserver and MySQL database. The HTTP server is public and can access the database via its own loopback adapter, but the Internet cannot. So how do you access that MySQL database from CI, or from your local machine? Connect a client with local forwarding, and bring the MySQL port back to your local machine or CI runner, and then use the MySQL CLI to access it.

A developer at the UK Government uses inlets to forward a NATS message queue from a staging environment to his local machine for testing. Learn more

"},{"location":"reference/faq/#whats-the-difference-between-the-data-plane-and-control-plane","title":"What's the difference between the data plane and control plane?","text":"

The data plane is any service or port that carries traffic from the tunnel server to the tunnel client, and your private TCP or HTTP services. It can be exposed on all interfaces, or only bound to loopback for private access, in a similar way to a VPN.

If you were exposing SSH on an internal machine from port 2222, your data-plane may be exposed on port 2222

The control-plane is a TLS-encrypted, authenticated websocket that is used to connect clients to servers. All traffic ultimately passes over the control-plane's link, so remains encrypted and private.

Your control-plane's port is usually 8123 when used directly, or 443 when used behind a reverse proxy or Kubernetes Ingress Controller.

An example from the article: The Simple Way To Connect Existing Apps to Public Cloud

A legacy MSSQL server runs on Windows Server behind the firewall in a private datacenter. Your organisation cannot risk migrating it to an AWS EC2 instance at this time, but can move the microservice that needs to access it.

The inlets tunnel allows for the MSSQL service to be tunneled privately to the EC2 instance's local network for accessing, but is not exposed on the Internet. All traffic is encrypted over the wire due to the TLS connection of inlets.

Hybrid Cloud in action using an inlets tunnel to access the on-premises database

This concept is referred to as a \"split plane\" because the control plane is available to public clients on all adapters, and the data plane is only available on local or private adapters on the server.

"},{"location":"reference/faq/#is-there-a-reference-guide-to-the-cli","title":"Is there a reference guide to the CLI?","text":"

The inlets-pro binary has built-in help commands and examples, just run inlets-pro tcp/http client/server --help.

A separate CLI reference guide is also available here: inlets-pro CLI reference

"},{"location":"reference/faq/#is-inlets-secure","title":"Is inlets secure?","text":"

All traffic sent over an inlets tunnel is encapsulated in a TLS-encrypted websocket, which prevents eavesdropping. This is technically similar to HTTPS, but you'll see a URL of wss:// instead.

The tunnel client is authenticated using an API token which is generated by the tunnel administrator, or by automated tooling.

Additional authentication mechanisms can be set up using a reverse proxy such as Nginx.

"},{"location":"reference/faq/#do-i-have-to-expose-services-on-the-internet-to-use-inlets","title":"Do I have to expose services on the Internet to use inlets?","text":"

No, inlets can be used to tunnel one or more services to another network without exposing them on the Internet.

The --data-addr 127.0.0.1: flag for inlets servers binds the data plane to the server's loopback address, meaning that only other processes running on it can access the tunneled services. You could also use a private network adapter or VPC IP address in the --data-addr flag.

"},{"location":"reference/faq/#how-do-i-monitor-inlets","title":"How do I monitor inlets?","text":"

See the following blog post for details on the inlets status command and the various Prometheus metrics that are made available.

Measure and monitor your inlets tunnels

"},{"location":"reference/faq/#how-do-you-scale-inlets","title":"How do you scale inlets?","text":"

Inlets HTTP servers can support a high number of clients, either for load-balancing the same internal service to a number of clients, or for a number of distinct endpoints.

Tunnel servers are easy to scale through the use of containers, and can benefit from the resilience that a Kubernetes cluster can bring:

See also: How we scaled inlets to thousands of tunnels with Kubernetes

"},{"location":"reference/faq/#does-inlets-support-high-availability-ha","title":"Does inlets support High Availability (HA)?","text":"

For the inlets client, it is possible to connect multiple inlets tunnel clients for the same service, such as a company blog. Traffic will be distributed across the clients and if one of those clients goes down or crashes, the other will continue to serve requests.

For the inlets tunnel server, the easiest option is to run the server in a supervisor that can restart the tunnel service quickly or allow it to run more than one replica. Systemd can be used to restart tunnel servers should they run into issues, likewise you can run the server in a container, or as a Kubernetes Pod.

HA example with an AWS ELB

For example, you may place a cloud load-balancer in front of the data-plane port of two inlets server processes. Requests to the stable load-balancer IP address will be distributed between the two virtual machines and their respective inlets server tunnel processes.

"},{"location":"reference/faq/#is-ipv6-supported","title":"Is IPv6 supported?","text":"

Yes, see also: How to serve traffic to IPv6 users with inlets

"},{"location":"reference/faq/#what-if-the-websocket-disconnects","title":"What if the websocket disconnects?","text":"

The client will reconnect automatically and can be configured with systemd or a Windows service to stay running in the background. See also inlets pro tcp/http server/client --generate=systemd for generating systemd unit files.

When used in combination with a Kubernetes ingress controller or reverse proxy of your own, then the websocket may timeout. These timeout settings can usually be configured to remove any potential issue.

Monitoring in inlets allows for you to monitor the reliability of your clients and servers, which are often running in distinct networks.

"},{"location":"reference/faq/#how-much-does-inlets-cost","title":"How much does inlets cost?","text":"

Monthly and annual subscriptions are available via Gumroad.

You can also purchase a static license for offline or air-gapped environments.

For more, see the Pricing page

"},{"location":"reference/faq/#what-happens-when-the-license-expires","title":"What happens when the license expires?","text":"

If you're using a Gumroad license, and keep your billing relationship active, then the software will work for as long as you keep paying. The Gumroad license server needs to be reachable by the inlets client.

If you're using a static license, then the software will continue to run, even after your license has expired, unless you restart the software. You can either rotate the token on your inlets clients in an automated or manual fashion, or purchase a token for a longer period of time up front.

"},{"location":"reference/faq/#can-i-get-professional-help","title":"Can I get professional help?","text":"

Inlets is designed to be self-service and is well documented, but perhaps you could use some direction?

Business licenses come with support via email, however you are welcome to contact OpenFaaS Ltd to ask about a consulting project.

"},{"location":"reference/inlets-operator/","title":"inlets-operator reference documentation","text":"

The inlets/inlets-operator brings LoadBalancers with public IP addresses to your local Kubernetes clusters.

It works by creating VMs and running an inlets Pro tunnel server for you, the VM's public IP is then attached to the cluster and an inlets client Pod runs for you.

You can install the inlets-operator using a single command with arkade or with helm. arkade is an open-source Kubernetes marketplace and easier to use.

For each provider, the minimum requirements tend to be:

  • An access token - for the operator to create VMs for inlets Pro servers
  • A region - where to create the VMs

You can subscribe to inlets for personal or commercial use via Gumroad

"},{"location":"reference/inlets-operator/#install-using-arkade","title":"Install using arkade","text":"
arkade install inlets-operator \\\n--provider $PROVIDER \\ # Name of the cloud provider to provision the exit-node on.\n--region $REGION \\ # Used with cloud providers that require a region.\n--zone $ZONE \\ # Used with cloud providers that require zone (e.g. gce).\n--token-file $HOME/Downloads/key.json \\ # Token file/Service Account Key file with the access to the cloud provider.\n--license-file $HOME/.inlets/LICENSE\n
"},{"location":"reference/inlets-operator/#install-using-helm","title":"Install using helm","text":"

Checkout the inlets-operator helm chart README to know more about the values that can be passed to --set and to see provider specific example commands.

# Create a secret to store the service account key file\nkubectl create secret generic inlets-access-key \\\n--from-file=inlets-access-key=key.json\n\n# Add and update the inlets-operator helm repo\nhelm repo add inlets https://inlets.github.io/inlets-operator/\n\n# Create a namespace for inlets-operator\nkubectl create namespace inlets\n\n# Create a secret to store the inlets-pro license\nkubectl create secret generic -n inlets \\\ninlets-license --from-file license=$HOME/.inlets/LICENSE\n\n# Update the local repository\nhelm repo update\n\n# Install inlets-operator with the required fields\nhelm upgrade inlets-operator --install inlets/inlets-operator \\\n--set provider=$PROJECTID,zone=$ZONE,region=$REGION \\\n--set projectID=$PROJECTID \\\n--set inletsProLicense=$LICENSE\n

View the code and chart on GitHub: inlets/inlets-operator

"},{"location":"reference/inlets-operator/#instructions-per-cloud","title":"Instructions per cloud","text":""},{"location":"reference/inlets-operator/#create-tunnel-servers-on-digitalocean","title":"Create tunnel servers on DigitalOcean","text":"

Install with inlets Pro on DigitalOcean.

Assuming you have created an API key and saved it to $HOME/Downloads/do-access-token, run:

arkade install inlets-operator \\\n--provider digitalocean \\\n--region lon1 \\\n--token-file $HOME/Downloads/do-access-token \\\n--license-file $HOME/.inlets/LICENSE\n
"},{"location":"reference/inlets-operator/#create-tunnel-servers-on-aws-ec2","title":"Create tunnel servers on AWS EC2","text":"

Instructions for AWS EC2

To use the instructions below you must have the AWS CLI configured with sufficient permissions to create users and roles.

  • Create a AWS IAM Policy with the following:

Create a file named policy.json with the following content

{\n\"Version\": \"2012-10-17\",\n\"Statement\": [\n{\n\"Effect\": \"Allow\",\n\"Action\": [\n\"ec2:AuthorizeSecurityGroupIngress\",\n\"ec2:DescribeInstances\",\n\"ec2:DescribeImages\",\n\"ec2:TerminateInstances\",\n\"ec2:CreateSecurityGroup\",\n\"ec2:CreateTags\",\n\"ec2:DeleteSecurityGroup\",\n\"ec2:RunInstances\",\n\"ec2:DescribeInstanceStatus\"\n],\n\"Resource\": [\"*\"]\n}\n]\n}\n

Create the policy in AWS

aws iam create-policy --policy-name inlets-automation --policy-document file://policy.json\n
  • Create an IAM user
aws iam create-user --user-name inlets-automation\n
  • Add the Policy to the IAM user

We need to use the policy arn generated above, it should have been printed to the console on success. It also follows the format below.

export AWS_ACCOUNT_NUMBER=\"Your AWS Account Number\"\naws iam attach-user-policy --user-name inlets-automation --policy-arn arn:aws:iam::${AWS_ACCOUNT_NUMBER}:policy/inlets-automation\n
  • Generate an access key for your IAM User

The below commands will create a set of credentials and save them into files for use later on.

we are using jq here. It can be installed using the link provided. Alternatively you can print ACCESS_KEY_JSON and create the files manually.

ACCESS_KEY_JSON=$(aws iam create-access-key --user-name inlets-automation)\necho $ACCESS_KEY_JSON | jq -r .AccessKey.AccessKeyId > access-key\necho $ACCESS_KEY_JSON | jq -r .AccessKey.SecretAccessKey > secret-access-key\n

Install with inlets Pro:

arkade install inlets-operator \\\n--provider ec2 \\\n--region eu-west-1 \\\n--token-file $HOME/Downloads/access-key \\\n--secret-key-file $HOME/Downloads/secret-access-key \\\n--license-file $HOME/.inlets/LICENSE\n
"},{"location":"reference/inlets-operator/#create-tunnel-servers-on-google-compute-engine-gce","title":"Create tunnel servers on Google Compute Engine (GCE)","text":"

Instructions for Google Cloud

It is assumed that you have gcloud installed and configured on your machine. If not, then follow the instructions here

To get your service account key file, follow the steps below:

# Get current projectID\nexport PROJECTID=$(gcloud config get-value core/project 2>/dev/null)\n\n# Create a service account\ngcloud iam service-accounts create inlets \\\n--description \"inlets-operator service account\" \\\n--display-name \"inlets\"\n\n# Get service account email\nexport SERVICEACCOUNT=$(gcloud iam service-accounts list | grep inlets | awk '{print $2}')\n\n# Assign appropriate roles to inlets service account\ngcloud projects add-iam-policy-binding $PROJECTID \\\n--member serviceAccount:$SERVICEACCOUNT \\\n--role roles/compute.admin\n\ngcloud projects add-iam-policy-binding $PROJECTID \\\n--member serviceAccount:$SERVICEACCOUNT \\\n--role roles/iam.serviceAccountUser\n\n# Create inlets service account key file\ngcloud iam service-accounts keys create key.json \\\n--iam-account $SERVICEACCOUNT\n

Install the operator:

arkade install inlets-operator \\\n--provider gce \\\n--project-id $PROJECTID \\\n--zone us-central1-a \\\n--token-file key.json \\\n--license-file $HOME/.inlets/LICENSE\n
"},{"location":"reference/inlets-operator/#create-tunnel-servers-on-azure","title":"Create tunnel servers on Azure","text":"

Instructions for Azure

Prerequisites:

  • You will need az. See Install the Azure CLI
  • You'll need to have run az login also

Generate Azure authentication file:

SUBSCRIPTION_ID=\"YOUR_SUBSCRIPTION_ID\"\naz ad sp create-for-rbac --role Contributor --scopes \"/subscriptions/$SUBSCRIPTION_ID\" --sdk-auth \\\n> $HOME/Downloads/client_credentials.json\n

Find your region code with:

az account list-locations -o table\n\nDisplayName               Name                 RegionalDisplayName\n------------------------  -------------------  -------------------------------------\nUnited Kingdom            ukwest               United Kingdom\n

Install using helm:

export SUBSCRIPTION_ID=\"YOUR_SUBSCRIPTION_ID\"\nexport AZURE_REGION=\"ukwest\"\nexport INLETS_LICENSE=\"$(cat ~/.inlets/LICENSE)\"\nexport ACCESS_KEY=\"$HOME/Downloads/client_credentials.json\"\n\nkubectl create secret generic inlets-access-key \\\n--from-file=inlets-access-key=$ACCESS_KEY\n\nhelm repo add inlets https://inlets.github.io/inlets-operator/\nhelm repo update\n\nhelm upgrade inlets-operator --install inlets/inlets-operator \\\n--set provider=azure,region=$AZURE_REGION \\\n--set subscriptionID=$SUBSCRIPTION_ID\n
"},{"location":"reference/inlets-operator/#create-tunnel-servers-on-linode","title":"Create tunnel servers on Linode","text":"

Instructions for Linode

Install using helm:

# Create a secret to store the service account key file\nkubectl create secret generic inlets-access-key --from-literal inlets-access-key=<Linode API Access Key>\n\n# Add and update the inlets-operator helm repo\nhelm repo add inlets https://inlets.github.io/inlets-operator/\n\nhelm repo update\n\n# Install inlets-operator with the required fields\nhelm upgrade inlets-operator --install inlets/inlets-operator \\\n--set provider=linode \\\n--set region=us-east\n

You can also install the inlets-operator with a single command using arkade; arkade runs against any Kubernetes cluster.

Install with inlets Pro:

arkade install inlets-operator \\\n--provider linode \\\n--region us-east \\\n--access-key $LINODE_ACCESS_KEY \\\n--license-file $HOME/.inlets/LICENSE\n
"},{"location":"reference/inletsctl/","title":"inletsctl reference documentation","text":"

inletsctl is an automation tool for inlets/-pro.

Features:

  • create / delete cloud VMs with inlets/-pro pre-installed via systemd
  • download [--pro] - download the inlets/-pro binaries to your local computer
  • kfwd - forward services from a Kubernetes cluster to your local machine using inlets/-pro

View the code on GitHub: inlets/inletsctl

"},{"location":"reference/inletsctl/#install-inletsctl","title":"Install inletsctl","text":"

You can install inletsctl using its installer, or from the GitHub releases page

# Install to local directory (and for Windows users)\ncurl -sLSf https://inletsctl.inlets.dev | sh\n\n# Install directly to /usr/local/bin/\ncurl -sLSf https://inletsctl.inlets.dev | sudo sh\n

Windows users are encouraged to use git bash to install the inletsctl binary.

"},{"location":"reference/inletsctl/#downloading-inlets-pro","title":"Downloading inlets-pro","text":"

The inletsctl download command can be used to download the inlets/-pro binaries.

Example usage:

# Download the latest inlets-pro binary\ninletsctl download\n\n# Download a specific version of inlets-pro\ninletsctl download --version 0.8.5\n
"},{"location":"reference/inletsctl/#the-create-command","title":"The create command","text":""},{"location":"reference/inletsctl/#create-a-https-tunnel-with-a-custom-domain","title":"Create a HTTPS tunnel with a custom domain","text":"

This example uses DigitalOcean to create a cloud VM and then exposes a local service via the newly created exit-server.

Let's say we want to expose a Grafana server on our internal network to the Internet via Let's Encrypt and HTTPS.

export DOMAIN=\"grafana.example.com\"\n\ninletsctl create \\\n--provider digitalocean \\\n--region=\"lon1\" \\\n--access-token-file $HOME/do-access-token \\\n--letsencrypt-domain $DOMAIN \\\n--letsencrypt-email webmaster@$DOMAIN \\\n--letsencrypt-issuer prod\n

You can also use --letsencrypt-issuer with the staging value whilst testing since Let's Encrypt rate-limits how many certificates you can obtain within a week.

Create a DNS A record for the IP address so that grafana.example.com for instance resolves to that IP. For instance you could run:

doctl compute domain create \\\n--ip-address 46.101.60.161 grafana.example.com\n

Now run the command that you were given, and if you wish, change the upstream to point to the domain explicitly:

# Obtain a license at https://inlets.dev\n# Store it at $HOME/.inlets/LICENSE or use --help for more options\n\n# Where to route traffic from the inlets server\nexport UPSTREAM=\"grafana.example.com=http://192.168.0.100:3000\"\n\ninlets-pro http client --url \"wss://46.101.60.161:8123\" \\\n--token \"lRdRELPrkhA0kxwY0eWoaviWvOoYG0tj212d7Ff0zEVgpnAfh5WjygUVVcZ8xJRJ\" \\\n--upstream $UPSTREAM\n\nTo delete:\n  inletsctl delete --provider digitalocean --id \"248562460\"\n

You can also specify more than one domain and upstream for the same tunnel, so you could expose OpenFaaS and Grafana separately for instance.

Update the inletsctl create command with multiple domains such as: --letsencrypt-domain openfaas.example.com --letsencrypt-domain grafana.example.com

Then for the inlets-pro client command, update the upstream in the same way by repeating the flag once per upstream mapping: --upstream openfaas.example.com=http://127.0.0.1:8080 --upstream grafana.example.com=http://192.168.0.100:3000.

Note that in previous inlets versions, multiple upstream values were given in a single flag, separated by commas; this has now been deprecated in favour of the above syntax.

"},{"location":"reference/inletsctl/#create-a-http-tunnel","title":"Create a HTTP tunnel","text":"

This example uses Linode to create a cloud VM and then exposes a local service via the newly created exit-server.

export REGION=\"eu-west\"\n\ninletsctl create \\\n--provider linode \\\n--region=\"$REGION\" \\\n--access-token-file $HOME/do-access-token\n

You'll see the host being provisioned, it usually takes just a few seconds:

Using provider: linode\nRequesting host: peaceful-lewin8 in eu-west, from linode\n2021/06/01 15:56:03 Provisioning host with Linode\nHost: 248561704, status: \n[1/500] Host: 248561704, status: new\n...\n[11/500] Host: 248561704, status: active\n\ninlets Pro (0.7.0) exit-server summary:\n  IP: 188.166.168.90\n  Auth-token: dZTkeCNYgrTPvFGLifyVYW6mlP78ny3jhyKM1apDL5XjmHMLYY6MsX8S2aUoj8uI\n

Now run the command given to you, changing the --upstream URL to match a local URL such as http://localhost:3000

# Obtain a license at https://inlets.dev\nexport LICENSE=\"$HOME/.inlets/license\"\n\n# Give a single value or comma-separated\nexport PORTS=\"3000\"\n\n# Where to route traffic from the inlets server\nexport UPSTREAM=\"localhost\"\n\ninlets-pro tcp client --url \"wss://188.166.168.90:8123/connect\" \\\n--token \"dZTkeCNYgrTPvFGLifyVYW6mlP78ny3jhyKM1apDL5XjmHMLYY6MsX8S2aUoj8uI\" \\\n--upstream $UPSTREAM \\\n--ports $PORTS\n

The client will look for your license in $HOME/.inlets/LICENSE, but you can also use the --license/--license-file flag if you wish.

You can then access your local website via the Internet and the exit-server's IP at:

http://188.166.168.90

When you're done, you can delete the host using its ID or IP address:

  inletsctl delete --provider linode --id \"248561704\"\ninletsctl delete --provider linode --ip \"188.166.168.90\"\n
"},{"location":"reference/inletsctl/#create-a-tunnel-for-a-tcp-service","title":"Create a tunnel for a TCP service","text":"

This example is similar to the previous one, but also adds link-level encryption between your local service and the exit-server.

In addition, you can also expose pure TCP traffic such as SSH or Postgresql.

inletsctl create \\\n--provider digitalocean \\\n--access-token-file $HOME/do-access-token \\\n--pro\n

Note the output:

inlets Pro (0.7.0) exit-server summary:\n  IP: 142.93.34.79\n  Auth-token: TUSQ3Dkr9QR1VdHM7go9cnTUouoJ7HVSdiLq49JVzY5MALaJUnlhSa8kimlLwBWb\n\nCommand:\n  export LICENSE=\"\"\nexport PORTS=\"8000\"\nexport UPSTREAM=\"localhost\"\n\ninlets-pro tcp client --url \"wss://142.93.34.79:8123/connect\" \\\n--token \"TUSQ3Dkr9QR1VdHM7go9cnTUouoJ7HVSdiLq49JVzY5MALaJUnlhSa8kimlLwBWb\" \\\n--license \"$LICENSE\" \\\n--upstream $UPSTREAM \\\n--ports $PORTS\n\nTo Delete:\n          inletsctl delete --provider digitalocean --id \"205463570\"\n

Run a local service that uses TCP such as MariaDB:

head -c 16 /dev/urandom |shasum \n8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\n\nexport PASSWORD=\"8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\"\n\ndocker run --name mariadb \\\n-p 3306:3306 \\\n-e MYSQL_ROOT_PASSWORD=8cb3efe58df984d3ab89bcf4566b31b49b2b79b9 \\\n-ti mariadb:latest\n

Connect to the tunnel updating the ports to 3306

export LICENSE=\"$(cat ~/LICENSE)\"\nexport PORTS=\"3306\"\nexport UPSTREAM=\"localhost\"\n\ninlets-pro tcp client --url \"wss://142.93.34.79:8123/connect\" \\\n--token \"TUSQ3Dkr9QR1VdHM7go9cnTUouoJ7HVSdiLq49JVzY5MALaJUnlhSa8kimlLwBWb\" \\\n--license \"$LICENSE\" \\\n--upstream $UPSTREAM \\\n--ports $PORTS\n

Now connect to your MariaDB instance from its public IP address:

export PASSWORD=\"8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\"\nexport EXIT_IP=\"142.93.34.79\"\n\ndocker run -it --rm mariadb:latest mysql -h $EXIT_IP -P 3306 -uroot -p$PASSWORD\n\nWelcome to the MariaDB monitor.  Commands end with ; or \\g.\nYour MariaDB connection id is 3\nServer version: 10.5.5-MariaDB-1:10.5.5+maria~focal mariadb.org binary distribution\n\nCopyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.\n\nType 'help;' or '\\h' for help. Type '\\c' to clear the current input statement.\n\nMariaDB [(none)]> create database test; \nQuery OK, 1 row affected (0.039 sec)\n
"},{"location":"reference/inletsctl/#examples-for-specific-cloud-providers","title":"Examples for specific cloud providers","text":""},{"location":"reference/inletsctl/#example-usage-with-aws-ec2","title":"Example usage with AWS EC2","text":"

To use the instructions below you must have the AWS CLI configured with sufficient permissions to create users and roles.

  • Create a AWS IAM Policy with the following:

Create a file named policy.json with the following content

{\n\"Version\": \"2012-10-17\",\n\"Statement\": [  {\n\"Effect\": \"Allow\",\n\"Action\": [\n\"ec2:AuthorizeSecurityGroupIngress\",\n\"ec2:DescribeInstances\",\n\"ec2:DescribeImages\",\n\"ec2:TerminateInstances\",\n\"ec2:CreateSecurityGroup\",\n\"ec2:CreateTags\",\n\"ec2:DeleteSecurityGroup\",\n\"ec2:RunInstances\",\n\"ec2:DescribeInstanceStatus\"\n],\n\"Resource\": [\"*\"]\n}\n]\n}\n

Create the policy in AWS

aws iam create-policy --policy-name inlets-automation --policy-document file://policy.json\n
  • Create an IAM user
aws iam create-user --user-name inlets-automation\n
  • Add the Policy to the IAM user

We need to use the policy arn generated above, it should have been printed to the console on success. It also follows the format below.

export AWS_ACCOUNT_NUMBER=\"Your AWS Account Number\"\naws iam attach-user-policy --user-name inlets-automation --policy-arn arn:aws:iam::${AWS_ACCOUNT_NUMBER}:policy/inlets-automation\n
  • Generate an access key for your IAM User

The below commands will create a set of credentials and save them into files for use later on.

We are using jq here. It can be installed using the link provided. Alternatively you can print ACCESS_KEY_JSON and create the files manually.

ACCESS_KEY_JSON=$(aws iam create-access-key --user-name inlets-automation)\necho $ACCESS_KEY_JSON | jq -r .AccessKey.AccessKeyId > access-key.txt\necho $ACCESS_KEY_JSON | jq -r .AccessKey.SecretAccessKey > secret-key.txt\n
  • Create an exit-server:
inletsctl create \\\n--provider ec2 \\\n--region eu-west-1 \\\n--access-token-file ./access-key.txt \\\n--secret-key-file ./secret-key.txt\n
  • Delete an exit-server:
export IP=\"\"\n\ninletsctl create \\\n--provider ec2 \\\n--region eu-west-1 \\\n--access-token-file ./access-key.txt \\\n--secret-key-file ./secret-key.txt \\\n--ip $IP\n
"},{"location":"reference/inletsctl/#example-usage-with-aws-ec2-temporary-credentials","title":"Example usage with AWS EC2 Temporary Credentials","text":"

To use the instructions below you must have the AWS CLI configured with sufficient permissions to create users and roles.

The following instructions use get-session-token to illustrate the concept. However, it is expected that real world usage would more likely make use of assume-role to obtain temporary credentials.

  • Create a AWS IAM Policy with the following:

Create a file named policy.json with the following content

{\n\"Version\": \"2012-10-17\",\n\"Statement\": [  {\n\"Effect\": \"Allow\",\n\"Action\": [\n\"ec2:AuthorizeSecurityGroupIngress\",\n\"ec2:DescribeInstances\",\n\"ec2:DescribeImages\",\n\"ec2:TerminateInstances\",\n\"ec2:CreateSecurityGroup\",\n\"ec2:CreateTags\",\n\"ec2:DeleteSecurityGroup\",\n\"ec2:RunInstances\",\n\"ec2:DescribeInstanceStatus\"\n],\n\"Resource\": [\"*\"]\n}\n]\n}\n
  • Create the policy in AWS
aws iam create-policy --policy-name inlets-automation --policy-document file://policy.json\n
  • Create an IAM user
aws iam create-user --user-name inlets-automation\n
  • Add the Policy to the IAM user

We need to use the policy arn generated above, it should have been printed to the console on success. It also follows the format below.

export AWS_ACCOUNT_NUMBER=\"Your AWS Account Number\"\naws iam attach-user-policy --user-name inlets-automation --policy-arn arn:aws:iam::${AWS_ACCOUNT_NUMBER}:policy/inlets-automation\n
  • Generate an access key for your IAM User

The below commands will create a set of credentials and save them into files for use later on.

We are using jq here. It can be installed using the link provided. Alternatively you can print ACCESS_KEY_JSON and create the files manually.

ACCESS_KEY_JSON=$(aws iam create-access-key --user-name inlets-automation)\nexport AWS_ACCESS_KEY_ID=$(echo $ACCESS_KEY_JSON | jq -r .AccessKey.AccessKeyId)\nexport AWS_SECRET_ACCESS_KEY=$(echo $ACCESS_KEY_JSON | jq -r .AccessKey.SecretAccessKey)\n
  • Check that calls are now being executed by the inlets-automation IAM User.
aws sts get-caller-identity\n
  • Ask STS for some temporary credentials
TEMP_CREDS=$(aws sts get-session-token)\n
  • Break out the required elements
echo $TEMP_CREDS | jq -r .Credentials.AccessKeyId > access-key.txt\necho $TEMP_CREDS | jq -r .Credentials.SecretAccessKey > secret-key.txt\necho $TEMP_CREDS | jq -r .Credentials.SessionToken > session-token.txt\n
  • Create an exit-server using temporary credentials:
inletsctl create \\\n--provider ec2 \\\n--region eu-west-1 \\\n--access-token-file ./access-key.txt \\\n--secret-key-file ./secret-key.txt \\\n--session-token-file ./session-token.txt\n
  • Delete an exit-server using temporary credentials:
export INSTANCEID=\"\"\n\ninletsctl delete \\\n--provider ec2 \\\n--id $INSTANCEID \\\n--access-token-file ./access-key.txt \\\n--secret-key-file ./secret-key.txt \\\n--session-token-file ./session-token.txt\n
"},{"location":"reference/inletsctl/#example-usage-with-google-compute-engine","title":"Example usage with Google Compute Engine","text":"

Bear in mind that standard GCE VMs are created with an ephemeral IP address, which is subject to change. In order to make your tunnel's address stable, you should Reserve a static IP address and assign it to your VM. A static IP costs around 2.88 USD / mo.

  • One time setup required for a service account key

It is assumed that you have gcloud installed and configured on your machine. If not, then follow the instructions here

# Get current projectID\nexport PROJECTID=$(gcloud config get-value core/project 2>/dev/null)\n\n# Create a service account\ngcloud iam service-accounts create inlets \\\n--description \"inlets-operator service account\" \\\n--display-name \"inlets\"\n\n# Get service account email\nexport SERVICEACCOUNT=$(gcloud iam service-accounts list | grep inlets | awk '{print $2}')\n\n# Assign appropriate roles to inlets service account\ngcloud projects add-iam-policy-binding $PROJECTID \\\n--member serviceAccount:$SERVICEACCOUNT \\\n--role roles/compute.admin\n\ngcloud projects add-iam-policy-binding $PROJECTID \\\n--member serviceAccount:$SERVICEACCOUNT \\\n--role roles/iam.serviceAccountUser\n\n# Create inlets service account key file\ngcloud iam service-accounts keys create key.json \\\n--iam-account $SERVICEACCOUNT\n
  • Create a tunnel using the service account and project ID
# Create a TCP tunnel server\ninletsctl create \\\n--provider gce \\\n--project-id=$PROJECTID \\\n--access-token-file=key.json \\\n--tcp\n\n# Create a HTTP / HTTPS tunnel server\ninletsctl create \\\n-p gce \\\n--project-id=$PROJECTID \\\n-f=key.json\n\n# Or specify any valid Google Cloud Zone optional zone, by default it get provisioned in us-central1-a\ninletsctl create -p gce \\\n--project-id=$PROJECTID \\\n-f key.json \\\n--zone=us-central1-a\n

If you need the tunnel server for any period of time, remember to Reserve a static IP address and assign it to your VM.

Then SSH into the host and make sure you update inlets to make use of it:

Edit IP= in /etc/default/inlets-pro then run sudo systemctl daemon-reload && sudo systemctl restart inlets-pro

The inlets-pro http/tcp --url wss://... flag should also be updated with the static IP.

In a future version of inletsctl, we may automate the above.

"},{"location":"reference/inletsctl/#example-usage-with-azure","title":"Example usage with Azure","text":"

Prerequisites:

  • You will need az. See Install the Azure CLI

Generate Azure auth file

SUBSCRIPTION_ID=\"YOUR_SUBSCRIPTION_ID\"\naz ad sp create-for-rbac --role Contributor --scopes \"/subscriptions/$SUBSCRIPTION_ID\" --sdk-auth \\\n> $HOME/Downloads/client_credentials.json\n

List Azure available regions

az account list-locations -o table\n

Create

inletsctl create --provider=azure --subscription-id=4d68ee0c-7079-48d2-b15c-f294f9b11a9e \\\n--region=eastus --access-token-file=~/Downloads/client_credentials.json\n

Delete

inletsctl delete --provider=azure --id inlets-clever-volhard8 \\\n--subscription-id=4d68ee0c-7079-48d2-b15c-f294f9b11a9e \\\n--region=eastus --access-token-file=~/Downloads/client_credentials.json\n

"},{"location":"reference/inletsctl/#example-usage-with-hetzner","title":"Example usage with Hetzner","text":"
# Obtain the API token from Hetzner Cloud Console.\nexport TOKEN=\"\"\n\ninletsctl create --provider hetzner \\\n--access-token $TOKEN \\\n--region hel1\n

Available regions are hel1 (Helsinki), nur1 (Nuremberg), fsn1 (Falkenstein).

"},{"location":"reference/inletsctl/#example-usage-with-linode","title":"Example usage with Linode","text":"

Prerequisites:

  • Prepare a Linode API Access Token. See Create Linode API Access token

Create

inletsctl create --provider=linode --access-token=<API Access Token> --region=us-east\n

Delete

inletsctl delete --provider=linode --access-token=<API Access Token> --id <instance id>\n

"},{"location":"reference/inletsctl/#example-usage-with-scaleway","title":"Example usage with Scaleway","text":"
# Obtain from your Scaleway dashboard:\nexport TOKEN=\"\"\nexport SECRET_KEY=\"\"\nexport ORG_ID=\"\"\n\ninletsctl create --provider scaleway \\\n--access-token $TOKEN \\\n--secret-key $SECRET_KEY --organisation-id $ORG_ID\n

The region is hard-coded to France / Paris 1.

"},{"location":"reference/inletsctl/#example-usage-with-ovhcloud","title":"Example usage with OVHcloud","text":"

You need to create API keys for the OVHcloud country/continent you're going to deploy to with inletsctl. For an overview of the available endpoints, check the supported-apis documentation

For example, for Europe, visit https://eu.api.ovh.com/createToken to create your API keys.

The specific values for the endpoint flag are the following:

  • ovh-eu for OVH Europe API
  • ovh-us for OVH US API
  • ovh-ca for OVH Canada API
  • soyoustart-eu for So you Start Europe API
  • soyoustart-ca for So you Start Canada API
  • kimsufi-eu for Kimsufi Europe API
  • kimsufi-ca for Kimsufi Canada API

ovh-eu is the default endpoint and DE1 the default region.

For the proper rights choose all HTTP Verbs (GET,PUT,DELETE, POST), and we need only the /cloud/ API.

export APPLICATION_KEY=\"\"\nexport APPLICATION_SECRET=\"\"\nexport CONSUMER_KEY=\"\"\nexport ENDPOINT=\"\"\nexport PROJECT_ID=\"\"\n\ninletsctl create --provider ovh \\\n--access-token $APPLICATION_KEY \\\n--secret-key $APPLICATION_SECRET --consumer-key $CONSUMER_KEY \\\n--project-id $PROJECT_ID \\\n--endpoint $ENDPOINT\n
"},{"location":"reference/inletsctl/#the-delete-command","title":"The delete command","text":"

The delete command takes an id or IP address which are given to you at the end of the inletsctl create command. You'll also need to specify your cloud access token.

inletsctl delete \\\n--provider digitalocean \\\n--access-token-file ~/Downloads/do-access-token \\\n--id 164857028\n

Or delete via IP:

inletsctl delete \\\n--provider digitalocean \\\n--access-token-file ~/Downloads/do-access-token \\\n--ip 209.97.131.180\n
"},{"location":"reference/inletsctl/#kfwd-kubernetes-service-forwarding","title":"kfwd - Kubernetes service forwarding","text":"

kfwd runs an inlets-pro server on your local computer, then deploys an inlets client in your Kubernetes cluster using a Pod. This enables your local computer to access services from within the cluster as if they were running on your laptop.

inlets Pro allows you to access any TCP service within the cluster, using an encrypted link:

Forward the figlet pod from openfaas-fn on port 8080:

inletsctl kfwd \\\n--pro \\\n--license $(cat ~/LICENSE) \\\n--from figlet:8080 \\\n--namespace openfaas-fn \\\n--if 192.168.0.14\n

Note the if parameter is the IP address of your local computer, this must be reachable from the Kubernetes cluster.

Then access the service via http://127.0.0.1:8080.

"},{"location":"reference/inletsctl/#troubleshooting","title":"Troubleshooting","text":"

inletsctl provisions a host called an exit node or exit server using public cloud APIs. It then prints out a connection string.

Are you unable to connect your client to the exit server?

"},{"location":"reference/inletsctl/#troubleshooting-inlets-pro","title":"Troubleshooting inlets Pro","text":"

If using auto-tls (the default), check that port 8123 is accessible. It should be serving a file with a self-signed certificate, run the following:

export IP=192.168.0.1\ncurl -k https://$IP:8123/.well-known/ca.crt\n

If you see connection refused, log in to the host over SSH and check the service via systemctl:

sudo systemctl status inlets-pro\n\n# Check its logs\nsudo journalctl -u inlets-pro\n

You can also check the configuration in /etc/default/inlets-pro, to make sure that an IP address and token are configured.

"},{"location":"reference/inletsctl/#configuration-using-environment-variables","title":"Configuration using environment variables","text":"

You may want to set an environment variable that points at your access-token-file or secret-key-file

Inlets will look for the following:

# For providers that use --access-token-file\nINLETS_ACCESS_TOKEN\n\n# For providers that use --secret-key-file\nINLETS_SECRET_KEY\n

With the correct one of these set you won't need to add the flag on every command execution.

You can set the following syntax in your bashrc (or equivalent for your shell)

export INLETS_ACCESS_TOKEN=$(cat my-token.txt)\n\n# or set the INLETS_SECRET_KEY for those providers that use this\nexport INLETS_SECRET_KEY=$(cat my-token.txt)\n
"},{"location":"tutorial/automated-http-server/","title":"Automated http server","text":""},{"location":"tutorial/automated-http-server/#automate-a-http-tunnel-server","title":"Automate a HTTP tunnel server","text":"

Learn how to serve traffic from your private network over a private tunnel server.

At the end of this tutorial, you'll have a secure TLS public endpoint using your own DNS and domain, which you can use to access your internal services or webpages.

I'll show you how to:

  • automate a tunnel server on a public cloud provider with inlets pre-loaded onto it,
  • how to connect a client from your home or private network
  • how to tunnel one or more services
  • and what else you can do

In a previous article, I explained some of the differences between SaaS and private tunnel servers.

"},{"location":"tutorial/automated-http-server/#create-your-tunnel-server","title":"Create your tunnel server","text":"

With SaaS tunnels, your tunnel server processes run on shared servers with other users. With a private tunnel server like inlets, you need to create a server somewhere on the Internet to run the tunnel. It should be created with a public IP address that you can use to accept traffic and proxy it into your private network.

Pictured: Inlets Conceptual architecture

The simplest way to do this is to use the inletsctl tool, which supports around a dozen clouds. The alternative is to set up a VPS or install inlets-pro onto a server you already have set up, and then add a systemd unit file so that it restarts if the tunnel or server should crash for any reason.

To see a list of supported clouds run:

inletsctl create --help\n

For instructions on how to create an API key or service account for each, feel free to browse the docs.

inletsctl create \\\n--region lon1 \\\n--provider digitalocean \\\n--access-token-file ~/digital-ocean-api-key.txt \\\n--letsencrypt-domain blog.example.com \\\n--letsencrypt-email webmaster@example.com\n

A VM will be created in your account using the cheapest plan available, for DigitalOcean this costs 5 USD / mo at time of writing.

You can also run your tunnel server in the free tier of GCP, Oracle Cloud or on Fly.io at no additional cost.

Once the tunnel server has been created, you will receive:

  • The IP address
  • An endpoint for the inlets client to connect to
  • A token for the inlets client to use when connecting

Take a note of these.

Now create a DNS \"A\" record for the IP address of the tunnel server on your domain control panel.

Personally, I'm a fan of Google Domains and the .dev domains, but DigitalOcean can also manage domains through their CLI:

export IP=\"\"\nexport SUBDOMAIN=\"blog.example.com\"\n\ndoctl compute domain create $SUBDOMAIN \\\n--ip-address $IP\n

How does the TLS encryption work?

The inlets server process will attempt to get a TLS certificate from Let's Encrypt using a HTTP01 Acme challenge.

What if I have multiple sites?

You can pass a number of sub-domains, for instance:

 --letsencrypt-domain blog.example.com,grafana.example.com \\\n--letsencrypt-email webmaster@example.com\n
"},{"location":"tutorial/automated-http-server/#connect-your-tunnel-client","title":"Connect your tunnel client","text":"

The tunnel client can be run as and when required, or you can generate a systemd unit file so that you can have it running in the background. You can run the tunnel on the same machine as the service that you're proxying, or you can run it on another computer. It's entirely up to you.

So you could have a Raspberry Pi which just runs Raspberry Pi OS Lite and an inlets client, and nothing else. In this way you're creating a kind of router appliance.

Let's imagine you've run a Node.js express service on your computer:

$ git clone https://github.com/alexellis/alexellis.io \\\n--depth=1\n$ cd alexellis.io/\n$ npm install\n$ npm start\n\nalexellis.io started on port: http://0.0.0.0:3000\n

inlets also has its own built-in file-server with password protection and the ability to disable browsing for sharing private links. You can expose the built-in file-server when you want to share files directly, without having to upload them first: The simple way to share files directly from your computer

You can download the inlets client using the inletsctl tool:

$ sudo inletsctl download\n

Now you can start the tunnel client and start serving a test version of my personal homepage alexellis.io:

$ export URL=\"\"\n$ export TOKEN=\"\"\n\n$ inlets-pro http client \\\n--url $URL \\\n--token $TOKEN \\\n--upstream blog.example.com=http://127.0.0.1:3000\n

What if my services are running on different computers?

If they are all within the same network, then you can run the client in one place and have it point at the various internal IP addresses.

$ inlets-pro http client \\\n--url $URL \\\n--token $TOKEN \\\n--upstream blog.example.com=http://127.0.0.1:3000 \\\n--upstream grafana.example.com=http://192.168.0.100:3000\n

If they are on different networks, you can simply run multiple clients, just change the --upstream flag on each client.

How can I run the client in the background?

For Linux hosts, you can generate a systemd unit file for inlets by using the --generate systemd flag to the client or server command.

Then simply copy the resulting file to the correct location on your system and install it:

$ export URL=\"\"\n$ export TOKEN=\"\"\n\n$ inlets-pro http client \\\n--url $URL \\\n--token $TOKEN \\\n--upstream blog.example.com=http://127.0.0.1:3000 \\\n--generate=systemd > inlets.service\n\n$ sudo cp inlets.service /etc/systemd/system/\n$ sudo systemctl enable inlets\n

You can then check the logs or service status:

$ sudo journalctl -u inlets\n$ sudo systemctl status inlets\n
"},{"location":"tutorial/automated-http-server/#access-your-website-over-the-tunnel","title":"Access your website over the tunnel","text":"

You can now access your local website being served at http://127.0.0.1:3000 over the tunnel by visiting the domain you created:

https://blog.example.com/

"},{"location":"tutorial/automated-http-server/#your-ip-goes-where-you-go","title":"Your IP goes where you go","text":"

You can close the lid on your laptop, and open it again in Starbucks or your favourite local independent coffee shop. As soon as you reconnect the client, your local server will be available over the tunnel at the same IP address and domain: https://blog.example.com/

I used this technique to test a live demo for the KubeCon conference. I then took a flight from London to San Diego and was able to receive traffic to my Raspberry Pi whilst tethering on a local SIM card.

Tethering my Raspberry Pi with K3s in San Diego

"},{"location":"tutorial/automated-http-server/#wrapping-up","title":"Wrapping up","text":"

In a very short period of time we created a private tunnel server on a public cloud of our choice, then we created a DNS record for it, and connected a client and accessed our local website.

You can get started with inlets through a monthly subscription, or save on a yearly plan.

When would you need this?

  • If you're self-hosting websites, you already have some equipment at home, so it can work out cheaper.
  • If you're running a Kubernetes cluster or K3s on a Raspberry Pi, it can be much cheaper over the course of a year.
  • But it's also incredibly convenient for sharing files and for testing APIs or OAuth flows during development.

Ben Potter at Coder is writing up a tutorial on how to access a private VSCode server from anywhere using a private tunnel. If you would like to learn more, follow @inletsdev for when it gets published.

Andrew Meier put it this way:

\"I prefer to play around with different projects without having to worry about my costs skyrocketing. I had a few Raspberry Pis and wondered if I could use them as a cluster. After a bit of searching #k3s and inlets gave me my answer\"

Andrew's K3s cluster, with inlets

Read his blog post: Personal Infrastructure with Inlets, k3s, and Pulumi

"},{"location":"tutorial/automated-http-server/#you-may-also-like","title":"You may also like","text":"
  • Tunnel a service or ingress from Kubernetes
  • Share a file without uploading it through inlets tunnels
  • Connecting my boat to the Internet with inlets
"},{"location":"tutorial/caddy-http-tunnel/","title":"Caddy http tunnel","text":""},{"location":"tutorial/caddy-http-tunnel/#custom-reverse-proxy-with-caddy","title":"Custom reverse proxy with Caddy","text":"

In this tutorial we'll set up an inlets TCP tunnel server to forward ports 80 and 443 to a reverse proxy server running on our local machine. Caddy will receive a TCP stream from the public tunnel server for ports 80 and 443. It can terminate TLS and also allow you to host multiple sites with ease.

Caddy is a free and open-source reverse proxy. It's often used on web-servers to add TLS to one or more virtual websites.

"},{"location":"tutorial/caddy-http-tunnel/#pre-reqs","title":"Pre-reqs","text":"
  • A Linux server, Windows and MacOS are also supported
  • The inlets-pro binary at /usr/local/bin/
  • Access to a DNS control plane for a domain you control

You can run through the same instructions with other reverse proxies such as Nginx, or Traefik.

Scenario: * You want to share a file such as a VM image or an ISO over the Internet, with HTTPS, directly from your laptop. * You have one or more websites or APIs running on-premises or within your home-lab and want to expose them on the Internet.

You can subscribe to inlets for personal or commercial use via Gumroad

"},{"location":"tutorial/caddy-http-tunnel/#setup-your-exit-node","title":"Setup your exit node","text":"

Provision a cloud VM on DigitalOcean or another IaaS provider using inletsctl:

inletsctl create \\\n--provider digitalocean \\\n--region lon1 \\\n--pro\n

Note the --url and TOKEN given to you in this step.

"},{"location":"tutorial/caddy-http-tunnel/#setup-your-dns-a-record","title":"Setup your DNS A record","text":"

Setup a DNS A record for the site you want to expose using the public IP of the cloud VM

  • 178.128.40.109 = service.example.com
"},{"location":"tutorial/caddy-http-tunnel/#run-a-local-server-to-share-files","title":"Run a local server to share files","text":"

Do not run this command in your home folder.

mkdir -p /tmp/shared/\ncd /tmp/shared/\n\necho \"Hello world\" > WELCOME.txt\n\n# If using Python 2.x\npython -m SimpleHTTPServer\n\n# Python 3.x\npython3 -m http.server\n

This will listen on port 8000 by default.

"},{"location":"tutorial/caddy-http-tunnel/#setup-caddy-1x","title":"Setup Caddy 1.x","text":"
  • Download the latest Caddy 1.x binary from the Releases page

Pick your operating system, for instance Darwin for MacOS, or Linux.

Download the binary, extract it and install it to /usr/local/bin:

mkdir -p /tmp/caddy\ncurl -sLSf https://github.com/caddyserver/caddy/releases/download/v1.0.4/caddy_v1.0.4_darwin_amd64.zip > caddy.tar.gz\ntar -xvf caddy.tar.gz --strip-components=0 -C /tmp/caddy\n\nsudo cp /tmp/caddy/caddy /usr/local/bin/\n
  • Create a Caddyfile

The Caddyfile configures which websites Caddy will expose, and which sites need a TLS certificate.

Replace service.example.com with your own domain.

Next, edit proxy / 127.0.0.1:8000 and change the port 8000 to the port of your local webserver, for instance 3000 or 8080. For our example, keep it as 8000.

service.example.com\n\nproxy / 127.0.0.1:8000 {\ntransparent\n}\n

Start the Caddy binary, it will listen on port 80 and 443.

sudo ./caddy\n

If you have more than one website, you can add them to the Caddyfile on new lines.

You'll need to run caddy as sudo so that it can bind to ports 80, and 443 which require additional privileges.

"},{"location":"tutorial/caddy-http-tunnel/#start-the-inlets-pro-client-on-your-local-side","title":"Start the inlets-pro client on your local side","text":"

Download the inlets Pro client:

sudo inletsctl download\n

Run the inlets-pro client, using the TOKEN and IP given to you from the previous step.

The client will look for your license in $HOME/.inlets/LICENSE, but you can also use the --license/--license-file flag if you wish.

export IP=\"\"        # take this from the exit-server\nexport TOKEN=\"\"     # take this from the exit-server\n\ninlets-pro tcp client \\\n--url wss://$IP:8123/connect \\\n--ports 80,443 \\\n--token $TOKEN \\\n--upstream localhost\n

Note that --upstream localhost will connect to Caddy running on your computer, if you are running Caddy on another machine, use its IP address here.

"},{"location":"tutorial/caddy-http-tunnel/#check-it-all-worked","title":"Check it all worked","text":"

You'll see that Caddy can now obtain a TLS certificate.

Go ahead and visit: https://service.example.com

Congratulations, you've now served a TLS certificate directly from your laptop. You can close caddy and open it again at a later date. Caddy will re-use the certificate it already obtained and it will be valid for 3 months. To renew, just keep Caddy running or open it again whenever you need it.

"},{"location":"tutorial/caddy-http-tunnel/#setup-caddy-2x","title":"Setup Caddy 2.x","text":"

For Caddy 2.x, the Caddyfile format changes.

Let's say you're running a Node.js service on port 3000, and want to expose it with TLS on the domain \"service.example.com\":

git clone https://github.com/alexellis/expressjs-k8s/\ncd expressjs-k8s\n\nnpm install\nhttp_port=3000 npm start\n

The local site will be served at http://127.0.0.1:3000

{\n  acme_ca https://acme-staging-v02.api.letsencrypt.org/directory\n}\n\nservice.example.com\n\nreverse_proxy 127.0.0.1:3000 {\n}\n

Note the acme_ca being used will receive a staging certificate, remove it to obtain a production TLS certificate.

Now download Caddy 2.x for your operating system.

sudo ./caddy run \\\n-config ./Caddyfile\n

sudo - is required to bind to port 80 and 443, although you can potentially update your OS to allow binding to low ports without root access.

You should now be able to access the Node.js website via the https://service.example.com URL.

Caddy also supports multiple domains within the same file, so that you can expose multiple internal or private websites through the same tunnel.

{\n  email \"webmaster@example.com\"\n}\n\nblog.example.com {\n  reverse_proxy 127.0.0.1:4000\n}\n\nopenfaas.example.com {\n      reverse_proxy 127.0.0.1:8080\n}\n

If you have services running on other machines you can change 127.0.0.1:8080 to a different IP address such as that of your Raspberry Pi if you had something like OpenFaaS running there.

"},{"location":"tutorial/community/","title":"Community tutorials and guides","text":"

Note: Any material not hosted on inlets.dev may be written by a third-party.

If you have a tutorial or video to submit, feel free to send a Pull Request

"},{"location":"tutorial/community/#case-studies","title":"Case studies","text":"

You can read testimonials on the main homepage

  • Connecting my boat to the Internet with inlets by Mark Sharpley
  • How Riskfuel is using Inlets to build machine learning models at scale by Addison van den Hoeven
  • Ingress to ECS Anywhere, from anywhere, using Inlets by Nathan Peck
  • Reliable local port-forwarding from Kubernetes for a Developer at UK Gov
"},{"location":"tutorial/community/#videos","title":"Videos","text":"

Webinars:

  • A tale of two networks - demos and use-cases for inlets tunnels (Mar 2021) by Alex Ellis and Johan Siebens
  • Crossing network boundaries with Kubernetes and inlets (Mar 2021) by Alex Ellis and Johan Siebens

Walk-through videos:

  • inlets-operator - Get Ingress and Public IPs for private Kubernetes (Mar 2020) by Alex Ellis
  • Inlets Operator - get a LoadBalancer from any Kubernetes cluster (Oct 2019) by Alex Ellis
  • Hacking on the Inlets Operator for Equinix Metal (Jul 2021) by Alex Ellis and David McKay
"},{"location":"tutorial/community/#tutorials","title":"Tutorials","text":"
  • A Tour of Inlets - A Tunnel Built for the Cloud (Aug 2021) by Zespre Schmidt
  • Control Access to your on-prem services with Cloud IAP and inlets Pro (Dec 2020) by Johan Siebens
  • Secure access using HashiCorp Boundary & inlets Pro Better Together (Oct 2020) by Johan Siebens
  • Quake III Arena, k3s and a Raspberry Pi (Nov 2020) by Johan Siebens
  • Argo CD for your private Raspberry Pi k3s cluster (Aug 2020) by Johan Siebens
  • Get a TLS-enabled Docker registry in 5 minutes (Feb 2020) by Alex Ellis
  • A bit of Istio before tea-time (May 2021) by Alex Ellis
  • Access your local cluster like a managed Kubernetes engine by Alex Ellis
  • Exploring Kubernetes Operator Pattern (Jan 2021) by Ivan Velichko
"},{"location":"tutorial/community/#official-blog-posts","title":"Official blog posts","text":"

See inlets.dev/blog

"},{"location":"tutorial/dual-tunnels/","title":"Dual tunnels","text":""},{"location":"tutorial/dual-tunnels/#setting-up-dual-tcp-and-https-tunnels","title":"Setting up dual TCP and HTTPS tunnels","text":"

In this tutorial we will set up a dual tunnel for exposing both HTTP and TCP services from the same server.

Whilst it's easier to automate two separate servers or cloud instances for your tunnels, you may want to reduce your costs.

The use-case may be that you have a number of OpenFaaS functions running on your Raspberry Pi which serve traffic to users, but you also want to connect via SSH and VNC.

"},{"location":"tutorial/dual-tunnels/#pre-reqs","title":"Pre-reqs","text":"
  • A Linux server, Windows and MacOS are also supported
  • The inlets-pro binary at /usr/local/bin/
  • Access to a DNS control plane for a domain you control
"},{"location":"tutorial/dual-tunnels/#create-the-https-tunnel-server-first","title":"Create the HTTPS tunnel server first","text":"

Create a HTTPS tunnel server using the manual tutorial or automated tutorial.

Once it's running, check you can connect to it, and then log in with SSH.

You'll find a systemd service named inlets-pro running the HTTPS tunnel with a specific authentication token and set of parameters.

Now, generate a new systemd unit file for the TCP tunnel.

I would suggest generating a new token for this tunnel.

TOKEN=\"$(head -c 32 /dev/urandom | base64 | cut -d \"-\" -f1)\"\n\n# Find the instance's public IPv4 address:\nPUBLIC_IP=\"$(curl -s https://checkip.amazonaws.com)\"\n

Let's imagine the public IP resolved to 46.101.128.5 which is part of the DigitalOcean range.

inlets-pro tcp server \\\n--token \"$TOKEN\" \\\n--auto-tls-san $PUBLIC_IP \\\n--generate=systemd > inlets-pro-tcp.service\n

Example:

[Unit]\nDescription=inlets Pro TCP Server\nAfter=network.target\n\n[Service]\nType=simple\nRestart=always\nRestartSec=5\nStartLimitInterval=0\nExecStart=/usr/local/bin/inlets-pro tcp server --auto-tls --auto-tls-san=46.101.128.5 --control-addr=0.0.0.0 --token=\"k1wCR+2j41TXqqq/UTLJzcuzhmSJbU5NY32VqnNOnog=\" --control-port=8124 --auto-tls-path=/tmp/inlets-pro-tcp\n\n[Install]\nWantedBy=multi-user.target\n

We need to update the control-port for this inlets tunnel server via the --control-port flag. Use port 8124 since 8123 is already in use by the HTTP tunnel. Add --control-port 8124 to the ExecStart line.

We need to add a new flag so that generated TLS certificates are placed in a unique directory, and don't clash. Add --auto-tls-path /tmp/inlets-pro-tcp/ to the same line.

Next install the unit file with:

sudo cp inlets-pro-tcp.service /etc/systemd/system/\nsudo systemctl daemon-reload\nsudo systemctl enable inlets-pro-tcp.service\n\nsudo systemctl restart inlets-pro-tcp.service\n

You'll now be able to check the logs for the server:

sudo journalctl -u inlets-pro-tcp\n

Finally you can connect your TCP client:

inlets-pro tcp client \\\n--token \"k1wCR+2j41TXqqq/UTLJzcuzhmSJbU5NY32VqnNOnog=\" \\\n--upstream 192.168.0.15 \\\n--ports 2222,5900 \\\n--url wss://46.101.128.5:8124\n

Note that 5900 is the default port for VNC. Port 2222 was used for SSH so as not to conflict with the version running on the tunnel server.

You can now connect to the public IP of your server via SSH and VNC:

For example:

ssh -p 2222 pi@46.101.128.5\n
"},{"location":"tutorial/dual-tunnels/#wrapping-up","title":"Wrapping up","text":"

You now have a TCP and HTTPS tunnel server running on the same host. This was made possible by changing the control-plane port and auto-TLS path for the second server, and having it start automatically through a separate systemd service.

This technique may save you a few dollars per month, but it may not be worth your time compared to how quick and easy it is to create two separate servers with inletsctl create.

"},{"location":"tutorial/istio-gateway/","title":"Tutorial: Expose an Istio gateway with the inlets-operator","text":"

In this tutorial we will configure the inlets-operator to get a public IP for the Istio Ingress Gateway. This will allow you to receive HTTPS certificates via LetsEncrypt and cert-manager and access services running in your cluster on their own public domain.

"},{"location":"tutorial/istio-gateway/#install-arkade","title":"Install arkade","text":"

Arkade is a simple CLI tool that provides a quick way to install various apps and download common binaries much quicker.

To install arkade run:

curl -sSLf https://get.arkade.dev/ | sudo sh\n
"},{"location":"tutorial/istio-gateway/#create-a-kubernetes-cluster-with-kind","title":"Create a kubernetes cluster with kinD","text":"

We're going to use KinD, which runs inside a container with Docker for Mac or the Docker daemon. MacOS cannot actually run containers or Kubernetes itself, so projects like Docker for Mac create a small Linux VM and hide it away.

Download the kind and kubectl binaries if you don't have them already:

arkade get kind\narkade get kubectl\n

Now create a cluster:

$ kind create cluster\n

The initial creation could take a few minutes, but subsequent cluster creations are much faster.

Creating cluster \"kind\" ...\n \u2713 Ensuring node image (kindest/node:v1.19.0) \ud83d\uddbc\n \u2713 Preparing nodes \ud83d\udce6  \n \u2713 Writing configuration \ud83d\udcdc \n \u2713 Starting control-plane \ud83d\udd79\ufe0f \n \u2713 Installing CNI \ud83d\udd0c \n \u2713 Installing StorageClass \ud83d\udcbe \nSet kubectl context to \"kind-kind\"\nYou can now use your cluster with:\n\nkubectl cluster-info --context kind-kind\n\nHave a nice day! \ud83d\udc4b\n
kubectl get node -o wide\n\nNAME                 STATUS     ROLES    AGE   VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE       KERNEL-VERSION     CONTAINER-RUNTIME\nkind-control-plane      Ready   master   35s   v1.18.0   172.17.0.2    <none>        Ubuntu 19.10   5.3.0-26-generic   containerd://1.3.2\n

The above shows one node is Ready, so we can move on and install Istio.

"},{"location":"tutorial/istio-gateway/#install-istio","title":"Install Istio","text":"

You can install Istio using the documentation site at Istio.io, but we're going to use arkade instead since it gives us a one-line install and also bundles a version of Istio configuration for constrained development environments like a KinD cluster.

It is always possible to use the --set flag to override or pass in additional values for the Istio chart.

arkade install istio --help\n\nInstall istio\n\nUsage:\n  arkade install istio [flags]\n\nExamples:\n  arkade install istio --loadbalancer\n\nFlags:\n      --cpu string               Allocate CPU resource (default \"100m\")\n  -h, --help                     help for istio\n      --istio-namespace string   Namespace for the app (default \"istio-system\")\n      --memory string            Allocate Memory resource (default \"100Mi\")\n      --namespace string         Namespace for the app (default \"default\")\n      --profile string           Set istio profile (default \"default\")\n      --set stringArray          Use custom flags or override existing flags \n                                 (example --set prometheus.enabled=false)\n  -v, --version string           Specify a version of Istio (default \"1.11.4\")\n\nGlobal Flags:\n      --kubeconfig string   Local path for your kubeconfig file\n      --wait                If we should wait for the resource to be ready before returning (helm3 only, default false)\n

Install Istio:

arkade install istio\n

At the moment we don't have a public IP for the Istio gateway. The next step is to install the inlets operator so we can get one.

kubectl get -n istio-system \\\n  svc/istio-ingressgateway\n\nNAME                   TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)                                      AGE\nistio-ingressgateway   LoadBalancer   10.43.92.145   <pending>     15021:32382/TCP,80:31487/TCP,443:31692/TCP   3m28s\n
"},{"location":"tutorial/istio-gateway/#install-the-inlets-operator","title":"Install the inlets-operator","text":"

The inlets-operator lets you get public LoadBalancers on your local Kubernetes cluster. It does this by creating a VM to run an inlets tunnel server in the cloud of your choice for each LoadBalancer. It then plumbs in an inlets client to connect to it using a deployment.

The inlets-operator can also be installed with arkade.

Save an access token for your cloud provider as $HOME/access-token, in this example we're using DigitalOcean. Other providers may also need a secret token in addition to the API key.

Your inlets license should be already saved at: $HOME/.inlets/LICENSE, if it's not, you can move it there or use the --license-file flag.

export ACCESS_TOKEN=$HOME/access-token\n\narkade install inlets-operator \\\n--provider digitalocean \\\n--region lon1 \\\n--token-file $ACCESS_TOKEN \\\n--license-file \"$HOME/.inlets/LICENSE\"\n

You can run arkade install inlets-operator --help to see a list of other cloud providers or take a look at the inlets-operator reference documentation.

  • Set the --region flag as required, it's best to have low latency between your current location and where the exit-servers will be provisioned.

Once the inlets-operator is installed we can start watching for the public IP to appear.

kubectl get -n istio-system \\\nsvc/istio-ingressgateway -w\n\nNAME                   TYPE           CLUSTER-IP     EXTERNAL-IP\nistio-ingressgateway   LoadBalancer   10.106.220.170   <pending>\nistio-ingressgateway   LoadBalancer   10.106.220.170   165.227.237.77\n
"},{"location":"tutorial/istio-gateway/#install-cert-manager","title":"Install cert-manager","text":"

Install cert-manager, which can be integrated with Istio gateways to manage TLS certificates.

arkade install cert-manager\n
"},{"location":"tutorial/istio-gateway/#a-quick-recap","title":"A quick recap","text":"

This is what we have so far:

  • Istio

    The istio service mesh. Among other things, it comes with the istio Ingress Gateway that will get a public address via an inlets tunnel.

  • inlets-operator

    The inlets operator provides us with a public VirtualIP for the istio Ingress Gateway

  • cert-manager

    Integrates with Istio gateways to provide TLS certificates through the HTTP01 or DNS01 challenges from LetsEncrypt.

"},{"location":"tutorial/istio-gateway/#deploy-an-application-and-get-a-tls-certificate","title":"Deploy an application and get a TLS certificate","text":"

Istio uses the Bookinfo Application as an example in their documentation. We will also use this example.

Enable side-car injection and then deploy the BookInfo manifests:

kubectl label namespace default istio-injection=enabled\n\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.15/samples/bookinfo/platform/kube/bookinfo.yaml\n\nkubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.15/samples/bookinfo/networking/bookinfo-gateway.yaml\n

We can verify that the book application is up and running and accessible from our local computer on localhost by running:

kubectl port-forward -n istio-system \\\n  svc/istio-ingressgateway 31380:80\n

Then send a request to it with curl:

curl -sS http://127.0.0.1:31380/productpage | grep -o \"<title>.*</title>\"\n<title>Simple Bookstore App</title>\n

Since we set up the inlets operator in the previous step to get an external IP for the Istio ingress gateway we should now also be able to access the app using that public IP.

Open a browser and navigate to the /productpage URL using the EXTERNAL-IP:

http://165.227.237.77/productpage\n

TLS certificates require a domain name and DNS A or CNAME entry. You can create those in the admin panel of your provider. They should point to the external IP of the Istio Ingress gateway. We will use the bookinfo.example.com domain as an example.

export EMAIL=\"you@example.com\"\n\ncat > issuer-prod.yaml <<EOF\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: letsencrypt-prod\nspec:\n  acme:\n    server: https://acme-v02.api.letsencrypt.org/directory\n    email: $EMAIL\n    privateKeySecretRef:\n      name: letsencrypt-prod\n    solvers:\n    - selector: {}\n      http01:\n        ingress:\n          class: istio\nEOF\n

Note that ingress class is set to class: istio.

We are using the Let's Encrypt production server which has strict limits on the API. A staging server is also available at https://acme-staging-v02.api.letsencrypt.org/directory. If you are creating a lot of certificates while testing a deployment it would be better to use the staging server.

Edit email, then run: kubectl apply -f issuer-prod.yaml.

Create a new certificate resource

apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: ingress-cert\nnamespace: istio-system\nspec:\nsecretName: ingress-cert\ncommonName: bookinfo.example.com\ndnsNames:\n- bookinfo.example.com\nissuerRef:\nname: letsencrypt-prod\nkind: ClusterIssuer\n

Edit the bookinfo gateway, kubectl edit gateway/bookinfo-gateway and reference the certificate secret in the TLS configuration under credentialName.

apiVersion: networking.istio.io/v1beta1\nkind: Gateway\nmetadata:\nname: bookinfo-gateway\nspec:\nselector:\nistio: ingressgateway # use istio default controller\nservers:\n- port:\nnumber: 443\nname: https\nprotocol: HTTPS\ntls:\nmode: SIMPLE\ncredentialName: ingress-cert # This should match the Certificate secretName\nhosts:\n- bookinfo.example.com\n

You can always checkout the Istio documentation for more information on how to integrate cert-manager.

We can use curl again to access the bookinfo application this time with our custom domain and over a secure connection. Alternatively you can open the URL in your browser.

curl -sS https://bookinfo.example.com/productpage | grep -o \"<title>.*</title>\"\n<title>Simple Bookstore App</title>\n
"},{"location":"tutorial/istio-gateway/#wrapping-up","title":"Wrapping up","text":"

Through the use of the inlets-operator we were able to get a public IP for the Istio Ingress gateway. This allows you to access services on your cluster whether you are running it in an on-premises datacenter, within a VM or on your local laptop.

There is no need to open a firewall port, set-up port-forwarding rules, configure dynamic DNS or any of the usual hacks. You will get a public IP and it will \"just work\" for any TCP traffic you may have.

"},{"location":"tutorial/kubernetes-api-server/","title":"Tutorial: Expose a local Kubernetes API Server","text":"

In this tutorial, we'll show you how to expose a local Kubernetes API Server on the Internet, so that you can access it from anywhere, just like with a managed cloud provider.

"},{"location":"tutorial/kubernetes-api-server/#pre-reqs","title":"Pre-reqs","text":"
  • A computer or laptop running MacOS or Linux, or Git Bash or WSL on Windows
  • Docker for Mac / Docker Daemon - installed in the normal way, you probably have this already
  • Kubernetes running locally with kubeadm, K3s, K3d, Minikube, KinD, Docker Desktop, etc
"},{"location":"tutorial/kubernetes-api-server/#the-kubernetes-cluster","title":"The Kubernetes cluster","text":"

By default every Kubernetes cluster has TLS enabled to encrypt any HTTP REST messages that go over its control-plane. The TLS certificate has to be bound to a certain name, sometimes called a TLS SAN.

The certificate is usually only valid for \"kubernetes.default.svc\", and can only be accessed from within the cluster.

Kubernetes on tour - get access to your cluster from anywhere, without having to resort to complex tooling like VPNs.

When a managed cloud provider provisions you a cluster, they'll add additional names into the certificate like \"customer1.lke.eu.linode.com\" which is then added to your generated kubeconfig file that you download in the dashboard.

We have five steps to run through to expose the API server:

  1. Create a Kubernetes cluster
  2. Create a VM on the public cloud with an inlets TCP server running on it
  3. Create a DNS entry for the public VM's IP address
  4. Configure a TLS SAN, if possible with a new domain name
  5. Set up an inlets client as a Pod to forward traffic to the Kubernetes API Server

Once we have all this in place, we can take our existing kubeconfig file and edit the URL, so that instead of pointing at our LAN IP or localhost, it points to the domain mapped to the public VM.

"},{"location":"tutorial/kubernetes-api-server/#create-a-cluster","title":"Create a cluster","text":"

You can create a cluster on any machine by using KinD:

arkade get kind\nkind create cluster\n

If you have a Raspberry Pi or a Linux Server, you can install K3s using k3sup:

arkade get k3sup\n\nk3sup install --ip 192.168.1.101 --user pi\n

In either case, you'll get back a kubeconfig file.

Here's a snippet of what I got back from running k3sup install:

apiVersion: v1\nclusters:\n- cluster:\nserver: https://192.168.1.101:6443\n

The server field will need to be changed to the new public address later on.

"},{"location":"tutorial/kubernetes-api-server/#create-a-vm-on-the-public-cloud-with-an-inlets-tcp-server-running-on-it","title":"Create a VM on the public cloud with an inlets TCP server running on it","text":"

Just like when Linode Kubernetes Engine provisions us a domain like \"customer1.lke.eu.linode.com\", we'll need our own subdomain too, so that the certificate can be issued for it.

In order to create the DNS record, we need a public IP, which we will get by creating a tunnel server on our preferred cloud and in a region that's close to us.

arkade get inletsctl\n\nexport ACCESS_TOKEN=\"\" # Retrieve this from your cloud dashboard\n\ninletsctl create \\\n--provider linode \\\n--tcp \\\n--access-token $ACCESS_TOKEN \\\n--region eu-west\n

Save the connection info from inletsctl into a text file for later.

# Give a single value or comma-separated\nexport PORTS=\"8000\"\n\n# Where to route traffic from the inlets server\nexport UPSTREAM=\"localhost\"\n\ninlets-pro tcp client --url \"wss://139.160.201.143:8123\" \\\n  --token \"f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338\" \\\n  --upstream $UPSTREAM \\\n  --ports $PORTS\n

Create a DNS subdomain for the IP address you were given:

  • k3s.example.com => 139.160.201.143

Check that you can resolve the IP with a ping ping -c 1 k3s.example.com

Now check the status of the inlets server:

export TOKEN=\"f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338\"\n\ninlets-pro status --url \"wss://139.160.201.143:8123\" \\\n--token \"$TOKEN\"\n

Output:

inlets server status. Version: 0.9.3 - 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514\n\nServer info:\nHostname:       localhost\nProcess uptime: 5 seconds ago\nMode:           tcp\nVersion:        0.9.3 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514\n\nNo clients connected.\n

We can now move onto the next step.

"},{"location":"tutorial/kubernetes-api-server/#configure-a-tls-san-if-possible-with-a-new-domain-name","title":"Configure a TLS SAN, if possible with a new domain name","text":"

With k3s, it's trivial to add additional TLS SAN names for the Kubernetes API Server.

If you run the k3sup install command again, it'll update your configuration:

k3sup install \\\n--ip 192.168.1.101 \\\n--user pi \\\n--tls-san k3s.example.com\n

You'll now have the custom domain along with the default kubernetes.default.svc as valid names in the generated certificate.

If you're not running on k3s, or use a service where you cannot change the TLS SAN, then we'll show you what to do in the next step.

"},{"location":"tutorial/kubernetes-api-server/#update-your-kubeconfig-file-with-the-new-endpoint","title":"Update your kubeconfig file with the new endpoint","text":"

We need to update our kubeconfig file to point at the custom domain instead of at whatever loopback or LAN address it currently does.

For K3s users, change the server URL:

apiVersion: v1\nclusters:\n- cluster:\nserver: https://192.168.1.101:6443\n

To:

apiVersion: v1\nclusters:\n- cluster:\nserver: https://k3s.example.com:443\n

For any user where you cannot regenerate the TLS certificate for the API Server, you can specify the server name in the config file:

apiVersion: v1\nclusters:\n- cluster:\nserver: https://k3s.example.com:443\ntls-server-name: kubernetes\n

For more details see: Support TLS Server Name overrides in kubeconfig file #88769

Save the changes to your kubeconfig file.

"},{"location":"tutorial/kubernetes-api-server/#connect-the-tunnel","title":"Connect the tunnel","text":"

The tunnel acts like a router, it takes any TCP packets sent to port 6443 (k3s) or 443 (Kubernetes) and forwards them down the tunnel to the inlets client. The inlets client then looks at its own \"--upstream\" value to decide where to finally send the data.

Save inlets-k8s-api.yaml:

export LICENSE=\"$(cat $HOME/.inlets/LICENSE)\"\nexport TOKEN=\"f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338\" # populate with the token from inletsctl\nexport SERVER_IP=\"139.160.201.143\" # populate with the server IP, not the domain\n\ncat > inlets-k8s-api.yaml <<EOF\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: inlets-client\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: inlets-client\n  template:\n    metadata:\n      labels:\n        app: inlets-client\n    spec:\n      containers:\n      - name: inlets-client\n        image: ghcr.io/inlets/inlets-pro:0.9.9\n        imagePullPolicy: IfNotPresent\n        command: [\"inlets-pro\"]\n        args:\n        - \"tcp\"\n        - \"client\"\n        - \"--url=wss://$SERVER_IP:8123\"\n        - \"--upstream=kubernetes.default.svc\"\n        - \"--port=443\"\n        - \"--port=6443\"\n        - \"--token=$TOKEN\"\n        - \"--license=$LICENSE\"\n---\nEOF\n

You'll see the tunnel client up and running and ready to receive requests:

kubectl logs deploy/inlets-client\n2022/06/24 09:51:18 Licensed to: Alex <contact@openfaas.com>, expires: 128 day(s)\n2022/06/24 09:51:18 Upstream server: kubernetes.default.svc, for ports: 443, 6443\ntime=\"2022/06/24 09:51:18\" level=info msg=\"Connecting to proxy\" url=\"wss://139.160.201.143:8123/connect\"\ninlets-pro TCP client. Copyright OpenFaaS Ltd 2021\ntime=\"2022/06/24 09:51:18\" level=info msg=\"Connection established\" client_id=5309466072564c1c90ce0a0bcaa22b74\n

Check the tunnel server's status to confirm the connection:

export TOKEN=\"f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338\"\n\ninlets-pro status --url \"wss://139.160.201.143:8123\" \\\n--token \"$TOKEN\"\n\ninlets server status. Version: 0.9.3 - 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514\n\nServer info:\nHostname:       localhost\nProcess uptime: 15 minutes ago\nMode:           tcp\nVersion:        0.9.3 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514\n\nConnected clients:\nClient ID                        Remote Address        Connected  Upstreams\n5309466072564c1c90ce0a0bcaa22b74 192.168.1.101:16368 43 seconds kubernetes.default.svc:443, kubernetes.default.svc:6443\n

Finally prove that it's working with the new, public address:

$ kubectl cluster-info\nKubernetes control plane is running at https://k3s.example.com:443\nCoreDNS is running at https://k3s.example.com:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\nMetrics-server is running at https://k3s.example.com:443/api/v1/namespaces/kube-system/services/https:metrics-server:https/proxy\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n
"},{"location":"tutorial/kubernetes-api-server/#wrapping-up","title":"Wrapping up","text":"

In a relatively short period of time, with a custom domain, and a small VM, we set up a tunnel server to route traffic from the public Internet to a K3s server on an internal network.

This gives you a similar experience to a managed public cloud Kubernetes engine, but running on your own infrastructure, or perhaps within a restrictive VPC.

You may also like:

  • Learn how to manage apps across multiple Kubernetes clusters by Johan Siebens

If you'd like to talk to us about this tutorial, feel free to reach out for a meeting:

Set up a meeting

"},{"location":"tutorial/kubernetes-ingress/","title":"Tutorial: Expose a local IngressController with the inlets-operator","text":"

In this quick-start we will configure the inlets-operator to use inlets-pro in TCP mode to expose ports 80 and 443 of an Ingress Controller (ingress-nginx) so that it can receive HTTPS certificates via LetsEncrypt and cert-manager.

The inlets-operator creates a VM for each tunnel server in the cloud of your choice, then plumbs in an inlets client to connect to it using a Deployment. There is an alternative approach that we also recommend which involves creating the tunnel server with inletsctl, followed by installing the inlets client with Helm: Fixing Ingress for short-lived local Kubernetes clusters.

You can subscribe to inlets for personal or commercial use via Gumroad

"},{"location":"tutorial/kubernetes-ingress/#pre-reqs","title":"Pre-reqs","text":"
  • A computer or laptop running MacOS or Linux, or Git Bash or WSL on Windows
  • Docker for Mac / Docker Daemon - installed in the normal way, you probably have this already
  • KinD - the \"darling\" of the Kubernetes community is Kubernetes IN Docker, a small one-shot cluster that can run inside a Docker container
  • arkade - arkade is an app installer that takes a helm chart and bundles it behind a simple CLI
"},{"location":"tutorial/kubernetes-ingress/#install-arkade","title":"Install arkade","text":"

You can use arkade or helm to install the various applications we are going to add to the cluster below. arkade provides an apps ecosystem that makes things much quicker.

MacOS and Linux users:

curl -sSLf https://get.arkade.dev/ | sudo sh\n

Windows users should install Git Bash and run the above without sudo.

"},{"location":"tutorial/kubernetes-ingress/#create-a-kubernetes-cluster-with-kind","title":"Create a Kubernetes cluster with KinD","text":"

We're going to use KinD, which runs inside a container with Docker for Mac or the Docker daemon. MacOS cannot actually run containers or Kubernetes itself, so projects like Docker for Mac create a small Linux VM and hide it away.

You can use an alternative to KinD if you have a preferred tool.

Get a KinD binary release and kubectl (the Kubernetes CLI):

arkade get kind --version v0.9.0\narkade get kubectl --version v1.19.3\n

Now create a cluster:

$ kind create cluster\n

The initial creation could take a few minutes, but subsequent cluster creations are much faster.

Creating cluster \"kind\" ...\n \u2713 Ensuring node image (kindest/node:v1.19.0) \ud83d\uddbc\n \u2713 Preparing nodes \ud83d\udce6  \n \u2713 Writing configuration \ud83d\udcdc \n \u2713 Starting control-plane \ud83d\udd79\ufe0f \n \u2713 Installing CNI \ud83d\udd0c \n \u2713 Installing StorageClass \ud83d\udcbe \nSet kubectl context to \"kind-kind\"\nYou can now use your cluster with:\n\nkubectl cluster-info --context kind-kind\n\nHave a nice day! \ud83d\udc4b\n

We can check that our single node is ready now:

kubectl get node -o wide\n\nNAME                 STATUS     ROLES    AGE   VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE       KERNEL-VERSION     CONTAINER-RUNTIME\nkind-control-plane      Ready   master   35s   v1.18.0   172.17.0.2    <none>        Ubuntu 19.10   5.3.0-26-generic   containerd://1.3.2\n

The above shows one node Ready, so we are ready to move on.

"},{"location":"tutorial/kubernetes-ingress/#install-the-inlets-operator","title":"Install the inlets-operator","text":"

Save an access token for your cloud provider as $HOME/access-token, in this example we're using DigitalOcean. Other providers may also need a secret token in addition to the API key.

Your inlets license should be already saved at: $HOME/.inlets/LICENSE, if it's not, you can move it there or use the --license-file flag.

export ACCESS_TOKEN=$HOME/access-token\n\narkade install inlets-operator \\\n--provider digitalocean \\\n--region lon1 \\\n--token-file $ACCESS_TOKEN \\\n--license-file \"$HOME/.inlets/LICENSE\"\n

You can run arkade install inlets-operator --help to see a list of other cloud providers.

  • Set the --region flag as required, it's best to have low latency between your current location and where the exit-servers will be provisioned.
"},{"location":"tutorial/kubernetes-ingress/#install-nginx-ingress","title":"Install nginx-ingress","text":"

This installs nginx-ingress using its Helm chart:

arkade install nginx-ingress\n
"},{"location":"tutorial/kubernetes-ingress/#install-cert-manager","title":"Install cert-manager","text":"

Install cert-manager, which can obtain TLS certificates through NginxIngress.

arkade install cert-manager\n
"},{"location":"tutorial/kubernetes-ingress/#a-quick-review","title":"A quick review","text":"

Here's what we have so far:

  • nginx-ingress

    An IngressController, Traefik or Caddy are also valid options. It comes with a Service\u00a0of type LoadBalancer that will get a public address via the tunnel

  • inlets-operator configured to use inlets-pro in TCP mode

    Provides us with a public VirtualIP for the IngressController service.

  • cert-manager

    Provides TLS certificates through the HTTP01 or DNS01 challenges from LetsEncrypt

"},{"location":"tutorial/kubernetes-ingress/#deploy-an-application-and-get-a-tls-certificate","title":"Deploy an application and get a TLS certificate","text":"

This is the final step that shows everything working end to end.

TLS certificates require a domain name and DNS A or CNAME entry, so let's set that up

Find the External-IP:

kubectl get svc\n

Now create a DNS A record in your admin panel, so for example: expressjs.example.com.

Now when you install a Kubernetes application with an Ingress definition, NginxIngress and cert-manager will work together to provide a TLS certificate.

Create a staging issuer for cert-manager issuer-staging.yaml and make sure you edit the email value.

export EMAIL=\"you@example.com\"\n\ncat > issuer-staging.yaml <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: letsencrypt-staging\n  namespace: default\nspec:\n  acme:\n    server: https://acme-staging-v02.api.letsencrypt.org/directory\n    email: $EMAIL\n    privateKeySecretRef:\n      name: letsencrypt-staging\n    solvers:\n    - selector: {}\n      http01:\n        ingress:\n          class: nginx\nEOF\n

Apply the file with kubectl apply -f issuer-staging.yaml

While the Let's Encrypt production server has strict limits on the API, the staging server is more forgiving, and should be used while you are testing a deployment.

Edit the email, then run: kubectl apply -f issuer-staging.yaml.

Let's use helm3 to install Alex's example Node.js API available on GitHub

Create a custom.yaml file with the following:

ingress:\nenabled: true\nannotations:\nkubernetes.io/ingress.class: nginx\ncert-manager.io/issuer: \"letsencrypt-staging\"\nhosts:\n- host: expressjs.inlets.dev\npaths: [\"/\"]\ntls:\n- secretName: expressjs-tls\nhosts:\n- expressjs.inlets.dev\n

Replace the string expressjs.inlets.dev with your own sub-domain created earlier i.e. expressjs.example.com.

You can download around a dozen other CLI tools using arkade including helm. Use arkade to download helm and put it in your PATH:

arkade get helm\n\n# Put helm in your path:\nexport PATH=$PATH:$HOME/.arkade/bin/helm3/\n\n# Or alternatively install to /usr/local/bin\nsudo cp $HOME/.arkade/bin/helm3/helm /usr/local/bin/\n

Now install the chart using helm:

helm repo add expressjs-k8s https://alexellis.github.io/expressjs-k8s/\n\n# Then they run an update\nhelm repo update\n\n# And finally they install\nhelm upgrade --install express expressjs-k8s/expressjs-k8s \\\n--values custom.yaml\n
"},{"location":"tutorial/kubernetes-ingress/#test-it-out","title":"Test it out","text":"

Now check the certificate has been created and visit the webpage in a browser:

kubectl get certificate\n\nNAME            READY   SECRET          AGE\nexpressjs-tls   True    expressjs-tls   49s\n

Open the webpage i.e. https://expressjs.example.com. Since this is a staging certificate, you will get a warning from your browser. You can accept the certificate in order to test your site.

"},{"location":"tutorial/kubernetes-ingress/#getting-a-production-certificate","title":"Getting a Production Certificate","text":"

Create a production certificate issuer issuer-prod.yaml, similar to the staging issuer you produced earlier. Be sure to change the email address to your email.

export EMAIL=\"you@example.com\"\n\ncat > issuer-prod.yaml <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: letsencrypt-prod\n  namespace: default\nspec:\n  acme:\n    server: https://acme-v02.api.letsencrypt.org/directory\n    email: $EMAIL\n    privateKeySecretRef:\n      name: letsencrypt-prod\n    solvers:\n    - selector: {}\n      http01:\n        ingress:\n          class: nginx\nEOF\n

Then run kubectl apply -f issuer-prod.yaml

Now you must update your expressjs deployment to use the new certificate issuer. Create a new helm3 overrides file custom-prod.yaml:

cat > custom-prod.yaml <<EOF\ningress:\n  enabled: true\n  annotations:\n    kubernetes.io/ingress.class: nginx\n    cert-manager.io/issuer: \"letsencrypt-prod\"\n  hosts:\n    - host: expressjs.inlets.dev\n      paths: [\"/\"]\n  tls:\n   - secretName: expressjs-tls\n     hosts:\n       - expressjs.inlets.dev\nEOF\n

Be sure to change the above domain name to your domain name for the sample server.

You can update your deployment using the helm command below:

helm upgrade express expressjs-k8s/expressjs-k8s \\\n--values custom-prod.yaml\n

Here's my example on my own domain:

You can view the certificate that's being served directly from your local cluster and see that it's valid:

"},{"location":"tutorial/kubernetes-ingress/#install-a-real-world-application","title":"Install a real-world application","text":"

Using arkade you can now install OpenFaaS or a Docker Registry with a couple of commands, and since you have Nginx and cert-manager in place, this will only take a few moments.

"},{"location":"tutorial/kubernetes-ingress/#openfaas-with-tls","title":"OpenFaaS with TLS","text":"

OpenFaaS is a platform for Kubernetes that provides FaaS functionality and microservices. The motto of the project is Serverless Functions Made Simple and you can deploy it along with TLS in just a couple of commands:

export DOMAIN=gateway.example.com\narkade install openfaas\narkade install openfaas-ingress \\\n--email webmaster@$DOMAIN \\\n--domain $DOMAIN\n

That's it, you'll now be able to access your gateway at https://$DOMAIN/

For more, see the OpenFaaS workshop

"},{"location":"tutorial/kubernetes-ingress/#docker-registry-with-tls","title":"Docker Registry with TLS","text":"

A self-hosted Docker Registry with TLS and private authentication can be hard to set up, but we can now do that with two commands.

export DOMAIN=registry.example.com\narkade install docker-registry\narkade install docker-registry-ingress \\\n--email webmaster@$DOMAIN \\\n--domain $DOMAIN\n

Now try your registry:

docker login $DOMAIN\ndocker pull alpine:3.16\ndocker tag alpine:3.16 $DOMAIN/alpine:3.16\n\ndocker push $DOMAIN/alpine:3.16\n

You can even combine the new private registry with OpenFaaS if you like, checkout the docs for more.

"},{"location":"tutorial/kubernetes-ingress/#wrapping-up","title":"Wrapping up","text":"

Through the use of inlets-pro we have an encrypted control-plane for the websocket tunnel, and encryption for the traffic going to our Express.js app using a TLS certificate from LetsEncrypt.

You can now get a green lock and a valid TLS certificate for your local cluster, which also means that this will work with bare-metal Kubernetes, on-premises and with your Raspberry Pi cluster.

"},{"location":"tutorial/manual-http-server/","title":"Manual http server","text":""},{"location":"tutorial/manual-http-server/#setting-up-a-http-tunnel-server-manually","title":"Setting up a HTTP tunnel server manually","text":"

In this tutorial we will set up an inlets HTTP tunnel server to serve a local website over HTTPS using Let's Encrypt. The steps will be manual, but usually, we would use a provisioning tool like inletsctl to automate everything for us.

This may be useful for understanding how the server binary works, and how to use it on existing servers that you may have. Or perhaps you want to run inlets across an internal or private network.

"},{"location":"tutorial/manual-http-server/#pre-reqs","title":"Pre-reqs","text":"
  • A Linux server, Windows and MacOS are also supported
  • The inlets-pro binary at /usr/local/bin/
  • Access to a DNS control plane for a domain you control
"},{"location":"tutorial/manual-http-server/#run-the-server","title":"Run the server","text":"

For this example, your tunnel server should be accessible from the Internet. The tunnel client will connect to it and then expose one or more local websites so that you can access them remotely.

Create a DNS A record for the subdomain or subdomains you want to use, and have each of them point to the public IP address of the server you have provisioned. These should have a short TTL such as 60s to avoid waiting too long for DNS to propagate throughout the Internet. You can increase this value to a higher number later.

First generate an authentication token that the client will use to log in:

TOKEN=\"$(head -c 32 /dev/urandom | base64 | cut -d \"-\" -f1)\"\n

We'll use the built-in support for Let's Encrypt to get a valid HTTPS certificate for any services you wish to expose via your tunnel server. It is also possible to turn off Let's Encrypt support and use your own reverse proxy such as Caddy or Nginx.

export DOMAIN=\"example.com\"\n\ninlets-pro http server \\\n--auto-tls \\\n--control-port 8123 \\\n--auto-tls-san 192.168.0.10 \\\n--letsencrypt-domain subdomain1.$DOMAIN \\\n--letsencrypt-domain subdomain2.$DOMAIN \\\n--letsencrypt-email contact@$DOMAIN \\\n--letsencrypt-issuer staging \\\n--token $TOKEN\n

Notice that --letsencrypt-domain can be provided more than once, once for each of your subdomains.

We are also defaulting to the \"staging\" provider for TLS certificates which allows us to obtain a large number of certificates for experimentation purposes only. The default value, if this field is left off, is prod as you will see by running inlets-pro http server --help.

Now the following will happen:

  • The tunnel server will start up and listen to TCP traffic on port 80 and 443.
  • The server will try to resolve each of your domains passed via --letsencrypt-domain.
  • Then once each resolves, Let's Encrypt will be contacted for a HTTP01 ACME challenge.
  • Once the certificates are obtained, the server will start serving the HTTPS traffic.

Now you can connect your client running on another machine.

Of course you can tunnel whatever HTTP service you like, if you already have one.

Inlets has a built-in HTTP server that we can run on our local / private machine to share files with others. Let's use that as our example:

mkdir -p /tmp/share\n\necho \"Welcome to my filesharing service.\" > /tmp/share/welcome.txt\n\ninlets-pro fileserver \\\n--allow-browsing \\\n--webroot /tmp/share/ \\\n--port 8080\n

Next let's expose that local service running on localhost:8080 via the tunnel server:

export TOKEN=\"\" # Obtain this from your server\nexport SERVER_IP=\"\" # Your server's IP\nexport DOMAIN=\"example.com\"\n\ninlets-pro http client \\\n--url wss://$SERVER_IP:8123 \\\n--token $TOKEN \\\n--upstream http://localhost:8080/\n

If you set up your server for more than one sub-domain then you can specify a domain for each local service such as:

  --upstream subdomain1.$DOMAIN=http://localhost:8080/,subdomain2.$DOMAIN=http://localhost:3000/\n

Now that your client is connected, you can access the HTTP fileserver we set up earlier via the public DNS name:

curl -k -v https://subdomain1.$DOMAIN/welcome.txt\n

Now that you can see everything working, with a staging certificate, you can run the server command again and switch out the --letsencrypt-issuer staging flag for --letsencrypt-issuer prod.

"},{"location":"tutorial/manual-http-server/#wrapping-up","title":"Wrapping up","text":"

You have now installed an inlets HTTP tunnel server to a machine by hand. The same can be achieved by running the inletsctl tool, which does all of this automatically on a number of cloud providers.

  • Can I connect more than one client to the same server? Yes, and each can connect different services. So client 1 exposes subdomain1.DOMAIN and client 2 exposes subdomain2.DOMAIN. Alternatively, you can have multiple clients exposing the same domain, for high availability.

  • How do I keep the inlets server process running? You can run it in the background, by using a systemd unit file. You can generate these via the inlets-pro http server --generate=systemd command.

  • How do I keep the inlets client process running? Do the same as for a server, but use the inlets-pro http client --generate=systemd command.

  • What else can I do with my server? Browse the available options for the tunnel servers with the inlets-pro http server --help command.

"},{"location":"tutorial/manual-tcp-server/","title":"Manual tcp server","text":""},{"location":"tutorial/manual-tcp-server/#setting-up-a-tcp-server-manually","title":"Setting up a TCP server manually","text":"

In this tutorial we will set up a TCP tunnel server manually.

"},{"location":"tutorial/manual-tcp-server/#pre-reqs","title":"Pre-reqs","text":"
  • A Linux server, Windows and MacOS are also supported
  • The inlets-pro binary at /usr/local/bin/
"},{"location":"tutorial/manual-tcp-server/#log-into-your-existing-vm","title":"Log into your existing VM","text":"

Generate an authentication token for the tunnel:

TOKEN=\"$(openssl rand -base64 32)\"\necho \"$TOKEN\" > token.txt\n\n# Find the instance's public IPv4 address:\nPUBLIC_IP=\"$(curl -s https://checkip.amazonaws.com)\"\n

Let's imagine the public IP resolved to 46.101.128.5 which is part of the DigitalOcean range.

inlets-pro tcp server \\\n--token \"$TOKEN\" \\\n--auto-tls-san $PUBLIC_IP \\\n--generate=systemd > inlets-pro.service\n

Example:

[Unit]\nDescription=inlets Pro TCP Server\nAfter=network.target\n\n[Service]\nType=simple\nRestart=always\nRestartSec=5\nStartLimitInterval=0\nExecStart=/usr/local/bin/inlets-pro tcp server --auto-tls --auto-tls-san=46.101.128.5 --control-addr=0.0.0.0 --token=\"ISgW7E2TQk+ZmbJldN9ophfE96B93eZKk8L1+gBysg4=\" --control-port=8124 --auto-tls-path=/tmp/inlets-pro\n\n[Install]\nWantedBy=multi-user.target\n

Next install the unit file with:

sudo cp inlets-pro.service /etc/systemd/system/\nsudo systemctl daemon-reload\nsudo systemctl enable inlets-pro.service\n\nsudo systemctl restart inlets-pro.service\n

You'll now be able to check the logs for the server:

sudo journalctl -u inlets-pro\n

Finally you can connect your TCP client from a remote network. In this case, port 5900 is being exposed for VNC, along with port 2222 for SSH. Port 2222 is an extra port added to the /etc/ssh/sshd_config file on the Linux machine to avoid conflicting with SSH on the tunnel server itself.

inlets-pro tcp client \\\n--token \"ISgW7E2TQk+ZmbJldN9ophfE96B93eZKk8L1+gBysg4=\" \\\n--upstream 192.168.0.15 \\\n--port 2222 \\\n--port 5900 \\\n--url wss://46.101.128.5:8124\n

You can now connect to the public IP of your server via SSH and VNC:

For example:

ssh -p 2222 pi@46.101.128.5\n
"},{"location":"tutorial/manual-tcp-server/#wrapping-up","title":"Wrapping up","text":"

You now have a TCP tunnel server that you can connect as and when you like.

  • You can change the ports of the connected client
  • You can change the upstream
  • You can run multiple inlets-pro tcp client commands to load-balance traffic

But bear in mind that you cannot have two clients exposing different ports at the same time unless you're an inlets uplink user.

We would recommend creating TCP tunnel servers via inletsctl which automates all of the above in a few seconds.

"},{"location":"tutorial/monitoring-and-metrics/","title":"Monitoring and metrics","text":"

Learn how you can monitor your tunnel servers using the status command and Prometheus metrics.

This can help you understand how tunnels are being used and answer questions like:

  • What are the Rate, Error, Duration (RED) metrics for any HTTP APIs or websites that are being hosted?
  • How many connections are open at this point in time, and on which ports?
  • Have any clients attempted to connect which failed authentication?
"},{"location":"tutorial/monitoring-and-metrics/#introduction","title":"Introduction","text":"

All the information for monitoring tunnels is exposed via the inlets control-plane. It provides a connection endpoint for clients, a status endpoint and a monitoring endpoint.

Checkout the FAQ to learn about the difference between the data-plane and control-plane

Inlets provides two distinct ways to monitor tunnels. You can use the status command that is part of the CLI or collect Prometheus metrics for background monitoring and alerting. We will explore both methods.

"},{"location":"tutorial/monitoring-and-metrics/#the-status-command","title":"The status command","text":"

With the inlets-pro status command you can find out some basic tunnel statistics without logging in with a console SSH session. It shows you a list of the connected clients along with the version and uptime information of the server and can be used with both HTTP and TCP tunnels.

Here\u2019s an example of a TCP tunnel server:

$ inlets-pro status \\\n--url wss://178.62.70.130:8123 \\\n--token \"$TOKEN\" \\\n--auto-tls\n\nQuerying server status. Version DEV - unknown\nHostname: unruffled-banzai4\nStarted: 49 minutes\nMode: tcp\nVersion:        0.8.9-rc1\n\nClient ID                        Remote Address     Connected Upstreams\n730aa1bb96474cbc9f7e76c135e81da8 81.99.136.188:58102 15 minutes localhost:8001, localhost:8000, localhost:2222\n22fbfe123c884e8284ee0da3680c1311 81.99.136.188:64018 6 minutes  localhost:8001, localhost:8000, localhost:2222\n

We can see the clients that are connected and the ports they make available on the server. In this case there are two clients. All traffic to the data plane for ports 8001, 8000 and 2222 will be load-balanced between the two clients for HA.

The response from a HTTP tunnel:

$ inlets-pro status \\\n--url wss://147.62.70.101:8123 \\\n--token \"$TOKEN\"  \\\n--auto-tls\n\nServer info:\nHostname: creative-pine6\nStarted: 1 day\nMode:           http\nVersion:        0.8.9-rc1\nConnected clients:\nClient ID                        Remote Address     Connected Upstreams\n4e35edf5c6a646b79cc580984eac4ea9 192.168.0.19:34988 5 minutes example.com=http://localhost:8000, prometheus.example.com=http://localhost:9090\n

In this example we can see that there is only one client connected to the server at the moment. This client provides two separate domains.

The command uses the status endpoint that is exposed on the control-plane. It is possible to invoke the HTTP endpoint yourself. The token that is set up for the server has to be set in the Authorization header.

$ curl -ksLS https://127.0.0.1:8123/status \\\n-H \"Authorization: Bearer $TOKEN\"\n

Example response from a HTTP tunnel:

{\n\"info\": {\n\"version\": \"0.8.9-18-gf4fc15b\",\n\"sha\": \"f4fc15b9604efd0b0ca3cc604c19c200ae6a1d7b\",\n\"mode\": \"http\",\n\"startTime\": \"2021-08-13T12:23:17.321388+01:00\",\n\"hostname\": \"am1.local\"\n},\n\"clients\": [\n{\n\"clientID\": \"0c5f2a1ca0174ee3a177c3be7cd6d950\",\n\"remoteAddr\": \"[::1]:63671\",\n\"since\": \"2021-08-13T12:23:19.72286+01:00\",\n\"upstreams\": [\n\"*=http://127.0.0.1:8080\"\n]\n}\n]\n}\n
"},{"location":"tutorial/monitoring-and-metrics/#monitor-inlets-with-prometheus","title":"Monitor inlets with Prometheus","text":"

The server collects metrics for both the data-plane and the control-plane. These metrics are exposed through the monitoring endpoint on the control-plane. Prometheus can be set up for metrics collection and alerting.

The name of the metrics and the kind of metrics that are exported will depend on the mode that the server is running in. For TCP tunnels the metric name starts with tcp_ for HTTP tunnels this will be http_.

You don\u2019t need to be a Kubernetes user to take advantage of Prometheus. You can run it locally on your machine by downloading the binary here.

As an alternative, Grafana Cloud can give you a complete monitoring stack for your tunnels without having to worry about finding somewhere to run and maintain Prometheus and Grafana. We have a write up on our blog that shows you how to set this up: Monitor inlets tunnels with Grafana Cloud.

Create a prometheus.yaml file to configure Prometheus. Replace TOKEN with the token from your server.

# my global config\nglobal:\nscrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.\nevaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.\n# scrape_timeout is set to the global default (10s).\n\n# Alertmanager configuration\nalerting:\nalertmanagers:\n- static_configs:\n- targets:\n# - alertmanager:9093\n\n# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.\nrule_files:\n# - \"first_rules.yml\"\n# - \"second_rules.yml\"\n\n# A scrape configuration containing exactly one endpoint to scrape:\n# Here it's Prometheus itself.\nscrape_configs:\n# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.\n- job_name: 'prometheus'\n\n# metrics_path defaults to '/metrics'\n# scheme defaults to 'http'.\nstatic_configs:\n- targets: ['localhost:9090']\n# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.\n- job_name: 'http-tunnel'\n\n# metrics_path defaults to '/metrics'\n# scheme defaults to 'http'.\nstatic_configs:\n- targets: ['localhost:8123']\nscheme: https\n\nauthorization:\ntype: Bearer\ncredentials: TOKEN\ntls_config:\ninsecure_skip_verify: true\n

Start Prometheus with this command. It will listen on port 9090.

$ prometheus --config.file=./prometheus.yaml\n\nlevel=info ts=2021-08-13T11:25:31.791Z caller=main.go:428 msg=\"Starting Prometheus\" version=\"(version=2.29.1, branch=HEAD, revision=dcb07e8eac34b5ea37cd229545000b857f1c1637)\"\nlevel=info ts=2021-08-13T11:25:31.931Z caller=main.go:784 msg=\"Server is ready to receive web requests.\"\n
"},{"location":"tutorial/monitoring-and-metrics/#metrics-for-the-control-plane","title":"Metrics for the control-plane","text":"

The control-plane metrics can give you insights into the number of clients that are connected and the number of http requests made to the different control-plane endpoints.

HTTP tunnels

Metric Type Description Labels http_controlplane_connected_gauge gauge gauge of inlets clients connected to the control plane http_controlplane_requests_total counter total HTTP requests processed by connecting clients on the control plane code, path

TCP tunnels

Metric Type Description Labels tcp_controlplane_connected_gauge gauge gauge of inlets clients connected to the control plane tcp_controlplane_requests_total counter total HTTP requests processed by connecting clients on the control plane code, path

These metrics can for instance be used to tell you whether there are a lot of clients that attempted to connect but failed authentication.

If running on Kubernetes, the connected gauge could be used to scale tunnels down to zero replicas, and back up again in a similar way to OpenFaaS. This could be important for very large-scale installations of devices or tenants that have partial connectivity.

"},{"location":"tutorial/monitoring-and-metrics/#metrics-for-the-data-plane","title":"Metrics for the data-plane","text":"

The data-plane metrics can give you insights in the services that are exposed through your tunnel.

HTTP tunnels

Metric Type Description Labels http_dataplane_requests_total counter total HTTP requests processed code, host, method http_dataplane_request_duration_seconds histogram Seconds spent serving HTTP requests. code, host, method

TCP tunnels

Metric Type Description Labels tcp_dataplane_connections_gauge gauge gauge of TCP connections established over data plane port tcp_dataplane_connections_total counter total count of TCP connections established over data plane port

For HTTP tunnels these metrics can be used to get Rate, Error, Duration (RED) information for any API or website that is connected through the tunnel. This essentially allows you to collect basic metrics for your services even if they do not export any metrics themselves.

For TCP tunnels these metrics can help answer questions like:

  • How many connections are open at this point in time, and on which ports? i.e. if exposing SSH on port 2222, how many connections are open?
"},{"location":"tutorial/monitoring-and-metrics/#wrapping-up","title":"Wrapping up","text":"

We showed two different options that can be used to monitor your inlets tunnels.

The CLI provides a quick and easy way to get some status information for a tunnel. The endpoint that exposes this information can also be invoked directly using HTTP.

Prometheus metrics can be collected from the monitoring endpoint. These metrics are useful for background monitoring and alerting. They can provide you with Rate, Error, Duration (RED) metrics for HTTP services that are exposed through Inlets.

"},{"location":"tutorial/monitoring-and-metrics/#you-may-also-like","title":"You may also like","text":"
  • Blog post: Measure and monitor your inlets tunnels
"},{"location":"tutorial/postgresql-tcp-tunnel/","title":"Tutorial: Tunnel a private Postgresql database","text":"

In this tutorial we will tunnel Postgresql over inlets Pro to a remote machine. From there you can expose it to the Internet, or bind it to the local network for private VPN-like access.

You can subscribe to inlets for personal or commercial use via Gumroad

"},{"location":"tutorial/postgresql-tcp-tunnel/#setup-your-exit-node","title":"Setup your exit node","text":"

Provision a cloud VM on DigitalOcean or another IaaS provider using inletsctl:

inletsctl create \\\n--provider digitalocean \\\n--region lon1 \\\n--pro\n

Note the --url and TOKEN given to you in this step.

"},{"location":"tutorial/postgresql-tcp-tunnel/#run-postgresql-on-your-private-server","title":"Run Postgresql on your private server","text":"

We can run a Postgresql instance using Docker:

head -c 16 /dev/urandom |shasum \n8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\n\nexport PASSWORD=\"8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\"\n\ndocker run --rm --name postgres -p 5432:5432 -e POSTGRES_PASSWORD=8cb3efe58df984d3ab89bcf4566b31b49b2b79b9 -ti postgres:latest\n
"},{"location":"tutorial/postgresql-tcp-tunnel/#connect-the-inlets-pro-client","title":"Connect the inlets Pro client","text":"

Fill in the below with the outputs you received from inletsctl create.

Note that UPSTREAM=\"localhost\" can be changed to point at a host or IP address accessible from your client. The choice of localhost is suitable when you are running Postgresql in Docker on the same computer as the inlets Pro client.

The client will look for your license in $HOME/.inlets/LICENSE, but you can also use the --license/--license-file flag if you wish.

export EXIT_IP=\"134.209.21.155\"\nexport TCP_PORTS=\"5432\"\nexport LICENSE_FILE=\"$HOME/LICENSE.txt\"\nexport TOKEN=\"KXJ5Iq1Z5Cc8GjFXdXJrqNhUzoScXnZXOSRKeh8x3f6tdGq1ijdENWQ2IfzdCg4U\"\nexport UPSTREAM=\"localhost\"\n\ninlets-pro tcp client --connect \"wss://$EXIT_IP:8123/connect\" \\\n--token \"$TOKEN\" \\\n--upstream $UPSTREAM \\\n--ports $TCP_PORTS\n
"},{"location":"tutorial/postgresql-tcp-tunnel/#connect-to-your-private-postgresql-server-from-the-internet","title":"Connect to your private Postgresql server from the Internet","text":"

You can run this command from anywhere, since your exit-server has a public IP:

export PASSWORD=\"8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\"\nexport EXIT_IP=\"209.97.141.140\"\n\ndocker run -it -e PGPORT=5432 -e PGPASSWORD=$PASSWORD --rm postgres:latest psql -U postgres -h $EXIT_IP\n

Try a command such as CREATE database or \\dt.

"},{"location":"tutorial/postgresql-tcp-tunnel/#treat-the-database-as-private-like-a-vpn","title":"Treat the database as private - like a VPN","text":"

A split data and control-plane mean that tunnels do not need to be exposed on the Internet and can replace a VPN or a bespoke solution with SSH tunnels

If you would like to keep the database service and port private, you can run the exit-server as a Pod in a Kubernetes cluster, or add an iptables rule to block access from external IPs.

Log into your exit-server and update /etc/systemd/system/inlets-pro.service

To listen on loopback, add: --listen-data=127.0.0.1: To listen on a private adapter such as 10.1.0.10, add: --listen-data=10.1.0.10:

Restart the service, and you'll now find that the database port 5432 can only be accessed from within the network you specified in --listen-data

Other databases such as Cassandra, MongoDB and Mysql/MariaDB also work exactly the same. Just change the port from 5432 to the port of your database.

"},{"location":"tutorial/ssh-tcp-tunnel/","title":"Tutorial: Expose a private SSH server over a TCP tunnel","text":"

In this tutorial we will use inlets-pro to access your computer behind NAT or a firewall. We'll do this by tunnelling SSH over inlets-pro, and clients will connect to your exit-server.

Scenario: You want to allow SSH access to a computer that doesn't have a public IP, is inside a private network or behind a firewall. A common scenario is connecting to a Raspberry Pi on a home network or a home-lab.

You can subscribe to inlets for personal or commercial use via Gumroad

"},{"location":"tutorial/ssh-tcp-tunnel/#setup-your-tunnel-server-with-inletsctl","title":"Setup your tunnel server with inletsctl","text":"

For this tutorial you will need to have an account and API key with one of the supported providers, or you can create an exit-server manually and install inlets Pro there yourself.

For this tutorial, the DigitalOcean provider will be used. You can get free credits on DigitalOcean with this link.

Create an API key in the DigitalOcean dashboard with Read and Write permissions, and download it to a file called do-access-token in your home directory.

You need to know the IP of the machine you want to connect to on your local network, for instance 192.168.0.35 or 127.0.0.1 if you are running inlets Pro on the same host as SSH.

You can use the inletsctl utility to provision exit-servers with inlets Pro preinstalled, it can also download the inlets-pro CLI.

curl -sLSf https://inletsctl.inlets.dev | sh\nsudo mv inletsctl /usr/local/bin/\nsudo inletsctl download\n

If you already have inletsctl installed, then make sure you update it with inletsctl update.

"},{"location":"tutorial/ssh-tcp-tunnel/#create-an-tunnel-server","title":"Create an tunnel server","text":""},{"location":"tutorial/ssh-tcp-tunnel/#a-automate-your-tunnel-server","title":"A) Automate your tunnel server","text":"

The inletsctl tool can create a tunnel server for you in the region and cloud of your choice.

inletsctl create \\\n--provider digitalocean \\\n--access-token-file ~/do-access-token \\\n--region lon1\n

Run inletsctl create --help to see all the options.

After the machine has been created, inletsctl will output a sample command for the inlets-pro client command:

inlets-pro tcp client --url \"wss://206.189.114.179:8123/connect\" \\\n--token \"4NXIRZeqsiYdbZPuFeVYLLlYTpzY7ilqSdqhA0HjDld1QjG8wgfKk04JwX4i6c6F\"\n

Don't run this command, but note down the --url and --token parameters for later

"},{"location":"tutorial/ssh-tcp-tunnel/#b-manual-setup-of-your-tunnel-server","title":"B) Manual setup of your tunnel server","text":"

Use B) if you want to provision your virtual machine manually, or if you already have a host from another provider.

Log in to your remote tunnel server with ssh and obtain the binary using inletsctl:

curl -sLSf https://inletsctl.inlets.dev | sh\nsudo mv inletsctl /usr/local/bin/\nsudo inletsctl download\n

Find your public IP:

export IP=$(curl -s ifconfig.co)\n

Confirm the IP with echo $IP and save it, you need it for the client

Get an auth token and save it for later to use with the client

export TOKEN=\"$(head -c 16 /dev/urandom |shasum|cut -d'-' -f1)\"\n\necho $TOKEN\n

Start the server:

inlets-pro \\\ntcp \\\nserver \\\n--auto-tls \\\n--auto-tls-san $IP \\\n--token $TOKEN\n

If running the inlets client on the same host as SSH, you can simply set PROXY_TO_HERE to localhost. Or if you are running SSH on a different computer to the inlets client, then you can specify a DNS entry or an IP address like 192.168.0.15.

If using this manual approach to install inlets Pro, you should create a systemd unit file.

The easiest option is to run the server with the --generate=systemd flag, which will generate a systemd unit file to stdout. You can then copy the output to /etc/systemd/system/inlets-pro.service and enable it with systemctl enable inlets-pro.

"},{"location":"tutorial/ssh-tcp-tunnel/#configure-the-private-ssh-servers-listening-port","title":"Configure the private SSH server's listening port","text":"

It's very likely (almost certain) that your exit server will already be listening for traffic on the standard ssh port 22. Therefore you will need to configure your internal server to use an additional TCP port such as 2222.

Once configured, you'll still be able to connect to the internal server on port 22, but to connect via the tunnel, you'll use port 2222

Add the following to /etc/ssh/sshd_config:

Port 22\nPort 2222\n

For (optional) additional security, you could also disable password authentication, but make sure that you have inserted your SSH key to the internal server with ssh-copy-id user@ip before reloading the SSH service.

PasswordAuthentication no\n

Now we need to reload the service so these changes take effect:

sudo systemctl daemon-reload\nsudo systemctl restart sshd\n

Check that you can still connect on the internal IP on port 22, and the new port 2222.

Use the -p flag to specify the SSH port:

export IP=\"192.168.0.35\"\n\nssh -p 22 $IP \"uptime\"\nssh -p 2222 $IP \"uptime\"\n
"},{"location":"tutorial/ssh-tcp-tunnel/#start-the-inlets-pro-client","title":"Start the inlets Pro client","text":"

First download the inlets-pro client onto the private SSH server:

sudo inletsctl download\n

Use the command from earlier to start the client on the server:

export IP=\"206.189.114.179\"\nexport TCP_PORTS=\"2222\"\nexport LICENSE_FILE=\"$HOME/LICENSE.txt\"\nexport UPSTREAM=\"localhost\"\n\ninlets-pro tcp client --url \"wss://$IP:8123/connect\" \\\n--token \"4NXIRZeqsiYdbZPuFeVYLLlYTpzY7ilqSdqhA0HjDld1QjG8wgfKk04JwX4i6c6F\" \\\n--license-file \"$LICENSE_FILE\" \\\n--upstream \"$UPSTREAM\" \\\n--ports $TCP_PORTS\n

The localhost value will be used for --upstream because the tunnel client is running on the same machine as the SSH service. However, you could run the client on another machine within the network, and then change the flag to point to the private SSH server's IP.

"},{"location":"tutorial/ssh-tcp-tunnel/#try-it-out","title":"Try it out","text":"

Verify the installation by trying to SSH to the public IP, using port 2222.

ssh -p 2222 user@206.189.114.179\n

You should now have access to your server via SSH over the internet with the IP of the exit server.

You can also use other compatible tools like sftp, scp and rsync, just make sure that you set the appropriate port flag. The port flag for sftp is -P rather than -p.

"},{"location":"tutorial/ssh-tcp-tunnel/#wrapping-up","title":"Wrapping up","text":"

The principles in this tutorial can be adapted for other protocols that run over TCP such as MongoDB or PostgreSQL, just adapt the port number as required.

  • Quick-start: Tunnel a private database over inlets Pro
  • Purchase inlets for personal or commercial use
"},{"location":"uplink/become-a-provider/","title":"Become an inlets uplink provider","text":"

inlets uplink makes it easy for Service Providers and SaaS companies to deliver their product and services to customer networks.

To become a provider, you'll need a Kubernetes cluster, an inlets uplink subscription and to install the inlets-uplink-provider Helm chart.

  • Read the Inlets Uplink announcement
"},{"location":"uplink/become-a-provider/#before-you-start","title":"Before you start","text":"

Before you start, you'll need the following:

  • A Kubernetes cluster with LoadBalancer capabilities (i.e. public cloud).
  • A domain name clients can use to connect to the tunnel control plane.
  • An inlets uplink license (an inlets-pro license cannot be used)
  • Optional: arkade - a tool for installing popular Kubernetes tools

    To install arkade run:

    curl -sSLf https://get.arkade.dev/ | sudo sh\n

Inlets uplink has its own independent subscription from inlets-pro.

Sign-up here: inlets uplink plans.

"},{"location":"uplink/become-a-provider/#create-a-kubernetes-cluster","title":"Create a Kubernetes cluster","text":"

We recommend creating a Kubernetes cluster with a minimum of three nodes. Each node should have a minimum of 2GB of RAM and 2 CPU cores.

"},{"location":"uplink/become-a-provider/#install-cert-manager","title":"Install cert-manager","text":"

Install cert-manager, which is used to manage TLS certificates for inlets-uplink.

You can use Helm, or arkade:

arkade install cert-manager\n
"},{"location":"uplink/become-a-provider/#create-a-namespace-for-the-inlets-uplink-provider-and-install-your-license","title":"Create a namespace for the inlets-uplink-provider and install your license","text":"

Make sure to create the target namespace for your installation first.

kubectl create namespace inlets\n

Create the required secret with your inlets-uplink license.

Check whether your license key is in lower-case

There is a known issue with LemonSqueezy where the UI will copy the license key in lower-case, it needs to be converted to upper-case before being used with Inlets Uplink.

Convert the license to upper-case, if it's in lower-case:

(\nmv $HOME/.inlets/LICENSE_UPLINK{,.lower}\n\ncat $HOME/.inlets/LICENSE_UPLINK.lower | tr '[:lower:]' '[:upper:]' > $HOME/.inlets/LICENSE_UPLINK\n  rm $HOME/.inlets/LICENSE_UPLINK.lower\n)\n

Create the secret for the license:

kubectl create secret generic \\\n-n inlets inlets-uplink-license \\\n--from-file license=$HOME/.inlets/LICENSE_UPLINK\n
"},{"location":"uplink/become-a-provider/#setup-up-ingress-for-customer-tunnels","title":"Set up ingress for customer tunnels","text":"

Tunnels on your customers' network will connect to your own inlets-uplink-provider.

There are two options for deploying the inlets-uplink-provider.

Use Option A if you're not sure, if your team already uses Istio or prefers Istio, use Option B.

"},{"location":"uplink/become-a-provider/#a-install-with-kubernetes-ingress","title":"A) Install with Kubernetes Ingress","text":"

We recommend ingress-nginx, and have finely tuned the configuration to work well for the underlying websocket for inlets. That said, you can change the IngressController if you wish.

Install ingress-nginx using arkade or Helm:

arkade install ingress-nginx\n

Create a values.yaml file for the inlets-uplink-provider chart:

clientRouter:\n# Customer tunnels will connect with a URI of:\n# wss://uplink.example.com/namespace/tunnel\ndomain: uplink.example.com\n\ntls:\nissuerName: letsencrypt-prod\n\n# When set, a production issuer will be generated for you\n# to use a pre-existing issuer, set issuer.enabled=false\nissuer:\n# Create a production issuer as part of the chart installation\nenabled: true\n\n# Email address used for ACME registration for the production issuer\nemail: \"user@example.com\"\n\ningress:\nenabled: true\nclass: \"nginx\"      

Make sure to replace the domain and email with your actual domain name and email address.

Want to use the staging issuer for testing?

To use the Let's Encrypt staging issuer, pre-create your own issuer, update clientRouter.tls.issuerName with the name you have chosen, and then update clientRouter.tls.issuer.enabled and set it to false.

"},{"location":"uplink/become-a-provider/#b-install-with-istio","title":"B) Install with Istio","text":"

We have added support in the inlets-uplink chart for Istio to make it as simple as possible to configure with a HTTP01 challenge.

If you don't have Istio setup already you can deploy it with arkade.

arkade install istio\n

Label the inlets namespace so that Istio can inject its sidecars:

kubectl label namespace inlets \\\nistio-injection=enabled --overwrite\n

Create a values.yaml file for the inlets-uplink chart:

clientRouter:\n# Customer tunnels will connect with a URI of:\n# wss://uplink.example.com/namespace/tunnel\ndomain: uplink.example.com\n\ntls:\nissuerName: letsencrypt-prod\n\n# When set, a production issuer will be generated for you\n# to use a pre-existing issuer, set issuer.enabled=false\nissuer:\n# Create a production issuer as part of the chart installation\nenabled: true\n\n# Email address used for ACME registration for the production issuer\nemail: \"user@example.com\"\n\nistio:\nenabled: true\n

Make sure to replace the domain and email with your actual domain name and email address.

"},{"location":"uplink/become-a-provider/#deploy-with-helm","title":"Deploy with Helm","text":"

The chart is served through a container registry (OCI), not GitHub pages

Many Helm charts are served over GitHub pages, from a public repository, making it easy to browse and read the source code. We are using an OCI artifact in a container registry, which makes for a more modern alternative. If you want to browse the source, you can simply run helm template instead of helm upgrade.

Unauthorized?

The chart artifacts are public and do not require authentication, however if you run into an \"Access denied\" or authorization error when interacting with ghcr.io, try running helm registry login ghcr.io to refresh your credentials, or docker logout ghcr.io.

The Helm chart is called inlets-uplink-provider, you can deploy it using the custom values.yaml file created above:

helm upgrade --install inlets-uplink \\\noci://ghcr.io/openfaasltd/inlets-uplink-provider \\\n--namespace inlets \\\n--values ./values.yaml\n

If you want to pin the version of the Helm chart, you can do so with the --version flag.

Where can I see the various options for values.yaml?

All of the various options for the Helm chart are documented in the configuration reference.

How can I view the source code?

See the note on helm template under the configuration reference.

How can I find the latest version of the chart?

If you omit a version, Helm will use the latest published OCI artifact, however if you do want to pin it, you can browse all versions of the Helm chart on GitHub

As an alternative to using ghcr.io's UI, you can get the list of tags, including the latest tag via the crane CLI:

arkade get crane\n\n# List versions\ncrane ls ghcr.io/openfaasltd/inlets-uplink-provider\n\n# Get the latest version\nLATEST=$(crane ls ghcr.io/openfaasltd/inlets-uplink-provider |tail -n 1)\necho $LATEST\n
"},{"location":"uplink/become-a-provider/#verify-the-installation","title":"Verify the installation","text":"

Once you've installed inlets-uplink, you can verify it is deployed correctly by checking the inlets namespace for running pods:

$ kubectl get pods --namespace inlets\n\nNAME                               READY   STATUS    RESTARTS   AGE\nclient-router-b5857cf6f-7vrdh      1/1     Running   0          92s\nprometheus-74d8d7db9b-2hptm        1/1     Running   0          16s\nuplink-operator-7fccc9bdbc-twd2q   1/1     Running   0          92s\n

You should see the client-router and uplink-operator in a Running state.

If you installed inlets-uplink with Kubernetes ingress, you can verify that ingress for the client-router is setup and that a TLS certificate is issued for your domain using these two commands:

$ kubectl get -n inlets ingress/client-router\n\nNAME            CLASS    HOSTS                ADDRESS           PORTS     AGE\nclient-router   <none>   uplink.example.com   188.166.194.102   80, 443   31m\n
$ kubectl get -n inlets cert/client-router-cert\n\nNAME                 READY   SECRET               AGE\nclient-router-cert   True    client-router-cert   30m\n
"},{"location":"uplink/become-a-provider/#download-the-tunnel-cli","title":"Download the tunnel CLI","text":"

We provide a CLI to help you create and manage tunnels. It is available as a plugin for the inlets-pro CLI.

Download the inlets-pro binary:

  • Download it from the GitHub releases
  • Get it with arkade: arkade get inlets-pro

Get the tunnel plugin:

inlets-pro plugin get tunnel\n

Run inlets-pro tunnel --help to see all available commands.

"},{"location":"uplink/become-a-provider/#setup-the-first-customer-tunnel","title":"Setup the first customer tunnel","text":"

Continue the setup here: Create a customer tunnel

"},{"location":"uplink/become-a-provider/#upgrading-the-chart-and-components","title":"Upgrading the chart and components","text":"

If you have a copy of values.yaml with pinned image versions, you should update these manually.

Next, run the Helm chart installation command again, and remember to use the same values.yaml file that you used to install the software originally.

Over time, you may find using a tool like FluxCD or ArgoCD to manage the installation and updates makes more sense than running Helm commands manually.

If the Custom Resource Definition (CRD) has changed, you can extract it from the Chart repo and install it before or after upgrading. As a rule, Helm won't install or upgrade CRDs a second time if there's already an existing version:

helm template oci://ghcr.io/openfaasltd/inlets-uplink-provider \\\n--include-crds=true \\\n--output-dir=/tmp\n\nkubectl apply -f /tmp/inlets-uplink-provider/crds/uplink.inlets.dev_tunnels.yaml\n
"},{"location":"uplink/become-a-provider/#upgrading-existing-customer-tunnels","title":"Upgrading existing customer tunnels","text":"

The operator will upgrade the image: version of all deployed inlets uplink tunnels automatically based upon the tag set in values.yaml.

If no value is set in your overridden values.yaml file, then whatever the default is in the chart will be used.

inletsVersion: 0.9.23\n

When a tunnel is upgraded, you'll see a log line like this:

2024-01-11T12:25:15.442Z        info    operator/controller.go:860      Upgrading version       {\"tunnel\": \"ce.inlets\", \"from\": \"0.9.21\", \"to\": \"0.9.23\"}\n
"},{"location":"uplink/become-a-provider/#configuration-reference","title":"Configuration reference","text":"

Looking for the source for the Helm chart? The source is published directly to a container registry as an OCI bundle. View the source with: helm template oci://ghcr.io/openfaasltd/inlets-uplink-provider

If you need a configuration option outside of what's already available, feel free to raise an issue on the inlets-pro repository.

Overview of inlets-uplink parameters in values.yaml.

Parameter Description Default pullPolicy The a imagePullPolicy applied to inlets-uplink components. Always operator.image Container image used for the uplink operator. ghcr.io/openfaasltd/uplink-operator:0.1.5 clientRouter.image Container image used for the client router. ghcr.io/openfaasltd/uplink-client-router:0.1.5 clientRouter.domain Domain name for inlets uplink. Customer tunnels will connect with a URI of: wss://uplink.example.com/namespace/tunnel. \"\" clientRouter.tls.issuerName Name of cert-manager Issuer for the clientRouter domain. letsencrypt-prod clientRouter.tls.issuer.enabled Create a cert-manager Issuer for the clientRouter domain. Set to false if you wish to specify your own pre-existing object in the clientRouter.tls.issuerName field. true clientRouter.tls.issuer.email Let's Encrypt email. Only used for certificate renewing notifications. \"\" clientRouter.tls.ingress.enabled Enable ingress for the client router. enabled clientRouter.tls.ingress.class Ingress class for client router ingress. nginx clientRouter.tls.ingress.annotations Annotations to be added to the client router ingress resource. {} clientRouter.tls.istio.enabled Use an Istio Gateway for incoming traffic to the client router. false clientRouter.service.type Client router service type ClusterIP clientRouter.service.nodePort Client router service port for NodePort service type, assigned automatically when left empty. (only if clientRouter.service.type is set to \"NodePort\") nil tunnelsNamespace Deployments, Services and Secrets will be created in this namespace. Leave blank for a cluster-wide scope, with tunnels in multiple namespaces. \"\" inletsVersion Inlets Pro release version for tunnel server Pods. 0.9.12 clientApi.enabled Enable tunnel management REST API. false clientApi.image Container image used for the client API. ghcr.io/openfaasltd/uplink-api:0.1.5 prometheus.create Create the Prometheus monitoring component. 
true prometheus.resources Resource limits and requests for prometheus containers. {} prometheus.image Container image used for prometheus. prom/prometheus:v2.40.1 prometheus.service.type Prometheus service type ClusterIP prometheus.service.nodePort Prometheus service port for NodePort service type, assigned automatically when left empty. (only if prometheus.service.type is set to \"NodePort\") nil nodeSelector Node labels for pod assignment. {} affinity Node affinity for pod assignments. {} tolerations Node tolerations for pod assignment. []

Specify each parameter using the --set key=value[,key=value] argument to helm install

"},{"location":"uplink/become-a-provider/#telemetry-and-usage-data","title":"Telemetry and usage data","text":"

The inlets-uplink Kubernetes operator will send telemetry data to OpenFaaS Ltd on a periodic basis. This information is used for calculating accurate usage metrics for billing purposes. This data is sent over HTTPS, does not contain any personal information, and is not shared with any third parties.

This data includes the following:

  • Number of tunnels deployed
  • Number of namespaces with at least one tunnel contained
  • Kubernetes version
  • Inlets Uplink version
  • Number of installations of Inlets Uplink
"},{"location":"uplink/connect-to-tunnels/","title":"Connect to tunnels","text":"

The tunnel plugin for the inlets-pro CLI can be used to get connection instructions for a tunnel.

Whether the client needs to be deployed as a systemd service on the customer's server or as a Kubernetes service, with the CLI it is easy to generate connection instructions for these different formats by setting the --format flag.

Supported formats:

  • CLI command
  • Systemd
  • Kubernetes YAML Deployment

Make sure you have the latest version of the tunnel command available:

inlets-pro plugin get tunnel\n
"},{"location":"uplink/connect-to-tunnels/#get-connection-instructions","title":"Get connection instructions","text":"

Generate the client command for the selected tunnel:

$ inlets-pro tunnel connect openfaas \\\n--domain uplink.example.com \\\n--upstream http://127.0.0.1:8080\n\n# Access your HTTP tunnel via: http://openfaas.tunnels:8000\n\n# Access your TCP tunnel via ClusterIP: \n#  openfaas.tunnels:5432\n\ninlets-pro uplink client \\\n--url=wss://uplink.example.com/tunnels/openfaas \\\n--token=tbAd4HooCKLRicfcaB5tZvG3Qj36pjFSL3Qob6b9DBlgtslmildACjWZUD \\\n--upstream=http://127.0.0.1:8080\n

Optionally the --quiet flag can be set to print the CLI command without the additional info.

"},{"location":"uplink/connect-to-tunnels/#deploy-the-client-as-a-systemd-service","title":"Deploy the client as a systemd service","text":"

To generate a systemd service file for the tunnel client command set the --format flag to systemd.

$ inlets-pro tunnel connect openfaas \\\n--domain uplink.example.com \\ \n--upstream http://127.0.0.1:8080 \\\n--format systemd\n\n[Unit]\nDescription=openfaas inlets client\nAfter=network.target\n\n[Service]\nType=simple\nRestart=always\nRestartSec=5\nStartLimitInterval=0\nExecStart=/usr/local/bin/inlets-pro uplink client --url=wss://uplink.example.com/tunnels/openfaas --token=tbAd4HooCKLRicfcaB5tZvG3Qj36pjFSL3Qob6b9DBlgtslmildACjWZUD --upstream=http://127.0.0.1:8080\n\n[Install]\nWantedBy=multi-user.target\n

Copy the service file over to the customer's host. Save the unit file as: /etc/systemd/system/openfaas-tunnel.service.

Once the file is in place start the service for the first time:

sudo systemctl daemon-reload\nsudo systemctl enable --now openfaas-tunnel\n

Verify the tunnel client is running:

systemctl status openfaas-tunnel\n

You can also check the logs to see if the client connected successfully:

journalctl -u openfaas-tunnel\n
"},{"location":"uplink/connect-to-tunnels/#deploy-the-client-in-a-kubernetes-cluster","title":"Deploy the client in a Kubernetes cluster","text":"

To generate a YAML deployment for a selected tunnel, set the --format flag to k8s_yaml. The generated resource can be deployed in the customer's cluster.

inlets-pro tunnel connect openfaas \\\n--domain uplink.example.com \\\n--upstream http://gateway.openfaas:8080 \\\n--format k8s_yaml\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: openfaas-inlets-client\nspec:\n  replicas: 1\nselector:\n    matchLabels:\n      app: openfaas-inlets-client\n  template:\n    metadata:\n      labels:\n        app: openfaas-inlets-client\n    spec:\n      containers:\n      - name: openfaas-inlets-client\n        image: ghcr.io/inlets/inlets-pro:0.9.14\n        imagePullPolicy: IfNotPresent\n        command: [\"inlets-pro\"]\nargs:\n        - \"uplink\"\n- \"client\"\n- \"--url=wss://uplink.example.com/tunnels/openfaas\"\n- \"--token=tbAd4HooCKLRicfcaB5tZvG3Qj36pjFSL3Qob6b9DBlgtslmildACjWZUD\"\n- \"--upstream=http://gateway.openfaas:8080\"\n

In this example we create a tunnel to uplink an OpenFaaS deployment.

Get the logs for the client and check it connected successfully:

kubectl logs deploy/openfaas-inlets-client\n
"},{"location":"uplink/create-tunnels/","title":"Create a tunnel for a customer","text":""},{"location":"uplink/create-tunnels/#use-separate-namespaces-for-your-tunnels","title":"Use separate namespaces for your tunnels","text":"

The inlets namespace contains the control plane for inlets uplink, so you'll need to create at least one additional namespace for your customer tunnels.

  1. Create a namespace per customer (recommended)

    This approach avoids conflicts on names, and gives better isolation between tenants.

    kubectl create namespace acmeco\n

    Then, create a copy of the license secret in the new namespace:

    export NS=\"n1\"\nexport LICENSE=$(kubectl get secret -n inlets inlets-uplink-license -o jsonpath='{.data.license}' | base64 -d)\n\nkubectl create secret generic \\\n-n $NS \\\ninlets-uplink-license \\\n--from-literal license=$LICENSE\n
  2. A single namespace for all customer tunnels (not recommended)

    For development purposes, you could create a single namespace for all your customers.

    kubectl create namespace tunnels\n

Finally, if you're using Istio, then you need to label each additional namespace to enable sidecar injection:

kubectl label namespace tunnels \\\nistio-injection=enabled --overwrite\n
"},{"location":"uplink/create-tunnels/#create-a-tunnel-with-an-auto-generated-token","title":"Create a Tunnel with an auto-generated token","text":"

Tunnel describes an inlets-uplink tunnel server. The specification describes a set of ports to use for TCP tunnels.

For example the following Tunnel configuration sets up a http tunnel on port 8000 by default and adds port 8080 for use with TCP tunnels. The licenceRef needs to reference a secret containing an inlets-uplink license.

apiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\nname: acmeco\nnamespace: tunnels\nspec:\nlicenseRef:\nname: inlets-uplink-license\nnamespace: tunnels\ntcpPorts:\n- 8080 

Alternatively the CLI can be used to create a tunnel:

inlets-pro tunnel create acmeco \\\n-n tunnels \\\n--port 8080\n
"},{"location":"uplink/create-tunnels/#create-a-tunnel-with-a-pre-defined-token","title":"Create a Tunnel with a pre-defined token","text":"

If you delete a Tunnel with an auto-generated token, and re-create it later, the token will change. So we recommend that you pre-define your tokens. This style works well for GitOps and automated deployments with Helm.

Make sure the secret is in the same namespace as the Tunnel Custom Resource.

You can use openssl to generate a secure token:

openssl rand -base64 32 |tr -d '\\n' > token.txt\n

Note that the tr command is used to remove the newline character from the output, so that there is no new-line within the token.

Create a Kubernetes secret for the token named acmeco-token:

kubectl create secret generic \\\n-n tunnels acmeco-token \\\n--from-file token=./token.txt\n

Reference the token when creating a tunnel, to expose port 8080 over TCP.

apiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\nname: acmeco\nnamespace: tunnels\nspec:\nlicenseRef:\nname: inlets-uplink-license\nnamespace: tunnels\ntokenRef:\nname: acmeco-token\nnamespace: tunnels\ntcpPorts:\n- 8080\n

Clients can now connect to the tunnel using the custom token.

"},{"location":"uplink/create-tunnels/#node-selection-and-annotations-for-tunnels","title":"Node selection and annotations for tunnels","text":"

The tunnel spec has a nodeSelector field that can be used to assign tunnel pods to Nodes. See Assign Pods to Nodes from the kubernetes docs for more information.

It is also possible to set additional annotations on the tunnel pod using the podAnnotations field in the tunnel spec.

The following example adds an annotation with the customer name to the tunnel pod and uses the node selector to specify a target node with a specific region label.

apiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\nname: acmeco\nnamespace: tunnels\nspec:\nlicenseRef:\nname: inlets-uplink-license\nnamespace: tunnels\ntcpPorts:\n- 8080\npodAnnotations:\ncustomer: acmeco\nnodeSelector:\nregion: east\n
"},{"location":"uplink/create-tunnels/#connect-to-tunnels","title":"Connect to tunnels","text":"

The uplink client command is part of the inlets-pro binary. It is used to connect to tunnels and expose services over the tunnel.

There are several ways to get the binary:

  • Download it from the GitHub releases
  • Get it with arkade: arkade get inlets-pro
  • Use the inlets-pro docker image
"},{"location":"uplink/create-tunnels/#example-tunnel-a-customer-http-service","title":"Example: Tunnel a customer HTTP service","text":"

We'll use inlets-pro's built in file server as an example of how to tunnel a HTTP service.

Run this command on a private network or on your workstation:

mkdir -p /tmp/share\ncd /tmp/share\necho \"Hello World\" > README.md\n\ninlets-pro fileserver -w /tmp/share -a\n\nStarting inlets Pro fileserver. Version: 0.9.10-rc1-1-g7bc49ae - 7bc49ae494bd9ec789fc5e9eaf500f2b1fe60786\nServing files from: /tmp/share\nListening on: 127.0.0.1:8080, allow browsing: true, auth: false\n

Once the server is running connect to your tunnel using the inlets-uplink client. We will connect to the tunnel called acmeco (see the example in Create a tunnel for a customer using the Custom Resource to create this tunnel).

Retrieve the token for the tunnel:

kubectlcli
kubectl get -n tunnels \\\nsecret/acmeco -o jsonpath=\"{.data.token}\" | base64 --decode > token.txt 
inlets-pro tunnel token acmeco \\\n-n tunnels > token.txt\n

The contents will be saved in token.txt.

Start the tunnel client:

inlets-pro uplink client \\\n--url wss://uplink.example.com/tunnels/acmeco \\\n--upstream http://127.0.0.1:8080 \\\n--token-file ./token.txt\n

Tip: get connection instructions

The tunnel plugin for the inlets-pro CLI can be used to get connection instructions for a tunnel.

inlets-pro tunnel connect acmeco \\\n--domain uplink.example.com \\\n--upstream http://127.0.0.1:8080\n

Running the command above will print out the instructions to connect to the tunnel:

# Access your tunnel via ClusterIP: acmeco.tunnels\ninlets-pro uplink client \\\n--url=wss://uplink.example.com/tunnels/acmeco \\\n--upstream=http://127.0.0.1:8080 \\\n--token=z4oubxcamiv89V0dy8ytmjUEPwAmY0yFyQ6uaBmXsIQHKtAzlT3PcGZRgK\n

Run a container in the cluster to check the file server is accessible through the http tunnel using curl: curl -i acmeco.tunnels:8000

$ kubectl run -t -i curl --rm \\\n--image ghcr.io/openfaas/curl:latest /bin/sh   \n\n$ curl -i acmeco.tunnels:8000\nHTTP/1.1 200 OK\nContent-Type: text/html; charset=utf-8\nDate: Thu, 17 Nov 2022 08:39:48 GMT\nLast-Modified: Mon, 14 Nov 2022 20:52:53 GMT\nContent-Length: 973\n\n<pre>\n<a href=\"README.md\">README.md</a>\n</pre>\n
"},{"location":"uplink/create-tunnels/#how-to-tunnel-multiple-http-services-from-a-customer","title":"How to tunnel multiple HTTP services from a customer","text":"

The following example shows how to access more than one HTTP service over the same tunnel. It is possible to expose multiple upstream services over a single tunnel.

You can add upstreamDomains to the Tunnel resource. Uplink will create additional Services for each domain so the HTTP data plane is available on different domains.

apiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\n name: acmeco\n  namespace: tunnels\nspec:\n licenseRef:\n    name: inlets-uplink-license\n    namespace: tunnels\n  tcpPorts:\n  - 8080\n+  upstreamDomains:\n+  - gateway\n+  - prometheus\n

Upstreams can also be added while creating a tunnel with the CLI:

inlets-pro tunnel create acmeco \\\n--namespace tunnels \\\n--upstream gateway \\\n--upstream prometheus\n

Start a tunnel client and add multiple upstreams:

inlets-pro uplink client \\\n--url wss://uplink.example.com/tunnels/acmeco \\\n--upstream prometheus.tunnels=http://127.0.0.1:9090 \\\n--upstream gateway.tunnels=http://127.0.0.1:8080 \\\n--token-file ./token.txt\n

Access both services using curl:

$ kubectl run -t -i curl --rm \\\n--image ghcr.io/openfaas/curl:latest /bin/sh   \n\n$ curl -i gateway.tunnels:8000\nHTTP/1.1 302 Found\nContent-Length: 29\nContent-Type: text/html; charset=utf-8\nDate: Thu, 16 Feb 2023 16:29:09 GMT\nLocation: /graph\n\n<a href=\"/graph\">Found</a>.\n\n\n$ curl -i prometheus.tunnels:8000\nHTTP/1.1 301 Moved Permanently\nContent-Length: 39\nContent-Type: text/html; charset=utf-8\nDate: Thu, 16 Feb 2023 16:29:11 GMT\nLocation: /ui/\n\n<a href=\"/ui/\">Moved Permanently</a>.\n

Note that the Host header has to be set in the request so the tunnel knows which upstream to send the request to.

"},{"location":"uplink/create-tunnels/#tunnel-a-customers-tcp-service","title":"Tunnel a customer's TCP service","text":"

Perhaps you need to access a customer's Postgres database from their private network?

"},{"location":"uplink/create-tunnels/#create-a-tcp-tunnel-using-a-custom-resource","title":"Create a TCP tunnel using a Custom Resource","text":"

Example Custom Resource to deploy a tunnel for acmeco\u2019s production Postgres database:

apiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\nname: prod-database\nnamespace: acmeco\nspec:\nlicenseRef:\nname: inlets-uplink-license\nnamespace: acmeco\ntcpPorts:\n- 5432\n

Alternatively the cli can be used to create a new tunnel:

inlets-pro tunnel create prod-database \\\n-n acmeco \\\n--port 5432\n
"},{"location":"uplink/create-tunnels/#run-postgresql-on-your-private-server","title":"Run postgresql on your private server","text":"

The quickest way to spin up a Postgres instance on your own machine would be to use Docker:

head -c 16 /dev/urandom |shasum \n8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\n\nexport PASSWORD=\"8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\"\n\ndocker run --rm --name postgres \\\n-p 5432:5432 \\\n-e POSTGRES_PASSWORD=8cb3efe58df984d3ab89bcf4566b31b49b2b79b9 \\\n-ti postgres:latest\n
"},{"location":"uplink/create-tunnels/#connect-with-an-inlets-uplink-client","title":"Connect with an inlets uplink client","text":"
export UPLINK_DOMAIN=\"uplink.example.com\"\n\ninlets-pro uplink client \\\n--url wss://${UPLINK_DOMAIN}/acmeco/prod-database \\\n--upstream 127.0.0.1:5432 \\\n--token-file ./token.txt\n
"},{"location":"uplink/create-tunnels/#access-the-customer-database-from-within-kubernetes","title":"Access the customer database from within Kubernetes","text":"

Now that the tunnel is established, you can connect to the customer's Postgres database from within Kubernetes using its ClusterIP prod-database.acmeco.svc.cluster.local:

Try it out:

export PASSWORD=\"8cb3efe58df984d3ab89bcf4566b31b49b2b79b9\"\n\nkubectl run -i -t psql \\\n--env PGPORT=5432 \\\n--env PGPASSWORD=$PASSWORD --rm \\\n--image postgres:latest -- psql -U postgres -h prod-database.acmeco\n

Try a command such as CREATE database websites (url TEXT), \\dt or \\l.

"},{"location":"uplink/create-tunnels/#getting-help","title":"Getting help","text":"

Feel free to reach out to our team via email for technical support.

"},{"location":"uplink/ingress-for-tunnels/","title":"Ingress for tunnels","text":"

Info

Inlets Uplink is designed to connect customer services to a remote Kubernetes cluster for command and control as part of a SaaS product.

Any tunnelled service can be accessed directly from within the cluster and does not need to be exposed to the public Internet for access.

Beware: by following these instructions, you are exposing one or more of those tunnels to the public Internet.

Make inlets uplink HTTP tunnels publicly accessible by setting up ingress for the data plane.

The instructions assume that you want to expose two HTTP tunnels. We will configure ingress for the first tunnel, called grafana, on the domain grafana.example.com. The second tunnel, called openfaas, will use the domain openfaas.example.com.

Both tunnels can be created with kubectl or the inlets-pro cli. See create tunnels for more info:

kubectlcli
$ cat <<EOF | kubectl apply -f - \napiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\n  name: grafana\n  namespace: tunnels\nspec:\n  licenseRef:\n    name: inlets-uplink-license\n    namespace: tunnels\n---\napiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\n  name: openfaas\n  namespace: tunnels\nspec:\n  licenseRef:\n    name: inlets-uplink-license\n    namespace: tunnels\nEOF\n
$ inlets-pro tunnel create grafana\nCreated tunnel grafana. OK.\n\n$ inlets-pro tunnel create openfaas\nCreated tunnel openfaas. OK.\n

Follow the instruction for Kubernetes Ingress or Istio depending on how you deployed inlets uplink.

"},{"location":"uplink/ingress-for-tunnels/#setup-tunnel-ingress","title":"Setup tunnel ingress","text":"
  1. Create a new certificate Issuer for tunnels:

    export EMAIL=\"you@example.com\"\n\ncat > tunnel-issuer-prod.yaml <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: tunnels-letsencrypt-prod\n  namespace: inlets\nspec:\n  acme:\n    server: https://acme-v02.api.letsencrypt.org/directory\n    email: $EMAIL\n    privateKeySecretRef:\n      name: tunnels-letsencrypt-prod\n    solvers:\n    - http01:\n        ingress:\n          class: \"nginx\"\nEOF\n
  2. Create an ingress resource for the tunnel:

    apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\nname: grafana-tunnel-ingress\nnamespace: inlets\nannotations:\nkubernetes.io/ingress.class: nginx\ncert-manager.io/issuer: tunnels-letsencrypt-prod\nspec:\nrules:\n- host: grafana.example.com\nhttp:\npaths:\n- path: /\npathType: Prefix\nbackend:\nservice:\nname: grafana.tunnels\nport:\nnumber: 8000\ntls:\n- hosts:\n- grafana.example.com\nsecretName: grafana-cert\n

    Note that the annotation cert-manager.io/issuer is used to reference the certificate issuer created in the first step.

To setup ingress for multiple tunnels simply define multiple ingress resources. For example apply a second ingress resource for the openfaas tunnel:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\nname: openfaas-tunnel-ingress\nnamespace: inlets\nannotations:\nkubernetes.io/ingress.class: nginx\ncert-manager.io/issuer: tunnels-letsencrypt-prod\nspec:\nrules:\n- host: openfaas.example.com\nhttp:\npaths:\n- path: /\npathType: Prefix\nbackend:\nservice:\nname: openfaas.tunnels\nport:\nnumber: 8000\ntls:\n- hosts:\n- openfaas.example.com\nsecretName: openfaas-cert\n
"},{"location":"uplink/ingress-for-tunnels/#setup-tunnel-ingress-with-an-istio-ingress-gateway","title":"Setup tunnel ingress with an Istio Ingress gateway","text":"
  1. Create a new certificate Issuer for tunnels:

    export EMAIL=\"you@example.com\"\n\ncat > tunnel-issuer-prod.yaml <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: tunnels-letsencrypt-prod\n  namespace: istio-system\nspec:\n  acme:\n    server: https://acme-v02.api.letsencrypt.org/directory\n    email: $EMAIL\n    privateKeySecretRef:\n      name: tunnels-letsencrypt-prod\n    solvers:\n    - http01:\n        ingress:\n          class: \"istio\"\nEOF\n

    We are using the Let's Encrypt production server which has strict limits on the API. A staging server is also available at https://acme-staging-v02.api.letsencrypt.org/directory. If you are creating a lot of certificates while testing it would be better to use the staging server.

  2. Create a new certificate resource. In this case we want to expose two tunnels on their own domain, grafana.example.com and openfaas.example.com. This will require two certificates, one for each domain:

    apiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: grafana-cert\nnamespace: istio-system\nspec:\nsecretName: grafana-cert\ncommonName: grafana.example.com\ndnsNames:\n- grafana.example.com\nissuerRef:\nname: tunnels-letsencrypt-prod\nkind: Issuer\n\n---\napiVersion: cert-manager.io/v1\nkind: Certificate\nmetadata:\nname: openfaas-cert\nnamespace: istio-system\nspec:\nsecretName: openfaas-cert\ncommonName: openfaas.example.com\ndnsNames:\n- openfaas.example.com\nissuerRef:\nname: tunnels-letsencrypt-prod\nkind: Issuer\n

    Note that both the certificates and issuer are created in the istio-system namespace.

  3. Configure the ingress gateway for both tunnels. In this case we create a single resource for both hosts but you could also split the configuration into multiple Gateway resources.

    apiVersion: networking.istio.io/v1alpha3\nkind: Gateway\nmetadata:\nname: tunnel-gateway\nnamespace: inlets\nspec:\nselector:\nistio: ingressgateway # use Istio default gateway implementation\nservers:\n- port:\nnumber: 443\nname: https\nprotocol: HTTPS\ntls:\nmode: SIMPLE\ncredentialName: grafana-cert\nhosts:\n- grafana.example.com\n- port:\nnumber: 443\nname: https\nprotocol: HTTPS\ntls:\nmode: SIMPLE\ncredentialName: openfaas-cert\nhosts:\n- openfaas.example.com\n

    Note that the credentialName references the secrets for the certificates created in the previous step.

  4. Configure the gateway's traffic routes by defining corresponding virtual services:

    apiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\nname: grafana\nnamespace: inlets\nspec:\nhosts:\n- grafana.example.com\ngateways:\n- tunnel-gateway\nhttp:\n- match:\n- uri:\nprefix: /\nroute:\n- destination:\nhost: grafana.tunnels.svc.cluster.local\nport:\nnumber: 8000\n---\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\nname: openfaas\nnamespace: inlets\nspec:\nhosts:\n- openfaas.example.com\ngateways:\n- tunnel-gateway\nhttp:\n- match:\n- uri:\nprefix: /\nroute:\n- destination:\nhost: openfaas.tunnels.svc.cluster.local\nport:\nnumber: 8000\n

After applying these resources you should be able to access the data plane for both tunnels on their custom domain.

"},{"location":"uplink/ingress-for-tunnels/#wildcard-ingress-with-the-data-router","title":"Wildcard Ingress with the data-router","text":"

As an alternative to creating individual sets of Ingress records, DNS A/CNAME entries and TLS certificates for each tunnel, you can use the data-router to route traffic to the correct tunnel based on the hostname. This approach uses a wildcard DNS entry and a single TLS certificate for all tunnels.

The following example is adapted from the cert-manager documentation to use DigitalOcean's DNS servers, however you can find instructions for issuers such as AWS Route53, Cloudflare, and Google Cloud DNS listed.

DNS01 challenges require a secret to be created containing the credentials for the DNS provider. The secret is referenced by the issuer resource.

kubectl create secret generic \\\n-n inlets digitalocean-dns \\\n--from-file access-token=$HOME/do-access-token\n

Create a separate Issuer, assuming a domain of t.example.com, where each tunnel would be i.e. prometheus.t.example.com or api.t.example.com:

export NS=\"inlets\"\nexport ISSUER_NAME=\"inlets-wildcard\"\nexport DOMAIN=\"t.example.com\"\n\ncat <<EOF | kubectl apply -f -\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: $ISSUER_NAME\n  namespace: $NS\nspec:\n  acme:\n    email: webmaster@$DOMAIN\n    server: https://acme-v02.api.letsencrypt.org/directory\n    privateKeySecretRef:\n      name: $ISSUER_NAME\n    solvers:\n    - dns01:\n        digitalocean:\n            tokenSecretRef:\n              name: digitalocean-dns\n              key: access-token\nEOF\n

Update values.yaml to enable the dataRouter and to specify the wildcard domain:

## The dataRouter is an optional component to enable easy Ingress to connected tunnels.\n## Learn more under \"Ingress for Tunnels\" in the docs: https://docs.inlets.dev/\ndataRouter:\nenabled: true\n\n# Leave out the asterisk i.e. *.t.example.com would be: t.example.com\nwildcardDomain: \"t.example.com\"\n\ntls:\nissuerName: \"inlets-wildcard\"\n\ningress:\nclass: \"nginx\"\nannotations:\n# Apply basic rate limiting.\nnginx.ingress.kubernetes.io/limit-connections: \"300\"\nnginx.ingress.kubernetes.io/limit-rpm: \"1000\"\n

Apply the updated values:

helm upgrade --install inlets-uplink \\\noci://ghcr.io/openfaasltd/inlets-uplink-provider \\\n--namespace inlets \\\n--values ./values.yaml\n

Create a tunnel with an Ingress Domain specified in the .Spec field:

export TUNNEL_NS=\"tunnels\"\nexport DOMAIN=\"t.example.com\"\n\ncat <<EOF | kubectl apply -f -\napiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\n  name: fileshare\n  namespace: $TUNNEL_NS\nspec:\n  licenseRef:\n    name: inlets-uplink-license\n    namespace: $TUNNEL_NS\n  ingressDomains:\n    - fileshare.$DOMAIN\nEOF\n

On a private computer, create a new directory, a file to serve and then run the built-in HTTP server:

cd /tmp\nmkdir -p ./share\ncd ./share\necho \"Hello from inlets\" > index.html\n\ninlets-pro fileserver --port 8080 --allow-browsing --webroot ./\n

Get the instructions to connect to the tunnel.

The --domain flag here is for your uplink control-plane, where tunnels connect, not the data-plane where ingress is served. This is usually i.e. uplink.example.com.

export TUNNEL_NS=\"tunnels\"\nexport UPLINK_DOMAIN=\"uplink.example.com\"\n\ninlets-pro tunnel connect fileshare \\\n--namespace $TUNNEL_NS \\\n--domain $UPLINK_DOMAIN\n

Add the --upstream fileshare.t.example.com=fileshare flag to the command you were given, then run it.

The command below is sample output, do not copy it directly.

inlets-pro uplink client \\\n--url=wss://uplink.example.com/tunnels/fileshare \\\n--token=REDACTED \\\n--upstream fileshare.t.example.com=http://127.0.0.1:8080\n

Now, access the tunneled service via the wildcard domain i.e. https://fileshare.t.example.com.

You should see: \"Hello from inlets\" printed in your browser.

Finally, you can view the logs of the data-router, to see it resolving internal tunnel service names for various hostnames:

kubectl logs -n inlets deploy/data-router\n\n2024-01-24T11:29:16.965Z        info    data-router/main.go:51  Inlets (tm) Uplink - data-router: 2024-01-24T11:29:16.970Z        info    data-router/main.go:90  Listening on: 8080      Tunnel namespace: (all) Kubernetes version: v1.27.4+k3s1\n\nI0124 11:29:58.858772       1 main.go:151] Host: fileshares.t.example.com    Path: /\nI0124 11:29:58.858877       1 roundtripper.go:48] \"No ingress found\" hostname=\"fileshares.t.example.com\" path=\"/\"\n\nI0124 11:30:03.588993       1 main.go:151] Host: fileshare.t.example.com     Path: /\nI0124 11:30:03.589051       1 roundtripper.go:56] \"Resolved\" hostname=\"fileshare.t.example.com\" path=\"/\" tunnel=\"fileshare.tunnels:8000\"\n
"},{"location":"uplink/manage-tunnels/","title":"Manage customer tunnels","text":"

You can use kubectl or the tunnel plugin for the inlets-pro CLI to manage tunnels.

"},{"location":"uplink/manage-tunnels/#list-tunnels","title":"List tunnels","text":"

List tunnels across all namespaces:

kubectlcli
$ kubectl get tunnels -A\n\nNAMESPACE     NAME         AUTHTOKENNAME   DEPLOYMENTNAME   TCP PORTS   DOMAINS\ntunnels       acmeco       acmeco          acmeco           [8080]      \ncustomer1     ssh          ssh             ssh              [50035]\ncustomer1     prometheus   prometheus      prometheus       []         [prometheus.customer1.example.com]\n
$ inlets-pro tunnel list -A\n\nTUNNEL     DOMAINS                              PORTS   CREATED\nacmeco     []                                   [8080]  2022-11-22 11:51:35 +0100 CET\nssh        []                                   [50035] 2022-11-24 18:19:01 +0100 CET\nprometheus [prometheus.customer1.example.com]   []      2022-11-24 11:43:23 +0100 CET\n

To list the tunnels within a namespace:

kubectlcli
$ kubectl get tunnels -n customer1\n\nNAME         AUTHTOKENNAME   DEPLOYMENTNAME   TCP PORTS   DOMAINS\nssh          ssh             ssh              [50035]\n
$ inlets-pro tunnel list -n customer1\n\nTUNNEL     DOMAINS   PORTS   CREATED\nssh        []        [50035] 2022-11-22 11:51:35 +0100 CET\n
"},{"location":"uplink/manage-tunnels/#delete-a-tunnel","title":"Delete a tunnel","text":"

Deleting a tunnel will remove all resources for the tunnel.

To remove a tunnel run:

kubectlcli
kubectl delete -n tunnels \\\ntunnel/acmeco 
inlets-pro tunnel remove acmeco \\\n-n tunnels\n

Do also remember to stop the customer's inlets uplink client.

"},{"location":"uplink/manage-tunnels/#update-the-ports-or-domains-for-a-tunnel","title":"Update the ports or domains for a tunnel","text":"

You can update a tunnel and configure its TCP ports or domain names by editing the Tunnel Custom Resource:

kubectl edit -n tunnels \\\ntunnel/acmeco  

Imagine you wanted to add port 8081, when you already had port 8080 exposed:

apiVersion: uplink.inlets.dev/v1alpha1\nkind: Tunnel\nmetadata:\n  name: acmeco\n  namespace: tunnels\nspec:\n  licenseRef:\n    name: inlets-uplink-license\n    namespace: tunnels\n  tcpPorts:\n  - 8080\n+ - 8081\n

Alternatively, if you have the tunnel saved as a YAML file, you can edit it and apply it again with kubectl apply.

"},{"location":"uplink/manage-tunnels/#check-the-logs-of-a-tunnel","title":"Check the logs of a tunnel","text":"

The logs for tunnels can be useful for troubleshooting or to see if clients are connecting successfully.

Get the logs for a tunnel deployment:

$ kubectl logs -n tunnels deploy/acmeco -f\n\n2022/11/22 12:07:38 Inlets Uplink For SaaS & Service Providers (Inlets Uplink for 5x Customers)\n2022/11/22 12:07:38 Licensed to: user@example.com\ninlets (tm) uplink server\nAll rights reserved OpenFaaS Ltd (2022)\n\nMetrics on: 0.0.0.0:8001\nControl-plane on: 0.0.0.0:8123\nHTTP data-plane on: 0.0.0.0:8000\ntime=\"2022/11/22 12:33:34\" level=info msg=\"Added upstream: * => http://127.0.0.1:9090 (9355de15c687471da9766cbe51423e54)\"\ntime=\"2022/11/22 12:33:34\" level=info msg=\"Handling backend connection request [9355de15c687471da9766cbe51423e54]\"\n
"},{"location":"uplink/manage-tunnels/#rotate-the-secret-for-a-tunnel","title":"Rotate the secret for a tunnel","text":"

You may want to rotate a secret for a customer if you think the secret has been leaked. The token can be rotated manually using kubectl or with a single command using the tunnel CLI plugin.

kubectlcli

Delete the token secret. The default secret has the same name as the tunnel. The inlets uplink controller will automatically create a new secret.

kubectl delete -n tunnels \\\nsecret/acmeco 

The tunnel has to be restarted to use the new token.

kubectl rollout restart -n tunnels \\\ndeploy/acmeco\n

Rotate the tunnel token:

inlets-pro tunnel rotate acmeco \\\n-n tunnels\n

Any connected tunnels will disconnect at this point, and won\u2019t be able to reconnect until you configure them with the updated token.

Retrieve the new token for the tunnel and save it to a file:

kubectlcli
kubectl get -n tunnels secret/acmeco \\\n-o jsonpath=\"{.data.token}\" | base64 --decode > token.txt 
inlets-pro tunnel token acmeco \\\n-n tunnels > token.txt\n

The contents will be saved in token.txt

"},{"location":"uplink/monitoring-tunnels/","title":"Monitoring inlets uplink","text":"

Inlets Uplink comes with an integrated Prometheus deployment that automatically collects metrics for each tunnel.

Note

Prometheus is deployed with Inlets Uplink by default. If you don't need monitoring you can disable it in the values.yaml of the Inlets Uplink Helm chart:

prometheus:\ncreate: false\n

You can explore the inlets data using Prometheus's built-in expression browser. To access it, port forward the prometheus service and then navigate to http://localhost:9090/graph

kubectl port-forward \\\n-n inlets \\\nsvc/prometheus 9090:9090\n
"},{"location":"uplink/monitoring-tunnels/#metrics-for-the-control-plane","title":"Metrics for the control-plane","text":"

The control-plane metrics can give you insights into the number of clients that are connected and the number of http requests made to the control-plane endpoint for each tunnel.

Metric Type Description Labels controlplane_connected_gauge gauge gauge of inlets clients connected to the control plane tunnel_name controlplane_requests_total counter total HTTP requests processed by connecting clients on the control plane code, tunnel_name"},{"location":"uplink/monitoring-tunnels/#metrics-for-the-data-plane","title":"Metrics for the data-plane","text":"

The data-plane metrics can give you insights in the services that are exposed through your tunnel.

Metric Type Description Labels dataplane_connections_gauge gauge gauge of connections established over data plane port, type, tunnel_name dataplane_connections_total counter total count of connections established over data plane port, type, tunnel_name dataplane_requests_total counter total HTTP requests processed code, host, method, tunnel_name dataplane_request_duration_seconds histogram seconds spent serving HTTP requests code, host, method, tunnel_name,

The connections metrics show the number of connections that are open at this point in time, and on which ports. The type label indicates whether the connection is for a http or tcp upstream.

The request metrics only include HTTP upstreams. These metrics can be used to get Rate, Error, Duration (RED) information for any API or website that is connected through the tunnel.

"},{"location":"uplink/monitoring-tunnels/#setup-grafana-for-monitoring","title":"Setup Grafana for monitoring","text":"

Grafana can be used to visualize the data collected by the inlets uplink Prometheus instance. We provide a sample dashboard that you can use as a starting point.

Inlets uplink Grafana dashboard

The dashboard can help you get insights in:

  • The number of clients connected to each tunnel.
  • Invocations to the control plane for each tunnel. This can help with detecting misbehaving clients.
  • Rate, Error, Duration (RED) information for HTTP tunnels.
  • The number of TCP connections opened for each tunnel.
"},{"location":"uplink/monitoring-tunnels/#install-grafana","title":"Install Grafana","text":"

There are three options we recommend for getting access to Grafana.

  • Grafana installed with its Helm chart
  • Grafana Cloud
  • AWS managed Grafana

You can install Grafana in one line with arkade:

arkade install grafana\n
Grafana can also be installed with Helm. See: Grafana Helm Chart

Port forward grafana and retrieve the admin password to login:

# Expose the service via port-forward:\nkubectl --namespace grafana port-forward service/grafana 3000:80\n\n# Get the admin password:\nkubectl get secret --namespace grafana grafana -o jsonpath=\"{.data.admin-password}\" | base64 --decode ; echo\n

Access Grafana on http://127.0.0.1:3000 and login as admin.

"},{"location":"uplink/monitoring-tunnels/#add-a-data-source","title":"Add a data source","text":"

Before you import the dashboard, you need to add the inlets-uplink prometheus instance as a data source:

  1. Select the cog icon on the side menu to show the configuration options.
  2. Select Data sources.

    This opens the data sources page, which displays a list of previously configured data sources for the Grafana instance.

  3. Select Add data source and pick Prometheus from the list of supported data sources.

  4. Configure the inlets Prometheus instance as a data source:

    • In the name field set: inlets-prometheus
    • For the URL use: http://prometheus.inlets:9090

      if you installed inlets uplink in a different namespace this url should be http://prometheus.<namespace>:9090

    • Set the scrape interval field to 30s
"},{"location":"uplink/monitoring-tunnels/#import-the-dashboard","title":"Import the dashboard","text":"

Import the inlets uplink dashboard in Grafana:

  1. Click Dashboards > Import in the side menu.
  2. Copy the dashboard JSON text
  3. Paste the dashboard JSON into the text area.

"},{"location":"uplink/overview/","title":"Inlets Uplink overview","text":"

What's the difference between Inlets Pro and Inlets Uplink?

Inlets Pro is a stand-alone binary that can be used to expose local HTTP and TCP services on a remote machine or network.

Inlets Uplink is a complete management solution for tunnels for SaaS companies and service providers. It's designed for scale, multi-tenancy and easy management.

Inlets Uplink is our answer to the question: \"How do you access customer services from within your own product?\"

You may consider building your own agent, using an AWS SQS queue, or a VPN.

The first two options involve considerable work both up front and in the long run. VPNs require firewall changes, specific network conditions, and lengthy paperwork.

Inlets Uplink uses a TLS encrypted websocket to make an outbound connection, and can also work over corporate HTTP proxies.

Here are some of the other differences between Inlets Pro and Inlets Uplink:

  • The management solution is built-in, self-hosted and runs on your Kubernetes cluster
  • You can create a tunnel almost instantly via CLI, REST API or the \"Tunnel\" Custom Resource
  • The license is installed on the server, instead of each client needing it
  • TCP ports can be remapped to avoid conflicts
  • A single tunnel can expose HTTP and TCP at the same time
  • All tunnels can be monitored centrally for reliability and usage
  • By default all tunnels are private and only available for access by your own applications

With Uplink, you deploy tunnel servers for customers to your Kubernetes cluster, and our operator takes care of everything else.

You can read more about why we created inlets uplink in the product announcement.

"},{"location":"uplink/overview/#table-of-contents","title":"Table of Contents","text":"
  • Become a provider
  • Create a tunnel
  • Connect to a tunnel
  • Manage tunnels
  • Expose tunnels publicly (optional)
  • Monitor tunnels

You can reach out to us if you have questions: Contact the inlets team

"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..05a6667 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,123 @@ + + + + https://docs.inlets.dev/ + 2024-02-09 + daily + + + https://docs.inlets.dev/reference/ + 2024-02-09 + daily + + + https://docs.inlets.dev/reference/faq/ + 2024-02-09 + daily + + + https://docs.inlets.dev/reference/inlets-operator/ + 2024-02-09 + daily + + + https://docs.inlets.dev/reference/inletsctl/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/automated-http-server/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/caddy-http-tunnel/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/community/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/dual-tunnels/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/istio-gateway/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/kubernetes-api-server/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/kubernetes-ingress/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/manual-http-server/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/manual-tcp-server/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/monitoring-and-metrics/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/postgresql-tcp-tunnel/ + 2024-02-09 + daily + + + https://docs.inlets.dev/tutorial/ssh-tcp-tunnel/ + 2024-02-09 + daily + + + https://docs.inlets.dev/uplink/become-a-provider/ + 2024-02-09 + daily + + + https://docs.inlets.dev/uplink/connect-to-tunnels/ + 2024-02-09 + daily + + + https://docs.inlets.dev/uplink/create-tunnels/ + 2024-02-09 + daily + + + https://docs.inlets.dev/uplink/ingress-for-tunnels/ + 2024-02-09 + daily + + + https://docs.inlets.dev/uplink/manage-tunnels/ + 2024-02-09 + daily + + + https://docs.inlets.dev/uplink/monitoring-tunnels/ + 2024-02-09 + daily + + + https://docs.inlets.dev/uplink/overview/ + 2024-02-09 + daily + + \ No newline at end of 
file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000000000000000000000000000000000000..bd4b9c6f551c4ebe503b5ce652721fff73341ca8 GIT binary patch literal 424 zcmV;Z0ayMXiwFq#PR3;d|8r?{Wo=<_E_iKh0M(VjZrd;nhVOlfz;{`0(8D@7?yXPI zJ%B1Pj!;>2L^@vHzI5#kzV0x9)x{J|5FdU}{M(n`Lpt1`P{e$y9_yw$K=v47?oZXP zw^#F{dg&h8sU!kENO7i9bzvTVT075kO_5=6Bh^@XOa3AkvRl$N^>fud9ONtw*68cB z34OP8A!!rPQyp^xraHjQws!fuFLuNOhVIxLpG z-i2^qS$WDCG;8Z|7^WQgu9z`QE^SJZV$4V!(eyH?v(qf+$Mg;*0|Oa1M*Yue$16r% zfHV)m;PNu9-(yj2ungB%Cm_102A#$f^M`#0 zu`Dpoj0JBoXq?%N)T0RL*)ZxguRo{-#J1F9ZtLHiv8ZR2-RgS%lJ3^3c2pFDx<#1( SKj7DgfV)p-^`a~?5C8xskI{<& literal 0 HcmV?d00001 diff --git a/tutorial/automated-http-server/index.html b/tutorial/automated-http-server/index.html new file mode 100644 index 0000000..3cec72d --- /dev/null +++ b/tutorial/automated-http-server/index.html @@ -0,0 +1,1179 @@ + + + + + + + + + + + + + + + + + + + + + + + + Automated http server - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Automated http server

+ +

Automate a HTTP tunnel server

+

Learn how to serve traffic from your private network over a private tunnel server.

+

At the end of this tutorial, you'll have a secure TLS public endpoint using your own DNS and domain, which you can use to access your internal services or webpages.

+

I'll show you how to:

+
    +
  • automate a tunnel server on a public cloud provider with inlets pre-loaded onto it,
  • +
  • how to connect a client from your home or private network
  • +
  • how to tunnel one or more services
  • +
  • and what else you can do
  • +
+

In a previous article, I explained some of the differences between SaaS and private tunnel servers.

+

Create your tunnel server

+

With SaaS tunnels, your tunnel server processes run on shared servers with other users. With a private tunnel server like inlets, you need to create a server somewhere on the Internet to run the tunnel. It should be created with a public IP address that you can use to accept traffic and proxy it into your private network.

+

Inlets Conceptual architecture

+
+

Pictured: Inlets Conceptual architecture

+
+

The simplest way to do this is to use the inletsctl tool, which supports around a dozen clouds. The alternative is to set up a VPS or install inlets-pro onto a server you already have set up, and then add a systemd unit file so that it restarts if the tunnel or server should crash for any reason.

+

To see a list of supported clouds run:

+
inletsctl create --help
+
+

For instructions on how to create an API key or service account for each, feel free to browse the docs.

+
inletsctl create \
+ --region lon1 \
+ --provider digitalocean \
+ --access-token-file ~/digital-ocean-api-key.txt \
+ --letsencrypt-domain blog.example.com \
+ --letsencrypt-email webmaster@example.com
+
+

A VM will be created in your account using the cheapest plan available, for DigitalOcean this costs 5 USD / mo at time of writing.

+

You can also run your tunnel server in the free tier of GCP, Oracle Cloud or on Fly.io at no additional cost.

+

Once the tunnel server has been created, you will receive:

+
    +
  • The IP address
  • +
  • An endpoint for the inlets client to connect to
  • +
  • A token for the inlets client to use when connecting
  • +
+

Take a note of these.

+

Now create a DNS "A" record for the IP address of the tunnel server on your domain control panel.

+

Personally, I'm a fan of Google Domains and the .dev domains, but DigitalOcean can also manage domains through their CLI:

+
export IP=""
+export SUBDOMAIN="blog.example.com"
+
+doctl compute domain create $SUBDOMAIN \
+  --ip-address $IP
+
+

How does the TLS encryption work?

+

The inlets server process will attempt to get a TLS certificate from Let's Encrypt using a HTTP01 Acme challenge.

+

What if I have multiple sites?

+

You can pass a number of sub-domains, for instance:

+
 --letsencrypt-domain blog.example.com,grafana.example.com \
+ --letsencrypt-email webmaster@example.com
+
+

Connect your tunnel client

+

The tunnel client can be run as and when required, or you can generate a systemd unit file so that you can have it running in the background. You can run the tunnel on the same machine as the service that you're proxying, or you can run it on another computer. It's entirely up to you.

+

So you could have a Raspberry Pi which just runs Raspberry Pi OS Lite and an inlets client, and nothing else. In this way you're creating a kind of router appliance.

+

Let's imagine you've run a Node.js express service on your computer:

+
$ git clone https://github.com/alexellis/alexellis.io \
+  --depth=1
+$ cd alexellis.io/
+$ npm install
+$ npm start
+
+alexellis.io started on port: http://0.0.0.0:3000
+
+

inlets also has its own built-in file-server with password protection and the ability to disable browsing for sharing private links. You can expose the built-in file-server when you want to share files directly, without having to upload them first: The simple way to share files directly from your computer

+

You can download the inlets client using the inletsctl tool:

+
$ sudo inletsctl download
+
+

Now you can start the tunnel client and start serving a test version of my personal homepage alexellis.io:

+
$ export URL=""
+$ export TOKEN=""
+
+$ inlets-pro http client \
+  --url $URL \
+  --token $TOKEN \
+  --upstream blog.example.com=http://127.0.0.1:3000
+
+

What if my services are running on different computers?

+

If they are all within the same network, then you can run the client in one place and have it point at the various internal IP addresses.

+
$ inlets-pro http client \
+  --url $URL \
+  --token $TOKEN \
+  --upstream blog.example.com=http://127.0.0.1:3000 \
+  --upstream grafana.example.com=http://192.168.0.100:3000
+
+

If they are on different networks, you can simply run multiple clients, just change the --upstream flag on each client.

+

How can I run the client in the background?

+

For Linux hosts, you can generate a systemd unit file for inlets by using the --generate systemd flag to the client or server command.

+

Then simply copy the resulting file to the correct location on your system and install it:

+
$ export URL=""
+$ export TOKEN=""
+
+$ inlets-pro http client \
+  --url $URL \
+  --token $TOKEN \
+  --upstream blog.example.com=http://127.0.0.1:3000 \
+  --generate=systemd > inlets.service
+
+$ sudo cp inlets.service /etc/systemd/system/
+$ sudo systemctl enable inlets
+
+

You can then check the logs or service status:

+
$ sudo journalctl -u inlets
+$ sudo systemctl status inlets
+
+

Access your website over the tunnel

+

You can now access your local website being served at http://127.0.0.1:3000 over the tunnel by visiting the domain you created:

+

https://blog.example.com/

+

Your IP goes where you go

+

You can close the lid on your laptop, and open it again in Starbucks or your favourite local independent coffee shop. As soon as you reconnect the client, your local server will be available over the tunnel at the same IP address and domain: https://blog.example.com/

+

I used this technique to test a live demo for the KubeCon conference. I then took a flight from London to San Diego and was able to receive traffic to my Raspberry Pi whilst tethering on a local SIM card.

+

Tethering my Raspberry Pi with K3s in San Diego

+
+

Tethering my Raspberry Pi with K3s in San Diego

+
+

Wrapping up

+

In a very short period of time we created a private tunnel server on a public cloud of our choice, then we created a DNS record for it, and connected a client and accessed our local website.

+

You can get started with inlets through a monthly subscription, or save on a yearly plan.

+

When would you need this?

+
    +
  • If you're self-hosting websites, you already have some equipment at home, so it can work out cheaper.
  • +
  • If you're running a Kubernetes cluster or K3s on a Raspberry Pi, it can be much cheaper over the course of a year.
  • +
  • But it's also incredibly convenient for sharing files and for testing APIs or OAuth flows during development.
  • +
+

Ben Potter at Coder is writing up a tutorial on how to access a private VSCode server from anywhere using a private tunnel. If you would like to learn more, follow @inletsdev for when it gets published.

+

VSCode running in the browser using Coder

+

Andrew Meier put it this way:

+
+

"I prefer to play around with different projects without having to worry about my costs skyrocketing. I had a few Raspberry Pis and wondered if I could use them as a cluster. After a bit of searching #k3s and inlets gave me my answer"

+
+

Andrew's K3s cluster, with inlets

+
+

Andrew's K3s cluster, with inlets

+
+

Read his blog post: Personal Infrastructure with Inlets, k3s, and Pulumi

+

You may also like

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/caddy-http-tunnel/index.html b/tutorial/caddy-http-tunnel/index.html new file mode 100644 index 0000000..5295bd7 --- /dev/null +++ b/tutorial/caddy-http-tunnel/index.html @@ -0,0 +1,1173 @@ + + + + + + + + + + + + + + + + + + + + + + + + Caddy http tunnel - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Caddy http tunnel

+ +

Custom reverse proxy with Caddy

+

In this tutorial we'll set up an inlets TCP tunnel server to forward ports 80 and 443 to a reverse proxy server running on our local machine. Caddy will receive a TCP stream from the public tunnel server for ports 80 and 443. It can terminate TLS and also allow you to host multiple sites with ease.

+

Caddy is a free and open-source reverse proxy. It's often used on web-servers to add TLS to one or more virtual websites.

+

Pre-reqs

+
    +
  • A Linux server, Windows and MacOS are also supported
  • +
  • The inlets-pro binary at /usr/local/bin/
  • +
  • Access to a DNS control plane for a domain you control
  • +
+

You can run through the same instructions with other reverse proxies such as Nginx, or Traefik.

+

Scenario: +* You want to share a file such as a VM image or an ISO over the Internet, with HTTPS, directly from your laptop. +* You have one or more websites or APIs running on-premises or within your home-lab and want to expose them on the Internet.

+
+

You can subscribe to inlets for personal or commercial use via Gumroad

+
+

Setup your exit node

+

Provision a cloud VM on DigitalOcean or another IaaS provider using inletsctl:

+
inletsctl create \
+ --provider digitalocean \
+ --region lon1 \
+ --pro
+
+

Note the --url and TOKEN given to you in this step.

+

Setup your DNS A record

+

Setup a DNS A record for the site you want to expose using the public IP of the cloud VM

+
    +
  • 178.128.40.109 = service.example.com
  • +
+

Run a local server to share files

+

Do not run this command in your home folder.

+
mkdir -p /tmp/shared/
+cd /tmp/shared/
+
+echo "Hello world" > WELCOME.txt
+
+# If using Python 2.x
+python -m SimpleHTTPServer
+
+# Python 3.x
+python3 -m http.server
+
+

This will listen on port 8000 by default.

+

Setup Caddy 1.x

+ +

Pick your operating system, for instance Darwin for MacOS, or Linux.

+

Download the binary, extract it and install it to /usr/local/bin:

+
mkdir -p /tmp/caddy
+curl -sLSf https://github.com/caddyserver/caddy/releases/download/v1.0.4/caddy_v1.0.4_darwin_amd64.zip > caddy.tar.gz
+tar -xvf caddy.tar.gz --strip-components=0 -C /tmp/caddy
+
+sudo cp /tmp/caddy/caddy /usr/local/bin/
+
+
    +
  • Create a Caddyfile
  • +
+

The Caddyfile configures which websites Caddy will expose, and which sites need a TLS certificate.

+

Replace service.example.com with your own domain.

+

Next, edit proxy / 127.0.0.1:8000 and change the port 8000 to the port of your local webserver, for instance 3000 or 8080. For our example, keep it as 8000.

+
service.example.com
+
+proxy / 127.0.0.1:8000 {
+  transparent
+}
+
+

Start the Caddy binary, it will listen on port 80 and 443.

+
sudo ./caddy
+
+

If you have more than one website, you can add them to the Caddyfile on new lines.

+
+

You'll need to run caddy as sudo so that it can bind to ports 80, and 443 which require additional privileges.

+
+

Start the inlets-pro client on your local side

+

Download the inlets Pro client:

+
sudo inletsctl download
+
+

Run the inlets-pro client, using the TOKEN and IP given to you from the previous step.

+

The client will look for your license in $HOME/.inlets/LICENSE, but you can also use the --license/--license-file flag if you wish.

+
export IP=""        # take this from the exit-server
+export TOKEN=""     # take this from the exit-server
+
+inlets-pro tcp client \
+  --url wss://$IP:8123/connect \
+  --ports 80,443 \
+  --token $TOKEN \
+  --upstream localhost
+
+

Note that --upstream localhost will connect to Caddy running on your computer, if you are running Caddy on another machine, use its IP address here.

+

Check it all worked

+

You'll see that Caddy can now obtain a TLS certificate.

+

Go ahead and visit: https://service.example.com

+

Congratulations, you've now served a TLS certificate directly from your laptop. You can close caddy and open it again at a later date. Caddy will re-use the certificate it already obtained and it will be valid for 3 months. To renew, just keep Caddy running or open it again whenever you need it.

+

Setup Caddy 2.x

+

For Caddy 2.x, the Caddyfile format changes.

+

Let's say you're running a Node.js service on port 3000, and want to expose it with TLS on the domain "service.example.com":

+
git clone https://github.com/alexellis/expressjs-k8s/
+cd expressjs-k8s
+
+npm install
+http_port=3000 npm start
+
+

The local site will be served at http://127.0.0.1:3000

+
{
+  acme_ca https://acme-staging-v02.api.letsencrypt.org/directory
+}
+
+service.example.com
+
+reverse_proxy 127.0.0.1:3000 {
+}
+
+

Note the acme_ca being used will receive a staging certificate, remove it to obtain a production TLS certificate.

+

Now download Caddy 2.x for your operating system.

+
sudo ./caddy run \
+  -config ./Caddyfile
+
+

sudo - is required to bind to port 80 and 443, although you can potentially update your OS to allow binding to low ports without root access.

+

You should now be able to access the Node.js website via the https://service.example.com URL.

+

Caddy also supports multiple domains within the same file, so that you can expose multiple internal or private websites through the same tunnel.

+
{
+  email "webmaster@example.com"
+}
+
+blog.example.com {
+  reverse_proxy 127.0.0.1:4000
+}
+
+openfaas.example.com {
+      reverse_proxy 127.0.0.1:8080
+}
+
+

If you have services running on other machines you can change 127.0.0.1:8080 to a different IP address such as that of your Raspberry Pi if you had something like OpenFaaS running there.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/community/index.html b/tutorial/community/index.html new file mode 100644 index 0000000..bd9b66a --- /dev/null +++ b/tutorial/community/index.html @@ -0,0 +1,1041 @@ + + + + + + + + + + + + + + + + + + + + + + + + Community tutorials and guides - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Community tutorials and guides

+

Note: Any material not hosted on inlets.dev may be written by a third-party.

+

If you have a tutorial or video to submit, feel free to send a Pull Request

+

Case studies

+

You can read testimonials on the main homepage

+ +

Videos

+

Webinars:

+ +

Walk-through videos:

+ +

Tutorials

+ +

Official blog posts

+

See inlets.dev/blog

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/dual-tunnels/index.html b/tutorial/dual-tunnels/index.html new file mode 100644 index 0000000..7836a48 --- /dev/null +++ b/tutorial/dual-tunnels/index.html @@ -0,0 +1,1073 @@ + + + + + + + + + + + + + + + + + + + + + + + + Dual tunnels - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Dual tunnels

+ +

Setting up dual TCP and HTTPS tunnels

+

In this tutorial we will set up a dual tunnel for exposing both HTTP and TCP services from the same server.

+

Whilst it's easier to automate two separate servers or cloud instances for your tunnels, you may want to reduce your costs.

+

The use-case may be that you have a number of OpenFaaS functions running on your Raspberry Pi which serve traffic to users, but you also want to connect via SSH and VNC.

+

Pre-reqs

+
    +
  • A Linux server, Windows and MacOS are also supported
  • +
  • The inlets-pro binary at /usr/local/bin/
  • +
  • Access to a DNS control plane for a domain you control
  • +
+

Create the HTTPS tunnel server first

+

Create a HTTPS tunnel server using the manual tutorial or automated tutorial.

+

Once it's running, check you can connect to it, and then log in with SSH.

+

You'll find a systemd service named inlets-pro running the HTTPS tunnel with a specific authentication token and set of parameters.

+

Now, generate a new systemd unit file for the TCP tunnel.

+

I would suggest generating a new token for this tunnel.

+
TOKEN="$(head -c 32 /dev/urandom | base64 | cut -d "-" -f1)"
+
+# Find the instance's public IPv4 address:
+PUBLIC_IP="$(curl -s https://checkip.amazonaws.com)"
+
+

Let's imagine the public IP resolved to 46.101.128.5 which is part of the DigitalOcean range.

+
inlets-pro tcp server \
+ --token "$TOKEN" \
+ --auto-tls-san $PUBLIC_IP \
+ --generate=systemd > inlets-pro-tcp.service
+
+

Example:

+
[Unit]
+Description=inlets Pro TCP Server
+After=network.target
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=5
+StartLimitInterval=0
+ExecStart=/usr/local/bin/inlets-pro tcp server --auto-tls --auto-tls-san=46.101.128.5 --control-addr=0.0.0.0 --token="k1wCR+2j41TXqqq/UTLJzcuzhmSJbU5NY32VqnNOnog=" --control-port=8124 --auto-tls-path=/tmp/inlets-pro-tcp
+
+[Install]
+WantedBy=multi-user.target
+
+

We need to update the control-port for this inlets tunnel server via the --control-port flag. Use port 8124 since 8123 is already in use by the HTTP tunnel. Add --control-port 8124 to the ExecStart line.

+

We need to add a new flag so that generated TLS certificates are placed in a unique directory, and don't clash. Add --auto-tls-path /tmp/inlets-pro-tcp/ to the same line.

+

Next install the unit file with:

+
sudo cp inlets-pro-tcp.service /etc/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl enable inlets-pro-tcp.service
+
+sudo systemctl restart inlets-pro-tcp.service
+
+

You'll now be able to check the logs for the server:

+
sudo journalctl -u inlets-pro-tcp
+
+

Finally you can connect your TCP client:

+
inlets-pro tcp client \
+  --token "k1wCR+2j41TXqqq/UTLJzcuzhmSJbU5NY32VqnNOnog=" \
+  --upstream 192.168.0.15 \
+  --ports 2222,5900 \
+  --url wss://46.101.128.5:8124
+
+

Note that 5900 is the default port for VNC. Port 2222 was used for SSH so as not to conflict with the version running on the tunnel server.

+

You can now connect to the public IP of your server via SSH and VNC:

+

For example:

+
ssh -p 2222 pi@46.101.128.5
+
+

Wrapping up

+

You now have a TCP and HTTPS tunnel server running on the same host. This was made possible by changing the control-plane port and auto-TLS path for the second server, and having it start automatically through a separate systemd service.

+

This technique may save you a few dollars per month, but it may not be worth your time compared to how quick and easy it is to create two separate servers with inletsctl create.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/istio-gateway/index.html b/tutorial/istio-gateway/index.html new file mode 100644 index 0000000..c75dc93 --- /dev/null +++ b/tutorial/istio-gateway/index.html @@ -0,0 +1,1244 @@ + + + + + + + + + + + + + + + + + + + + + + + + Tutorial: Expose an Istio gateway with the inlets-operator - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Tutorial: Expose an Istio gateway with the inlets-operator

+

In this tutorial we will configure the inlets-operator to get a public IP for the Istio Ingress Gateway. This will allow you to receive HTTPS certificates via LetsEncrypt and cert-manager and access services running in your cluster on their own public domain.

+

Install arkade

+

Arkade is a simple CLI tool that provides a quick way to install various apps and download common binaries much quicker.

+

To install arkade run:

+
curl -sSLf https://get.arkade.dev/ | sudo sh
+
+

Create a kubernetes cluster with kinD

+

We're going to use KinD, which runs inside a container with Docker for Mac or the Docker daemon. MacOS cannot actually run containers or Kubernetes itself, so projects like Docker for Mac create a small Linux VM and hide it away.

+

Download the kind and kubectl binaries if you don't have them already:

+
arkade get kind
+arkade get kubectl
+
+

Now create a cluster:

+
$ kind create cluster
+
+

The initial creation could take a few minutes, but subsequent cluster creations are much faster.

+
Creating cluster "kind" ...
+ ✓ Ensuring node image (kindest/node:v1.19.0) 🖼
+ ✓ Preparing nodes 📦  
+ ✓ Writing configuration 📜 
+ ✓ Starting control-plane 🕹️ 
+ ✓ Installing CNI 🔌 
+ ✓ Installing StorageClass 💾 
+Set kubectl context to "kind-kind"
+You can now use your cluster with:
+
+kubectl cluster-info --context kind-kind
+
+Have a nice day! 👋
+
+
kubectl get node -o wide
+
+NAME                 STATUS     ROLES    AGE   VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE       KERNEL-VERSION     CONTAINER-RUNTIME
+kind-control-plane      Ready   master   35s   v1.18.0   172.17.0.2    <none>        Ubuntu 19.10   5.3.0-26-generic   containerd://1.3.2
+
+

The above shows one node is Ready, so we can move on and install Istio.

+

Install Istio

+

You can install Istio using the documentation site at Istio.io, but we're going to use arkade instead since it gives us a one-line install and also bundles a version of Istio configuration for constrained development environments like a KinD cluster.

+

It is always possible to use the --set flag to override or pass in additional values for the Istio chart.

+
arkade install istio --help
+
+Install istio
+
+Usage:
+  arkade install istio [flags]
+
+Examples:
+  arkade install istio --loadbalancer
+
+Flags:
+      --cpu string               Allocate CPU resource (default "100m")
+  -h, --help                     help for istio
+      --istio-namespace string   Namespace for the app (default "istio-system")
+      --memory string            Allocate Memory resource (default "100Mi")
+      --namespace string         Namespace for the app (default "default")
+      --profile string           Set istio profile (default "default")
+      --set stringArray          Use custom flags or override existing flags 
+                                 (example --set prometheus.enabled=false)
+  -v, --version string           Specify a version of Istio (default "1.11.4")
+
+Global Flags:
+      --kubeconfig string   Local path for your kubeconfig file
+      --wait                If we should wait for the resource to be ready before returning (helm3 only, default false)
+
+

Install Istio:

+
arkade install istio
+
+

At the moment we don't have a public IP for the Istio gateway. The next step is to install the inlets operator so we can get one.

+
kubectl get -n istio-system \
+  svc/istio-ingressgateway
+
+NAME                   TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)                                      AGE
+istio-ingressgateway   LoadBalancer   10.43.92.145   <pending>     15021:32382/TCP,80:31487/TCP,443:31692/TCP   3m28s
+
+

Install the inlets-operator

+

The inlets-operator lets you get public LoadBalancers on your local Kubernetes cluster. It does this by creating a VM to run an inlets tunnel server in the cloud of your choice for each LoadBalancer. It then plumbs in an inlets client to connect to it using a deployment.

+

The inlets-operator can also be installed with arkade.

+

Save an access token for your cloud provider as $HOME/access-token, in this example we're using DigitalOcean. Other providers may also need a secret token in addition to the API key.

+

Your inlets license should already be saved at: $HOME/.inlets/LICENSE; if it's not, you can move it there or use the --license-file flag.

+
export ACCESS_TOKEN=$HOME/access-token
+
+arkade install inlets-operator \
+ --provider digitalocean \
+ --region lon1 \
+ --token-file $ACCESS_TOKEN \
+ --license-file "$HOME/.inlets/LICENSE"
+
+
+

You can run arkade install inlets-operator --help to see a list of other cloud providers or take a look at the inlets-operator reference documentation.

+
+
    +
  • Set the --region flag as required, it's best to have low latency between your current location and where the exit-servers will be provisioned.
  • +
+

Once the inlets-operator is installed we can start watching for the public IP to appear.

+
kubectl get -n istio-system \
+  svc/istio-ingressgateway -w
+
+NAME                   TYPE           CLUSTER-IP     EXTERNAL-IP
+istio-ingressgateway   LoadBalancer   10.106.220.170   <pending>
+istio-ingressgateway   LoadBalancer   10.106.220.170   165.227.237.77
+
+

Install cert-manager

+

Install cert-manager, which can be integrated with Istio gateways to manage TLS certificates.

+
arkade install cert-manager
+
+

A quick recap

+

This is what we have so far:

+
    +
  • +

    Istio

    +

    The istio service mesh. Among other things, it comes with the istio Ingress Gateway that will get a public address via an inlets tunnel.

    +
  • +
  • +

    inlets-operator

    +

    The inlets operator provides us with a public VirtualIP for the istio Ingress Gateway

    +
  • +
  • +

    cert-manager

    +

    Integrates with Istio gateways to provide TLS certificates through the HTTP01 or DNS01 challenges from LetsEncrypt.

    +
  • +
+

Deploy an application and get a TLS certificate

+

Istio uses the Bookinfo Application as an example in their documentation. We will also use this example.

+

Enable side-car injection and then deploy the BookInfo manifests:

+
kubectl label namespace default istio-injection=enabled
+
+kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.15/samples/bookinfo/platform/kube/bookinfo.yaml
+
+kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.15/samples/bookinfo/networking/bookinfo-gateway.yaml
+
+

We can verify that the bookinfo application is up and running and accessible from our local computer on localhost by running: +

kubectl port-forward -n istio-system \
+  svc/istio-ingressgateway 31380:80
+

+

Then send a request to it with curl:

+
curl -sS http://127.0.0.1:31380/productpage | grep -o "<title>.*</title>"
+<title>Simple Bookstore App</title>
+
+

Since we set up the inlets operator in the previous step to get an external IP for the Istio ingress gateway we should now also be able to access the app using that public IP.

+

Open a browser and navigate to the /productpage URL using the EXTERNAL-IP:

+
http://165.227.237.77/productpage
+
+

+

TLS certificates require a domain name and DNS A or CNAME entry. You can create those in the admin panel of your provider. They should point to the external IP of the Istio Ingress gateway. We will use the bookinfo.example.com domain as an example.

+
export EMAIL="you@example.com"
+
+cat > issuer-prod.yaml <<EOF
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-prod
+spec:
+  acme:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    email: $EMAIL
+    privateKeySecretRef:
+      name: letsencrypt-prod
+    solvers:
+    - selector: {}
+      http01:
+        ingress:
+          class: istio
+EOF
+
+

Note that ingress class is set to class: istio.

+

We are using the Let's Encrypt production server which has strict limits on the API. A staging server is also available at https://acme-staging-v02.api.letsencrypt.org/directory. If you are creating a lot of certificates while testing a deployment it would be better to use the staging server.

+

Edit email, then run: kubectl apply -f issuer-prod.yaml.

+

Create a new certificate resource +

apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: ingress-cert
+  namespace: istio-system
+spec:
+  secretName: ingress-cert
+  commonName: bookinfo.example.com
+  dnsNames:
+  - bookinfo.example.com
+  issuerRef:
+    name: letsencrypt-prod
+    kind: ClusterIssuer
+

+

Edit the bookinfo gateway, kubectl edit gateway/bookinfo-gateway and reference the certificate secret in the TLS configuration under credentialName.

+
apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+  name: bookinfo-gateway
+spec:
+  selector:
+    istio: ingressgateway # use istio default controller
+  servers:
+  - port:
+      number: 443
+      name: https
+      protocol: HTTPS
+    tls:
+      mode: SIMPLE
+      credentialName: ingress-cert # This should match the Certificate secretName
+    hosts:
+    - bookinfo.example.com
+
+
+

You can always check out the Istio documentation for more information on how to integrate cert-manager.

+
+

We can use curl again to access the bookinfo application this time with our custom domain and over a secure connection. Alternatively you can open the URL in your browser.

+
curl -sS https://bookinfo.example.com/productpage | grep -o "<title>.*</title>"
+<title>Simple Bookstore App</title>
+
+

Wrapping up

+

Through the use of the inlets-operator we were able to get a public IP for the Istio Ingress gateway. This allows you to access services on your cluster whether you are running it in an on-premises datacenter, within a VM or on your local laptop.

+

There is no need to open a firewall port, set-up port-forwarding rules, configure dynamic DNS or any of the usual hacks. You will get a public IP and it will "just work" for any TCP traffic you may have.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/kubernetes-api-server/index.html b/tutorial/kubernetes-api-server/index.html new file mode 100644 index 0000000..a85dc43 --- /dev/null +++ b/tutorial/kubernetes-api-server/index.html @@ -0,0 +1,1239 @@ + + + + + + + + + + + + + + + + + + + + + + + + Tutorial: Expose a local Kubernetes API Server - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Tutorial: Expose a local Kubernetes API Server

+

In this tutorial, we'll show you how to expose a local Kubernetes API Server on the Internet, so that you can access it from anywhere, just like with a managed cloud provider.

+

Pre-reqs

+
    +
  • A computer or laptop running MacOS or Linux, or Git Bash or WSL on Windows
  • +
  • Docker for Mac / Docker Daemon - installed in the normal way, you probably have this already
  • +
  • Kubernetes running locally with kubeadm, K3s, K3d, Minikube, KinD, Docker Desktop, etc
  • +
+

The Kubernetes cluster

+

By default every Kubernetes cluster has TLS enabled to encrypt any HTTP REST messages that go over its control-plane. The TLS certificate has to be bound to a certain name, sometimes called a TLS SAN.

+

The certificate is usually only valid for "kubernetes.default.svc", and can only be accessed from within the cluster.

+

Kubernetes on tour

+
+

Kubernetes on tour - get access to your cluster from anywhere, without having to resort to complex tooling like VPNs.

+
+

When a managed cloud provider provisions you a cluster, they'll add additional names into the certificate like "customer1.lke.eu.linode.com" which is then added to your generated kubeconfig file that you download in the dashboard.

+

We have five steps run through to expose the API server:

+
    +
  1. Create a Kubernetes cluster
  2. +
  3. Create a VM on the public cloud with an inlets TCP server running on it
  4. +
  5. Create a DNS entry for the public VM's IP address
  6. +
  7. Configure a TLS SAN, if possible with a new domain name
  8. +
  9. Set up an inlets client as a Pod to forward traffic to the Kubernetes API Server
  10. +
+

Once we have all this in place, we can take our existing kubeconfig file and edit the URL, so that instead of pointing at our LAN IP or localhost, it points to the domain mapped to the public VM.

+

Create a cluster

+

You can create a cluster on any machine by using KinD:

+
arkade get kind
+kind create cluster
+
+

If you have a Raspberry Pi or a Linux Server, you can install K3s using k3sup:

+
arkade get k3sup
+
+k3sup install --ip 192.168.1.101 --user pi
+
+

In either case, you'll get back a kubeconfig file.

+

Here's a snippet of what I got back from running k3sup install:

+
apiVersion: v1
+clusters:
+- cluster:
+    server: https://192.168.1.101:6443
+
+

The server field will need to be changed to the new public address later on.

+

Create a VM on the public cloud with an inlets TCP server running on it

+

Just like when Linode Kubernetes Engine provisions us a domain like "customer1.lke.eu.linode.com", we'll need our own subdomain too, so that the certificate can be issued for it.

+

In order to create the DNS record, we need a public IP, which we will get by creating a tunnel server on our preferred cloud and in a region that's close to us.

+
arkade get inletsctl
+
+export ACCESS_TOKEN="" # Retrieve this from your cloud dashboard
+
+inletsctl create \
+  --provider linode \
+  --tcp \
+  --access-token $ACCESS_TOKEN \
+  --region eu-west
+
+

Save the connection info from inletsctl into a text file for later.

+
# Give a single value or comma-separated
+export PORTS="8000"
+
+# Where to route traffic from the inlets server
+export UPSTREAM="localhost"
+
+inlets-pro tcp client --url "wss://139.160.201.143:8123" \
+  --token "f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338" \
+  --upstream $UPSTREAM \
+  --ports $PORTS
+
+

Create a DNS subdomain for the IP address you were given:

+
    +
  • k3s.example.com => 139.160.201.143
  • +
+

Check that you can resolve the IP with a ping ping -c 1 k3s.example.com

+

Now check the status of the inlets server:

+
export TOKEN="f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338"
+
+inlets-pro status --url "wss://139.160.201.143:8123" \
+  --token "$TOKEN"
+
+

Output:

+
inlets server status. Version: 0.9.3 - 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514
+
+Server info:
+Hostname:       localhost
+Process uptime: 5 seconds ago
+Mode:           tcp
+Version:        0.9.3 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514
+
+No clients connected.
+
+

We can now move onto the next step.

+

Configure a TLS SAN, if possible with a new domain name

+

With k3s, it's trivial to add additional TLS SAN names for the Kubernetes API Server.

+

If you run the k3sup install command again, it'll update your configuration:

+
k3sup install \
+  --ip 192.168.1.101 \
+  --user pi \
+  --tls-san k3s.example.com
+
+

You'll now have the custom domain along with the default kubernetes.default.svc as valid names in the generated certificate.

+

If you're not running on k3s, or use a service where you cannot change the TLS SAN, then we'll show you what to do in the next step.

+

Update your kubeconfig file with the new endpoint

+

We need to update our kubeconfig file to point at the custom domain instead of at whatever loopback or LAN address it currently does.

+

For K3s users, change the server URL:

+
apiVersion: v1
+clusters:
+- cluster:
+    server: https://192.168.1.101:6443
+
+

To:

+
apiVersion: v1
+clusters:
+- cluster:
+    server: https://k3s.example.com:443
+
+

For any user where you cannot regenerate the TLS certificate for the API Server, you can specify the server name in the config file:

+
apiVersion: v1
+clusters:
+- cluster:
+    server: https://k3s.example.com:443
+    tls-server-name: kubernetes
+
+

For more details see: Support TLS Server Name overrides in kubeconfig file #88769

+

Save the changes to your kubeconfig file.

+

Connect the tunnel

+

The tunnel acts like a router, it takes any TCP packets sent to port 6443 (k3s) or 443 (Kubernetes) and forwards them down the tunnel to the inlets client. The inlets client then looks at its own "--upstream" value to decide where to finally send the data.

+

Save inlets-k8s-api.yaml:

+
export LICENSE="$(cat $HOME/.inlets/LICENSE)"
+export TOKEN="f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338" # populate with the token from inletsctl
+export SERVER_IP="139.160.201.143" # populate with the server IP, not the domain
+
+cat > inlets-k8s-api.yaml <<EOF
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inlets-client
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: inlets-client
+  template:
+    metadata:
+      labels:
+        app: inlets-client
+    spec:
+      containers:
+      - name: inlets-client
+        image: ghcr.io/inlets/inlets-pro:0.9.9
+        imagePullPolicy: IfNotPresent
+        command: ["inlets-pro"]
+        args:
+        - "tcp"
+        - "client"
+        - "--url=wss://$SERVER_IP:8123"
+        - "--upstream=kubernetes.default.svc"
+        - "--port=443"
+        - "--port=6443"
+        - "--token=$TOKEN"
+        - "--license=$LICENSE"
+---
+EOF
+
+

You'll see the tunnel client up and running and ready to receive requests:

+
kubectl logs deploy/inlets-client
+2022/06/24 09:51:18 Licensed to: Alex <contact@openfaas.com>, expires: 128 day(s)
+2022/06/24 09:51:18 Upstream server: kubernetes.default.svc, for ports: 443, 6443
+time="2022/06/24 09:51:18" level=info msg="Connecting to proxy" url="wss://139.160.201.143:8123/connect"
+inlets-pro TCP client. Copyright OpenFaaS Ltd 2021
+time="2022/06/24 09:51:18" level=info msg="Connection established" client_id=5309466072564c1c90ce0a0bcaa22b74
+
+

Check the tunnel server's status to confirm the connection:

+
export TOKEN="f2cXtOouRpuVbAn4arVvdSMx//uKD3jDnssr3X9P338"
+
+inlets-pro status --url "wss://139.160.201.143:8123" \
+  --token "$TOKEN"
+
+inlets server status. Version: 0.9.3 - 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514
+
+Server info:
+Hostname:       localhost
+Process uptime: 15 minutes ago
+Mode:           tcp
+Version:        0.9.3 8e96997499ae53c6fb2ae9f9e13fa9b48dcb6514
+
+Connected clients:
+Client ID                        Remote Address        Connected  Upstreams
+5309466072564c1c90ce0a0bcaa22b74 192.168.1.101:16368 43 seconds kubernetes.default.svc:443, kubernetes.default.svc:6443
+
+

Finally prove that it's working with the new, public address:

+
$ kubectl cluster-info
+Kubernetes control plane is running at https://k3s.example.com:443
+CoreDNS is running at https://k3s.example.com:443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
+Metrics-server is running at https://k3s.example.com:443/api/v1/namespaces/kube-system/services/https:metrics-server:https/proxy
+
+To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+
+

Wrapping up

+

In a relatively short period of time, with a custom domain, and a small VM, we set up a tunnel server to route traffic from the public Internet to a K3s server on an internal network.

+

This gives you a similar experience to a managed public cloud Kubernetes engine, but running on your own infrastructure, or perhaps within a restrictive VPC.

+

You may also like:

+ +

If you'd like to talk to us about this tutorial, feel free to reach out for a meeting:

+

Set up a meeting

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/kubernetes-ingress/index.html b/tutorial/kubernetes-ingress/index.html new file mode 100644 index 0000000..c08e5a2 --- /dev/null +++ b/tutorial/kubernetes-ingress/index.html @@ -0,0 +1,1332 @@ + + + + + + + + + + + + + + + + + + + + + + + + Tutorial: Expose a local IngressController with the inlets-operator - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Tutorial: Expose a local IngressController with the inlets-operator

+

In this quick-start we will configure the inlets-operator to use inlets-pro in TCP mode to expose ports 80 and 443 of an Ingress Controller (ingress-nginx) so that it can receive HTTPS certificates via LetsEncrypt and cert-manager.

+

The inlets-operator creates a VM for each tunnel server in the cloud of your choice, then plumbs in an inlets client to connect to it using a Deployment. There is an alternative approach that we also recommend which involves creating the tunnel server with inletsctl, followed by installing the inlets client with Helm: Fixing Ingress for short-lived local Kubernetes clusters.

+
+

You can subscribe to inlets for personal or commercial use via Gumroad

+
+

Pre-reqs

+
    +
  • A computer or laptop running MacOS or Linux, or Git Bash or WSL on Windows
  • +
  • Docker for Mac / Docker Daemon - installed in the normal way, you probably have this already
  • +
  • KinD - the "darling" of the Kubernetes community is Kubernetes IN Docker, a small one-shot cluster that can run inside a Docker container
  • +
  • arkade - arkade is an app installer that takes a helm chart and bundles it behind a simple CLI
  • +
+

Install arkade

+

You can use arkade or helm to install the various applications we are going to add to the cluster below. arkade provides an apps ecosystem that makes things much quicker.

+

MacOS and Linux users:

+
curl -sSLf https://get.arkade.dev/ | sudo sh
+
+

Windows users should install Git Bash and run the above without sudo.

+

Create a Kubernetes cluster with KinD

+

We're going to use KinD, which runs inside a container with Docker for Mac or the Docker daemon. MacOS cannot actually run containers or Kubernetes itself, so projects like Docker for Mac create a small Linux VM and hide it away.

+

You can use an alternative to KinD if you have a preferred tool.

+

Get a KinD binary release and kubectl (the Kubernetes CLI):

+
arkade get kind --version v0.9.0
+arkade get kubectl --version v1.19.3
+
+

Now create a cluster:

+
$ kind create cluster
+
+

The initial creation could take a few minutes, but subsequent cluster creations are much faster.

+
Creating cluster "kind" ...
+ ✓ Ensuring node image (kindest/node:v1.19.0) 🖼
+ ✓ Preparing nodes 📦  
+ ✓ Writing configuration 📜 
+ ✓ Starting control-plane 🕹️ 
+ ✓ Installing CNI 🔌 
+ ✓ Installing StorageClass 💾 
+Set kubectl context to "kind-kind"
+You can now use your cluster with:
+
+kubectl cluster-info --context kind-kind
+
+Have a nice day! 👋
+
+

We can check that our single node is ready now:

+
kubectl get node -o wide
+
+NAME                 STATUS     ROLES    AGE   VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE       KERNEL-VERSION     CONTAINER-RUNTIME
+kind-control-plane      Ready   master   35s   v1.18.0   172.17.0.2    <none>        Ubuntu 19.10   5.3.0-26-generic   containerd://1.3.2
+
+

The above shows one node Ready, so we are ready to move on.

+

Install the inlets-operator

+

Save an access token for your cloud provider as $HOME/access-token, in this example we're using DigitalOcean. Other providers may also need a secret token in addition to the API key.

+

Your inlets license should be already saved at: $HOME/.inlets/LICENSE, if it's not, you can move it there or use the --license-file flag.

+
export ACCESS_TOKEN=$HOME/access-token
+
+arkade install inlets-operator \
+ --provider digitalocean \
+ --region lon1 \
+ --token-file $ACCESS_TOKEN \
+ --license-file "$HOME/.inlets/LICENSE"
+
+
+

You can run arkade install inlets-operator --help to see a list of other cloud providers.

+
+
    +
  • Set the --region flag as required, it's best to have low latency between your current location and where the exit-servers will be provisioned.
  • +
+

Install nginx-ingress

+

This installs nginx-ingress using its Helm chart:

+
arkade install nginx-ingress
+
+

Install cert-manager

+

Install cert-manager, which can obtain TLS certificates through NginxIngress.

+
arkade install cert-manager
+
+

A quick review

+

Here's what we have so far:

+
    +
  • +

    nginx-ingress

    +

    An IngressController, Traefik or Caddy are also valid options. It comes with a Service of type LoadBalancer that will get a public address via the tunnel

    +
  • +
  • +

    inlets-operator configured to use inlets-pro in TCP mode

    +

    Provides us with a public VirtualIP for the IngressController service.

    +
  • +
  • +

    cert-manager

    +

    Provides TLS certificates through the HTTP01 or DNS01 challenges from LetsEncrypt

    +
  • +
+

Deploy an application and get a TLS certificate

+

This is the final step that shows everything working end to end.

+

TLS certificates require a domain name and DNS A or CNAME entry, so let's set that up

+

Find the External-IP:

+
kubectl get svc
+
+

Now create a DNS A record in your admin panel, so for example: expressjs.example.com.

+

Now when you install a Kubernetes application with an Ingress definition, NginxIngress and cert-manager will work together to provide a TLS certificate.

+

Create a staging issuer for cert-manager issuer-staging.yaml and make sure you edit the email value.

+
export EMAIL="you@example.com"
+
+cat > issuer-staging.yaml <<EOF
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: letsencrypt-staging
+  namespace: default
+spec:
+  acme:
+    server: https://acme-staging-v02.api.letsencrypt.org/directory
+    email: $EMAIL
+    privateKeySecretRef:
+      name: letsencrypt-staging
+    solvers:
+    - selector: {}
+      http01:
+        ingress:
+          class: nginx
+EOF
+
+

Apply the file with kubectl apply -f issuer-staging.yaml

+

While the Let's Encrypt production server has strict limits on the API, the staging server is more forgiving, and +should be used while you are testing a deployment.

+

Edit email, then run: kubectl apply -f issuer-staging.yaml.

+

Let's use helm3 to install Alex's example Node.js API available on GitHub

+

Create a custom.yaml file with the following:

+
ingress:
+  enabled: true
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    cert-manager.io/issuer: "letsencrypt-staging"
+  hosts:
+    - host: expressjs.inlets.dev
+      paths: ["/"]
+  tls:
+   - secretName: expressjs-tls
+     hosts:
+       - expressjs.inlets.dev
+
+

Replace the string expressjs.inlets.dev with your own sub-domain created earlier i.e. expressjs.example.com.

+

You can download around a dozen other CLI tools using arkade including helm. Use arkade to download helm and put it in your PATH:

+
arkade get helm
+
+# Put arkade in your path:
+export PATH=$PATH:$HOME/.arkade/bin/helm3/
+
+# Or alternatively install to /usr/local/bin
+sudo cp $HOME/.arkade/bin/helm3/helm /usr/local/bin/
+
+

Now install the chart using helm:

+
helm repo add expressjs-k8s https://alexellis.github.io/expressjs-k8s/
+
+# Then they run an update
+helm repo update
+
+# And finally they install
+helm upgrade --install express expressjs-k8s/expressjs-k8s \
+  --values custom.yaml
+
+

Test it out

+

Now check the certificate has been created and visit the webpage in a browser:

+
kubectl get certificate
+
+NAME            READY   SECRET          AGE
+expressjs-tls   True    expressjs-tls   49s
+
+

Open the webpage i.e. https://expressjs.example.com. Since this is a staging certificate, you will get a warning +from your browser. You can accept the certificate in order to test your site.

+

Getting a Production Certificate

+

Create a production certificate issuer issuer-prod.yaml, similar to the staging issuer you produced +earlier. Be sure to change the email address to your email.

+
export EMAIL="you@example.com"
+
+cat > issuer-prod.yaml <<EOF
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: letsencrypt-prod
+  namespace: default
+spec:
+  acme:
+    server: https://acme-v02.api.letsencrypt.org/directory
+    email: $EMAIL
+    privateKeySecretRef:
+      name: letsencrypt-prod
+    solvers:
+    - selector: {}
+      http01:
+        ingress:
+          class: nginx
+EOF
+
+

Then run kubectl apply -f issuer-prod.yaml

+

Now you must update your expressjs deployment to use the new certificate issuer. Create a new +helm3 overrides file custom-prod.yaml:

+
cat > custom-prod.yaml <<EOF
+ingress:
+  enabled: true
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    cert-manager.io/issuer: "letsencrypt-prod"
+  hosts:
+    - host: expressjs.inlets.dev
+      paths: ["/"]
+  tls:
+   - secretName: expressjs-tls
+     hosts:
+       - expressjs.inlets.dev
+EOF
+
+

Be sure to change the above domain name to your domain name for the sample server.

+

You can update your deployment using the helm command below:

+
helm upgrade express expressjs-k8s/expressjs-k8s \
+  --values custom-prod.yaml
+
+

Here's my example on my own domain:

+

The page with TLS

+

You can view the certificate that's being served directly from your local cluster and see that it's valid:

+

Green lock

+

Install a real-world application

+

Using arkade you can now install OpenFaaS or a Docker Registry with a couple of commands, and since you have Nginx and cert-manager in place, this will only take a few moments.

+

OpenFaaS with TLS

+

OpenFaaS is a platform for Kubernetes that provides FaaS functionality and microservices. The motto of the project is Serverless Functions Made Simple and you can deploy it along with TLS in just a couple of commands:

+
export DOMAIN=gateway.example.com
+arkade install openfaas
+arkade install openfaas-ingress \
+  --email webmaster@$DOMAIN \
+  --domain $DOMAIN
+
+

That's it, you'll now be able to access your gateway at https://$DOMAIN/

+

For more, see the OpenFaaS workshop

+

Docker Registry with TLS

+

A self-hosted Docker Registry with TLS and private authentication can be hard to set up, but we can now do that with two commands.

+
export DOMAIN=registry.example.com
+arkade install docker-registry
+arkade install docker-registry-ingress \
+  --email webmaster@$DOMAIN \
+  --domain $DOMAIN
+
+

Now try your registry:

+
docker login $DOMAIN
+docker pull alpine:3.16
+docker tag alpine:3.16 $DOMAIN/alpine:3.16
+
+docker push $DOMAIN/alpine:3.16
+
+

You can even combine the new private registry with OpenFaaS if you like, checkout the docs for more.

+

Wrapping up

+

Through the use of inlets-pro we have an encrypted control-plane for the websocket tunnel, and encryption for the traffic going to our Express.js app using a TLS certificate from LetsEncrypt.

+

You can now get a green lock and a valid TLS certificate for your local cluster, which also means that this will work with bare-metal Kubernetes, on-premises and with your Raspberry Pi cluster.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/manual-http-server/index.html b/tutorial/manual-http-server/index.html new file mode 100644 index 0000000..a39bd1a --- /dev/null +++ b/tutorial/manual-http-server/index.html @@ -0,0 +1,1090 @@ + + + + + + + + + + + + + + + + + + + + + + + + Manual http server - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Manual http server

+ +

Setting up a HTTP tunnel server manually

+

In this tutorial we will set up an inlets HTTP tunnel server to serve a local website over HTTPS using Let's Encrypt. The steps will be manual, but usually, we would use a provisioning tool like inletsctl to automate everything for us.

+

This may be useful for understanding how the server binary works, and how to use it on existing servers that you may have. Or perhaps you want to run inlets across an internal or private network.

+

Pre-reqs

+
    +
  • A Linux server, Windows and MacOS are also supported
  • +
  • The inlets-pro binary at /usr/local/bin/
  • +
  • Access to a DNS control plane for a domain you control
  • +
+

Run the server

+

For this example, your tunnel server should be accessible from the Internet. The tunnel client will connect to it and then expose one or more local websites so that you can access them remotely.

+

Create a DNS A record for the subdomain or subdomains you want to use, and have each of them point to the public IP address of the server you have provisioned. These should have a short TTL such as 60s to avoid waiting too long for DNS to propagate throughout the Internet. You can increase this value to a higher number later.

+

First generate an authentication token that the client will use to log in:

+
TOKEN="$(head -c 32 /dev/urandom | base64 | cut -d "-" -f1)"
+
+

We'll use the built-in support for Let's Encrypt to get a valid HTTPS certificate for any services you wish to expose via your tunnel server. It is also possible to turn off Let's Encrypt support and use your own reverse proxy such as Caddy or Nginx.

+
export DOMAIN="example.com"
+
+  inlets-pro http server \
+  --auto-tls \
+  --control-port 8123 \
+  --auto-tls-san 192.168.0.10 \
+  --letsencrypt-domain subdomain1.$DOMAIN \
+  --letsencrypt-domain subdomain2.$DOMAIN \
+  --letsencrypt-email contact@$DOMAIN \
+  --letsencrypt-issuer staging \
+  --token $TOKEN
+
+

Notice that --letsencrypt-domain can be provided more than once, one flag for each of your subdomains.

+

We are also defaulting to the "staging" provider for TLS certificates which allows us to obtain a large number of certificates for experimentation purposes only. The default value, if this field is left off, is prod as you will see by running inlets-pro http server --help.

+

Now the following will happen:

+
    +
  • The tunnel server will start up and listen to TCP traffic on port 80 and 443.
  • +
  • The server will try to resolve each of your domains passed via --letsencrypt-domain.
  • +
  • Then once each resolves, Let's Encrypt will be contacted for a HTTP01 ACME challenge.
  • +
  • Once the certificates are obtained, the server will start serving the HTTPS traffic.
  • +
+

Now you can connect your client running on another machine.

+

Of course you can tunnel whatever HTTP service you like, if you already have one.

+

Inlets has a built-in HTTP server that we can run on our local / private machine to share files with others. Let's use that as our example:

+
mkdir -p /tmp/share
+
+echo "Welcome to my filesharing service." > /tmp/share/welcome.txt
+
+inlets-pro fileserver \
+ --allow-browsing \
+ --webroot /tmp/share/ \
+ --port 8080
+
+

Next let's expose that local service running on localhost:8080 via the tunnel server:

+
export TOKEN="" # Obtain this from your server
+export SERVER_IP="" # Your server's IP
+export DOMAIN="example.com"
+
+inlets-pro http client \
+  --url wss://$SERVER_IP:8123 \
+  --token $TOKEN \
+  --upstream http://localhost:8080/
+
+

If you set up your server for more than one sub-domain then you can specify a domain for each local service such as:

+
  --upstream subdomain1.$DOMAIN=http://localhost:8080/,subdomain2.$DOMAIN=http://localhost:3000/
+
+

Now that your client is connected, you can access the HTTP fileserver we set up earlier via the public DNS name:

+
curl -k -v https://subdomain1.$DOMAIN/welcome.txt
+
+

Now that you can see everything working, with a staging certificate, you can run the server command again and switch out the --letsencrypt-issuer staging flag for --letsencrypt-issuer prod.

+

Wrapping up

+

You have now installed an inlets HTTP tunnel server to a machine by hand. The same can be achieved by running the inletsctl tool, which does all of this automatically on a number of cloud providers.

+
    +
  • +

    Can I connect more than one client to the same server? + Yes, and each can expose different services. So client 1 exposes subdomain1.DOMAIN and client 2 exposes subdomain2.DOMAIN. Alternatively, you can have multiple clients exposing the same domain, for high availability.

    +
  • +
  • +

    How do I keep the inlets server process running? + You can run it in the background, by using a systemd unit file. You can generate these via the inlets-pro http server --generate=systemd command.

    +
  • +
  • +

    How do I keep the inlets client process running? + Do the same as for a server, but use the inlets-pro http client --generate=systemd command.

    +
  • +
  • +

    What else can I do with my server? + Browse the available options for the tunnel servers with the inlets-pro http server --help command.

    +
  • +
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/manual-tcp-server/index.html b/tutorial/manual-tcp-server/index.html new file mode 100644 index 0000000..5a8303d --- /dev/null +++ b/tutorial/manual-tcp-server/index.html @@ -0,0 +1,1070 @@ + + + + + + + + + + + + + + + + + + + + + + + + Manual tcp server - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Manual tcp server

+ +

Setting up a TCP server manually

+

In this tutorial we will set up a TCP tunnel server manually.

+

Pre-reqs

+
    +
  • A Linux server, Windows and MacOS are also supported
  • +
  • The inlets-pro binary at /usr/local/bin/
  • +
+

Log into your existing VM

+

Generate an authentication token for the tunnel:

+
TOKEN="$(openssl rand -base64 32)"
+echo "$TOKEN" > token.txt
+
+# Find the instance's public IPv4 address:
+PUBLIC_IP="$(curl -s https://checkip.amazonaws.com)"
+
+

Let's imagine the public IP resolved to 46.101.128.5 which is part of the DigitalOcean range.

+
inlets-pro tcp server \
+ --token "$TOKEN" \
+ --auto-tls-san $PUBLIC_IP \
+ --generate=systemd > inlets-pro.service
+
+

Example:

+
[Unit]
+Description=inlets Pro TCP Server
+After=network.target
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=5
+StartLimitInterval=0
+ExecStart=/usr/local/bin/inlets-pro tcp server --auto-tls --auto-tls-san=46.101.128.5 --control-addr=0.0.0.0 --token="ISgW7E2TQk+ZmbJldN9ophfE96B93eZKk8L1+gBysg4=" --control-port=8124 --auto-tls-path=/tmp/inlets-pro
+
+[Install]
+WantedBy=multi-user.target
+
+

Next install the unit file with:

+
sudo cp inlets-pro.service /etc/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl enable inlets-pro.service
+
+sudo systemctl restart inlets-pro.service
+
+

You'll now be able to check the logs for the server:

+
sudo journalctl -u inlets-pro
+
+

Finally you can connect your TCP client from a remote network. In this case, port 5900 is being exposed for VNC, along with port 2222 for SSH. Port 2222 is an extra port added to the /etc/ssh/sshd_config file on the Linux machine to avoid conflicting with SSH on the tunnel server itself.

+
inlets-pro tcp client \
+  --token "ISgW7E2TQk+ZmbJldN9ophfE96B93eZKk8L1+gBysg4=" \
+  --upstream 192.168.0.15 \
+  --port 2222 \
+  --port 5900 \
+  --url wss://46.101.128.5:8124
+
+

You can now connect to the public IP of your server via SSH and VNC:

+

For example:

+
ssh -p 2222 pi@46.101.128.5
+
+

Wrapping up

+

You now have a TCP tunnel server that you can connect as and when you like.

+
    +
  • You can change the ports of the connected client
  • +
  • You can change the upstream
  • +
  • You can run multiple inlets-pro tcp client commands to load-balance traffic
  • +
+

But bear in mind that you cannot have two clients exposing different ports at the same time unless you're an inlets uplink user.

+

We would recommend creating TCP tunnel servers via inletsctl which automates all of the above in a few seconds.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/monitoring-and-metrics/index.html b/tutorial/monitoring-and-metrics/index.html new file mode 100644 index 0000000..8e92435 --- /dev/null +++ b/tutorial/monitoring-and-metrics/index.html @@ -0,0 +1,1287 @@ + + + + + + + + + + + + + + + + + + + + + + + + Monitoring and metrics - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Monitoring and metrics

+

Learn how you can monitor your tunnel servers using the status command and Prometheus metrics.

+

This can help you understand how tunnels are being used and answer questions like:

+
    +
  • What are the Rate, Error, Duration (RED) metrics for any HTTP APIs or websites that are being hosted?
  • +
  • How many connections are open at this point in time, and on which ports?
  • +
  • Have any clients attempted to connect which failed authentication?
  • +
+

Introduction

+

All the information for monitoring tunnels is exposed via the inlets control-plane. It provides a connection endpoint for clients, a status endpoint and a monitoring endpoint.

+
+

Checkout the FAQ to learn about the difference between the data-plane and control-plane

+
+

Inlets provides two distinct ways to monitor tunnels. You can use the status command that is part of the CLI or collect Prometheus metrics for background monitoring and alerting. We will explore both methods.

+

The status command

+

With the inlets-pro status command you can find out some basic tunnel statistics without logging in with a console SSH session. It shows you a list of the connected clients along with the version and uptime information of the server and can be used with both HTTP and TCP tunnels.

+

Here’s an example of a TCP tunnel server:

+
$ inlets-pro status \
+  --url wss://178.62.70.130:8123 \
+  --token "$TOKEN" \
+  --auto-tls
+
+Querying server status. Version DEV - unknown
+Hostname: unruffled-banzai4
+Started: 49 minutes
+Mode: tcp
+Version:        0.8.9-rc1
+
+Client ID                        Remote Address     Connected Upstreams
+730aa1bb96474cbc9f7e76c135e81da8 81.99.136.188:58102 15 minutes localhost:8001, localhost:8000, localhost:2222
+22fbfe123c884e8284ee0da3680c1311 81.99.136.188:64018 6 minutes  localhost:8001, localhost:8000, localhost:2222
+
+

We can see the clients that are connected and the ports they make available on the server. In this case there are two clients. All traffic to the data plane for ports 8001, 8000 and 2222 will be load-balanced between the two clients for HA.

+

The response from a HTTP tunnel:

+
$ inlets-pro status \
+  --url wss://147.62.70.101:8123 \
+  --token "$TOKEN"  \
+  --auto-tls
+
+Server info:
+Hostname: creative-pine6
+Started: 1 day
+Mode:           http
+Version:        0.8.9-rc1
+Connected clients:
+Client ID                        Remote Address     Connected Upstreams
+4e35edf5c6a646b79cc580984eac4ea9 192.168.0.19:34988 5 minutes example.com=http://localhost:8000, prometheus.example.com=http://localhost:9090
+
+

In this example we can see that there is only one client connected to the server at the moment. This client provides two separate domains.

+

The command uses the status endpoint that is exposed on the control-plane. It is possible to invoke the HTTP endpoint yourself. The token that is set up for the server has to be set in the Authorization header.

+
$ curl -ksLS https://127.0.0.1:8123/status \
+  -H "Authorization: Bearer $TOKEN"
+
+

Example response from a HTTP tunnel:

+
{
+  "info": {
+    "version": "0.8.9-18-gf4fc15b",
+    "sha": "f4fc15b9604efd0b0ca3cc604c19c200ae6a1d7b",
+    "mode": "http",
+    "startTime": "2021-08-13T12:23:17.321388+01:00",
+    "hostname": "am1.local"
+  },
+  "clients": [
+    {
+      "clientID": "0c5f2a1ca0174ee3a177c3be7cd6d950",
+      "remoteAddr": "[::1]:63671",
+      "since": "2021-08-13T12:23:19.72286+01:00",
+      "upstreams": [
+        "*=http://127.0.0.1:8080"
+      ]
+    }
+  ]
+}
+
+

Monitor inlets with Prometheus

+

The server collects metrics for both the data-plane and the control-plane. These metrics are exposed through the monitoring endpoint on the control-plane. Prometheus can be set up for metrics collection and alerting.

+

The name of the metrics and the kind of metrics that are exported will depend on the mode that the server is running in. For TCP tunnels the metric name starts with tcp_ for HTTP tunnels this will be http_.

+

You don’t need to be a Kubernetes user to take advantage of Prometheus. You can run it locally on your machine by downloading the binary here.

+
+

As an alternative, Grafana Cloud can give you a complete monitoring stack for your tunnels without having to worry about finding somewhere to run and maintain Prometheus and Grafana. We have a write up on our blog that shows you how to set this up: Monitor inlets tunnels with Grafana Cloud.

+
+

Create a prometheus.yaml file to configure Prometheus. Replace TOKEN with the token from your server.

+
# my global config
+global:
+  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+  - static_configs:
+    - targets:
+      # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+    static_configs:
+    - targets: ['localhost:9090']
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'http-tunnel'
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+    static_configs:
+    - targets: ['localhost:8123']
+    scheme: https
+
+    authorization:
+      type: Bearer
+      credentials: TOKEN
+    tls_config:
+      insecure_skip_verify: true
+
+

Start Prometheus with this command. It will listen on port 9090.

+
$ prometheus --config.file=./prometheus.yaml
+
+level=info ts=2021-08-13T11:25:31.791Z caller=main.go:428 msg="Starting Prometheus" version="(version=2.29.1, branch=HEAD, revision=dcb07e8eac34b5ea37cd229545000b857f1c1637)"
+level=info ts=2021-08-13T11:25:31.931Z caller=main.go:784 msg="Server is ready to receive web requests."
+
+

Metrics for the control-plane

+

The control-plane metrics can give you insights into the number of clients that are connected and the number of http requests made to the different control-plane endpoints.

+

HTTP tunnels

+ + + + + + + + + + + + + + + + + + + + + + + +
MetricTypeDescriptionLabels
http_controlplane_connected_gaugegaugegauge of inlets clients connected to the control plane
http_controlplane_requests_totalcountertotal HTTP requests processed by connecting clients on the control planecode, path
+

TCP tunnels

+ + + + + + + + + + + + + + + + + + + + + + + +
MetricTypeDescriptionLabels
tcp_controlplane_connected_gaugegaugegauge of inlets clients connected to the control plane
tcp_controlplane_requests_totalcountertotal HTTP requests processed by connecting clients on the control planecode, path
+

These metrics can for instance be used to tell you whether there are a lot of clients that attempted to connect but failed authentication.

+

If running on Kubernetes, the connected gauge could be used to scale tunnels down to zero replicas, and back up again in a similar way to OpenFaaS. This could be important for very large-scale installations of devices or tenants that have partial connectivity.

+

Metrics for the data-plane

+

The data-plane metrics can give you insights in the services that are exposed through your tunnel.

+

HTTP tunnels

+ + + + + + + + + + + + + + + + + + + + + + + +
MetricTypeDescriptionLabels
http_dataplane_requests_totalcountertotal HTTP requests processedcode, host, method
http_dataplane_request_duration_secondshistogramSeconds spent serving HTTP requests.code, host, method
+

TCP tunnels

+ + + + + + + + + + + + + + + + + + + + + + + +
MetricTypeDescriptionLabels
tcp_dataplane_connections_gaugegaugegauge of TCP connections established over data planeport
tcp_dataplane_connections_totalcountertotal count of TCP connections established over data planeport
+

For HTTP tunnels these metrics can be used to get Rate, Error, Duration (RED) information for any API or website that is connected through the tunnel. This essentially allows you to collect basic metrics for your services even if they do not export any metrics themselves.

+

For TCP tunnels these metrics can help answer questions like:

+
    +
  • How many connections are open at this point in time, and on which ports? i.e. if exposing SSH on port 2222, how many connections are open?
  • +
+

Wrapping up

+

We showed two different options that can be used to monitor your inlets tunnels.

+

The CLI provides a quick and easy way to get some status information for a tunnel. The endpoint that exposes this information can also be invoked directly using HTTP.

+

Prometheus metrics can be collected from the monitoring endpoint. These metrics are useful for background monitoring and alerting. They can provide you with Rate, Error, Duration (RED) metrics for HTTP services that are exposed through Inlets.

+

You may also like

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/postgresql-tcp-tunnel/index.html b/tutorial/postgresql-tcp-tunnel/index.html new file mode 100644 index 0000000..802d5b0 --- /dev/null +++ b/tutorial/postgresql-tcp-tunnel/index.html @@ -0,0 +1,1067 @@ + + + + + + + + + + + + + + + + + + + + + + + + Tutorial: Tunnel a private Postgresql database - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Tutorial: Tunnel a private Postgresql database

+

In this tutorial we will tunnel Postgresql over inlets Pro to a remote machine. From there you can expose it to the Internet, or bind it to the local network for private VPN-like access.

+
+

You can subscribe to inlets for personal or commercial use via Gumroad

+
+

Setup your exit node

+

Provision a cloud VM on DigitalOcean or another IaaS provider using inletsctl:

+
inletsctl create \
+ --provider digitalocean \
+ --region lon1 \
+ --pro
+
+

Note the --url and TOKEN given to you in this step.

+

Run Postgresql on your private server

+

We can run a Postgresql instance using Docker:

+
head -c 16 /dev/urandom |shasum 
+8cb3efe58df984d3ab89bcf4566b31b49b2b79b9
+
+export PASSWORD="8cb3efe58df984d3ab89bcf4566b31b49b2b79b9"
+
+docker run --rm --name postgres -p 5432:5432 -e POSTGRES_PASSWORD=8cb3efe58df984d3ab89bcf4566b31b49b2b79b9 -ti postgres:latest
+
+

Connect the inlets Pro client

+

Fill in the below with the outputs you received from inletsctl create.

+

Note that UPSTREAM="localhost" can be changed to point at a host or IP address accessible from your client. The choice of localhost is suitable when you are running Postgresql in Docker on the same computer as the inlets Pro client.

+

The client will look for your license in $HOME/.inlets/LICENSE, but you can also use the --license/--license-file flag if you wish.

+
export EXIT_IP="134.209.21.155"
+export TCP_PORTS="5432"
+export LICENSE_FILE="$HOME/LICENSE.txt"
+export TOKEN="KXJ5Iq1Z5Cc8GjFXdXJrqNhUzoScXnZXOSRKeh8x3f6tdGq1ijdENWQ2IfzdCg4U"
+export UPSTREAM="localhost"
+
+inlets-pro tcp client --connect "wss://$EXIT_IP:8123/connect" \
+  --token "$TOKEN" \
+  --upstream $UPSTREAM \
+  --ports $TCP_PORTS
+
+

Connect to your private Postgresql server from the Internet

+

You can run this command from anywhere, since your exit-server has a public IP:

+
export PASSWORD="8cb3efe58df984d3ab89bcf4566b31b49b2b79b9"
+export EXIT_IP="209.97.141.140"
+
+docker run -it -e PGPORT=5432 -e PGPASSWORD=$PASSWORD --rm postgres:latest psql -U postgres -h $EXIT_IP
+
+

Try a command such as CREATE database or \dt.

+

Treat the database as private - like a VPN

+

A split data and control-plane mean that tunnels do not need to be exposed on the Internet and can replace a VPN or a bespoke solution with SSH tunnels

+
+

A split data and control-plane mean that tunnels do not need to be exposed on the Internet and can replace a VPN or a bespoke solution with SSH tunnels

+
+

If you would like to keep the database service and port private, you can run the exit-server as a Pod in a Kubernetes cluster, or add an iptables rule to block access from external IPs.

+

Log into your exit-server and update /etc/systemd/system/inlets-pro.service

+

To listen on loopback, add: --listen-data=127.0.0.1: +To listen on a private adapter such as 10.1.0.10, add: --listen-data=10.1.0.10:

+

Restart the service, and you'll now find that the database port 5432 can only be accessed from within the network you specified in --listen-data

+

Other databases such as Cassandra, MongoDB and Mysql/MariaDB also work exactly the same. Just change the port from 5432 to the port of your database.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/tutorial/ssh-tcp-tunnel/index.html b/tutorial/ssh-tcp-tunnel/index.html new file mode 100644 index 0000000..91eacbc --- /dev/null +++ b/tutorial/ssh-tcp-tunnel/index.html @@ -0,0 +1,1152 @@ + + + + + + + + + + + + + + + + + + + + + + + + Tutorial: Expose a private SSH server over a TCP tunnel - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Tutorial: Expose a private SSH server over a TCP tunnel

+

In this tutorial we will use inlets-pro to access your computer behind NAT or a firewall. We'll do this by tunnelling SSH over inlets-pro, and clients will connect to your exit-server.

+

Scenario: You want to allow SSH access to a computer that doesn't have a public IP, is inside a private network or behind a firewall. A common scenario is connecting to a Raspberry Pi on a home network or a home-lab.

+
+

You can subscribe to inlets for personal or commercial use via Gumroad

+
+

Setup your tunnel server with inletsctl

+

For this tutorial you will need to have an account and API key with one of the supported providers, or you can create an exit-server manually and install inlets Pro there yourself.

+

For this tutorial, the DigitalOcean provider will be used. You can get free credits on DigitalOcean with this link.

+

Create an API key in the DigitalOcean dashboard with Read and Write permissions, and download it to a file called do-access-token in your home directory.

+

You need to know the IP of the machine you want to connect to on your local network, for instance 192.168.0.35 or 127.0.0.1 if you are running inlets Pro on the same host as SSH.

+

You can use the inletsctl utility to provision exit-servers with inlets Pro preinstalled, it can also download the inlets-pro CLI.

+
curl -sLSf https://inletsctl.inlets.dev | sh
+sudo mv inletsctl /usr/local/bin/
+sudo inletsctl download
+
+

If you already have inletsctl installed, then make sure you update it with inletsctl update.

+

Create a tunnel server

+

A) Automate your tunnel server

+

The inletsctl tool can create a tunnel server for you in the region and cloud of your choice.

+
inletsctl create \
+  --provider digitalocean \
+  --access-token-file ~/do-access-token \
+  --region lon1
+
+

Run inletsctl create --help to see all the options.

+

After the machine has been created, inletsctl will output a sample command for the inlets-pro client command:

+
inlets-pro tcp client --url "wss://206.189.114.179:8123/connect" \
+    --token "4NXIRZeqsiYdbZPuFeVYLLlYTpzY7ilqSdqhA0HjDld1QjG8wgfKk04JwX4i6c6F"
+
+

Don't run this command, but note down the --url and --token parameters for later

+

B) Manual setup of your tunnel server

+

Use B) if you want to provision your virtual machine manually, or if you already have a host from another provider.

+

Log in to your remote tunnel server with ssh and obtain the binary using inletsctl:

+
curl -sLSf https://inletsctl.inlets.dev | sh
+sudo mv inletsctl /usr/local/bin/
+sudo inletsctl download
+
+

Find your public IP:

+
export IP=$(curl -s ifconfig.co)
+
+

Confirm the IP with echo $IP and save it, you need it for the client

+

Get an auth token and save it for later to use with the client

+
export TOKEN="$(head -c 16 /dev/urandom |shasum|cut -d'-' -f1)"
+
+echo $TOKEN
+
+

Start the server:

+
inlets-pro \
+  tcp \
+  server \
+  --auto-tls \
+  --auto-tls-san $IP \
+  --token $TOKEN
+
+

If running the inlets client on the same host as SSH, you can simply set PROXY_TO_HERE to localhost. Or if you are running SSH on a different computer to the inlets client, then you can specify a DNS entry or an IP address like 192.168.0.15.

+

If using this manual approach to install inlets Pro, you should create a systemd unit file.

+

The easiest option is to run the server with the --generate=systemd flag, which will generate a systemd unit file to stdout. You can then copy the output to /etc/systemd/system/inlets-pro.service and enable it with systemctl enable inlets-pro.

+

Configure the private SSH server's listening port

+

It's very likely (almost certain) that your exit server will already be listening for traffic on the standard ssh port 22. Therefore you will need to configure your internal server to use an additional TCP port such as 2222.

+

Once configured, you'll still be able to connect to the internal server on port 22, but to connect via the tunnel, you'll use port 2222

+

Add the following to /etc/ssh/sshd_config:

+
Port 22
+Port 2222
+
+

For (optional) additional security, you could also disable password authentication, but make sure that you have inserted your SSH key to the internal server with ssh-copy-id user@ip before reloading the SSH service.

+
PasswordAuthentication no
+
+

Now we need to reload the service so these changes take effect

+
sudo systemctl daemon-reload
+sudo systemctl restart sshd
+
+

Check that you can still connect on the internal IP on port 22, and the new port 2222.

+

Use the -p flag to specify the SSH port:

+
export IP="192.168.0.35"
+
+ssh -p 22 $IP "uptime"
+ssh -p 2222 $IP "uptime"
+
+

Start the inlets Pro client

+

First download the inlets-pro client onto the private SSH server:

+
sudo inletsctl download
+
+

Use the command from earlier to start the client on the server:

+
export IP="206.189.114.179"
+export TCP_PORTS="2222"
+export LICENSE_FILE="$HOME/LICENSE.txt"
+export UPSTREAM="localhost"
+
+inlets-pro tcp client --url "wss://$IP:8123/connect" \
+  --token "4NXIRZeqsiYdbZPuFeVYLLlYTpzY7ilqSdqhA0HjDld1QjG8wgfKk04JwX4i6c6F" \
+  --license-file "$LICENSE_FILE" \
+  --upstream "$UPSTREAM" \
+  --ports $TCP_PORTS
+
+

The localhost value will be used for --upstream because the tunnel client is running on the same machine as the SSH service. However, you could run the client on another machine within the network, and then change the flag to point to the private SSH server's IP.

+

Try it out

+

Verify the installation by trying to SSH to the public IP, using port 2222.

+
ssh -p 2222 user@206.189.114.179
+
+

You should now have access to your server via SSH over the internet with the IP of the exit server.

+

You can also use other compatible tools like sftp, scp and rsync, just make sure that you set the appropriate port flag. The port flag for sftp is -P rather than -p.

+

Wrapping up

+

The principles in this tutorial can be adapted for other protocols that run over TCP such as MongoDB or PostgreSQL, just adapt the port number as required.

+ + + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/uplink/become-a-provider/index.html b/uplink/become-a-provider/index.html new file mode 100644 index 0000000..77e3522 --- /dev/null +++ b/uplink/become-a-provider/index.html @@ -0,0 +1,1443 @@ + + + + + + + + + + + + + + + + + + + + + + + + Become an inlets uplink provider - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Become an inlets uplink provider

+

inlets uplink makes it easy for Service Providers and SaaS companies to deliver their product and services to customer networks.

+

To become a provider, you'll need a Kubernetes cluster, an inlets uplink subscription and to install the inlets-uplink-provider Helm chart.

+ +

Before you start

+

Before you start, you'll need the following:

+
    +
  • A Kubernetes cluster with LoadBalancer capabilities (i.e. public cloud).
  • +
  • A domain name clients can use to connect to the tunnel control plane.
  • +
  • An inlets uplink license (an inlets-pro license cannot be used)
  • +
  • +

    Optional: arkade - a tool for installing popular Kubernetes tools

    +

    To install arkade run:

    +
    curl -sSLf https://get.arkade.dev/ | sudo sh
    +
    +
  • +
+

Inlets uplink has its own independent subscription from inlets-pro.

+

Sign-up here: inlets uplink plans.

+

Create a Kubernetes cluster

+

We recommend creating a Kubernetes cluster with a minimum of three nodes. Each node should have a minimum of 2GB of RAM and 2 CPU cores.

+

Install cert-manager

+

Install cert-manager, which is used to manage TLS certificates for inlets-uplink.

+

You can use Helm, or arkade:

+
arkade install cert-manager
+
+ +

Make sure to create the target namespace for your installation first.

+
kubectl create namespace inlets
+
+

Create the required secret with your inlets-uplink license.

+
+

Check that your license key is in lower-case

+

There is a known issue with LemonSqueezy where the UI will copy the license key in lower-case, it needs to be converted to upper-case before being used with Inlets Uplink.

+
+

Convert the license to upper-case, if it's in lower-case:

+
(
+  mv $HOME/.inlets/LICENSE_UPLINK{,.lower}
+
+  cat $HOME/.inlets/LICENSE_UPLINK.lower | tr '[:lower:]' '[:upper:]' > $HOME/.inlets/LICENSE_UPLINK
+  rm $HOME/.inlets/LICENSE_UPLINK.lower
+)
+
+

Create the secret for the license:

+
kubectl create secret generic \
+  -n inlets inlets-uplink-license \
+  --from-file license=$HOME/.inlets/LICENSE_UPLINK
+
+

Set up ingress for customer tunnels

+

Tunnels on your customers' network will connect to your own inlets-uplink-provider.

+

There are two options for deploying the inlets-uplink-provider.

+

Use Option A if you're not sure, if your team already uses Istio or prefers Istio, use Option B.

+

A) Install with Kubernetes Ingress

+

We recommend ingress-nginx, and have finely tuned the configuration to work well for the underlying websocket for inlets. That said, you can change the IngressController if you wish.

+

Install ingress-nginx using arkade or Helm:

+
arkade install ingress-nginx
+
+

Create a values.yaml file for the inlets-uplink-provider chart:

+
clientRouter:
+  # Customer tunnels will connect with a URI of:
+  # wss://uplink.example.com/namespace/tunnel
+  domain: uplink.example.com
+
+  tls:
+    issuerName: letsencrypt-prod
+
+    # When set, a production issuer will be generated for you
+    # to use a pre-existing issuer, set issuer.enabled=false
+    issuer:
+      # Create a production issuer as part of the chart installation
+      enabled: true
+
+      # Email address used for ACME registration for the production issuer
+      email: "user@example.com"
+
+    ingress:
+      enabled: true
+      class: "nginx"      
+
+

Make sure to replace the domain and email with your actual domain name and email address.

+

Want to use the staging issuer for testing?

+

To use the Let's Encrypt staging issuer, pre-create your own issuer, update clientRouter.tls.issuerName with the name you have chosen, and then update clientRouter.tls.issuer.enabled and set it to false.

+

B) Install with Istio

+

We have added support in the inlets-uplink chart for Istio to make it as simple as possible to configure with a HTTP01 challenge.

+

If you don't have Istio setup already you can deploy it with arkade.

+
arkade install istio
+
+

Label the inlets namespace so that Istio can inject its sidecars:

+
kubectl label namespace inlets \
+  istio-injection=enabled --overwrite
+
+

Create a values.yaml file for the inlets-uplink chart:

+
clientRouter:
+  # Customer tunnels will connect with a URI of:
+  # wss://uplink.example.com/namespace/tunnel
+  domain: uplink.example.com
+
+  tls:
+    issuerName: letsencrypt-prod
+
+    # When set, a production issuer will be generated for you
+    # to use a pre-existing issuer, set issuer.enabled=false
+    issuer:
+      # Create a production issuer as part of the chart installation
+      enabled: true
+
+      # Email address used for ACME registration for the production issuer
+      email: "user@example.com"
+
+    istio:
+      enabled: true
+
+

Make sure to replace the domain and email with your actual domain name and email address.

+

Deploy with Helm

+
+

The chart is served through a container registry (OCI), not GitHub pages

+

Many Helm charts are served over GitHub pages, from a public repository, making it easy to browse and read the source code. We are using an OCI artifact in a container registry, which makes for a more modern alternative. If you want to browse the source, you can simply run helm template instead of helm upgrade.

+

Unauthorized?

+

The chart artifacts are public and do not require authentication, however if you run into an "Access denied" or authorization error when interacting with ghcr.io, try running helm registry login ghcr.io to refresh your credentials, or docker logout ghcr.io.

+
+

The Helm chart is called inlets-uplink-provider, you can deploy it using the custom values.yaml file created above:

+
helm upgrade --install inlets-uplink \
+  oci://ghcr.io/openfaasltd/inlets-uplink-provider \
+  --namespace inlets \
+  --values ./values.yaml
+
+

If you want to pin the version of the Helm chart, you can do so with the --version flag.

+

Where can I see the various options for values.yaml?

+

All of the various options for the Helm chart are documented in the configuration reference.

+

How can I view the source code?

+

See the note on helm template under the configuration reference.

+

How can I find the latest version of the chart?

+

If you omit a version, Helm will use the latest published OCI artifact, however if you do want to pin it, you can browse all versions of the Helm chart on GitHub

+

As an alternative to using ghcr.io's UI, you can get the list of tags, including the latest tag via the crane CLI:

+
arkade get crane
+
+# List versions
+crane ls ghcr.io/openfaasltd/inlets-uplink-provider
+
+# Get the latest version
+LATEST=$(crane ls ghcr.io/openfaasltd/inlets-uplink-provider |tail -n 1)
+echo $LATEST
+
+

Verify the installation

+

Once you've installed inlets-uplink, you can verify it is deployed correctly by checking the inlets namespace for running pods:

+
$ kubectl get pods --namespace inlets
+
+NAME                               READY   STATUS    RESTARTS   AGE
+client-router-b5857cf6f-7vrdh      1/1     Running   0          92s
+prometheus-74d8d7db9b-2hptm        1/1     Running   0          16s
+uplink-operator-7fccc9bdbc-twd2q   1/1     Running   0          92s
+
+

You should see the client-router and uplink-operator in a Running state.

+

If you installed inlets-uplink with Kubernetes ingress, you can verify that ingress for the client-router is setup and that a TLS certificate is issued for your domain using these two commands:

+
$ kubectl get -n inlets ingress/client-router
+
+NAME            CLASS    HOSTS                ADDRESS           PORTS     AGE
+client-router   <none>   uplink.example.com   188.166.194.102   80, 443   31m
+
+
$ kubectl get -n inlets cert/client-router-cert
+
+NAME                 READY   SECRET               AGE
+client-router-cert   True    client-router-cert   30m
+
+

Download the tunnel CLI

+

We provide a CLI to help you create and manage tunnels. It is available as a plugin for the inlets-pro CLI.

+

Download the inlets-pro binary:

+ +

Get the tunnel plugin:

+
inlets-pro plugin get tunnel
+
+

Run inlets-pro tunnel --help to see all available commands.

+

Setup the first customer tunnel

+

Continue the setup here: Create a customer tunnel

+

Upgrading the chart and components

+

If you have a copy of values.yaml with pinned image versions, you should update these manually.

+

Next, run the Helm chart installation command again, and remember to use the same values.yaml file that you used to install the software originally.

+

Over time, you may find using a tool like FluxCD or ArgoCD to manage the installation and updates makes more sense than running Helm commands manually.

+

If the Custom Resource Definition (CRD) has changed, you can extract it from the Chart repo and install it before or after upgrading. As a rule, Helm won't install or upgrade CRDs a second time if there's already an existing version:

+
helm template oci://ghcr.io/openfaasltd/inlets-uplink-provider \
+  --include-crds=true \
+  --output-dir=/tmp
+
+kubectl apply -f /
+  tmp/inlets-uplink-provider/crds/uplink.inlets.dev_tunnels.yaml
+
+

Upgrading existing customer tunnels

+

The operator will upgrade the image: version of all deployed inlets uplink tunnels automatically based upon the tag set in values.yaml.

+

If no value is set in your overridden values.yaml file, then whatever the default is in the chart will be used.

+
inletsVersion: 0.9.23
+
+

When a tunnel is upgraded, you'll see a log line like this:

+
2024-01-11T12:25:15.442Z        info    operator/controller.go:860      Upgrading version       {"tunnel": "ce.inlets", "from": "0.9.21", "to": "0.9.23"}
+
+

Configuration reference

+

Looking for the source for the Helm chart? The source is published directly to a container registry as an OCI bundle. View the source with: helm template oci://ghcr.io/openfaasltd/inlets-uplink-provider

+

If you need a configuration option outside of what's already available, feel free to raise an issue on the inlets-pro repository.

+

Overview of inlets-uplink parameters in values.yaml.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ParameterDescriptionDefault
pullPolicyThe imagePullPolicy applied to inlets-uplink components.Always
operator.imageContainer image used for the uplink operator.ghcr.io/openfaasltd/uplink-operator:0.1.5
clientRouter.imageContainer image used for the client router.ghcr.io/openfaasltd/uplink-client-router:0.1.5
clientRouter.domainDomain name for inlets uplink. Customer tunnels will connect with a URI of: wss://uplink.example.com/namespace/tunnel.""
clientRouter.tls.issuerNameName of cert-manager Issuer for the clientRouter domain.letsencrypt-prod
clientRouter.tls.issuer.enabledCreate a cert-manager Issuer for the clientRouter domain. Set to false if you wish to specify your own pre-existing object in the clientRouter.tls.issuerName field.true
clientRouter.tls.issuer.emailLet's Encrypt email. Only used for certificate renewing notifications.""
clientRouter.tls.ingress.enabledEnable ingress for the client router.true
clientRouter.tls.ingress.classIngress class for client router ingress.nginx
clientRouter.tls.ingress.annotationsAnnotations to be added to the client router ingress resource.{}
clientRouter.tls.istio.enabledUse an Istio Gateway for incoming traffic to the client router.false
clientRouter.service.typeClient router service typeClusterIP
clientRouter.service.nodePortClient router service port for NodePort service type, assigned automatically when left empty. (only if clientRouter.service.type is set to "NodePort")nil
tunnelsNamespaceDeployments, Services and Secrets will be created in this namespace. Leave blank for a cluster-wide scope, with tunnels in multiple namespaces.""
inletsVersionInlets Pro release version for tunnel server Pods.0.9.12
clientApi.enabledEnable tunnel management REST API.false
clientApi.imageContainer image used for the client API.ghcr.io/openfaasltd/uplink-api:0.1.5
prometheus.createCreate the Prometheus monitoring component.true
prometheus.resourcesResource limits and requests for prometheus containers.{}
prometheus.imageContainer image used for prometheus.prom/prometheus:v2.40.1
prometheus.service.typePrometheus service typeClusterIP
prometheus.service.nodePortPrometheus service port for NodePort service type, assigned automatically when left empty. (only if prometheus.service.type is set to "NodePort")nil
nodeSelectorNode labels for pod assignment.{}
affinityNode affinity for pod assignments.{}
tolerationsNode tolerations for pod assignment.[]
+

Specify each parameter using the --set key=value[,key=value] argument to helm install

+

Telemetry and usage data

+

The inlets-uplink Kubernetes operator will send telemetry data to OpenFaaS Ltd on a periodic basis. This information is used for calculating accurate usage metrics for billing purposes. This data is sent over HTTPS, does not contain any personal information, and is not shared with any third parties.

+

This data includes the following:

+
    +
  • Number of tunnels deployed
  • +
  • Number of namespaces with at least one tunnel contained
  • +
  • Kubernetes version
  • +
  • Inlets Uplink version
  • +
  • Number of installations of Inlets Uplink
  • +
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/uplink/connect-to-tunnels/index.html b/uplink/connect-to-tunnels/index.html new file mode 100644 index 0000000..6faa6e5 --- /dev/null +++ b/uplink/connect-to-tunnels/index.html @@ -0,0 +1,1095 @@ + + + + + + + + + + + + + + + + + + + + + + + + Connect to tunnels - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Connect to tunnels

+

The tunnel plugin for the inlets-pro CLI can be used to get connection instructions for a tunnel.

+

Whether the client needs to be deployed as a systemd service on the customers server or as a Kubernetes service, with the CLI it is easy to generate connection instructions for these different formats by setting the --format flag.

+

Supported formats:

+ +

Make sure you have the latest version of the tunnel command available:

+
inlets-pro plugin get tunnel
+
+

Get connection instructions

+

Generate the client command for the selected tunnel:

+
$ inlets-pro tunnel connect openfaas \
+    --domain uplink.example.com \
+    --upstream http://127.0.0.1:8080
+
+# Access your HTTP tunnel via: http://openfaas.tunnels:8000
+
+# Access your TCP tunnel via ClusterIP: 
+#  openfaas.tunnels:5432
+
+inlets-pro uplink client \
+  --url=wss://uplink.example.com/tunnels/openfaas \
+  --token=tbAd4HooCKLRicfcaB5tZvG3Qj36pjFSL3Qob6b9DBlgtslmildACjWZUD \
+  --upstream=http://127.0.0.1:8080
+
+

Optionally the --quiet flag can be set to print the CLI command without the additional info.

+

Deploy the client as a systemd service

+

To generate a systemd service file for the tunnel client command set the --format flag to systemd.

+
$ inlets-pro tunnel connect openfaas \
+    --domain uplink.example.com \ 
+    --upstream http://127.0.0.1:8080 \
+    --format systemd
+
+[Unit]
+Description=openfaas inlets client
+After=network.target
+
+[Service]
+Type=simple
+Restart=always
+RestartSec=5
+StartLimitInterval=0
+ExecStart=/usr/local/bin/inlets-pro uplink client --url=wss://uplink.example.com/tunnels/openfaas --token=tbAd4HooCKLRicfcaB5tZvG3Qj36pjFSL3Qob6b9DBlgtslmildACjWZUD --upstream=http://127.0.0.1:8080
+
+[Install]
+WantedBy=multi-user.target
+
+

Copy the service file over to the customer's host. Save the unit file as: /etc/systemd/system/openfaas-tunnel.service.

+

Once the file is in place start the service for the first time:

+
sudo systemctl daemon-reload
+sudo systemctl enable --now openfaas-tunnel
+
+

Verify the tunnel client is running:

+
systemctl status openfaas-tunnel
+
+

You can also check the logs to see if the client connected successfully:

+
journalctl -u openfaas-tunnel
+
+

Deploy the client in a Kubernetes cluster

+

To generate a YAML deployment for a selected tunnel, set the --format flag to k8s_yaml. The generated resource can be deployed in the customers cluster.

+
inlets-pro tunnel connect openfaas \
+    --domain uplink.example.com \
+    --upstream http://gateway.openfaas:8080 \
+    --format k8s_yaml
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: openfaas-inlets-client
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: openfaas-inlets-client
+  template:
+    metadata:
+      labels:
+        app: openfaas-inlets-client
+    spec:
+      containers:
+      - name: openfaas-inlets-client
+        image: ghcr.io/inlets/inlets-pro:0.9.14
+        imagePullPolicy: IfNotPresent
+        command: ["inlets-pro"]
+        args:
+        - "uplink"
+        - "client"
+        - "--url=wss://uplink.example.com/tunnels/openfaas"
+        - "--token=tbAd4HooCKLRicfcaB5tZvG3Qj36pjFSL3Qob6b9DBlgtslmildACjWZUD"
+        - "--upstream=http://gateway.openfaas:8080"
+
+

In this example we create a tunnel to uplink an OpenFaaS deployment.

+

Get the logs for the client and check it connected successfully:

+
kubectl logs deploy/openfaas-inlets-client
+
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/uplink/create-tunnels/index.html b/uplink/create-tunnels/index.html new file mode 100644 index 0000000..9743398 --- /dev/null +++ b/uplink/create-tunnels/index.html @@ -0,0 +1,1373 @@ + + + + + + + + + + + + + + + + + + + + + + + + Create a tunnel for a customer - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + + + + + +
+
+ + + + + +

Create a tunnel for a customer

+

Use separate namespaces for your tunnels

+

The inlets namespace contains the control plane for inlets uplink, so you'll need to create at least one additional namespace for your customer tunnels.

+
    +
  1. +

    Create a namespace per customer (recommended)

    +

    This approach avoids conflicts on names, and gives better isolation between tenants.

    +
    kubectl create namespace acmeco
    +
    +

    Then, create a copy of the license secret in the new namespace:

    +
    export NS="n1"
    +export LICENSE=$(kubectl get secret -n inlets inlets-uplink-license -o jsonpath='{.data.license}' | base64 -d)
    +
    +kubectl create secret generic \
    +  -n $NS \
    +  inlets-uplink-license \
    +  --from-literal license=$LICENSE
    +
    +
  2. +
  3. +

    A single namespace for all customer tunnels (not recommended)

    +

    For development purposes, you could create a single namespace for all your customers.

    +
    kubectl create namespace tunnels
    +
    +
  4. +
+

Finally, if you're using Istio, then you need to label each additional namespace to enable sidecar injection:

+
kubectl label namespace tunnels \
+  istio-injection=enabled --overwrite
+
+

Create a Tunnel with an auto-generated token

+

Tunnel describes an inlets-uplink tunnel server. The specification describes a set of ports to use for TCP tunnels.

+

For example the following Tunnel configuration sets up a http tunnel on port 8000 by default and adds port 8080 for use with TCP tunnels. The licenceRef needs to reference a secret containing an inlets-uplink license.

+
apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: acmeco
+  namespace: tunnels
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: tunnels
+  tcpPorts:
+  - 8080 
+
+

Alternatively the CLI can be used to create a tunnel:

+
inlets-pro tunnel create acmeco \
+  -n tunnels \
+  --port 8080
+
+

Create a Tunnel with a pre-defined token

+

If you delete a Tunnel with an auto-generated token, and re-create it later, the token will change. So we recommend that you pre-define your tokens. This style works well for GitOps and automated deployments with Helm.

+

Make sure the secret is in the same namespace as the Tunnel Custom Resource.

+

You can use openssl to generate a secure token:

+
openssl rand -base64 32 |tr -d '\n' > token.txt
+
+

Note that the tr command is used to remove the newline character from the output, so that there is no new-line within the token.

+

Create a Kubernetes secret for the token named custom-token:

+
kubectl create secret generic \
+  -n tunnels acmeco-token \
+  --from-file token=./token.txt
+
+

Reference the token when creating a tunnel, to expose port 8080 over TCP.

+
apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: acmeco
+  namespace: tunnels
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: tunnels
+  tokenRef:
+    name: acmeco-token
+    namespace: tunnels
+  tcpPorts:
+  - 8080
+
+

Clients can now connect to the tunnel using the custom token.

+

Node selection and annotations for tunnels

+

The tunnel spec has a nodeSelector field that can be used to assign tunnel pods to Nodes. See Assign Pods to Nodes from the kubernetes docs for more information.

+

It is also possible to set additional annotations on the tunnel pod using the podAnnotations field in the tunnel spec.

+

The following example adds an annotation with the customer name to the tunnel pod and uses the node selector to specify a target node with a specific region label.

+
apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: acmeco
+  namespace: tunnels
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: tunnels
+  tcpPorts:
+  - 8080
+  podAnnotations:
+    customer: acmeco
+  nodeSelector:
+    region: east
+
+

Connect to tunnels

+

The uplink client command is part of the inlets-pro binary. It is used to connect to tunnels and expose services over the tunnel.

+

There are several ways to get the binary:

+ +

Example: Tunnel a customer HTTP service

+

We'll use inlets-pro's built in file server as an example of how to tunnel a HTTP service.

+

Run this command on a private network or on your workstation:

+
mkdir -p /tmp/share
+cd /tmp/share
+echo "Hello World" > README.md
+
+inlets-pro fileserver -w /tmp/share -a
+
+Starting inlets Pro fileserver. Version: 0.9.10-rc1-1-g7bc49ae - 7bc49ae494bd9ec789fc5e9eaf500f2b1fe60786
+Serving files from: /tmp/share
+Listening on: 127.0.0.1:8080, allow browsing: true, auth: false
+
+

Once the server is running connect to your tunnel using the inlets-uplink client. We will connect to the tunnel called acmeco (see the example in Create a tunnel for a customer using the Custom Resource to create this tunnel).

+

Retrieve the token for the tunnel:

+
+
+
+
kubectl get -n tunnels \
+  secret/acmeco -o jsonpath="{.data.token}" | base64 --decode > token.txt 
+
+
+
+
inlets-pro tunnel token acmeco \
+  -n tunnels > token.txt
+
+
+
+
+

The contents will be saved in token.txt.

+

Start the tunnel client:

+
inlets-pro uplink client \
+  --url wss://uplink.example.com/tunnels/acmeco \
+  --upstream http://127.0.0.1:8080 \
+  --token-file ./token.txt
+
+
+

Tip: get connection instructions

+

The tunnel plugin for the inlets-pro CLI can be used to get connection instructions for a tunnel.

+
inlets-pro tunnel connect acmeco \
+  --domain uplink.example.com \
+  --upstream http://127.0.0.1:8080
+
+

Running the command above will print out the instructions to connect to the tunnel:

+
# Access your tunnel via ClusterIP: acmeco.tunnels
+inlets-pro uplink client \
+  --url=wss://uplink.example.com/tunnels/acmeco \
+  --upstream=http://127.0.0.1:8080 \
+  --token=z4oubxcamiv89V0dy8ytmjUEPwAmY0yFyQ6uaBmXsIQHKtAzlT3PcGZRgK
+
+
+

Run a container in the cluster to check the file server is accessible through the http tunnel using curl: curl -i acmeco.tunnels:8000

+
$ kubectl run -t -i curl --rm \
+  --image ghcr.io/openfaas/curl:latest /bin/sh   
+
+$ curl -i acmeco.tunnels:8000
+HTTP/1.1 200 OK
+Content-Type: text/html; charset=utf-8
+Date: Thu, 17 Nov 2022 08:39:48 GMT
+Last-Modified: Mon, 14 Nov 2022 20:52:53 GMT
+Content-Length: 973
+
+<pre>
+<a href="README.md">README.md</a>
+</pre>
+
+

How to tunnel multiple HTTP services from a customer

+

The following example shows how to access more than one HTTP service over the same tunnel. It is possible to expose multiple upstream services over a single tunnel.

+

You can add upstreamDomains to the Tunnel resource. Uplink will create additional Services for each domain so the HTTP data plane is available on different domains. +

apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: acmeco
+  namespace: tunnels
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: tunnels
+  tcpPorts:
+  - 8080
++  upstreamDomains:
++  - gateway
++  - prometheus
+

+

Upstreams can also be added while creating a tunnel with the CLI:

+
inlets-pro tunnel create acmeco \
+  --namespace tunnels \
+  --upstream gateway \
+  --upstream prometheus
+
+

Start a tunnel client and add multiple upstreams:

+
inlets-pro uplink client \
+  --url wss://uplink.example.com/tunnels/acmeco \
+  --upstream prometheus.tunnels=http://127.0.0.1:9090 \
+  --upstream gateway.tunnels=http://127.0.0.1:8080 \
+  --token-file ./token.txt
+
+

Access both services using curl:

+
$ kubectl run -t -i curl --rm \
+  --image ghcr.io/openfaas/curl:latest /bin/sh   
+
+$ curl -i gateway.tunnels:8000
+HTTP/1.1 302 Found
+Content-Length: 29
+Content-Type: text/html; charset=utf-8
+Date: Thu, 16 Feb 2023 16:29:09 GMT
+Location: /graph
+
+<a href="/graph">Found</a>.
+
+
+$ curl -i prometheus.tunnels:8000
+HTTP/1.1 301 Moved Permanently
+Content-Length: 39
+Content-Type: text/html; charset=utf-8
+Date: Thu, 16 Feb 2023 16:29:11 GMT
+Location: /ui/
+
+<a href="/ui/">Moved Permanently</a>.
+
+

Note that the Host header has to be set in the request so the tunnel knows which upstream to send the request to.

+

Tunnel a customer's TCP service

+

Perhaps you need to access a customer's Postgres database from their private network?

+

Create a TCP tunnel using a Custom Resource

+

Example Custom Resource to deploy a tunnel for acmeco’s production Postgres database:

+
apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: prod-database
+  namespace: acmeco
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: acmeco
+  tcpPorts:
+  - 5432
+
+

Alternatively the CLI can be used to create a new tunnel:

+
inlets-pro tunnel create prod-database \
+  -n acmeco \
+  --port 5432
+
+

Run postgresql on your private server

+

The quickest way to spin up a Postgres instance on your own machine would be to use Docker:

+
head -c 16 /dev/urandom |shasum 
+8cb3efe58df984d3ab89bcf4566b31b49b2b79b9
+
+export PASSWORD="8cb3efe58df984d3ab89bcf4566b31b49b2b79b9"
+
+docker run --rm --name postgres \
+  -p 5432:5432 \
+  -e POSTGRES_PASSWORD=8cb3efe58df984d3ab89bcf4566b31b49b2b79b9 \
+  -ti postgres:latest
+
+ +
export UPLINK_DOMAIN="uplink.example.com"
+
+inlets-pro uplink client \
+  --url wss://${UPLINK_DOMAIN}/acmeco/prod-database \
+  --upstream 127.0.0.1:5432 \
+  --token-file ./token.txt
+
+

Access the customer database from within Kubernetes

+

Now that the tunnel is established, you can connect to the customer's Postgres database from within Kubernetes using its ClusterIP prod-database.acmeco.svc.cluster.local:

+

Try it out:

+
export PASSWORD="8cb3efe58df984d3ab89bcf4566b31b49b2b79b9"
+
+kubectl run -i -t psql \
+  --env PGPORT=5432 \
+  --env PGPASSWORD=$PASSWORD --rm \
+  --image postgres:latest -- psql -U postgres -h prod-database.acmeco
+
+

Try a command such as CREATE database websites (url TEXT), \dt or \l.

+

Getting help

+

Feel free to reach out to our team via email for technical support.

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/uplink/ingress-for-tunnels/index.html b/uplink/ingress-for-tunnels/index.html new file mode 100644 index 0000000..ce72c9d --- /dev/null +++ b/uplink/ingress-for-tunnels/index.html @@ -0,0 +1,1376 @@ + + + + + + + + + + + + + + + + + + + + + + + + Ingress for tunnels - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Ingress for tunnels

+
+

Info

+

Inlets Uplink is designed to connect customer services to a remote Kubernetes cluster for command and control as part of a SaaS product.

+

Any tunnelled service can be accessed directly from within the cluster and does not need to be exposed to the public Internet for access.

+

Beware: by following these instructions, you are exposing one or more of those tunnels to the public Internet.

+
+

Make inlets uplink HTTP tunnels publicly accessible by setting up ingress for the data plane.

+

The instructions assume that you want to expose two HTTP tunnels. We will configure ingress for the first tunnel, called grafana, on the domain grafana.example.com. The second tunnel, called openfaas, will use the domain openfaas.example.com.

+

Both tunnels can be created with kubectl or the inlets-pro cli. See create tunnels for more info:

+
+
+
+
$ cat <<EOF | kubectl apply -f - 
+apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: grafana
+  namespace: tunnels
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: tunnels
+---
+apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: openfaas
+  namespace: tunnels
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: tunnels
+EOF
+
+
+
+
$ inlets-pro tunnel create grafana
+Created tunnel grafana. OK.
+
+$ inlets-pro tunnel create openfaas
+Created tunnel openfaas. OK.
+
+
+
+
+

Follow the instructions for Kubernetes Ingress or Istio depending on how you deployed inlets uplink.

+

Setup tunnel ingress

+
    +
  1. +

    Create a new certificate Issuer for tunnels:

    +
    export EMAIL="you@example.com"
    +
    +cat > tunnel-issuer-prod.yaml <<EOF
    +apiVersion: cert-manager.io/v1
    +kind: Issuer
    +metadata:
    +  name: tunnels-letsencrypt-prod
    +  namespace: inlets
    +spec:
    +  acme:
    +    server: https://acme-v02.api.letsencrypt.org/directory
    +    email: $EMAIL
    +    privateKeySecretRef:
    +      name: tunnels-letsencrypt-prod
    +    solvers:
    +    - http01:
    +        ingress:
    +          class: "nginx"
    +EOF
    +
    +
  2. +
  3. +

    Create an ingress resource for the tunnel:

    +
    apiVersion: networking.k8s.io/v1
    +kind: Ingress
    +metadata:
    +  name: grafana-tunnel-ingress
    +  namespace: inlets
    +  annotations:
    +    kubernetes.io/ingress.class: nginx
    +    cert-manager.io/issuer: tunnels-letsencrypt-prod
    +spec:
    +  rules:
    +  - host: grafana.example.com
    +    http:
    +      paths:
    +      - path: /
    +        pathType: Prefix
    +        backend:
    +          service:
    +            name: grafana.tunnels
    +            port:
    +              number: 8000
    +  tls:
    +  - hosts:
    +    - grafana.example.com
    +    secretName: grafana-cert
    +
    +

    Note that the annotation cert-manager.io/issuer is used to reference the certificate issuer created in the first step.

    +
  4. +
+

To setup ingress for multiple tunnels simply define multiple ingress resources. For example apply a second ingress resource for the openfaas tunnel:

+
apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: openfaas-tunnel-ingress
+  namespace: inlets
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    cert-manager.io/issuer: tunnels-letsencrypt-prod
+spec:
+  rules:
+  - host: openfaas.example.com
+    http:
+      paths:
+      - path: /
+        pathType: Prefix
+        backend:
+          service:
+            name: openfaas.tunnels
+            port:
+              number: 8000
+  tls:
+  - hosts:
+    - openfaas.example.com
+    secretName: openfaas-cert
+
+

Setup tunnel ingress with an Istio Ingress gateway

+
    +
  1. +

    Create a new certificate Issuer for tunnels:

    +
    export EMAIL="you@example.com"
    +
    +cat > tunnel-issuer-prod.yaml <<EOF
    +apiVersion: cert-manager.io/v1
    +kind: Issuer
    +metadata:
    +  name: tunnels-letsencrypt-prod
    +  namespace: istio-system
    +spec:
    +  acme:
    +    server: https://acme-v02.api.letsencrypt.org/directory
    +    email: $EMAIL
    +    privateKeySecretRef:
    +      name: tunnels-letsencrypt-prod
    +    solvers:
    +    - http01:
    +        ingress:
    +          class: "istio"
    +EOF
    +
    +

    We are using the Let's Encrypt production server which has strict limits on the API. A staging server is also available at https://acme-staging-v02.api.letsencrypt.org/directory. If you are creating a lot of certificates while testing it would be better to use the staging server.

    +
  2. +
  3. +

    Create a new certificate resource. In this case we want to expose two tunnels on their own domain, grafana.example.com and openfaas.example.com. This will require two certificates, one for each domain:

    +
    apiVersion: cert-manager.io/v1
    +kind: Certificate
    +metadata:
    +  name: grafana-cert
    +  namespace: istio-system
    +spec:
    +  secretName: grafana-cert
    +  commonName: grafana.example.com
    +  dnsNames:
    +  - grafana.example.com
    +  issuerRef:
    +    name: tunnels-letsencrypt-prod
    +    kind: Issuer
    +
    +---
    +apiVersion: cert-manager.io/v1
    +kind: Certificate
    +metadata:
    +  name: openfaas-cert
    +  namespace: istio-system
    +spec:
    +  secretName: openfaas-cert
    +  commonName: openfaas.example.com
    +  dnsNames:
    +  - openfaas.example.com
    +  issuerRef:
    +    name: tunnels-letsencrypt-prod
    +    kind: Issuer
    +
    +

    Note that both the certificates and issuer are created in the istio-system namespace.

    +
  4. +
  5. +

    Configure the ingress gateway for both tunnels. In this case we create a single resource for both hosts but you could also split the configuration into multiple Gateway resources.

    +
    apiVersion: networking.istio.io/v1alpha3
    +kind: Gateway
    +metadata:
    +  name: tunnel-gateway
    +  namespace: inlets
    +spec:
    +  selector:
    +      istio: ingressgateway # use Istio default gateway implementation
    +  servers:
    +  - port:
    +      number: 443
    +      name: https
    +      protocol: HTTPS  
    +    tls:
    +      mode: SIMPLE
    +      credentialName: grafana-cert
    +    hosts:
    +    - grafana.example.com
    +  - port:
    +      number: 443
    +      name: https
    +      protocol: HTTPS
    +    tls:
    +      mode: SIMPLE
    +      credentialName: openfaas-cert
    +    hosts:
    +    - openfaas.example.com
    +
    +

    Note that the credentialName references the secrets for the certificates created in the previous step.

    +
  6. +
  7. +

    Configure the gateway's traffic routes by defining corresponding virtual services:

    +
    apiVersion: networking.istio.io/v1alpha3
    +kind: VirtualService
    +metadata:
    +  name: grafana
    +  namespace: inlets
    +spec:
    +  hosts:
    +  - grafana.example.com
    +  gateways:
    +  - tunnel-gateway
    +  http:
    +  - match:
    +    - uri:
    +        prefix: /
    +    route:
    +    - destination:
    +        host: grafana.tunnels.svc.cluster.local
    +        port:
    +          number: 8000
    +---
    +apiVersion: networking.istio.io/v1alpha3
    +kind: VirtualService
    +metadata:
    +  name: openfaas
    +  namespace: inlets
    +spec:
    +  hosts:
    +  - openfaas.example.com
    +  gateways:
    +  - tunnel-gateway
    +  http:
    +  - match:
    +    - uri:
    +        prefix: /
    +    route:
    +    - destination:
    +        host: openfaas.tunnels.svc.cluster.local
    +        port:
    +          number: 8000
    +
    +
  8. +
+

After applying these resources you should be able to access the data plane for both tunnels on their custom domain.

+

Wildcard Ingress with the data-router

+

As an alternative to creating individual sets of Ingress records, DNS A/CNAME entries and TLS certificates for each tunnel, you can use the data-router to route traffic to the correct tunnel based on the hostname. This approach uses a wildcard DNS entry and a single TLS certificate for all tunnels.

+

The following example is adapted from the cert-manager documentation to use DigitalOcean's DNS servers, however you can find instructions for issuers such as AWS Route53, Cloudflare, and Google Cloud DNS listed.

+

DNS01 challenges require a secret to be created containing the credentials for the DNS provider. The secret is referenced by the issuer resource.

+
kubectl create secret generic \
+  -n inlets digitalocean-dns \
+  --from-file access-token=$HOME/do-access-token
+
+

Create a separate Issuer, assuming a domain of t.example.com, where each tunnel would be i.e. prometheus.t.example.com or api.t.example.com:

+
export NS="inlets"
+export ISSUER_NAME="inlets-wildcard"
+export DOMAIN="t.example.com"
+
+cat <<EOF | kubectl apply -f -
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: $ISSUER_NAME
+  namespace: $NS
+spec:
+  acme:
+    email: webmaster@$DOMAIN
+    server: https://acme-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      name: $ISSUER_NAME
+    solvers:
+    - dns01:
+        digitalocean:
+            tokenSecretRef:
+              name: digitalocean-dns
+              key: access-token
+EOF
+
+

Update values.yaml to enable the dataRouter and to specify the wildcard domain:

+
## The dataRouter is an optional component to enable easy Ingress to connected tunnels.
+## Learn more under "Ingress for Tunnels" in the docs: https://docs.inlets.dev/
+dataRouter:
+  enabled: true
+
+  # Leave out the asterisk i.e. *.t.example.com would be: t.example.com
+  wildcardDomain: "t.example.com"
+
+  tls:
+    issuerName: "inlets-wildcard"
+
+    ingress:
+      class: "nginx"
+      annotations:
+        # Apply basic rate limiting.
+        nginx.ingress.kubernetes.io/limit-connections: "300"
+        nginx.ingress.kubernetes.io/limit-rpm: "1000"
+
+

Apply the updated values:

+
helm upgrade --install inlets-uplink \
+  oci://ghcr.io/openfaasltd/inlets-uplink-provider \
+  --namespace inlets \
+  --values ./values.yaml
+
+

Create a tunnel with an Ingress Domain specified in the .Spec field:

+
export TUNNEL_NS="tunnels"
+export DOMAIN="t.example.com"
+
+cat <<EOF | kubectl apply -f -
+apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: fileshare
+  namespace: $TUNNEL_NS
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: $TUNNEL_NS
+  ingressDomains:
+    - fileshare.$DOMAIN
+EOF
+
+

On a private computer, create a new directory, a file to serve and then run the built-in HTTP server:

+
cd /tmp
+mkdir -p ./share
+cd ./share
+echo "Hello from inlets" > index.html
+
+inlets-pro fileserver --port 8080 --allow-browsing --webroot ./
+
+

Get the instructions to connect to the tunnel.

+

The --domain flag here is for your uplink control-plane, where tunnels connect, not the data-plane where ingress is served. This is usually i.e. uplink.example.com.

+
export TUNNEL_NS="tunnels"
+export UPLINK_DOMAIN="uplink.example.com"
+
+inlets-pro tunnel connect fileshare \
+  --namespace $TUNNEL_NS \
+  --domain $UPLINK_DOMAIN
+
+

Add the --upstream fileshare.t.example.com=fileshare flag to the command you were given, then run it.

+

The command below is sample output, do not copy it directly.

+
inlets-pro uplink client \
+  --url=wss://uplink.example.com/tunnels/fileshare \
+  --token=REDACTED \
+  --upstream fileshare.t.example.com=http://127.0.0.1:8080
+
+

Now, access the tunneled service via the wildcard domain i.e. https://fileshare.t.example.com.

+

You should see: "Hello from inlets" printed in your browser.

+

Finally, you can view the logs of the data-router, to see it resolving internal tunnel service names for various hostnames:

+
kubectl logs -n inlets deploy/data-router
+
+2024-01-24T11:29:16.965Z        info    data-router/main.go:51  Inlets (tm) Uplink - data-router: 
+
+2024-01-24T11:29:16.970Z        info    data-router/main.go:90  Listening on: 8080      Tunnel namespace: (all) Kubernetes version: v1.27.4+k3s1
+
+I0124 11:29:58.858772       1 main.go:151] Host: fileshares.t.example.com    Path: /
+I0124 11:29:58.858877       1 roundtripper.go:48] "No ingress found" hostname="fileshares.t.example.com" path="/"
+
+I0124 11:30:03.588993       1 main.go:151] Host: fileshare.t.example.com     Path: /
+I0124 11:30:03.589051       1 roundtripper.go:56] "Resolved" hostname="fileshare.t.example.com" path="/" tunnel="fileshare.tunnels:8000"
+
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/uplink/manage-tunnels/index.html b/uplink/manage-tunnels/index.html new file mode 100644 index 0000000..7fcbb92 --- /dev/null +++ b/uplink/manage-tunnels/index.html @@ -0,0 +1,1149 @@ + + + + + + + + + + + + + + + + + + + + + + + + Manage customer tunnels - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Manage customer tunnels

+

You can use kubectl or the tunnel plugin for the inlets-pro CLI to manage tunnels.

+

List tunnels

+

List tunnels across all namespaces:

+
+
+
+
$ kubectl get tunnels -A
+
+NAMESPACE     NAME         AUTHTOKENNAME   DEPLOYMENTNAME   TCP PORTS   DOMAINS
+tunnels       acmeco       acmeco          acmeco           [8080]      
+customer1     ssh          ssh             ssh              [50035]
+customer1     prometheus   prometheus      prometheus       []         [prometheus.customer1.example.com]
+
+
+
+
$ inlets-pro tunnel list -A
+
+TUNNEL     DOMAINS                              PORTS   CREATED
+acmeco     []                                   [8080]  2022-11-22 11:51:35 +0100 CET
+ssh        []                                   [50035] 2022-11-24 18:19:01 +0100 CET
+prometheus [prometheus.customer1.example.com]   []      2022-11-24 11:43:23 +0100 CET
+
+
+
+
+

To list the tunnels within a namespace:

+
+
+
+
$ kubectl get tunnels -n customer1
+
+NAME         AUTHTOKENNAME   DEPLOYMENTNAME   TCP PORTS   DOMAINS
+ssh          ssh             ssh              [50035]
+
+
+
+
$ inlets-pro tunnel list -n customer1
+
+TUNNEL     DOMAINS   PORTS   CREATED
+ssh        []        [50035] 2022-11-22 11:51:35 +0100 CET
+
+
+
+
+

Delete a tunnel

+

Deleting a tunnel will remove all resources for the tunnel.

+

To remove a tunnel run:

+
+
+
+
kubectl delete -n tunnels \
+  tunnel/acmeco 
+
+
+
+
inlets-pro tunnel remove acmeco \
+  -n tunnels
+
+
+
+
+

Do also remember to stop the customer's inlets uplink client.

+

Update the ports or domains for a tunnel

+

You can update a tunnel and configure its TCP ports or domain names by editing the Tunnel Custom Resource:

+
kubectl edit -n tunnels \
+  tunnel/acmeco  
+
+

Imagine you wanted to add port 8081, when you already had port 8080 exposed:

+
apiVersion: uplink.inlets.dev/v1alpha1
+kind: Tunnel
+metadata:
+  name: acmeco
+  namespace: tunnels
+spec:
+  licenseRef:
+    name: inlets-uplink-license
+    namespace: tunnels
+  tcpPorts:
+  - 8080
++ - 8081
+
+

Alternatively, if you have the tunnel saved as a YAML file, you can edit it and apply it again with kubectl apply.

+

Check the logs of a tunnel

+

The logs for tunnels can be useful for troubleshooting or to see if clients are connecting successfully.

+

Get the logs for a tunnel deployment:

+
$ kubectl logs -n tunnels deploy/acmeco -f
+
+2022/11/22 12:07:38 Inlets Uplink For SaaS & Service Providers (Inlets Uplink for 5x Customers)
+2022/11/22 12:07:38 Licensed to: user@example.com
+inlets (tm) uplink server
+All rights reserved OpenFaaS Ltd (2022)
+
+Metrics on: 0.0.0.0:8001
+Control-plane on: 0.0.0.0:8123
+HTTP data-plane on: 0.0.0.0:8000
+time="2022/11/22 12:33:34" level=info msg="Added upstream: * => http://127.0.0.1:9090 (9355de15c687471da9766cbe51423e54)"
+time="2022/11/22 12:33:34" level=info msg="Handling backend connection request [9355de15c687471da9766cbe51423e54]"
+
+

Rotate the secret for a tunnel

+

You may want to rotate a secret for a customer if you think the secret has been leaked. The token can be rotated manually using kubectl or with a single command using the tunnel CLI plugin.

+
+
+
+

Delete the token secret. The default secret has the same name as the tunnel. The inlets uplink controller will automatically create a new secret.

+
kubectl delete -n tunnels \
+  secret/acmeco 
+
+

The tunnel has to be restarted to use the new token.

+
kubectl rollout restart -n tunnels \
+  deploy/acmeco
+
+
+
+

Rotate the tunnel token:

+
inlets-pro tunnel rotate acmeco \
+  -n tunnels
+
+
+
+
+

Any connected tunnels will disconnect at this point, and won’t be able to reconnect until you configure them with the updated token.

+

Retrieve the new token for the tunnel and save it to a file:

+
+
+
+
kubectl get -n tunnels secret/acmeco \
+  -o jsonpath="{.data.token}" | base64 --decode > token.txt 
+
+
+
+
inlets-pro tunnel token acmeco \
+  -n tunnels > token.txt
+
+
+
+
+

The contents will be saved in token.txt

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/uplink/monitoring-tunnels/index.html b/uplink/monitoring-tunnels/index.html new file mode 100644 index 0000000..980c61a --- /dev/null +++ b/uplink/monitoring-tunnels/index.html @@ -0,0 +1,1171 @@ + + + + + + + + + + + + + + + + + + + + + + + + Monitoring inlets uplink - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Monitoring inlets uplink

+

Inlets Uplink comes with an integrated Prometheus deployment that automatically collects metrics for each tunnel.

+
+

Note

+

Prometheus is deployed with Inlets Uplink by default. If you don't need monitoring you can disable it in the values.yaml of the Inlets Uplink Helm chart:

+
prometheus:
+  create: false
+
+
+

You can explore the inlets data using Prometheus's built-in expression browser. To access it, port forward the prometheus service and then navigate to http://localhost:9090/graph

+
kubectl port-forward \
+  -n inlets \
+  svc/prometheus 9090:9090
+
+

Metrics for the control-plane

+

The control-plane metrics can give you insights into the number of clients that are connected and the number of http requests made to the control-plane endpoint for each tunnel.

+ + + + + + + + + + + + + + + + + + + + + + + +
MetricTypeDescriptionLabels
controlplane_connected_gaugegaugegauge of inlets clients connected to the control planetunnel_name
controlplane_requests_totalcountertotal HTTP requests processed by connecting clients on the control planecode, tunnel_name
+

Metrics for the data-plane

+

The data-plane metrics can give you insights in the services that are exposed through your tunnel.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
MetricTypeDescriptionLabels
dataplane_connections_gaugegaugegauge of connections established over data planeport, type, tunnel_name
dataplane_connections_totalcountertotal count of connections established over data planeport, type, tunnel_name
dataplane_requests_totalcountertotal HTTP requests processedcode, host, method, tunnel_name
dataplane_request_duration_secondshistogramseconds spent serving HTTP requestscode, host, method, tunnel_name,
+

The connections metrics show the number of connections that are open at this point in time, and on which ports. The type label indicates whether the connection is for a http or tcp upstream.

+

The request metrics only include HTTP upstreams. These metrics can be used to get Rate, Error, Duration (RED) information for any API or website that is connected through the tunnel.

+

Setup Grafana for monitoring

+

Grafana can be used to visualize the data collected by the inlets uplink Prometheus instance. We provide a sample dashboard that you can use as a starting point.

+

Inlets uplink control plane dashboard +Inlets uplink data plane dashboard

+
+

Inlets uplink Grafana dashboard

+
+

The dashboard can help you get insights in:

+
    +
  • The number of clients connected to each tunnel.
  • +
  • Invocations of the control plane for each tunnel. This can help with detecting misbehaving clients.
  • +
  • Rate, Error, Duration (RED) information for HTTP tunnels.
  • +
  • The number of TCP connections opened for each tunnel.
  • +
+

Install Grafana

+

There are three options we recommend for getting access to Grafana.

+ +

You can install Grafana in one line with arkade:

+

arkade install grafana
+
+Grafana can also be installed with Helm. See: Grafana Helm Chart

+

Port forward grafana and retrieve the admin password to login:

+
# Expose the service via port-forward:
+kubectl --namespace grafana port-forward service/grafana 3000:80
+
+# Get the admin password:
+kubectl get secret --namespace grafana grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+
+

Access Grafana on http://127.0.0.1:3000 and login as admin.

+

Add a data source

+

Before you import the dashboard, you need to add the inlets-uplink prometheus instance as a data source:

+
    +
  1. Select the cog icon on the side menu to show the configuration options.
  2. +
  3. +

    Select Data sources.

    +

    This opens the data sources page, which displays a list of previously configured data sources for the Grafana instance.

    +
  4. +
  5. +

    Select Add data source and pick Prometheus from the list of supported data sources.

    +
  6. +
  7. +

    Configure the inlets Prometheus instance as a data source:

    +

    Prometheus data source configuration

    +
      +
    • In the name field set: inlets-prometheus
    • +
    • For the URL use: http://prometheus.inlets:9090
      +

      If you installed inlets uplink in a different namespace, this URL should be http://prometheus.<namespace>:9090

      +
      +
    • +
    • Set the scrape interval field to 30s
    • +
    +
  8. +
+

Import the dashboard

+

Import the inlets uplink dashboard in Grafana:

+
    +
  1. Click Dashboards > Import in the side menu.
  2. +
  3. Copy the dashboard JSON text
  4. +
  5. +

    Paste the dashboard JSON into the text area.

    +

    Import Prometheus dashboard

    +
  6. +
+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/uplink/overview/index.html b/uplink/overview/index.html new file mode 100644 index 0000000..64767da --- /dev/null +++ b/uplink/overview/index.html @@ -0,0 +1,1015 @@ + + + + + + + + + + + + + + + + + + + + + + + + Inlets Uplink overview - Inlets + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + +

Inlets Uplink overview

+
+

What's the difference between Inlets Pro and Inlets Uplink?

+

Inlets Pro is a stand-alone binary that can be used to expose local HTTPS and TCP services on a remote machine or network.

+

Inlets Uplink is a complete management solution for tunnels for SaaS companies and service providers. It's designed for scale, multi-tenancy and easy management.

+
+

Inlets Uplink is our answer to the question: "How do you access customer services from within your own product?"

+

You may consider building your own agent, using an AWS SQS queue, or a VPN.

+

The first two options involve considerable work both up front and in the long run. VPNs require firewall changes, specific network conditions, and lengthy paperwork.

+

Inlets Uplink uses a TLS encrypted websocket to make an outbound connection, and can also work over corporate HTTP proxies.

+

Here are some of the other differences between Inlets Pro and Inlets Uplink:

+
    +
  • The management solution is built-in, self-hosted and runs on your Kubernetes cluster
  • +
  • You can create a tunnel almost instantly via CLI, REST API or the "Tunnel" Custom Resource
  • +
  • The license is installed on the server, instead of each client needing it
  • +
  • TCP ports can be remapped to avoid conflicts
  • +
  • A single tunnel can expose HTTP and TCP at the same time
  • +
  • All tunnels can be monitored centrally for reliability and usage
  • +
  • By default all tunnels are private and only available for access by your own applications
  • +
+

With Uplink, you deploy tunnel servers for your customers to your Kubernetes cluster, and our operator takes care of everything else.

+

You can read more about why we created inlets uplink in the product announcement.

+

Table of Contents

+ +

You can reach out to us if you have questions: Contact the inlets team

+ + + + + + + + +
+
+ + +
+ +
+ + + +
+
+
+
+ + + + + + + + + \ No newline at end of file